Dataset schema:
    language         : stringclasses (2 values: java, python)
    func_code_string : stringlengths (63 to 466k)
java
public boolean hasAncestorOfType(Type firstType, Type... additionalTypes) {
    return hasAncestorOfType(EnumSet.of(firstType, additionalTypes));
}
java
public static TopicWrapper cloneTopic(final DataProviderFactory providerFactory, final ITopicNode specTopic,
        final ServerEntitiesWrapper serverEntities) {
    final TopicProvider topicProvider = providerFactory.getProvider(TopicProvider.class);
    final TopicSourceURLProvider topicSourceUrlProvider = providerFactory.getProvider(TopicSourceURLProvider.class);
    final TagProvider tagProvider = providerFactory.getProvider(TagProvider.class);
    final PropertyTagProvider propertyTagProvider = providerFactory.getProvider(PropertyTagProvider.class);

    // Get the existing topic from the database
    int clonedId = Integer.parseInt(specTopic.getId().substring(1));
    final TopicWrapper originalTopic = topicProvider.getTopic(clonedId, null);
    final TopicWrapper cloneTopic = topicProvider.newTopic();

    LOG.debug("Cloning existing topic " + originalTopic.getId());

    // Set the ID to null so a new ID will be created
    cloneTopic.setId(null);

    // Set-up the basic parameters
    cloneTopic.setTitle(originalTopic.getTitle());
    cloneTopic.setDescription(originalTopic.getDescription());
    cloneTopic.setXml(originalTopic.getXml());
    cloneTopic.setXmlFormat(originalTopic.getXmlFormat());
    cloneTopic.setLocale(originalTopic.getLocale());

    // Go through each collection and add the original topic's data
    if (originalTopic.getIncomingRelationships() != null && !originalTopic.getIncomingRelationships().isEmpty()) {
        final CollectionWrapper<TopicWrapper> cloneIncomingTopics = topicProvider.newTopicCollection();
        for (final TopicWrapper incomingRelationship : originalTopic.getIncomingRelationships().getItems()) {
            cloneIncomingTopics.addNewItem(incomingRelationship);
        }
        cloneTopic.setIncomingRelationships(cloneIncomingTopics);
    }
    if (originalTopic.getOutgoingRelationships() != null && !originalTopic.getOutgoingRelationships().isEmpty()) {
        final CollectionWrapper<TopicWrapper> cloneOutgoingTopics = topicProvider.newTopicCollection();
        for (final TopicWrapper outgoingRelationship : originalTopic.getOutgoingRelationships().getItems()) {
            cloneOutgoingTopics.addNewItem(outgoingRelationship);
        }
        cloneTopic.setOutgoingRelationships(cloneOutgoingTopics);
    }

    // SOURCE URLS
    if (originalTopic.getSourceURLs() != null && !originalTopic.getSourceURLs().isEmpty()) {
        final UpdateableCollectionWrapper<TopicSourceURLWrapper> cloneSourceUrls =
                topicSourceUrlProvider.newTopicSourceURLCollection(cloneTopic);
        for (final TopicSourceURLWrapper sourceUrl : originalTopic.getSourceURLs().getItems()) {
            final TopicSourceURLWrapper cloneSourceUrl = cloneTopicSourceUrl(topicSourceUrlProvider, sourceUrl, cloneTopic);
            cloneSourceUrls.addNewItem(cloneSourceUrl);
        }
        cloneTopic.setSourceURLs(cloneSourceUrls);
    }

    // TAGS
    if (originalTopic.getTags() != null && !originalTopic.getTags().isEmpty()) {
        final CollectionWrapper<TagWrapper> newTags = tagProvider.newTagCollection();
        final List<TagWrapper> tags = originalTopic.getTags().getItems();
        for (final TagWrapper tag : tags) {
            // Remove the old writer tag as it will get replaced
            if (!tag.containedInCategory(serverEntities.getWriterCategoryId())) {
                newTags.addNewItem(tag);
            }
        }
        // Set the tags if any tags exist
        if (!newTags.isEmpty()) {
            cloneTopic.setTags(newTags);
        }
    }

    // Copy all the existing property tags
    final UpdateableCollectionWrapper<PropertyTagInTopicWrapper> newProperties =
            propertyTagProvider.newPropertyTagInTopicCollection(cloneTopic);
    final List<PropertyTagInTopicWrapper> propertyItems = originalTopic.getProperties().getItems();
    for (final PropertyTagInTopicWrapper property : propertyItems) {
        final PropertyTagInTopicWrapper clonedProperty = cloneTopicProperty(cloneTopic, propertyTagProvider, property);
        // Ignore the CSP and Added By Property ID as we will add a new one later
        if (!(property.getId().equals(serverEntities.getCspIdPropertyTagId())
                || property.getId().equals(serverEntities.getAddedByPropertyTagId()))) {
            newProperties.addNewItem(clonedProperty);
        }
    }

    // Add the added by property tag
    final String assignedWriter = specTopic.getAssignedWriter(true);
    if (assignedWriter != null) {
        final PropertyTagWrapper addedByPropertyTag = propertyTagProvider.getPropertyTag(serverEntities.getAddedByPropertyTagId());
        final PropertyTagInTopicWrapper addedByProperty = propertyTagProvider.newPropertyTagInTopic(addedByPropertyTag, cloneTopic);
        addedByProperty.setValue(assignedWriter);
        newProperties.addNewItem(addedByProperty);
    }
    if (!newProperties.isEmpty()) {
        cloneTopic.setProperties(newProperties);
    }

    return cloneTopic;
}
python
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
                    coords_are_cartesian=False, tol=1e-5):
    """
    Generate a structure using a spacegroup. Note that only symmetrically
    distinct species and coords should be provided. All equivalent sites
    are generated from the spacegroup operations.

    Args:
        sg (str/int): The spacegroup. If a string, it will be interpreted
            as one of the notations supported by
            pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
            If an int, it will be interpreted as an international number.
        lattice (Lattice/3x3 array): The lattice, either as a
            :class:`pymatgen.core.lattice.Lattice` or simply as any 2D
            array. Each row should correspond to a lattice vector. E.g.,
            [[10,0,0], [20,10,0], [0,0,30]] specifies a lattice with
            lattice vectors [10,0,0], [20,10,0] and [0,0,30]. The lattice
            is checked for compatibility with the spacegroup, and a
            ValueError is raised if it is incompatible.
        species ([Specie]): Sequence of species on each site. Can take in
            flexible input, including:

            i.  A sequence of element / specie specified either as string
                symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                e.g., (3, 56, ...) or actual Element or Specie objects.
            ii. List of dict of elements/species and occupancies, e.g.,
                [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                disordered structures.
        coords (Nx3 array): list of fractional/cartesian coordinates of
            each species.
        coords_are_cartesian (bool): Set to True if you are providing
            coordinates in cartesian coordinates. Defaults to False.
        site_properties (dict): Properties associated with the sites as a
            dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
            have to be the same length as the atomic species and
            fractional_coords. Defaults to None for no properties.
        tol (float): A fractional tolerance to deal with numerical
            precision issues in determining if orbits are the same.
    """
    from pymatgen.symmetry.groups import SpaceGroup

    try:
        i = int(sg)
        sgp = SpaceGroup.from_int_number(i)
    except ValueError:
        sgp = SpaceGroup(sg)

    if isinstance(lattice, Lattice):
        latt = lattice
    else:
        latt = Lattice(lattice)

    if not sgp.is_compatible(latt):
        raise ValueError(
            "Supplied lattice with parameters %s is incompatible with "
            "supplied spacegroup %s!" % (latt.lengths_and_angles, sgp.symbol)
        )

    if len(species) != len(coords):
        raise ValueError(
            "Supplied species and coords lengths (%d vs %d) are "
            "different!" % (len(species), len(coords))
        )

    # fix: use the Lattice instance `latt` (the raw `lattice` argument may be
    # a plain array); also, np.float is deprecated in favor of plain float
    frac_coords = np.array(coords, dtype=float) if not coords_are_cartesian \
        else latt.get_fractional_coords(coords)

    props = {} if site_properties is None else site_properties

    all_sp = []
    all_coords = []
    all_site_properties = collections.defaultdict(list)
    for i, (sp, c) in enumerate(zip(species, frac_coords)):
        cc = sgp.get_orbit(c, tol=tol)
        all_sp.extend([sp] * len(cc))
        all_coords.extend(cc)
        for k, v in props.items():
            all_site_properties[k].extend([v[i]] * len(cc))

    return cls(latt, all_sp, all_coords, site_properties=all_site_properties)
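A minimal usage sketch for the classmethod above, assuming it is exposed as Structure.from_spacegroup as in pymatgen (the import path may differ across pymatgen versions, and the NaCl lattice constant is illustrative):

from pymatgen.core import Lattice, Structure

# Rock-salt NaCl in Fm-3m (No. 225): one symmetrically distinct Na and Cl
# site each; the spacegroup operations generate the remaining sites.
nacl = Structure.from_spacegroup(
    "Fm-3m",
    Lattice.cubic(5.692),
    ["Na", "Cl"],
    [[0, 0, 0], [0.5, 0.5, 0.5]],
)
print(len(nacl))  # 8 sites in the conventional cell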
python
def client_getname(self, encoding=_NOTSET):
    """Get the current connection name."""
    return self.execute(b'CLIENT', b'GETNAME', encoding=encoding)
java
public void parseAndExecuteCommand() {
    CommandLineParser parser = new DefaultParser();
    try {
        CommandLine parsedOpts = parser.parse(this.options, this.args, true);
        GlobalOptions globalOptions = createGlobalOptions(parsedOpts);

        // Fetch the command and fail if there is ambiguity
        String[] remainingArgs = parsedOpts.getArgs();
        if (remainingArgs.length == 0) {
            printHelpAndExit("Command not specified!");
        }

        String commandName = remainingArgs[0].toLowerCase();
        remainingArgs = remainingArgs.length > 1
                ? Arrays.copyOfRange(remainingArgs, 1, remainingArgs.length)
                : new String[]{};

        Command command = commandList.get(commandName);
        if (command == null) {
            System.out.println("Command " + commandName + " not known.");
            printHelpAndExit();
        } else {
            command.execute(globalOptions, remainingArgs);
        }
    } catch (ParseException e) {
        printHelpAndExit("Ran into an error parsing args.");
    }
}
python
def get_source_var_declaration(self, var):
    """
    Return the source mapping where the variable is declared

    Args:
        var (str): variable name
    Returns:
        (dict): sourceMapping
    """
    return next(x.source_mapping for x in self.variables if x.name == var)
java
static ProxyConnection getProxyConnection(final PoolEntry poolEntry, final Connection connection,
        final FastList<Statement> openStatements, final ProxyLeakTask leakTask, final long now,
        final boolean isReadOnly, final boolean isAutoCommit) {
    // Body is replaced (injected) by JavassistProxyFactory
    throw new IllegalStateException("You need to run the CLI build and you need target/classes in your classpath to run.");
}
python
def amortize(rate, nper, pv, freq="M"):
    """Construct an amortization schedule for a fixed-rate loan.

    Rate -> annualized input

    Example
    -------
    # a 6.75% $200,000 loan, 30-year tenor, payments due monthly
    # view the 5 final months

    print(amortize(rate=.0675, nper=30, pv=200000).round(2).tail())
          beg_bal     prin  interest  end_bal
    356   6377.95 -1261.32    -35.88  5116.63
    357   5116.63 -1268.42    -28.78  3848.22
    358   3848.22 -1275.55    -21.65  2572.67
    359   2572.67 -1282.72    -14.47  1289.94
    360   1289.94 -1289.94     -7.26    -0.00
    """
    freq = utils.get_anlz_factor(freq)
    rate = rate / freq
    nper = nper * freq
    periods = np.arange(1, nper + 1, dtype=int)

    # note: np.ppmt/np.ipmt/np.pmt were removed from NumPy 1.20+; they now
    # live in the numpy_financial package
    principal = np.ppmt(rate, periods, nper, pv)
    interest = np.ipmt(rate, periods, nper, pv)
    pmt = np.pmt(rate, nper, pv)

    def balance(pv, rate, nper, pmt):
        dfac = (1 + rate) ** nper
        return pv * dfac - pmt * (dfac - 1) / rate

    res = pd.DataFrame(
        {
            "beg_bal": balance(pv, rate, periods - 1, -pmt),
            "prin": principal,
            "interest": interest,
            "end_bal": balance(pv, rate, periods, -pmt),
        },
        index=periods,
    )[["beg_bal", "prin", "interest", "end_bal"]]  # fix: select columns with a list, not a tuple
    return res
python
def fire_event(self, evt_name, *args, **kwargs):
    """Fire an event.

    :param evt_name: name of the event
    :param args: positional arguments passed to the event listeners
    :param kwargs: keyword arguments passed to the event listeners
    """
    listeners = self.__get_listeners(evt_name)
    evt = self.generate_event(evt_name)
    for listener in listeners:
        listener(evt, *args, **kwargs)
java
public static <T extends Annotation> List<Method> introspectAnnotationMultiple(final Class<?> klass,
        final Class<T> annotationType) {
    final List<Method> result = new ArrayList<Method>();
    for (final Method method : klass.getMethods()) {
        if (method.getAnnotation(annotationType) != null)
            result.add(method);
    }
    return result;
}
java
public static <T> T InThread(T anInterface) {
    Actor sender = Actor.sender.get();
    if (sender != null)
        return sender.getScheduler().inThread(sender.getActor(), anInterface);
    else
        return anInterface;
}
python
def _GenClientLibCallback(args, client_func=_GenClientLib):
    """Generate a client library to file.

    Args:
        args: An argparse.Namespace object to extract parameters from
        client_func: A function that generates client libraries and stores them
            to files, accepting a path to a discovery doc, a client library
            language, an output directory, and a build system for the client
            library language.
    """
    client_path = client_func(args.discovery_doc[0], args.language, args.output,
                              args.build_system)
    # fix: use a print call (valid on both Python 2 and 3) instead of the
    # Python 2-only print statement
    print('API client library written to %s' % client_path)
java
protected Object doGetTransaction() {
    Object dataSourceTransactionObject = super.doGetTransaction();
    Object contextSourceTransactionObject = ldapManagerDelegate.doGetTransaction();
    return new ContextSourceAndHibernateTransactionObject(contextSourceTransactionObject,
            dataSourceTransactionObject);
}
python
def set_estimate(self, bitrate: int, now_ms: int):
    """
    For testing purposes.
    """
    self.current_bitrate = self._clamp_bitrate(bitrate, bitrate)
    self.current_bitrate_initialized = True
    self.last_change_ms = now_ms
python
def classify_wherex(scope_, fromx, wherex):
    "helper for wherex_to_rowlist. returns [SingleTableCond,...], [CartesianCond,...]"
    exprs = []
    for exp in fromx:
        if isinstance(exp, sqparse2.JoinX):
            # todo: probably just add exp.on_stmt as a CartesianCond. don't write this until tests are ready.
            # todo: do join-on clauses get special scoping w.r.t. column names? check spec.
            raise NotImplementedError('join')
        elif isinstance(exp, basestring):  # note: Python 2 (basestring)
            exprs.append(exp)

    def test_and(exp):
        return isinstance(exp, sqparse2.BinX) and exp.op.op == 'and'

    def binx_splitter(exp):
        return [exp.left, exp.right]

    # wherex is None if not given
    exprs += treepath.flatten_tree(test_and, binx_splitter, wherex) if wherex else []

    single_conds = []
    cartesian_conds = []
    for exp in exprs:
        if isinstance(exp, basestring):
            # note: bare table names need their own case because they don't work with resolve_column
            single_conds.append(SingleTableCond(exp, exp))
        else:
            tables = zip(*map(scope_.resolve_column, names_from_exp(exp)))[0]  # Python 2: zip returns a list
            if len(tables) > 1:
                cartesian_conds.append(CartesianCond(exp))
            else:
                single_conds.append(SingleTableCond(tables[0], exp))
    return single_conds, cartesian_conds
java
public void setBackupSelectionsList(java.util.Collection<BackupSelectionsListMember> backupSelectionsList) {
    if (backupSelectionsList == null) {
        this.backupSelectionsList = null;
        return;
    }

    this.backupSelectionsList = new java.util.ArrayList<BackupSelectionsListMember>(backupSelectionsList);
}
java
public static IndexPlanner both(final IndexPlanner planner1, final IndexPlanner planner2) {
    if (planner1 == null) return planner2;
    if (planner2 == null) return planner1;
    return new IndexPlanner() {
        @Override
        public void applyIndexes(QueryContext context, IndexCostCalculator calculator) {
            RuntimeException error = null;
            try {
                planner1.applyIndexes(context, calculator);
            } catch (RuntimeException e) {
                error = e;
            } finally {
                try {
                    planner2.applyIndexes(context, calculator);
                } catch (RuntimeException e) {
                    if (error == null) error = e;
                } finally {
                    if (error != null) throw error;
                }
            }
        }
    };
}
java
private boolean isCertExpired(X509Certificate cert) {
    if (cert != null && cert.getNotAfter().before(new Date())) {
        return true;
    }
    return false;
}
python
def scope_lookup(self, node, name, offset=0):
    """Lookup where the given name is assigned.

    :param node: The node to look for assignments up to.
        Any assignments after the given node are ignored.
    :type node: NodeNG

    :param name: The name to find assignments for.
    :type name: str

    :param offset: The line offset to filter statements up to.
    :type offset: int

    :returns: This scope node and the list of assignments associated to the
        given name according to the scope where it has been found (locals,
        globals or builtin).
    :rtype: tuple(str, list(NodeNG))
    """
    # pylint: disable=no-member; github.com/pycqa/astroid/issues/291
    # args is in fact redefined later on by postinit. Can't be changed
    # to None due to a strong interaction between Lambda and FunctionDef.
    if node in self.args.defaults or node in self.args.kw_defaults:
        frame = self.parent.frame()
        # line offset to avoid that def func(f=func) resolve the default
        # value to the defined function
        offset = -1
    else:
        # check this is not used in function decorators
        frame = self
    return frame._scope_lookup(node, name, offset)
python
def _get_shortcut_string(shortcut):
    """Return a string representation of a shortcut."""
    if shortcut is None:
        return ''
    if isinstance(shortcut, (tuple, list)):
        return ', '.join([_get_shortcut_string(s) for s in shortcut])
    if isinstance(shortcut, string_types):
        if hasattr(QKeySequence, shortcut):
            shortcut = QKeySequence(getattr(QKeySequence, shortcut))
        else:
            return shortcut.lower()
    assert isinstance(shortcut, QKeySequence)
    s = shortcut.toString() or ''
    return str(s).lower()
python
def _get_super_entities_by_ctype(model_objs_by_ctype, model_ids_to_sync, sync_all):
    """
    Given model objects organized by content type and a dictionary of all model
    IDs that need to be synced, organize all super entity relationships that
    need to be synced.

    Ensure that the model_ids_to_sync dict is updated with any new super
    entities that need to be part of the overall entity sync
    """
    super_entities_by_ctype = defaultdict(lambda: defaultdict(list))  # pragma: no cover
    for ctype, model_objs_for_ctype in model_objs_by_ctype.items():
        entity_config = entity_registry.entity_registry.get(ctype.model_class())
        super_entities = entity_config.get_super_entities(model_objs_for_ctype, sync_all)
        super_entities_by_ctype[ctype] = {
            ContentType.objects.get_for_model(model_class, for_concrete_model=False): relationships
            for model_class, relationships in super_entities.items()
        }

        # Continue adding to the set of entities that need to be synced
        for super_entity_ctype, relationships in super_entities_by_ctype[ctype].items():
            for sub_entity_id, super_entity_id in relationships:
                model_ids_to_sync[ctype].add(sub_entity_id)
                model_ids_to_sync[super_entity_ctype].add(super_entity_id)

    return super_entities_by_ctype
python
def users(self, team, params={}, **options):
    """Returns the compact records for all users that are members of the team.

    Parameters
    ----------
    team : {Id} Globally unique identifier for the team.
    [params] : {Object} Parameters for the request
    """
    path = "/teams/%s/users" % (team)
    return self.client.get_collection(path, params, **options)
python
def _make_hlog_numeric(b, r, d):
    """
    Return a function that numerically computes the hlog transformation for
    given parameter values.
    """
    hlog_obj = lambda y, x, b, r, d: hlog_inv(y, b, r, d) - x
    find_inv = vectorize(lambda x: brentq(hlog_obj, -2 * r, 2 * r,
                                          args=(x, b, r, d)))
    return find_inv
python
def _data_execute(self, data, program, executor):
    """Execute the Data object.

    The activities carried out here include target directory preparation,
    executor copying, setting serialization and actual execution of the
    object.

    :param data: The :class:`~resolwe.flow.models.Data` object to execute.
    :param program: The process text the manager got out of execution
        engine evaluation.
    :param executor: The executor to use for this object.
    """
    if not program:
        return

    logger.debug(__("Manager preparing Data with id {} for processing.", data.id))

    # Prepare the executor's environment.
    try:
        executor_env_vars = self.get_executor().get_environment_variables()
        program = self._include_environment_variables(program, executor_env_vars)
        data_dir = self._prepare_data_dir(data)
        executor_module, runtime_dir = self._prepare_executor(data, executor)

        # Execute execution engine specific runtime preparation.
        execution_engine = data.process.run.get('language', None)
        volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data)

        self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps)
        self._prepare_script(runtime_dir, program)

        argv = [
            '/bin/bash',
            '-c',
            self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python')
            + ' -m executors ' + executor_module
        ]
    except PermissionDenied as error:
        data.status = Data.STATUS_ERROR
        data.process_error.append("Permission denied for process: {}".format(error))
        data.save()
        return
    except OSError as err:
        logger.error(__(
            "OSError occurred while preparing data {} (will skip): {}",
            data.id, err
        ))
        return

    # Hand off to the run() method for execution.
    logger.info(__("Running {}", runtime_dir))
    self.run(data, runtime_dir, argv)
java
protected void buildModulePackagesIndexFile(String title, boolean includeScript, ModuleElement mdle)
        throws DocFileIOException {
    String windowOverview = configuration.getText(title);
    Content body = getBody(includeScript, getWindowTitle(windowOverview));
    addNavigationBarHeader(body);
    addOverviewHeader(body);
    addModulePackagesIndex(body, mdle);
    addOverview(body);
    addNavigationBarFooter(body);
    printHtmlDocument(configuration.metakeywords.getOverviewMetaKeywords(title, configuration.doctitle),
            includeScript, body);
}
java
private RootElementInfo createRootElementInfo(org.jsoup.nodes.Element root, String subclass) {
    List<Attribute> attributes = root.attributes().asList().stream()
            .filter(attribute -> !attribute.getKey().equals("data-element"))
            .collect(Collectors.toList());

    ExpressionParser expressionParser = new ExpressionParser();
    String html = root.children().isEmpty() ? null : JAVA_STRING_ESCAPER.escape(root.html());
    Map<String, String> expressions = expressionParser.parse(html);
    expressions.putAll(expressionParser.parse(root.outerHtml()));

    return new RootElementInfo(root.tagName(), subclass.toLowerCase() + "_root_element",
            attributes, html, expressions);
}
java
public void initWorklist(boolean addAll) {
    if (addAll) {
        Block last = null;
        for (Block b = blocklistHead; b != null; b = b.nextBlock) {
            b.nextInWorklist = b.nextBlock;
            last = b;
        }
        worklistHead = blocklistHead;
        worklistTail = last;
    } else {
        Block largest = blocklistHead;
        if (largest == null) {
            return;
        }
        int largestSize = largest.size();
        for (Block b = largest.nextBlock; b != null; b = b.nextBlock) {
            int size = b.size();
            if (size > largestSize) {
                addToWorklist(largest);
                largest = b;
                largestSize = size;
            } else {
                addToWorklist(b);
            }
        }
    }
}
python
def webui_data_stores_user_profile_saved_query_value(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
    data_stores = ET.SubElement(webui, "data-stores")
    user_profile = ET.SubElement(data_stores, "user-profile")
    username_key = ET.SubElement(user_profile, "username")
    username_key.text = kwargs.pop('username')
    saved_query = ET.SubElement(user_profile, "saved-query")
    key_key = ET.SubElement(saved_query, "key")
    key_key.text = kwargs.pop('key')
    value = ET.SubElement(saved_query, "value")
    value.text = kwargs.pop('value')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def network_lpf_contingency(network, snapshots=None, branch_outages=None):
    """
    Computes linear power flow for a selection of branch outages.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run
        the power flow, defaults to network.snapshots
        NB: currently this only works for a single snapshot
    branch_outages : list-like
        A list of passive branches which are to be tested for outages.
        If None, it's taken as all network.passive_branches_i()

    Returns
    -------
    p0 : pandas.DataFrame
        num_passive_branch x num_branch_outages DataFrame of new power flows
    """
    if snapshots is None:
        snapshots = network.snapshots

    # note: on Python 3.10+ this needs collections.abc.Iterable
    if isinstance(snapshots, collections.Iterable):
        logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.")
        snapshot = snapshots[0]
    else:
        snapshot = snapshots

    network.lpf(snapshot)

    # Store the flows from the base case
    passive_branches = network.passive_branches()

    if branch_outages is None:
        branch_outages = passive_branches.index

    p0_base = pd.Series(index=passive_branches.index)
    for c in network.passive_branch_components:
        pnl = network.pnl(c)
        p0_base[c] = pnl.p0.loc[snapshot]

    for sn in network.sub_networks.obj:
        sn._branches = sn.branches()
        sn.calculate_BODF()

    p0 = pd.DataFrame(index=passive_branches.index)
    p0["base"] = p0_base

    for branch in branch_outages:
        if type(branch) is not tuple:
            logger.warning("No type given for {}, assuming it is a line".format(branch))
            branch = ("Line", branch)

        sn = network.sub_networks.obj[passive_branches.sub_network[branch]]

        branch_i = sn._branches.index.get_loc(branch)
        p0_new = p0_base + pd.Series(sn.BODF[:, branch_i] * p0_base[branch], sn._branches.index)

        p0[branch] = p0_new

    return p0
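A hedged usage sketch, assuming this function is bound as Network.lpf_contingency on a PyPSA network (the file and line names below are hypothetical):

import pypsa

network = pypsa.Network("grid.nc")  # hypothetical network file
p0 = network.lpf_contingency(
    snapshots=network.snapshots[:1],
    branch_outages=[("Line", "1"), ("Line", "2")],  # hypothetical lines
)
print(p0["base"])  # base-case flows, plus one column per tested outage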
python
def remoteIndexer2to3(oldIndexer):
    """
    The documentType keyword was added to all indexable items. Indexes need
    to be regenerated for this to take effect. Also, PyLucene no longer
    stores the text of messages it indexes, so deleting and re-creating the
    indexes will make them much smaller.
    """
    newIndexer = oldIndexer.upgradeVersion(
        oldIndexer.typeName, 2, 3,
        indexCount=oldIndexer.indexCount,
        installedOn=oldIndexer.installedOn,
        indexDirectory=oldIndexer.indexDirectory)

    # the 3->4 upgrader for PyLuceneIndexer calls reset(), so don't do it
    # here. also, it won't work because it's a DummyItem
    if oldIndexer.typeName != PyLuceneIndexer.typeName:
        newIndexer.reset()
    return newIndexer
java
public CapsuleLauncher setProperties(Properties properties) {
    this.properties = properties != null ? properties : new Properties(System.getProperties());
    set(null, getCapsuleField("PROPERTIES"), this.properties);
    return this;
}
java
protected int isBetter2D(Box a, Box b) {
    int compare = Long.compare(a.getVolume(), b.getVolume());
    if (compare != 0) {
        return compare;
    }
    return Long.compare(b.getFootprint(), a.getFootprint()); // i.e. smaller is better
}
java
public <Value> PushMeasure<Value> wrapMeasure(final PushMeasure<Value> wrapped) {
    if (wrapped == null) {
        throw new IllegalArgumentException("No measure provided");
    } else {
        @SuppressWarnings("unchecked")
        PushMeasure<Value> wrapper = (PushMeasure<Value>) measureCache.get(wrapped);
        if (wrapper == null) {
            wrapper = new PushMeasure<Value>() {

                @Override
                public String getName() {
                    return wrapped.getName();
                }

                @Override
                public String getDescription() {
                    return wrapped.getDescription();
                }

                @Override
                public void register(MeasureListener<Value> listener) {
                    wrapped.register(wrapListener(listener));
                }

                @Override
                public void unregister(MeasureListener<Value> listener) {
                    wrapped.unregister(wrapListener(listener));
                }
            };
            measureCache.put(wrapped, wrapper);
        } else {
            // reuse existing one
        }
        return wrapper;
    }
}
python
def loop_forever(self, timeout=1.0, max_packets=1, retry_first_connection=False):
    """This function call loop() for you in an infinite blocking loop. It
    is useful for the case where you only want to run the MQTT client loop
    in your program.

    loop_forever() will handle reconnecting for you. If you call
    disconnect() in a callback it will return.

    timeout: The time in seconds to wait for incoming/outgoing network
        traffic before timing out and returning.
    max_packets: Not currently used.
    retry_first_connection: Should the first connection attempt be retried
        on failure.

    Raises socket.error on first connection failures unless
    retry_first_connection=True
    """

    run = True

    while run:
        if self._state == mqtt_cs_connect_async:
            try:
                self.reconnect()
            except socket.error:
                if not retry_first_connection:
                    raise
                self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
                self._backoffCore.backOff()  # time.sleep(1)
        else:
            break

    while run:
        rc = MQTT_ERR_SUCCESS
        while rc == MQTT_ERR_SUCCESS:
            rc = self.loop(timeout, max_packets)
            # We don't need to worry about locking here, because we've
            # either called loop_forever() when in single threaded mode, or
            # in multi threaded mode when loop_stop() has been called and
            # so no other threads can access _current_out_packet,
            # _out_packet or _messages.
            if (self._thread_terminate is True
                    and self._current_out_packet is None
                    and len(self._out_packet) == 0
                    and len(self._out_messages) == 0):
                rc = 1
                run = False

        self._state_mutex.acquire()
        if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
            run = False
            self._state_mutex.release()
        else:
            self._state_mutex.release()
            self._backoffCore.backOff()  # time.sleep(1)

            self._state_mutex.acquire()
            if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
                run = False
                self._state_mutex.release()
            else:
                self._state_mutex.release()
                try:
                    self.reconnect()
                except socket.error as err:
                    pass

    return rc
java
public void implicitDefinitions(SClassDefinition d, Environment publicClasses) {
    if (d instanceof ASystemClassDefinition) {
        af.createPDefinitionAssistant().implicitDefinitions(d, publicClasses);
        // ASystemClassDefinitionAssistantTC.implicitDefinitions((ASystemClassDefinition) d, );
    } else {
        implicitDefinitionsBase(d, publicClasses);
    }
}
java
public CacheSubnetGroup withSubnets(Subnet... subnets) {
    if (this.subnets == null) {
        setSubnets(new com.amazonaws.internal.SdkInternalList<Subnet>(subnets.length));
    }
    for (Subnet ele : subnets) {
        this.subnets.add(ele);
    }
    return this;
}
java
@Override
public CPDefinitionOptionValueRel[] findByUuid_PrevAndNext(
        long CPDefinitionOptionValueRelId, String uuid,
        OrderByComparator<CPDefinitionOptionValueRel> orderByComparator)
        throws NoSuchCPDefinitionOptionValueRelException {

    CPDefinitionOptionValueRel cpDefinitionOptionValueRel = findByPrimaryKey(CPDefinitionOptionValueRelId);

    Session session = null;

    try {
        session = openSession();

        CPDefinitionOptionValueRel[] array = new CPDefinitionOptionValueRelImpl[3];

        array[0] = getByUuid_PrevAndNext(session, cpDefinitionOptionValueRel, uuid, orderByComparator, true);
        array[1] = cpDefinitionOptionValueRel;
        array[2] = getByUuid_PrevAndNext(session, cpDefinitionOptionValueRel, uuid, orderByComparator, false);

        return array;
    } catch (Exception e) {
        throw processException(e);
    } finally {
        closeSession(session);
    }
}
python
def push_note(device=None, title=None, body=None):
    '''
    Pushing a text note.

    :param device: Pushbullet target device
    :param title: Note title
    :param body: Note body

    :return: Boolean if message was sent successfully.

    CLI Example:

    .. code-block:: bash

        salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body."
    '''
    spb = _SaltPushbullet(device)
    res = spb.push_note(title, body)

    return res
java
public static Point from(double lon, double lat, double alt) {
    return new Point(new SinglePosition(lon, lat, alt));
}
java
public int span(CharSequence s, int start, SpanCondition spanCondition) {
    if (spanCondition == SpanCondition.NOT_CONTAINED) {
        return spanNot(s, start, null);
    }
    int spanLimit = spanSet.span(s, start, SpanCondition.CONTAINED);
    if (spanLimit == s.length()) {
        return spanLimit;
    }
    return spanWithStrings(s, start, spanLimit, spanCondition);
}
java
public PathBuilder cubicTo(Point2d cp1, Point2d cp2, Point2d ep) {
    add(new CubicTo(cp1, cp2, ep));
    return this;
}
python
def solve(
        solver, mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
        globals_dir=None, allow_multiple_assignments=False, output_mode='item',
        timeout=None, two_pass=None, pre_passes=None, output_objective=False,
        non_unique=False, all_solutions=False, num_solutions=None,
        free_search=False, parallel=None, seed=None, **kwargs
):
    """Flatten and solve a MiniZinc program.

    Parameters
    ----------
    solver : Solver
        The ``Solver`` instance to use.
    mzn : str
        The path to the minizinc model file.
    *dzn_files
        A list of paths to dzn files to attach to the minizinc execution,
        provided as positional arguments; by default no data file is
        attached.
    data : list of str
        Additional data as a list of strings containing dzn variables
        assignments.
    include : str or list
        One or more additional paths to search for included ``.mzn`` files.
    stdlib_dir : str
        The path to the MiniZinc standard library. Provide it only if it is
        different from the default one.
    globals_dir : str
        The path to the MiniZinc globals directory. Provide it only if it is
        different from the default one.
    allow_multiple_assignments : bool
        Whether to allow multiple assignments of variables. Sometimes is
        convenient to simply let the data file override the value already
        assigned in the minizinc file. Default is ``False``.
    output_mode : {'item', 'dzn', 'json'}
        The desired output format. The default is ``'item'`` which outputs a
        stream of strings as returned by the ``solns2out`` tool, formatted
        according to the output statement of the MiniZinc model. The
        ``'dzn'`` and ``'json'`` formats output a stream of strings
        formatted in dzn and json respectively.
    timeout : int
        The timeout in seconds for the flattening + solving process.
    two_pass : bool or int
        If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
        option for the ``minizinc`` executable. If ``two_pass`` is an
        integer ``<n>``, instead, it is equivalent to the ``-O<n>`` option
        for the ``minizinc`` executable.
    pre_passes : int
        Equivalent to the ``--pre-passes`` option for the ``minizinc``
        executable.
    output_objective : bool
        Equivalent to the ``--output-objective`` option for the ``minizinc``
        executable. Adds a field ``_objective`` to all solutions.
    non_unique : bool
        Equivalent to the ``--non-unique`` option for the ``minizinc``
        executable.
    all_solutions : bool
        Whether all the solutions must be returned. This option might not
        work if the solver does not support it. Default is ``False``.
    num_solutions : int
        The upper bound on the number of solutions to be returned. This
        option might not work if the solver does not support it. Default is
        ``1``.
    free_search : bool
        If True, instruct the solver to perform free search.
    parallel : int
        The number of parallel threads the solver can utilize for the
        solving.
    seed : int
        The random number generator seed to pass to the solver.
    **kwargs
        Additional arguments to pass to the solver, provided as additional
        keyword arguments to this function. Check the solver documentation
        for the available arguments.

    Returns
    -------
        Object wrapping the executed process.
    """
    args = _solve_args(
        solver, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes,
        output_objective=output_objective, non_unique=non_unique,
        all_solutions=all_solutions, num_solutions=num_solutions,
        free_search=free_search, parallel=parallel, seed=seed, **kwargs
    )
    args += _flattening_args(
        mzn, *dzn_files, data=data, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir, output_mode=output_mode, include=include,
        allow_multiple_assignments=allow_multiple_assignments
    )

    input = mzn if args[-1] == '-' else None

    t0 = _time()
    try:
        proc = _run_minizinc_proc(*args, input=input)
    except RuntimeError as err:
        raise MiniZincError(mzn, args) from err  # fix: 'mzn_file' was undefined
    solve_time = _time() - t0
    logger.info('Solving completed in {:>3.2f} sec'.format(solve_time))

    return proc
java
private void propagateToSharedSketch() {
    //noinspection StatementWithEmptyBody
    while (localPropagationInProgress.get()) {
    } //busy wait until previous propagation completed

    final CompactSketch compactSketch = compact(propagateOrderedCompact, null);
    localPropagationInProgress.set(true);
    shared.propagate(localPropagationInProgress, compactSketch,
            ConcurrentSharedThetaSketch.NOT_SINGLE_HASH);
    super.reset();
    thetaLong_ = shared.getVolatileTheta();
}
python
def register(self, model, include_fields=[], exclude_fields=[]):
    """
    Register a model with actionslog. Actionslog will then track mutations on
    this model's instances.

    :param model: The model to register.
    :type model: Model
    :param include_fields: The fields to include. Implicitly excludes all
        other fields.
    :type include_fields: list
    :param exclude_fields: The fields to exclude. Overrides the fields to
        include.
    :type exclude_fields: list
    """
    if issubclass(model, Model):
        self._registry[model] = {
            'include_fields': include_fields,
            'exclude_fields': exclude_fields,
        }
        self._connect_signals(model)
    else:
        raise TypeError("Supplied model is not a valid model.")
python
def _listen_for_dweets_from_response(response):
    """Yields dweets as received from dweet.io's streaming API
    """
    streambuffer = ''
    for byte in response.iter_content():
        if byte:
            streambuffer += byte.decode('ascii')
            try:
                dweet = json.loads(streambuffer.splitlines()[1])
            except (IndexError, ValueError):
                continue
            if isstr(dweet):
                yield json.loads(dweet)
            streambuffer = ''
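A hedged usage sketch: the generator expects a streaming requests.Response from dweet.io's listen endpoint (the thing name below is illustrative):

import requests

resp = requests.get('https://dweet.io/listen/for/dweets/from/my-thing',
                    stream=True, timeout=900)
for dweet in _listen_for_dweets_from_response(resp):
    print(dweet)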
java
public static CommerceOrderItem[] findByC_I_PrevAndNext(
        long commerceOrderItemId, long commerceOrderId, long CPInstanceId,
        OrderByComparator<CommerceOrderItem> orderByComparator)
        throws com.liferay.commerce.exception.NoSuchOrderItemException {
    return getPersistence()
            .findByC_I_PrevAndNext(commerceOrderItemId, commerceOrderId, CPInstanceId, orderByComparator);
}
python
def set_pending_symbol(self, pending_symbol=None):
    """Sets the context's ``pending_symbol`` with the given unicode sequence
    and resets the context's ``value``.

    If the input is None, an empty :class:`CodePointArray` is used.
    """
    if pending_symbol is None:
        pending_symbol = CodePointArray()
    self.value = bytearray()  # reset value
    self.pending_symbol = pending_symbol
    self.line_comment = False
    return self
python
def print_upper_triangular_matrix_as_complete(matrix):
    """Prints a CVRP data dict upper triangular matrix as a normal matrix

    Doesn't print headers.

    Arguments
    ---------
    matrix : dict
        Upper triangular matrix keyed by node, i.e. matrix[a][b] with a <= b.
    """
    for i in sorted(matrix.keys()):
        for j in sorted(matrix.keys()):
            a, b = i, j
            if a > b:
                a, b = b, a
            print(matrix[a][b], end=' ')
        print()
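A minimal self-contained sketch of the function above with a hand-built 3-node upper-triangular distance dict (the values are made up):

distances = {
    0: {0: 0, 1: 4, 2: 7},
    1: {1: 0, 2: 3},
    2: {2: 0},
}
print_upper_triangular_matrix_as_complete(distances)
# 0 4 7
# 4 0 3
# 7 3 0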
java
public static CPDefinitionGroupedEntry fetchByUUID_G(String uuid, long groupId, boolean retrieveFromCache) {
    return getPersistence().fetchByUUID_G(uuid, groupId, retrieveFromCache);
}
python
def _set_queue_size(self, v, load=False):
    """
    Setter method for queue_size, mapped from YANG variable
    /interface/ethernet/qos/rx_queue/multicast/queue_size (list)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_queue_size is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_queue_size() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=YANGListType(
                "traffic_class", queue_size.queue_size, yang_name="queue-size",
                rest_name="queue-size", parent=self, is_container='list',
                user_ordered=False, path_helper=self._path_helper,
                yang_keys='traffic-class',
                extensions={u'tailf-common': {
                    u'info': u'Configure multicast queue size',
                    u'cli-no-key-completion': None, u'cli-suppress-mode': None,
                    u'cli-suppress-list-no': None, u'cli-compact-syntax': None,
                    u'cli-drop-node-name': None, u'cli-sequence-commands': None,
                    u'cli-incomplete-command': None,
                    u'callpoint': u'intrfc_rx_queue_multicast_qsize'}}),
            is_container='list', yang_name="queue-size", rest_name="queue-size",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Configure multicast queue size',
                u'cli-no-key-completion': None, u'cli-suppress-mode': None,
                u'cli-suppress-list-no': None, u'cli-compact-syntax': None,
                u'cli-drop-node-name': None, u'cli-sequence-commands': None,
                u'cli-incomplete-command': None,
                u'callpoint': u'intrfc_rx_queue_multicast_qsize'}},
            namespace='urn:brocade.com:mgmt:brocade-qos-mls',
            defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """queue_size must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("traffic_class",queue_size.queue_size, yang_name="queue-size", rest_name="queue-size", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='traffic-class', extensions={u'tailf-common': {u'info': u'Configure multicast queue size', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'intrfc_rx_queue_multicast_qsize'}}), is_container='list', yang_name="queue-size", rest_name="queue-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure multicast queue size', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'intrfc_rx_queue_multicast_qsize'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })

    self.__queue_size = t
    if hasattr(self, '_set'):
        self._set()
java
public void marshall(PutConfigurationSetDeliveryOptionsRequest putConfigurationSetDeliveryOptionsRequest,
        ProtocolMarshaller protocolMarshaller) {

    if (putConfigurationSetDeliveryOptionsRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(putConfigurationSetDeliveryOptionsRequest.getConfigurationSetName(), CONFIGURATIONSETNAME_BINDING);
        protocolMarshaller.marshall(putConfigurationSetDeliveryOptionsRequest.getSendingPoolName(), SENDINGPOOLNAME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public void updateProfile(ProfileFieldValues values) {
    getResourceFactory().getApiResource("/user/profile/")
            .entity(values, MediaType.APPLICATION_JSON_TYPE).put();
}
java
public SearchResponse searchWithTargetCount(SearchRequestBuilder searchRequestBuilder,
        AggregationBuilder[] aggregationBuilders) {
    return searchWithTargetCount(getSearchRequestBuilder(
            getSearchRequestBuilderWithCount(searchRequestBuilder), aggregationBuilders));
}
python
def add_link(self, rel, value, href=None):
    """
    Add a Link metadata element to the IOC.

    :param rel: Type of the link.
    :param value: Value of the link text.
    :param href: A href value assigned to the link.
    :return: True
    """
    links_node = self.metadata.find('links')
    if links_node is None:
        links_node = ioc_et.make_links_node()
        self.metadata.append(links_node)
    link_node = ioc_et.make_link_node(rel, value, href)
    links_node.append(link_node)
    return True
python
def pattern_for_view(self, view, action):
    """
    Returns the URL pattern for the passed in action.
    """
    # if this view knows how to define a URL pattern, call that
    if getattr(view, 'derive_url_pattern', None):
        return view.derive_url_pattern(self.path, action)
    # otherwise take our best guess
    else:
        return r'^%s/%s/$' % (self.path, action)
java
public static ServerSocketPredicate tag(String... tags) {
    return socket -> socket.tags().containsAll(Arrays.asList(tags));
}
java
public static List<CommerceOrder> findByG_U_O(long groupId, long userId, int orderStatus) {
    return getPersistence().findByG_U_O(groupId, userId, orderStatus);
}
python
def dict_stack(dict_list, key_prefix=''):
    r"""
    stacks values from two dicts into a new dict where the values are list of
    the input values. the keys are the same.

    DEPRECATE in favor of dict_stack2

    Args:
        dict_list (list): list of dicts with similar keys

    Returns:
        dict dict_stacked

    CommandLine:
        python -m utool.util_dict --test-dict_stack
        python -m utool.util_dict --test-dict_stack:1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict1_ = {'a': 1, 'b': 2}
        >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
        >>> dict_stacked = dict_stack([dict1_, dict2_])
        >>> result = ut.repr2(dict_stacked, sorted_=True)
        >>> print(result)
        {'a': [1, 2], 'b': [2, 3], 'c': [4]}

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> # Get equivalent behavior with dict_stack2?
        >>> # Almost, as long as None is not part of the list
        >>> dict1_ = {'a': 1, 'b': 2}
        >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
        >>> dict_stacked_ = dict_stack2([dict1_, dict2_])
        >>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}
        >>> result = ut.repr2(dict_stacked, sorted_=True)
        >>> print(result)
        {'a': [1, 2], 'b': [2, 3], 'c': [4]}
    """
    dict_stacked_ = defaultdict(list)
    for dict_ in dict_list:
        for key, val in six.iteritems(dict_):
            dict_stacked_[key_prefix + key].append(val)
    dict_stacked = dict(dict_stacked_)
    return dict_stacked
python
def get_packages(self, feed_id, protocol_type=None, package_name_query=None,
                 normalized_package_name=None, include_urls=None,
                 include_all_versions=None, is_listed=None,
                 get_top_package_versions=None, is_release=None,
                 include_description=None, top=None, skip=None,
                 include_deleted=None, is_cached=None, direct_upstream_id=None):
    """GetPackages.

    [Preview API] Get details about all of the packages in the feed. Use the
    various filters to include or exclude information from the result set.

    :param str feed_id: Name or Id of the feed.
    :param str protocol_type: One of the supported artifact package types.
    :param str package_name_query: Filter to packages that contain the provided
        string. Characters in the string must conform to the package name
        constraints.
    :param str normalized_package_name: [Obsolete] Used for legacy scenarios
        and may be removed in future versions.
    :param bool include_urls: True to return REST Urls with the response.
        Default is True.
    :param bool include_all_versions: True to return all versions of the
        package in the response. Default is false (latest version only).
    :param bool is_listed: Only applicable for NuGet packages, setting it for
        other package types will result in a 404. If false, delisted package
        versions will be returned. Use this to filter the response when
        includeAllVersions is set to true. Default is unset (do not return
        delisted packages).
    :param bool get_top_package_versions: Changes the behavior of $top and
        $skip to return all versions of each package up to $top. Must be used
        in conjunction with includeAllVersions=true
    :param bool is_release: Only applicable for Nuget packages. Use this to
        filter the response when includeAllVersions is set to true. Default is
        True (only return packages without prerelease versioning).
    :param bool include_description: Return the description for every version
        of each package in the response. Default is False.
    :param int top: Get the top N packages (or package versions where
        getTopPackageVersions=true)
    :param int skip: Skip the first N packages (or package versions where
        getTopPackageVersions=true)
    :param bool include_deleted: Return deleted or unpublished versions of
        packages in the response. Default is False.
    :param bool is_cached: [Obsolete] Used for legacy scenarios and may be
        removed in future versions.
    :param str direct_upstream_id: Filter results to return packages from a
        specific upstream.
    :rtype: [Package]
    """
    route_values = {}
    if feed_id is not None:
        route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
    query_parameters = {}
    if protocol_type is not None:
        query_parameters['protocolType'] = self._serialize.query('protocol_type', protocol_type, 'str')
    if package_name_query is not None:
        query_parameters['packageNameQuery'] = self._serialize.query('package_name_query', package_name_query, 'str')
    if normalized_package_name is not None:
        query_parameters['normalizedPackageName'] = self._serialize.query('normalized_package_name', normalized_package_name, 'str')
    if include_urls is not None:
        query_parameters['includeUrls'] = self._serialize.query('include_urls', include_urls, 'bool')
    if include_all_versions is not None:
        query_parameters['includeAllVersions'] = self._serialize.query('include_all_versions', include_all_versions, 'bool')
    if is_listed is not None:
        query_parameters['isListed'] = self._serialize.query('is_listed', is_listed, 'bool')
    if get_top_package_versions is not None:
        query_parameters['getTopPackageVersions'] = self._serialize.query('get_top_package_versions', get_top_package_versions, 'bool')
    if is_release is not None:
        query_parameters['isRelease'] = self._serialize.query('is_release', is_release, 'bool')
    if include_description is not None:
        query_parameters['includeDescription'] = self._serialize.query('include_description', include_description, 'bool')
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if skip is not None:
        query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
    if include_deleted is not None:
        query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
    if is_cached is not None:
        query_parameters['isCached'] = self._serialize.query('is_cached', is_cached, 'bool')
    if direct_upstream_id is not None:
        query_parameters['directUpstreamId'] = self._serialize.query('direct_upstream_id', direct_upstream_id, 'str')
    response = self._send(http_method='GET',
                          location_id='7a20d846-c929-4acc-9ea2-0d5a7df1b197',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[Package]', self._unwrap_collection(response))
python
def dumpkey(key):
    """
    Helper to convert result of `getch` (string) or `getchars` (list)
    to hex string.
    """
    def hex3fy(key):
        """Helper to convert string into hex string (Python 3 compatible)"""
        from binascii import hexlify
        # Python 3 strings are no longer binary, encode them for hexlify()
        if PY3K:
            key = key.encode('utf-8')
        keyhex = hexlify(key).upper()
        if PY3K:
            keyhex = keyhex.decode('utf-8')
        return keyhex
    if type(key) == str:
        return hex3fy(key)
    else:
        return ' '.join([hex3fy(s) for s in key])
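A quick sketch of the expected output (PY3K is a module-level flag in the original source; values shown assume ASCII/escape input):

print(dumpkey('a'))                 # '61'
print(dumpkey(['\x1b', '[', 'A']))  # '1B 5B 41', an arrow-key sequence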
python
def add_implem(self, transition, attribute, function, **kwargs):
    """Add an implementation.

    Args:
        transition (Transition): the transition for which the implementation
            is added
        attribute (str): the name of the attribute where the implementation
            will be available
        function (callable): the actual implementation function
        **kwargs: extra arguments for the related ImplementationProperty.
    """
    implem = ImplementationProperty(
        field_name=self.state_field,
        transition=transition,
        workflow=self.workflow,
        implementation=function,
        **kwargs)
    self.implementations[transition.name] = implem
    self.transitions_at[transition.name] = attribute
    return implem
java
public static Map makeMap(Mapper mapper, Iterator i) {
    return makeMap(mapper, i, false);
}
java
public void setSparkLineColor(final LcdColor LCD_COLOR) {
    this.lineColor = LCD_COLOR.TEXT_COLOR;
    this.sparkLineColor = LCD_COLOR;
    recreateImages = true;
    init(INNER_BOUNDS.width, INNER_BOUNDS.height);
    repaint(INNER_BOUNDS);
}
python
def _doCleanup(self):
    """
    Perform any periodic database cleanup tasks.

    @returns: Deferred
    """
    # pass on this if we're not configured yet
    if not self.configured_url:
        return

    d = self.changes.pruneChanges(self.master.config.changeHorizon)
    d.addErrback(log.err, 'while pruning changes')
    return d
python
def diskwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first',
                   r='cubic', verbose=True, outdir=None, dst_ndv=None):
    """Helper function for diskwarp of multiple input GDAL Datasets
    """
    return warp_multi(src_ds_list, res, extent, t_srs, r, verbose=verbose,
                      warptype=diskwarp, outdir=outdir, dst_ndv=dst_ndv)
python
def check_validation(self, cert):
    """
    Checks to see if a certificate has been validated, and if so, returns
    the ValidationPath used to validate it.

    :param cert:
        An asn1crypto.x509.Certificate object

    :return:
        None if not validated, or a certvalidator.path.ValidationPath
        object of the validation path
    """
    # CA certs are automatically trusted since they are from the trust list
    if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
        self._validate_map[cert.signature] = ValidationPath(cert)
    return self._validate_map.get(cert.signature)
java
@Override
public double similarity(VariantCall call1, VariantCall call2) {
    int minNumberOfGenotypes = Math.min(call1.getGenotypeCount(), call2.getGenotypeCount());
    int numberOfSharedAlleles = 0;
    for (int i = 0; i < minNumberOfGenotypes; ++i) {
        if (call1.getGenotype(i) == call2.getGenotype(i)) {
            ++numberOfSharedAlleles;
        }
    }
    int maxNumberOfGenotypes = Math.max(call1.getGenotypeCount(), call2.getGenotypeCount());
    return (double) numberOfSharedAlleles / maxNumberOfGenotypes;
}
java
public PlateFactor getFactorByName(String name) {
    int index = factorNames.indexOf(name);
    if (index == -1) {
        return null;
    } else {
        return plateFactors.get(index);
    }
}
java
public static int parseBlockComment(final char[] query, int offset) {
    if (offset + 1 < query.length && query[offset + 1] == '*') {
        // /* /* */ */ nest, according to SQL spec
        int level = 1;
        for (offset += 2; offset < query.length; ++offset) {
            switch (query[offset - 1]) {
            case '*':
                if (query[offset] == '/') {
                    --level;
                    ++offset; // don't parse / in */* twice
                }
                break;
            case '/':
                if (query[offset] == '*') {
                    ++level;
                    ++offset; // don't parse * in /*/ twice
                }
                break;
            default:
                break;
            }

            if (level == 0) {
                --offset; // reset position to last '/' char
                break;
            }
        }
    }
    return offset;
}
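Since the new examples in this section are in Python, here is a minimal Python re-implementation sketch of the same SQL nesting rule (not the original Java source), which makes the two "skip ahead" cases easy to see:

def parse_block_comment(query: str, offset: int) -> int:
    # Assumes query[offset:offset + 2] == '/*'; returns the index of the
    # closing '/', or an index past the end if the comment never closes.
    level = 1
    i = offset + 2
    while i < len(query):
        if query[i - 1] == '*' and query[i] == '/':
            level -= 1
            if level == 0:
                return i
            i += 2  # don't parse '/' in '*/*' twice
        elif query[i - 1] == '/' and query[i] == '*':
            level += 1
            i += 2  # don't parse '*' in '/*/' twice
        else:
            i += 1
    return i

print(parse_block_comment("a /* x /* y */ z */ b", 2))  # 18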
java
private Optional<String[]> parseCredentials(String url) {
    if (!Strings.isNullOrEmpty(url)) {
        int p;
        if ((p = url.indexOf("://")) != -1) {
            url = url.substring(p + 3);
        }
        if ((p = url.indexOf('@')) != -1) {
            String[] result = new String[2];
            String credentials = url.substring(0, p);
            if ((p = credentials.indexOf(':')) != -1) {
                result[0] = credentials.substring(0, p);
                result[1] = credentials.substring(p + 1);
            } else {
                result[0] = credentials;
                result[1] = "";
            }
            return Optional.of(result);
        }
    }
    return Optional.empty();
}
java
public static Element createElement(String tagName, String id) {
    return IMPL.createElement(tagName, id);
}
java
@RequestMapping(value = "/create-payment", method = RequestMethod.POST) public @ResponseBody Map<String, String> createPayment(@RequestParam("performCheckout") Boolean performCheckout) throws PaymentException { Map<String, String> response = new HashMap<>(); Payment createdPayment = paymentService.createPayPalPaymentForCurrentOrder(performCheckout); response.put("id", createdPayment.getId()); return response; }
python
def j1_2(a=1):
    r"""Hankel transform pair J1_2 ([Ande75]_)."""

    def lhs(x):
        return np.exp(-a*x)

    def rhs(b):
        return (np.sqrt(b**2 + a**2) - a)/(b*np.sqrt(b**2 + a**2))

    return Ghosh('j1', lhs, rhs)
java
private ResultItem pollResultItem(long timeout, boolean idle) {
    ResultItem result = getResult();
    if (result != null) {
        result.remainingIdleTimeout = timeout;
    }
    if (result == null && timeout > 0) {
        long start = System.currentTimeMillis();
        internalWait(timeout);
        long end = System.currentTimeMillis();
        result = getResult();
        if (result != null) {
            result.remainingIdleTimeout = timeout - (end - start);
            logger.finest("Remaining timeout: " + result.remainingIdleTimeout);
        }
    }
    if (result == null) {
        if (idle) {
            throw new ResponseNotReceivedException("No idle response in a timely fashion");
        } else {
            throw new ResponseNotReceivedException("No response in a timely fashion");
        }
    }
    return result;
}
python
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=FLAGS):
    """Ensures that only one flag among flag_names is set.

    Args:
        flag_names: [str], a list of the flag names to be checked.
        required: Boolean, if set, exactly one of the flags must be set.
            Otherwise, it is also valid for none of the flags to be set.
        flag_values: An optional FlagValues instance to validate against.
    """
    def validate_mutual_exclusion(flags_dict):
        flag_count = sum(1 for val in flags_dict.values() if val is not None)
        if flag_count == 1 or (not required and flag_count == 0):
            return True
        message = ('%s one of (%s) must be specified.' %
                   ('Exactly' if required else 'At most', ', '.join(flag_names)))
        raise ValidationError(message)

    register_multi_flags_validator(
        flag_names, validate_mutual_exclusion, flag_values=flag_values)
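A hedged usage sketch in the Abseil-style flags API this helper appears to come from (the flag names are illustrative):

flags.DEFINE_string('in_file', None, 'Read input from a file.')
flags.DEFINE_string('in_url', None, 'Read input from a URL.')
mark_flags_as_mutual_exclusive(['in_file', 'in_url'], required=True)
# Flag parsing now fails unless exactly one of --in_file / --in_url is given.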
java
public synchronized void setVal(int offset, Constant val) {
    byte[] byteval = val.asBytes();

    // Append the size of value if it is not fixed size
    if (!val.getType().isFixedSize()) {
        // check the field capacity and value size
        if (offset + ByteHelper.INT_SIZE + byteval.length > BLOCK_SIZE)
            throw new BufferOverflowException();

        byte[] sizeBytes = ByteHelper.toBytes(byteval.length);
        contents.put(offset, sizeBytes);
        offset += sizeBytes.length;
    }

    // Put bytes
    contents.put(offset, byteval);
}
java
public void marshall(InstanceGroup instanceGroup, ProtocolMarshaller protocolMarshaller) {

    if (instanceGroup == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(instanceGroup.getId(), ID_BINDING);
        protocolMarshaller.marshall(instanceGroup.getName(), NAME_BINDING);
        protocolMarshaller.marshall(instanceGroup.getMarket(), MARKET_BINDING);
        protocolMarshaller.marshall(instanceGroup.getInstanceGroupType(), INSTANCEGROUPTYPE_BINDING);
        protocolMarshaller.marshall(instanceGroup.getBidPrice(), BIDPRICE_BINDING);
        protocolMarshaller.marshall(instanceGroup.getInstanceType(), INSTANCETYPE_BINDING);
        protocolMarshaller.marshall(instanceGroup.getRequestedInstanceCount(), REQUESTEDINSTANCECOUNT_BINDING);
        protocolMarshaller.marshall(instanceGroup.getRunningInstanceCount(), RUNNINGINSTANCECOUNT_BINDING);
        protocolMarshaller.marshall(instanceGroup.getStatus(), STATUS_BINDING);
        protocolMarshaller.marshall(instanceGroup.getConfigurations(), CONFIGURATIONS_BINDING);
        protocolMarshaller.marshall(instanceGroup.getConfigurationsVersion(), CONFIGURATIONSVERSION_BINDING);
        protocolMarshaller.marshall(instanceGroup.getLastSuccessfullyAppliedConfigurations(), LASTSUCCESSFULLYAPPLIEDCONFIGURATIONS_BINDING);
        protocolMarshaller.marshall(instanceGroup.getLastSuccessfullyAppliedConfigurationsVersion(), LASTSUCCESSFULLYAPPLIEDCONFIGURATIONSVERSION_BINDING);
        protocolMarshaller.marshall(instanceGroup.getEbsBlockDevices(), EBSBLOCKDEVICES_BINDING);
        protocolMarshaller.marshall(instanceGroup.getEbsOptimized(), EBSOPTIMIZED_BINDING);
        protocolMarshaller.marshall(instanceGroup.getShrinkPolicy(), SHRINKPOLICY_BINDING);
        protocolMarshaller.marshall(instanceGroup.getAutoScalingPolicy(), AUTOSCALINGPOLICY_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def html(self): """ str: HTML representation of the page Note: Not settable Warning: This can be slow for very large pages """ if self._html is False: self._html = None query_params = { "prop": "revisions", "rvprop": "content", "rvlimit": 1, "rvparse": "", "titles": self.title, } request = self.mediawiki.wiki_request(query_params) page = request["query"]["pages"][self.pageid] self._html = page["revisions"][0]["*"] return self._html
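A short usage sketch, assuming this is the lazily cached `html` property of a pymediawiki-style page object (the page title is arbitrary):

python
from mediawiki import MediaWiki  # assumed: the pymediawiki package

wiki = MediaWiki()
page = wiki.page('Python (programming language)')
markup = page.html  # first access fires the API request; the result is cached
print(markup[:80] if markup else 'no HTML returned')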
python
def update_many(cls, filter, update, upsert=False):
    """
    Updates all documents that match the filter with the update value

    Will upsert a new document if upsert=True and no document matches the filter
    """
    return cls.collection.update_many(filter, update, upsert=upsert).raw_result
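A minimal sketch of how such a classmethod might be used, assuming `collection` is a pymongo collection (the class wiring, database, and field names here are hypothetical):

python
from pymongo import MongoClient


class Document(object):
    # Hypothetical wiring: a pymongo collection backing the model.
    collection = MongoClient()['app_db']['tasks']

    @classmethod
    def update_many(cls, filter, update, upsert=False):
        return cls.collection.update_many(filter, update, upsert=upsert).raw_result


# Mark every pending task as processed; insert one if none match.
raw = Document.update_many(
    {'status': 'pending'},
    {'$set': {'status': 'processed'}},
    upsert=True,
)
print(raw)  # e.g. {'n': 3, 'nModified': 3, 'ok': 1.0, ...}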
java
public void api_credential_credentialId_DELETE(Long credentialId) throws IOException { String qPath = "/me/api/credential/{credentialId}"; StringBuilder sb = path(qPath, credentialId); exec(qPath, "DELETE", sb.toString(), null); }
python
def save_controls(self, parameterstep: 'timetools.PeriodConstrArg' = None,
                  simulationstep: 'timetools.PeriodConstrArg' = None,
                  auxfiler: 'Optional[auxfiletools.Auxfiler]' = None):
    """Save the control parameters of the |Model| object handled by each
    |Element| object and, if one is given, those handled by the |Auxfiler|
    object."""
    if auxfiler:
        auxfiler.save(parameterstep, simulationstep)
    for element in printtools.progressbar(self):
        element.model.parameters.save_controls(
            parameterstep=parameterstep,
            simulationstep=simulationstep,
            auxfiler=auxfiler)
python
def save_report(self, file_path): """Write coveralls report to file.""" try: report = self.create_report() except coverage.CoverageException as e: log.error('Failure to gather coverage:', exc_info=e) else: with open(file_path, 'w') as report_file: report_file.write(report)
java
public static boolean isTypeOf(final Class<?> clazz, TypeMirror type) { checkNotNull(clazz); return type.accept(new IsTypeOf(clazz), null); }
java
public static Vector zero(int length) { return length > 1000 ? SparseVector.zero(length) : DenseVector.zero(length); }
python
def add_row(self, label='', item=''): """ Add a row to the grid """ self.AppendRows(1) last_row = self.GetNumberRows() - 1 self.SetCellValue(last_row, 0, str(label)) self.row_labels.append(label) self.row_items.append(item)
python
def db_chainstate_get_block(cls, cur, block_height): """ Get the list of virtualchain transactions accepted at a given block. Returns the list of rows, where each row is a dict. """ query = 'SELECT * FROM chainstate WHERE block_id = ? ORDER BY vtxindex;' args = (block_height,) rows = cls.db_query_execute(cur, query, args, verbose=False) ret = [] for r in rows: rowdata = { 'txid': str(r['txid']), 'block_id': r['block_id'], 'txindex': r['txindex'], 'vtxindex': r['vtxindex'], 'opcode': str(r['opcode']), 'data_hex': str(r['data_hex']), 'senders': simplejson.loads(r['senders']), 'tx_hex': str(r['tx_hex']), 'tx_merkle_path': str(r['tx_merkle_path']), 'fee': r['fee'] } ret.append(rowdata) return ret
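For reference, one element of the returned list has this shape (every field value below is illustrative, not real chain data):

python
example_row = {
    'txid': 'f00d...',             # transaction ID (hex string)
    'block_id': 471000,            # the block height queried
    'txindex': 17,                 # index of the tx within the block
    'vtxindex': 2,                 # index among virtualchain txs in the block
    'opcode': '+',                 # virtualchain operation code
    'data_hex': '6964...',         # operation payload (hex string)
    'senders': [],                 # decoded from the stored JSON column
    'tx_hex': '0100...',           # raw transaction (hex string)
    'tx_merkle_path': 'abcd...',   # merkle proof for the tx
    'fee': 5500,                   # fee paid
}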
java
public final static int readMdLinkId(final StringBuilder out, final String in, final int start) { int pos = start; int counter = 1; while (pos < in.length()) { final char ch = in.charAt(pos); boolean endReached = false; switch (ch) { case '\n': out.append(' '); break; case '[': counter++; out.append(ch); break; case ']': counter--; if (counter == 0) { endReached = true; } else { out.append(ch); } break; default: out.append(ch); break; } if (endReached) { break; } pos++; } return (pos == in.length()) ? -1 : pos; }
python
def handle_string(self, strreq):
    '''
    Handle a string representing a jsonrpc-request

    strreq - jsonrpc-request as a string
    returns jsonrpc-response as a string
    '''
    # convert to a jsonrpc dict
    req = None
    try:
        req = json.loads(strreq)
    except Exception:
        logging.debug('JSONRPC: Format Exception:')
        logging.debug('-----------------\n' + traceback.format_exc())
        return json.dumps(SLOJSONRPCError(-32700).to_json())

    # handle a single request
    if isinstance(req, dict):
        return json.dumps(self.handle_request(req))
    # handle a batch of requests
    elif isinstance(req, list):
        for r in req:
            if not isinstance(r, dict):
                logging.debug('JSONRPC: Format Error: Item '
                              '"%s" in request is not a dictionary.' % str(r))
                return json.dumps(SLOJSONRPCError(-32700).to_json())
            try:
                self._validate_format(r)
                self._validate_params(r)
            except SLOJSONRPCError as e:
                return json.dumps(e.to_json(r.get('id', None)))

        res = []
        for r in req:
            res.append(self.handle_request(r, validate=False))
        return json.dumps(res)
    # invalid request
    else:
        return json.dumps(SLOJSONRPCError(-32700).to_json())
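To illustrate the single-request and batch branches, a hedged sketch of the inputs and outputs (the handler instance and its 'ping' method are assumed):

python
import json

# A single JSON-RPC 2.0 request and a batch of two, as strings.
single = json.dumps({'jsonrpc': '2.0', 'method': 'ping', 'id': 1})
batch = json.dumps([
    {'jsonrpc': '2.0', 'method': 'ping', 'id': 1},
    {'jsonrpc': '2.0', 'method': 'ping', 'id': 2},
])

# handler.handle_string(single)     -> one JSON object, e.g. '{"jsonrpc": "2.0", "result": "pong", "id": 1}'
# handler.handle_string(batch)      -> a JSON array with one response per request
# handler.handle_string('not json') -> the -32700 (parse error) response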
python
def from_transform(cls, matrix):
    r"""
    :param matrix: 4x4 3d affine transform matrix
    :type matrix: :class:`FreeCAD.Matrix`
    :return: a unit, zero offset coordinate system transformed by the given matrix
    :rtype: :class:`CoordSystem`

    Individual rotation & translation matrices are:

    .. math::

        R_z & = \begin{bmatrix}
            cos(\alpha) & -sin(\alpha) & 0 & 0 \\
            sin(\alpha) & cos(\alpha) & 0 & 0 \\
            0 & 0 & 1 & 0 \\
            0 & 0 & 0 & 1
        \end{bmatrix} \qquad & R_y & = \begin{bmatrix}
            cos(\beta) & 0 & sin(\beta) & 0 \\
            0 & 1 & 0 & 0 \\
            -sin(\beta) & 0 & cos(\beta) & 0 \\
            0 & 0 & 0 & 1
        \end{bmatrix} \\
        \\
        R_x & = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & cos(\gamma) & -sin(\gamma) & 0 \\
            0 & sin(\gamma) & cos(\gamma) & 0 \\
            0 & 0 & 0 & 1
        \end{bmatrix} \qquad & T_{\text{xyz}} & = \begin{bmatrix}
            1 & 0 & 0 & \delta x \\
            0 & 1 & 0 & \delta y \\
            0 & 0 & 1 & \delta z \\
            0 & 0 & 0 & 1
        \end{bmatrix}

    The ``transform`` is the combination of these:

    .. math::

        transform = T_{\text{xyz}} \cdot R_z \cdot R_y \cdot R_x = \begin{bmatrix}
            a & b & c & \delta x \\
            d & e & f & \delta y \\
            g & h & i & \delta z \\
            0 & 0 & 0 & 1
        \end{bmatrix}

    Where:

    .. math::

        a & = cos(\alpha) cos(\beta) \\
        b & = cos(\alpha) sin(\beta) sin(\gamma) - sin(\alpha) cos(\gamma) \\
        c & = cos(\alpha) sin(\beta) cos(\gamma) + sin(\alpha) sin(\gamma) \\
        d & = sin(\alpha) cos(\beta) \\
        e & = sin(\alpha) sin(\beta) sin(\gamma) + cos(\alpha) cos(\gamma) \\
        f & = sin(\alpha) sin(\beta) cos(\gamma) - cos(\alpha) sin(\gamma) \\
        g & = -sin(\beta) \\
        h & = cos(\beta) sin(\gamma) \\
        i & = cos(\beta) cos(\gamma)
    """
    # Create reference points at origin
    offset = FreeCAD.Vector(0, 0, 0)
    x_vertex = FreeCAD.Vector(1, 0, 0)  # vertex along +X-axis
    z_vertex = FreeCAD.Vector(0, 0, 1)  # vertex along +Z-axis

    # Transform reference points
    offset = matrix.multiply(offset)
    x_vertex = matrix.multiply(x_vertex)
    z_vertex = matrix.multiply(z_vertex)

    # Get axis vectors (relative to offset vertex)
    x_axis = x_vertex - offset
    z_axis = z_vertex - offset

    # Return new instance
    vect_tuple = lambda v: (v.x, v.y, v.z)
    return cls(
        origin=vect_tuple(offset),
        xDir=vect_tuple(x_axis),
        normal=vect_tuple(z_axis),
    )
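As a sanity check on the closed forms above, a small numpy sketch (numpy stands in for FreeCAD here purely to verify the algebra): it composes T·Rz·Ry·Rx numerically and compares every entry against a, b, ..., i:

python
import numpy as np

alpha, beta, gamma = 0.3, -0.7, 1.1   # arbitrary test angles
dx, dy, dz = 2.0, -1.0, 0.5           # arbitrary translation

ca, sa = np.cos(alpha), np.sin(alpha)
cb, sb = np.cos(beta), np.sin(beta)
cg, sg = np.cos(gamma), np.sin(gamma)

Rz = np.array([[ca, -sa, 0, 0], [sa, ca, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Ry = np.array([[cb, 0, sb, 0], [0, 1, 0, 0], [-sb, 0, cb, 0], [0, 0, 0, 1]])
Rx = np.array([[1, 0, 0, 0], [0, cg, -sg, 0], [0, sg, cg, 0], [0, 0, 0, 1]])
T = np.eye(4)
T[:3, 3] = [dx, dy, dz]

transform = T @ Rz @ Ry @ Rx

expected = np.array([
    [ca * cb, ca * sb * sg - sa * cg, ca * sb * cg + sa * sg, dx],
    [sa * cb, sa * sb * sg + ca * cg, sa * sb * cg - ca * sg, dy],
    [-sb,     cb * sg,                cb * cg,                dz],
    [0,       0,                      0,                      1],
])
assert np.allclose(transform, expected)  # the closed forms match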
python
def get_transformed_feature_info(features, schema):
    """Returns information about the transformed features.

    Returns:
        Dict in the form
        {transformed_feature_name: {dtype: tf type, size: int or None}}. If the
        size is None, then the tensor is a sparse tensor.
    """

    info = collections.defaultdict(dict)

    for name, transform in six.iteritems(features):
        transform_name = transform['transform']
        source_column = transform['source_column']

        if transform_name == IDENTITY_TRANSFORM:
            schema_type = next(col['type'].lower() for col in schema
                               if col['name'] == source_column)
            if schema_type == FLOAT_SCHEMA:
                info[name]['dtype'] = tf.float32
            elif schema_type == INTEGER_SCHEMA:
                info[name]['dtype'] = tf.int64
            else:
                raise ValueError('identity should only be applied to integer or '
                                 'float columns, but was used on %s' % name)
            info[name]['size'] = 1
        elif transform_name == SCALE_TRANSFORM:
            info[name]['dtype'] = tf.float32
            info[name]['size'] = 1
        elif transform_name == ONE_HOT_TRANSFORM:
            info[name]['dtype'] = tf.int64
            info[name]['size'] = 1
        elif transform_name == EMBEDDING_TRANSFROM:
            info[name]['dtype'] = tf.int64
            info[name]['size'] = 1
        elif transform_name == MULTI_HOT_TRANSFORM:
            info[name]['dtype'] = tf.int64
            info[name]['size'] = None
        elif transform_name == BOW_TRANSFORM or transform_name == TFIDF_TRANSFORM:
            info[name + '_ids']['dtype'] = tf.int64
            info[name + '_weights']['dtype'] = tf.float32
            info[name + '_ids']['size'] = None
            info[name + '_weights']['size'] = None
        elif transform_name == KEY_TRANSFORM:
            schema_type = next(col['type'].lower() for col in schema
                               if col['name'] == source_column)
            if schema_type == FLOAT_SCHEMA:
                info[name]['dtype'] = tf.float32
            elif schema_type == INTEGER_SCHEMA:
                info[name]['dtype'] = tf.int64
            else:
                info[name]['dtype'] = tf.string
            info[name]['size'] = 1
        elif transform_name == TARGET_TRANSFORM:
            # If the input is a string, it gets converted to an int (id)
            schema_type = next(col['type'].lower() for col in schema
                               if col['name'] == source_column)
            if schema_type in NUMERIC_SCHEMA:
                info[name]['dtype'] = tf.float32
            else:
                info[name]['dtype'] = tf.int64
            info[name]['size'] = 1
        elif transform_name == IMAGE_TRANSFORM:
            info[name]['dtype'] = tf.float32
            info[name]['size'] = IMAGE_BOTTLENECK_TENSOR_SIZE
        else:
            raise ValueError('Unknown transform %s' % transform_name)

    return info
python
def _call_command(self, name, *args, **kwargs): """ If a command is called for the main field, without dynamic part, an ImplementationError is raised: commands can only be applied on dynamic versions. On dynamic versions, if the command is a modifier, we add the version in the inventory. """ if self.dynamic_version_of is None: raise ImplementationError('The main version of a dynamic field cannot accept commands') try: result = super(DynamicFieldMixin, self)._call_command(name, *args, **kwargs) except: raise else: if name in self.available_modifiers and name not in ('delete', 'hdel'): self._inventory.sadd(self.dynamic_part) return result
python
def inc_nbrs(self, node):
    """
    List of nodes connected by incoming edges
    """
    # list() matters on Python 3, where map() returns a lazy iterator
    return list(map(self.head, self.inc_edges(node)))
python
def getAsKmlAnimation(self, session, channelInputFile, path=None, documentName=None, styles={}):
    """
    Generate a KML visualization of the link node dataset file.

    Link node dataset files are time stamped link node value datasets. This will yield a value for each
    stream node at each time step for which output is written. The resulting KML visualization will be an
    animation.

    The stream nodes are represented by cylinders where the z dimension/elevation represents the values.
    A color ramp is applied to make different values stand out even more. The method attempts to identify
    an appropriate scale factor for the z dimension, but it can be set manually using the styles dictionary.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
        channelInputFile (:class:`gsshapy.orm.ChannelInputFile`): Channel input file object to be associated with
            this link node dataset file.
        path (str, optional): Path to file where KML will be written. Defaults to None.
        documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
            Defaults to the name of the link node dataset file.
        styles (dict, optional): Custom styles to apply to KML geometry. Defaults to empty dictionary.

            Valid keys (styles) include:
                * zScale (float): multiplier to apply to the values (z dimension)
                * radius (float): radius in meters of the node cylinder
                * colorRampEnum (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict): Use ColorRampEnum to
                  select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to
                  specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples
                  (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number
                  of points to interpolate between each color given in the colors list.
    Returns:
        str: KML string
    """
    # Constants
    DECIMAL_DEGREE_METER = 0.00001
    OPTIMAL_Z_MAX = 300  # meters

    # Default styles
    radiusMeters = 2 * DECIMAL_DEGREE_METER  # 2 meters
    zScale = 1
    colorRamp = ColorRampGenerator.generateDefaultColorRamp(ColorRampEnum.COLOR_RAMP_HUE)

    # Validate
    if not documentName:
        documentName = self.name

    if 'zScale' in styles:
        try:
            float(styles['zScale'])
            zScale = styles['zScale']
        except ValueError:
            log.warning('zScale must be a valid number representing z dimension multiplier.')

    if 'radius' in styles:
        try:
            float(styles['radius'])
            radiusMeters = styles['radius'] * DECIMAL_DEGREE_METER
        except ValueError:
            log.warning('radius must be a number representing the radius of the value cylinders in meters.')

    if 'colorRampEnum' in styles:
        colorRampEnum = styles['colorRampEnum']

        if isinstance(colorRampEnum, dict):
            colorRamp = ColorRampGenerator.generateCustomColorRamp(colorRampEnum['colors'],
                                                                   colorRampEnum['interpolatedPoints'])
        elif isinstance(colorRampEnum, int):
            colorRamp = ColorRampGenerator.generateDefaultColorRamp(colorRampEnum)

    # Link to channel input file
    self.linkToChannelInputFile(session, channelInputFile)

    # Create instance of GeometryConverter
    converter = GeometryConverter(session)

    # Get LinkNodeTimeSteps
    linkNodeTimeSteps = self.timeSteps

    # Get date time parameters
    timeStepDelta = timedelta(minutes=self.timeStepInterval)
    startDateTime = datetime(1970, 1, 1)
    startTimeParts = self.startTime.split()

    # Calculate min and max values for the color ramp
    minValue = 0.0
    maxValue = session.query(func.max(NodeDataset.value)). \
        filter(NodeDataset.linkNodeDatasetFile == self). \
        filter(NodeDataset.status == 1). \
        scalar()
    avgValue = session.query(func.avg(NodeDataset.value)). \
        filter(NodeDataset.linkNodeDatasetFile == self). \
        filter(NodeDataset.status == 1). \
        scalar()

    # Calculate automatic zScale if not assigned
    if 'zScale' not in styles:
        zScale = OPTIMAL_Z_MAX / ((maxValue + avgValue) / 2)

    # Map color ramp to values
    mappedColorRamp = ColorRampGenerator.mapColorRampToValues(colorRamp, minValue, maxValue)

    if len(startTimeParts) > 5:
        # Parse the start date and time from the file (the defaults above are the epoch)
        startDateTime = datetime(year=int(startTimeParts[2]) or 1970,
                                 month=int(startTimeParts[1]) or 1,
                                 day=int(startTimeParts[0]) or 1,
                                 hour=int(startTimeParts[3]) or 0,
                                 minute=int(startTimeParts[4]) or 0)

    # Start the KML Document
    kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
    document = ET.SubElement(kml, 'Document')
    docName = ET.SubElement(document, 'name')
    docName.text = documentName

    # Apply special style to hide legend items
    style = ET.SubElement(document, 'Style', id='check-hide-children')
    listStyle = ET.SubElement(style, 'ListStyle')
    listItemType = ET.SubElement(listStyle, 'listItemType')
    listItemType.text = 'checkHideChildren'
    styleUrl = ET.SubElement(document, 'styleUrl')
    styleUrl.text = '#check-hide-children'

    for linkNodeTimeStep in linkNodeTimeSteps:
        # Create current datetime objects
        timeSpanBegin = startDateTime + (linkNodeTimeStep.timeStep * timeStepDelta)
        timeSpanEnd = timeSpanBegin + timeStepDelta

        # Get Link Datasets
        linkDatasets = linkNodeTimeStep.linkDatasets

        for linkDataset in linkDatasets:
            # Don't process special link datasets (with node counts of -1 or 0)
            if linkDataset.numNodeDatasets <= 0:
                break

            # Get Node Datasets
            nodeDatasets = linkDataset.nodeDatasets

            for nodeDataset in nodeDatasets:
                # Get node
                node = nodeDataset.node
                link = node.streamLink

                extrude = nodeDataset.value

                # Don't extrude below 0
                if nodeDataset.value < 0.0:
                    extrude = 0.0

                # Convert to circle
                circleString = converter.getPointAsKmlCircle(tableName=node.tableName,
                                                             radius=radiusMeters,
                                                             extrude=extrude,
                                                             zScaleFactor=zScale,
                                                             geometryId=node.id)

                # Convert alpha from 0.0-1.0 decimal to 00-FF string
                integerAlpha = mappedColorRamp.getAlphaAsInteger()

                # Get RGB color from color ramp and convert to KML hex ABGR string with alpha
                integerRGB = mappedColorRamp.getColorForValue(nodeDataset.value)

                # Make color ABGR string
                colorString = '%02X%02X%02X%02X' % (integerAlpha,
                                                    integerRGB[mappedColorRamp.B],
                                                    integerRGB[mappedColorRamp.G],
                                                    integerRGB[mappedColorRamp.R])

                # Create placemark
                placemark = ET.SubElement(document, 'Placemark')

                # Create style tag and setup styles
                style = ET.SubElement(placemark, 'Style')

                # Set polygon line style
                lineStyle = ET.SubElement(style, 'LineStyle')

                # Disable lines by setting line width to 0
                lineWidth = ET.SubElement(lineStyle, 'width')
                lineWidth.text = str(0)

                # Set polygon fill color
                polyStyle = ET.SubElement(style, 'PolyStyle')
                polyColor = ET.SubElement(polyStyle, 'color')
                polyColor.text = colorString

                if len(linkNodeTimeSteps) > 1:
                    # Create TimeSpan tag
                    timeSpan = ET.SubElement(placemark, 'TimeSpan')

                    # Create begin and end tags
                    begin = ET.SubElement(timeSpan, 'begin')
                    begin.text = timeSpanBegin.strftime('%Y-%m-%dT%H:%M:%S')
                    end = ET.SubElement(timeSpan, 'end')
                    end.text = timeSpanEnd.strftime('%Y-%m-%dT%H:%M:%S')

                # Append geometry
                polygonCircle = ET.fromstring(circleString)
                placemark.append(polygonCircle)

                # Embed node data
                nodeExtendedData = ET.SubElement(placemark, 'ExtendedData')

                nodeNumberData = ET.SubElement(nodeExtendedData, 'Data', name='node_number')
                nodeNumberValue = ET.SubElement(nodeNumberData, 'value')
                nodeNumberValue.text = str(node.nodeNumber)

                nodeLinkNumberData = ET.SubElement(nodeExtendedData, 'Data', name='link_number')
                nodeLinkNumberValue = ET.SubElement(nodeLinkNumberData, 'value')
                nodeLinkNumberValue.text = str(link.linkNumber)

                nodeElevationData = ET.SubElement(nodeExtendedData, 'Data', name='value')
                nodeElevationValue = ET.SubElement(nodeElevationData, 'value')
                nodeElevationValue.text = str(nodeDataset.value)

    kmlString = ET.tostring(kml)

    if path:
        with open(path, 'w') as f:
            f.write(kmlString)

    return kmlString
java
public Method getEndpointMethod() throws ResourceAdapterInternalException { final String methodName = "getEndpointMethod"; if (TRACE.isEntryEnabled()) { SibTr.entry(this, TRACE, methodName); } if (ON_MESSAGE_METHOD == null) { try { ON_MESSAGE_METHOD = SibRaMessageListener.class.getMethod( "onMessage", new Class[] { SIBusMessage.class, AbstractConsumerSession.class, SITransaction.class }); } catch (final Exception exception) { FFDCFilter.processException(exception, CLASS_NAME + "." + methodName, FFDC_PROBE_2); if (TRACE.isEntryEnabled()) { SibTr.exception(TRACE, exception); } throw new ResourceAdapterInternalException(NLS .getFormattedMessage("ON_MESSAGE_CWSIV0851", new Object[] { exception }, null), exception); } } if (TRACE.isEntryEnabled()) { SibTr.exit(this, TRACE, methodName, ON_MESSAGE_METHOD); } return ON_MESSAGE_METHOD; }
python
def do_authorization(self, transactionid, amt):
    """Shortcut for the DoAuthorization method.

    Use the TRANSACTIONID from DoExpressCheckoutPayment for the
    ``transactionid``. The latest version of the API does not support the
    creation of an Order from `DoDirectPayment`.

    The `amt` should be the same as passed to `DoExpressCheckoutPayment`.

    Flow for a payment involving a `DoAuthorization` call::

         1. One or many calls to `SetExpressCheckout` with pertinent order
            details, returns `TOKEN`
         2. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to
            Order, `AMT` set to the amount of the transaction, returns
            `TRANSACTIONID`
         3. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the
            amount of the transaction.
         4. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID`
            returned by `DoAuthorization`)
    """
    args = self._sanitize_locals(locals())
    return self._call('DoAuthorization', **args)
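A sketch of that flow in code; the `interface` object and its snake_case shortcut methods are assumed to mirror `do_authorization` above, and every value shown (URLs, payer ID, amount) is illustrative only:

python
# Assumed interface: shortcuts for SetExpressCheckout, DoExpressCheckoutPayment,
# DoAuthorization and DoCapture, as described in the docstring above.
checkout = interface.set_express_checkout(amt='25.00',
                                          returnurl='https://example.com/ok',
                                          cancelurl='https://example.com/cancel')
token = checkout['TOKEN']

payment = interface.do_express_checkout_payment(token=token,
                                                payerid='HYPOTHETICAL_PAYERID',
                                                paymentaction='Order',
                                                amt='25.00')
transaction_id = payment['TRANSACTIONID']

auth = interface.do_authorization(transaction_id, '25.00')

# The TRANSACTIONID returned by DoAuthorization is the AUTHORIZATIONID for DoCapture.
interface.do_capture(auth['TRANSACTIONID'], '25.00', completetype='Complete')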
java
public static Environment merge(Environment env1, Environment env2) { final Environment mergedEnv = new Environment(); // merge tables final Map<String, TableEntry> tables = new LinkedHashMap<>(env1.getTables()); tables.putAll(env2.getTables()); mergedEnv.tables = tables; // merge functions final Map<String, FunctionEntry> functions = new HashMap<>(env1.getFunctions()); functions.putAll(env2.getFunctions()); mergedEnv.functions = functions; // merge execution properties mergedEnv.execution = ExecutionEntry.merge(env1.getExecution(), env2.getExecution()); // merge deployment properties mergedEnv.deployment = DeploymentEntry.merge(env1.getDeployment(), env2.getDeployment()); return mergedEnv; }
python
def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None):
    """Return number NUMBER nicely printed with the given
    THOUSANDS_SEPARATOR character.

    If max_ndigits_after_dot is specified and the number is a float, the
    number is rounded to at most that many digits after the dot.

    This version does not pay attention to locale.  See
    tmpl_nice_number_via_locale().
    """
    if isinstance(number, float):
        if max_ndigits_after_dot is not None:
            number = round(number, max_ndigits_after_dot)
        int_part, frac_part = str(number).split('.')
        return '%s.%s' % (nice_number(int(int_part), thousands_separator),
                          frac_part)
    else:
        # Handle the sign separately so a separator is never placed before it
        if number < 0:
            return '-' + nice_number(-number, thousands_separator)
        chars_in = list(str(number))
        length = len(chars_in)
        chars_out = []
        for i in range(length):
            if i % 3 == 0 and i != 0:
                chars_out.append(thousands_separator)
            chars_out.append(chars_in[length - i - 1])
        chars_out.reverse()
        return ''.join(chars_out)
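Assuming the function above is in scope, a few illustrative calls (expected output in comments):

python
print(nice_number(1234567))                             # 1,234,567
print(nice_number(1234567, thousands_separator=' '))    # 1 234 567
print(nice_number(1234.5678, max_ndigits_after_dot=2))  # 1,234.57
print(nice_number(-987654))                             # -987,654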
python
def tx_mean(tasmax, freq='YS'):
    r"""Mean maximum temperature

    The mean of daily maximum temperature.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature [℃] or [K]
    freq : str, optional
      Resampling frequency

    Returns
    -------
    xarray.DataArray
      Mean of daily maximum temperature.

    Notes
    -----
    Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the
    mean value for period :math:`j` is given by:

    .. math::

        \overline{TX}_j = \frac{\sum_{i=1}^{I} TX_{ij}}{I}
    """
    arr = tasmax.resample(time=freq) if freq else tasmax
    return arr.mean(dim='time', keep_attrs=True)
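A small self-contained example with synthetic data (one leap year of daily values), assuming the function above is importable; the numbers are illustrative:

python
import numpy as np
import pandas as pd
import xarray as xr

# One year of synthetic daily maximum temperatures.
time = pd.date_range('2000-01-01', '2000-12-31', freq='D')
tasmax = xr.DataArray(
    15 + 10 * np.sin(2 * np.pi * np.arange(time.size) / 365.25),
    coords={'time': time}, dims='time', name='tasmax',
    attrs={'units': 'degC'},
)

annual_mean = tx_mean(tasmax, freq='YS')  # one value per year
print(float(annual_mean))  # roughly 15.0 for this synthetic series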
java
protected CLVar var(final int lit) { final int idx = Math.abs(lit); assert 0 < idx && idx < this.vars.size(); return this.vars.get(idx); }
java
private BigInteger getLower(int n) { if (isZero()) { return BigInteger.ZERO; } else if (intLen < n) { return toBigInteger(1); } else { // strip zeros int len = n; while (len > 0 && value[offset+intLen-len] == 0) len--; int sign = len > 0 ? 1 : 0; return new BigInteger(Arrays.copyOfRange(value, offset+intLen-len, offset+intLen), sign); } }