language
stringclasses
2 values
func_code_string
stringlengths
63
466k
python
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec, value):
    """Push a single analog value (no point status) to an eDNA service/tag.

    :param site_service: "site.service" destination for the data
    :param tag: eDNA tag name only (e.g. ADE1CA01)
    :param time_value: point time in UTC epoch seconds
        (e.g. "1483926416", not "2016/01/01 01:01:01")
    :param msec: additional milliseconds for time_value
    :param value: analog value for the given time
    :return: 0 if the data push is successful (DLL return code)
    """
    # Marshal the Python arguments into the ctypes forms the DLL expects,
    # then hand them straight to the eDNA service call.
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec),
    )
java
/** Lazily resolves and caches the "ProjectDeleted" EClass from the registered LogPackage. */
public EClass getProjectDeleted() {
    if (projectDeletedEClass == null) {
        // Classifier index 17 is fixed by the generated package layout.
        EPackage logPackage = EPackage.Registry.INSTANCE.getEPackage(LogPackage.eNS_URI);
        projectDeletedEClass = (EClass) logPackage.getEClassifiers().get(17);
    }
    return projectDeletedEClass;
}
python
def parse_midi_event(self, fp):
    """Parse a MIDI event.

    Return a dictionary and the number of bytes read.
    """
    chunk_size = 0
    # Read the status byte: high nibble is the event type, low nibble the channel.
    try:
        ec = self.bytes_to_int(fp.read(1))
        chunk_size += 1
        self.bytes_read += 1
    except:
        raise IOError("Couldn't read event type " "and channel data from file.")
    # Get the nibbles
    event_type = (ec & 0xf0) >> 4
    channel = ec & 0x0f
    # I don't know what these events are supposed to do, but I keep finding
    # them. The parser ignores them.
    # NOTE(review): despite the comment above, types < 8 are *raised* as a
    # FormatError, not ignored — confirm which behavior is intended.
    if event_type < 8:
        raise FormatError('Unknown event type %d. Byte %d.' % (event_type,
                          self.bytes_read))
    # Meta events can have strings of variable length
    if event_type == 0x0f:
        try:
            meta_event = self.bytes_to_int(fp.read(1))
            # Length is a variable-width quantity; chunk_delta is how many
            # bytes the varbyte encoding itself consumed.
            (length, chunk_delta) = self.parse_varbyte_as_int(fp)
            data = fp.read(length)
            chunk_size += 1 + chunk_delta + length
            self.bytes_read += 1 + length
        except:
            raise IOError("Couldn't read meta event from file.")
        return ({'event': event_type, 'meta_event': meta_event,
                 'data': data}, chunk_size)
    elif event_type in [12, 13]:
        # Program change and Channel aftertouch events only have one
        # parameter
        try:
            param1 = fp.read(1)
            chunk_size += 1
            self.bytes_read += 1
        except:
            raise IOError("Couldn't read MIDI event parameters from file.")
        param1 = self.bytes_to_int(param1)
        return ({'event': event_type, 'channel': channel,
                 'param1': param1}, chunk_size)
    else:
        # All remaining channel events carry two data bytes.
        try:
            param1 = fp.read(1)
            param2 = fp.read(1)
            chunk_size += 2
            self.bytes_read += 2
        except:
            raise IOError("Couldn't read MIDI event parameters from file.")
        param1 = self.bytes_to_int(param1)
        param2 = self.bytes_to_int(param2)
        return ({'event': event_type, 'channel': channel, 'param1': param1,
                 'param2': param2}, chunk_size)
python
def get_script_iterator(package_name, verbose=False):
    """Collect the ScriptIterator subclasses defined in a package's core module.

    Args:
        package_name: name of the package to search (its ``.core``
            subpackage is explored).
        verbose: if True, print each (module, class name, class) as it is
            inspected.

    Returns:
        dict mapping the top-level package name (e.g. 'pylabcontrol' or
        'b26_toolkit') to the ScriptIterator subclass found there.
    """
    packs = hf.explore_package(package_name + '.core')
    # Fix: removed an unconditional debug print(packs) that was left in;
    # diagnostics are already available via the verbose flag.
    script_iterator = {}
    for p in packs:
        for name, c in inspect.getmembers(importlib.import_module(p),
                                          inspect.isclass):
            if verbose:
                print(p, name, c)
            if issubclass(c, ScriptIterator):
                # Key by the top-level package so callers can tell which
                # package (e.g. pylabcontrol or b26_toolkit) contributed it.
                script_iterator.update({c.__module__.split('.')[0]: c})
    return script_iterator
python
def match_one(template, image, options=None):
    """Find exactly one match of ``template`` inside ``image``.

    :param template: Template Image
    :param image: Search Image
    :param options: Options include
        - features: List of options for each feature
    :return: (Box, Score) Bounding box of the matched object, Heatmap value
    """
    heatmap, scale = multi_feat_match(template, image, options)
    # Best match is the heatmap minimum (distance-like score: lower = better).
    score, _, best_loc, _ = cv.minMaxLoc(heatmap)
    left, top = (scale * coord for coord in best_loc)
    height, width = template.shape[:2]
    return Box(left, top, width, height), score
java
/**
 * Appends the given filters to {@code type}, creating the backing list
 * on first use, and returns {@code this} for chaining.
 */
public AwsSecurityFindingFilters withType(StringFilter... type) {
    if (this.type == null) {
        setType(new java.util.ArrayList<StringFilter>(type.length));
    }
    java.util.Collections.addAll(this.type, type);
    return this;
}
python
def do_py(self, arg):
    """
    ::

        Usage:
           py
           py COMMAND

        Arguments:
            COMMAND   the command to be executed

        Description:
            The command without a parameter will be executed and the
            interactive python mode is entered. The python mode can be
            ended with ``Ctrl-D`` (Unix) / ``Ctrl-Z`` (Windows),
            ``quit()``,'`exit()``. Non-python commands can be issued with
            ``cmd("your command")``. If the python code is located in an
            external file it can be run with ``run("filename.py")``.

            In case a COMMAND is provided it will be executed and the
            python interpreter will return to the command shell.

            This code is copied from Cmd2.
    """
    # Expose this shell instance to the embedded interpreter session.
    self.pystate['self'] = self
    arg = arg.strip()
    # Reuse persistent session state across invocations when enabled.
    localvars = (self.locals_in_py and self.pystate) or {}
    interp = InteractiveConsole(locals=localvars)
    interp.runcode('import sys, os;sys.path.insert(0, os.getcwd())')
    if arg:
        # One-shot mode: run the given command and return to the shell.
        interp.runcode(arg)
    else:
        # Interactive mode: install helper callables into the session.
        def quit():
            raise EmbeddedConsoleExit

        def onecmd(arg):
            return self.onecmd(arg + '\n')

        def run(arg):
            try:
                f = open(arg)
                interp.runcode(f.read())
                f.close()
            # NOTE: Python 2 except syntax — this file predates Python 3.
            except IOError, e:
                self.perror(e)
        self.pystate['quit'] = quit
        self.pystate['exit'] = quit
        self.pystate['cmd'] = onecmd
        self.pystate['run'] = run
        try:
            cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
            # Redirect stdio to this cmd instance's streams for the REPL;
            # Statekeeper remembers the originals so they can be restored.
            keepstate = Statekeeper(sys, ('stdin', 'stdout'))
            sys.stdout = self.stdout
            sys.stdin = self.stdin
            interp.interact(banner="Python %s on %s\n%s\n(%s)\n%s" %
                            (sys.version, sys.platform, cprt,
                             self.__class__.__name__, self.do_py.__doc__))
        except EmbeddedConsoleExit:
            pass
        keepstate.restore()
java
/**
 * Parses a period string into a {@code Weeks} instance.
 * A null string is treated as zero weeks.
 */
@FromString
public static Weeks parseWeeks(String periodStr) {
    if (periodStr == null) {
        return Weeks.ZERO;
    }
    return Weeks.weeks(PARSER.parsePeriod(periodStr).getWeeks());
}
java
/**
 * Lists the available custom classifiers.
 *
 * Builds a GET request against {@code v3/classifiers}, attaches the API
 * version, SDK tracing headers, and the optional {@code verbose} flag,
 * then wires the response into a {@link Classifiers} object.
 */
public ServiceCall<Classifiers> listClassifiers(ListClassifiersOptions listClassifiersOptions) {
    String[] pathSegments = { "v3/classifiers" };
    RequestBuilder builder = RequestBuilder.get(RequestBuilder.constructHttpUrl(getEndPoint(), pathSegments));
    builder.query("version", versionDate);
    // Standard SDK analytics headers are attached to every request.
    Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("watson_vision_combined", "v3", "listClassifiers");
    for (Entry<String, String> header : sdkHeaders.entrySet()) {
        builder.header(header.getKey(), header.getValue());
    }
    builder.header("Accept", "application/json");
    // Options object is optional; only "verbose" is forwarded when set.
    if (listClassifiersOptions != null) {
        if (listClassifiersOptions.verbose() != null) {
            builder.query("verbose", String.valueOf(listClassifiersOptions.verbose()));
        }
    }
    return createServiceCall(builder.build(), ResponseConverterUtils.getObject(Classifiers.class));
}
java
/**
 * Replaces the ordered converter list with a locked (immutable) snapshot of
 * {@code typeConverters} and indexes each converter by name for by-name
 * lookup.
 */
void setTypeConvertersInOrder(Collection<TypeConverter> typeConverters) {
    // Snapshot into a locked list so the published reference never mutates.
    LockableList<TypeConverter> newList = new LockableList<>();
    newList.addAll(typeConverters);
    newList.lock();
    typeConvertersInOrder = newList;
    // update list of converters to allow mentioning type converter by name,
    // class name is used if no name provided
    for (TypeConverter tc : newList) {
        String name = null;
        if (tc instanceof Named) {
            name = ((Named) tc).getName();
        }
        if (null == name) {
            // Fall back to the fully-qualified class name as the key.
            name = tc.getClass().getName();
        }
        typeConverterInstances.put(name, tc);
    }
}
java
/**
 * Returns a view object for every {@code <column-result>} child element.
 */
public List<ColumnResult<SqlResultSetMapping<T>>> getAllColumnResult() {
    List<ColumnResult<SqlResultSetMapping<T>>> results =
            new ArrayList<ColumnResult<SqlResultSetMapping<T>>>();
    // Wrap each matching child node in a ColumnResultImpl facade.
    for (Node node : childNode.get("column-result")) {
        results.add(new ColumnResultImpl<SqlResultSetMapping<T>>(
                this, "column-result", childNode, node));
    }
    return results;
}
python
def _setup_language_variables(self, lang: str):  # pylint: disable=no-self-use
    """Check for language availability and presence of tagger files.

    :param lang: The language argument given to the class.
    :type lang: str
    :rtype : dict
    :return: mapping of tagger key to the absolute path of its model file
    """
    assert lang in TAGGERS.keys(), \
        'POS tagger not available for {0} language.'.format(lang)
    rel_path = os.path.join('~/cltk_data',
                            lang,
                            'model/' + lang + '_models_cltk/taggers/pos')  # pylint: disable=C0301
    path = os.path.expanduser(rel_path)
    tagger_paths = {}
    for tagger_key, tagger_val in TAGGERS[lang].items():
        tagger_path = os.path.join(path, tagger_val)
        # Bug fix: the original message had a single {0} placeholder fed a
        # two-element list, yielding the garbled text "... looking for .".
        assert os.path.isfile(tagger_path), \
            'CLTK linguistics models not available for {0}, looking for {1}.'.format(tagger_val, tagger_path)
        tagger_paths[tagger_key] = tagger_path
    return tagger_paths
java
/**
 * Registers {@code providedService} with the bundle context exactly once,
 * even under concurrent calls. Threads coordinate through the atomic
 * {@code serviceReg} reference, which holds either a ServiceRegistration
 * (done), a CountDownLatch (registration in progress), or null (unclaimed).
 */
public void registerDeferredService(BundleContext bundleContext, Class<?> providedService, Dictionary dict) {
    Object obj = serviceReg.get();
    if (obj instanceof ServiceRegistration<?>) {
        // already registered - nothing to do here
        return;
    }
    if (obj instanceof CountDownLatch) {
        // another thread is in the process of (de)registering - wait for it to finish
        try {
            ((CountDownLatch) obj).await();
            if (serviceReg.get() instanceof ServiceRegistration<?>) {
                // Another thread has successfully registered to return out (so we don't go
                // into recursive loop).
                return;
            }
        } catch (InterruptedException swallowed) {
            if (tc.isDebugEnabled()) {
                Tr.debug(tc, "Count down interrrupted", swallowed);
            }
        }
    } else {
        // This is probably the first thread to register.
        // Claim the right to register by setting a latch for other threads to wait on.
        CountDownLatch latch = new CountDownLatch(1);
        if (serviceReg.compareAndSet(null, latch)) {
            // This thread won the right to register the service
            try {
                serviceReg.set(bundleContext.registerService(providedService.getName(), this, dict));
                // successfully registered - nothing more to do
                return;
            } finally {
                // if the serviceReg was not updated for any reason, we need to set it back to null
                serviceReg.compareAndSet(latch, null);
                // in any case we need to allow any blocked threads to proceed
                latch.countDown();
            }
        }
    }
    // If we get to here we have not successfully registered
    // nor seen another thread successfully register, so just recurse.
    registerDeferredService(bundleContext, providedService, dict);
}
python
def save_cookies(self, ignore_discard=True, ignore_expires=True):
    """Save cookies to the file :attr:`.API.cookies_filename`"""
    # Only a FileCookieJar knows how to persist itself to disk.
    if not isinstance(self.cookies, cookielib.FileCookieJar):
        raise APIError(
            'Cookies must be a cookielib.FileCookieJar object to be saved.')
    self.cookies.save(ignore_discard=ignore_discard,
                      ignore_expires=ignore_expires)
java
/** Ends the transaction branch, normalizing the Xid first when padding is enabled. */
public void end(Xid xid, int flags) throws XAException {
    Xid effectiveXid = pad ? convertXid(xid) : xid;
    xaResource.end(effectiveXid, flags);
}
python
def _get_field_values(item, fldnames, rpt_fmt=None, itemid2name=None): """Return fieldnames and values of either a namedtuple or GOEnrichmentRecord.""" if hasattr(item, "_fldsdefprt"): # Is a GOEnrichmentRecord return item.get_field_values(fldnames, rpt_fmt, itemid2name) if hasattr(item, "_fields"): # Is a namedtuple return [getattr(item, f) for f in fldnames]
java
/**
 * Data-binding adapter: resolves {@code type} to a TypefaceType (empty
 * string when null) and applies the corresponding asset font.
 */
@BindingAdapter("bind:tv_typeface")
public static void setCustomTypeface(TypefaceEditText editText, String type) {
    String key = (type != null) ? type : "";
    editText.mCurrentTypeface = TypefaceType.getTypeface(key);
    editText.setTypeface(getFont(editText.getContext(),
            editText.mCurrentTypeface.getAssetFileName()));
}
java
/**
 * Symmetric left-handed orthographic projection. Delegates to the full
 * overload with {@code zZeroToOne = false} (the [-1..1] NDC depth-range
 * variant) and the destination chosen by {@code thisOrNew()}.
 */
public Matrix4f orthoSymmetricLH(float width, float height, float zNear, float zFar) {
    return orthoSymmetricLH(width, height, zNear, zFar, false, thisOrNew());
}
python
def reboot_autopilot(self, hold_in_bootloader=False):
    '''reboot the autopilot'''
    if not self.mavlink10():
        return
    # param1 convention: 1 = plain reboot, 3 = reboot and hold in bootloader
    param1 = 3 if hold_in_bootloader else 1
    self.mav.command_long_send(self.target_system, self.target_component,
                               mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
                               param1, 0, 0, 0, 0, 0, 0)
    # send an old style reboot immediately afterwards in case it is an older
    # firmware that doesn't understand the new convention
    self.mav.command_long_send(self.target_system, self.target_component,
                               mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
                               1, 0, 0, 0, 0, 0, 0)
python
def timeline_home(self, max_id=None, min_id=None, since_id=None, limit=None):
    """
    Fetch the logged-in users home timeline (i.e. followed users and self).

    Returns a list of `toot dicts`_.
    """
    # Thin wrapper around the generic timeline endpoint.
    return self.timeline(
        'home',
        max_id=max_id,
        min_id=min_id,
        since_id=since_id,
        limit=limit,
    )
python
def bellman_ford(G, seeds, maxiter=None):
    """Bellman-Ford iteration.

    Parameters
    ----------
    G : sparse matrix
        Graph over which distances are computed.
    seeds : array_like of int
        Seed vertices; distances are measured to the nearest seed.
    maxiter : int, optional
        Maximum number of relaxation sweeps. If None, iterate until the
        distances stop changing.

    Returns
    -------
    distances : array
        Distance from each vertex to its nearest seed.
    nearest_seed : array
        Index of the nearest seed for each vertex (-1 until assigned).

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]
    if maxiter is not None and maxiter < 0:
        raise ValueError('maxiter must be positive')
    if G.dtype == complex:
        raise ValueError('Bellman-Ford algorithm only defined for real weights')
    seeds = np.asarray(seeds, dtype='intc')
    distances = np.empty(N, dtype=G.dtype)
    distances[:] = max_value(G.dtype)
    distances[seeds] = 0
    nearest_seed = np.empty(N, dtype='intc')
    nearest_seed[:] = -1
    nearest_seed[seeds] = seeds
    old_distances = np.empty_like(distances)
    iteration = 0  # renamed from `iter`, which shadowed the builtin
    while maxiter is None or iteration < maxiter:
        old_distances[:] = distances
        amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
                              nearest_seed)
        # Converged: one full sweep changed nothing.
        if (old_distances == distances).all():
            break
        # Bug fix: the counter was never incremented, so `maxiter` was
        # silently ignored and the loop always ran to convergence.
        iteration += 1
    return (distances, nearest_seed)
python
def main():
    """Main entry point for CLI commands."""
    options = docopt(__doc__, version=__version__)
    if not options['segment']:
        return
    segment(
        options['<file>'],
        options['--output'],
        options['--target-duration'],
        options['--mpegts'],
    )
python
def extract_from_stream(stream, detections, pad=5.0, length=30.0):
    """
    Extract waveforms for a list of detections from a stream.

    :type stream: obspy.core.stream.Stream
    :param stream: Stream containing the detections.
    :type detections: list
    :param detections: list of eqcorrscan.core.match_filter.detection
    :type pad: float
    :param pad: Pre-detection extract time in seconds.
    :type length: float
    :param length: Total extracted length in seconds.

    :returns:
        list of :class:`obspy.core.stream.Stream`, one for each detection.
    :type: list
    """
    streams = []
    for detection in detections:
        extracted = Stream()
        for pick in detection.event.picks:
            matching = stream.select(station=pick.waveform_id.station_code,
                                     channel=pick.waveform_id.channel_code)
            if len(matching) == 0:
                print('No data in stream for pick:')
                print(pick)
                continue
            # Window starts `pad` seconds before the pick and spans `length`.
            window_start = pick.time - pad
            extracted += matching.slice(starttime=window_start,
                                        endtime=window_start + length).copy()
        streams.append(extracted)
    return streams
python
def _compile(cls, lines): '''Return macro or block name from the current line.''' m = cls.RE_PASTE.match(lines.current) if m is None: raise MacroBlockUsageError( 'Incorrect macro or block usage at line {}, {}\nShould be ' 'something like: #my_macro'.format(lines.pos, lines.current)) return m.group(1)
python
def _get_aug_flow(
    left, left_type, aug_opnode, right, right_type, context, reverse_context
):
    """Get the flow for augmented binary operations.

    The rules are a bit messy:

        * if left and right have the same type, then left.__augop__(right)
          is first tried and then left.__op__(right).
        * if left and right are unrelated typewise, then
          left.__augop__(right) is tried, then left.__op__(right) is tried
          and then right.__rop__(left) is tried.
        * if left is a subtype of right, then left.__augop__(right)
          is tried and then left.__op__(right).
        * if left is a supertype of right, then left.__augop__(right)
          is tried, then right.__rop__(left) and then
          left.__op__(right)

    Returns the candidate methods in the exact order they must be attempted.
    """
    # e.g. "+=" -> binary op "+", augmented op "+="
    bin_op = aug_opnode.op.strip("=")
    aug_op = aug_opnode.op
    if _same_type(left_type, right_type):
        # Same type: augmented first, then the plain binary op.
        methods = [
            _aug_op(left, aug_opnode, aug_op, right, context),
            _bin_op(left, aug_opnode, bin_op, right, context),
        ]
    elif helpers.is_subtype(left_type, right_type):
        # Subtype: the reflected op on right is never consulted.
        methods = [
            _aug_op(left, aug_opnode, aug_op, right, context),
            _bin_op(left, aug_opnode, bin_op, right, context),
        ]
    elif helpers.is_supertype(left_type, right_type):
        # Supertype: the subclass's reflected op takes precedence over
        # left's own binary op.
        methods = [
            _aug_op(left, aug_opnode, aug_op, right, context),
            _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True),
            _bin_op(left, aug_opnode, bin_op, right, context),
        ]
    else:
        # Unrelated types: reflected op is the last resort.
        methods = [
            _aug_op(left, aug_opnode, aug_op, right, context),
            _bin_op(left, aug_opnode, bin_op, right, context),
            _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True),
        ]
    return methods
java
private void addGroup(List<Token> group, List<List<Token>> groups) { if(group.isEmpty()) return; // remove trailing tokens that should be ignored while(!group.isEmpty() && IGNORED_TRAILING_TOKENS.contains( group.get(group.size() - 1).getType())) { group.remove(group.size() - 1); } // if the group still has some tokens left, we'll add it to our list of groups if(!group.isEmpty()) { groups.add(group); } }
java
/**
 * Setter for the {@code qualifierNameMajorTopic} boolean feature.
 * JCas-generated accessor (UIMA): verifies the feature exists on the
 * type system, then writes the value through the low-level CAS API.
 */
public void setQualifierNameMajorTopic(boolean v) {
    if (MeshHeading_Type.featOkTst && ((MeshHeading_Type)jcasType).casFeat_qualifierNameMajorTopic == null)
        jcasType.jcas.throwFeatMissing("qualifierNameMajorTopic", "de.julielab.jules.types.MeshHeading");
    jcasType.ll_cas.ll_setBooleanValue(addr, ((MeshHeading_Type)jcasType).casFeatCode_qualifierNameMajorTopic, v);}
java
/**
 * Returns whether the entry at {@code index} is an enabled {@link Item};
 * non-Item entries (e.g. separators) are never enabled.
 */
public final boolean isItemEnabled(final int index) {
    AbstractItem entry = items.get(index);
    if (!(entry instanceof Item)) {
        return false;
    }
    return ((Item) entry).isEnabled();
}
java
/**
 * Gets the certificate associated with a certificate order asynchronously,
 * unwrapping the service-response envelope so subscribers receive only the
 * body.
 */
public Observable<AppServiceCertificateResourceInner> getCertificateAsync(String resourceGroupName, String certificateOrderName, String name) {
    return getCertificateWithServiceResponseAsync(resourceGroupName, certificateOrderName, name).map(new Func1<ServiceResponse<AppServiceCertificateResourceInner>, AppServiceCertificateResourceInner>() {
        @Override
        public AppServiceCertificateResourceInner call(ServiceResponse<AppServiceCertificateResourceInner> response) {
            // Strip the REST envelope; callers only need the payload.
            return response.body();
        }
    });
}
java
public boolean isTriggeringEvent( final Appender appender, final LoggingEvent event, final String file, final long fileLength) { //System.out.println("Size"+file.length()); return (fileLength >= maxFileSize); }
python
def _group_range(records, method):
    """
    Yield the range of all dates between the extrema of a list of records,
    separated by a given time delta.

    :param records: chronologically ordered records; the first and last
        entries define the range.
    :param method: grouping granularity, a key of ``DATE_GROUPERS``
        (e.g. "day", "week", "month", "year").
    """
    start_date = records[0].datetime
    end_date = records[-1].datetime
    _fun = DATE_GROUPERS[method]
    d = start_date
    # Day and week use timedelta
    if method not in ["month", "year"]:
        def increment(i):
            return i + timedelta(**{method + 's': 1})
    elif method == "month":
        def increment(i):
            # Bug fix: the original closed over the loop variable ``d``
            # instead of its argument ``i``. Harmless at the current call
            # site (increment is only ever called as increment(d)), but a
            # latent bug for any other use.
            year, month = divmod(i.month + 1, 12)
            if month == 0:
                month = 12
                year = year - 1
            # NOTE(review): replace() raises ValueError when the current day
            # does not exist in the target month (e.g. Jan 31 -> Feb);
            # confirm inputs are month-aligned.
            return i.replace(year=i.year + year, month=month)
    elif method == "year":
        def increment(i):
            return i.replace(year=i.year + 1)
    while _fun(d) <= _fun(end_date):
        yield d
        d = increment(d)
java
/**
 * Logs {@code message} with alternating key/value pairs, e.g.
 * {@code log("loaded", "count", 5, err)}. An odd trailing argument is
 * treated as an error object. Output goes to the Firebug console in
 * compiled (script) mode and to {@code GWT.log} in dev mode.
 */
public static void log (String message, Object... args) {
    StringBuilder sb = new StringBuilder();
    sb.append(message);
    // Format key/value pairs as " [k1=v1, k2=v2]".
    if (args.length > 1) {
        sb.append(" [");
        for (int ii = 0, ll = args.length/2; ii < ll; ii++) {
            if (ii > 0) {
                sb.append(", ");
            }
            sb.append(args[2*ii]).append("=").append(args[2*ii+1]);
        }
        sb.append("]");
    }
    // An odd argument count means the final argument is the error.
    Object error = (args.length % 2 == 1) ? args[args.length-1] : null;
    if (GWT.isScript()) {
        if (error != null) {
            sb.append(": ").append(error);
        }
        firebugLog(sb.toString(), error);
    } else {
        // Dev mode: GWT.log accepts the error as a Throwable.
        GWT.log(sb.toString(), (Throwable)error);
    }
}
python
def _count_localizations(df):
    """
    Count the most likely localization for each dependent peptide.

    :param df: allPeptides.txt table.
    :return: per-group best localization as produced by
        ``_frequent_localizations``.
    """
    grp = df.groupby(_index_columns)
    # 'DP AA' holds ';'-separated localization candidates; split them and
    # tally occurrences per group (``count`` is a module-level helper).
    counts = grp['DP AA'].apply(lambda x: count(x.str.split(';').values))
    # The apply above introduces the amino-acid value as an extra index
    # level (level 4); name it so it can be addressed after reset_index.
    counts.index = counts.index.set_names('DP AA', level=4)
    counts.name = 'DP AA count'
    # Pick the most frequent localization per peptide group.
    best_localization = counts.reset_index().groupby(_index_columns).apply(_frequent_localizations)
    return best_localization
python
def resolve_peer(self, peer_id: Union[int, str]):
    """Use this method to get the InputPeer of a known peer_id.

    This is a utility method intended to be used **only** when working with
    Raw Functions (i.e: a Telegram API method you wish to use which is not
    available yet in the Client class as an easy-to-use method), whenever an
    InputPeer type is required.

    Args:
        peer_id (``int`` | ``str``):
            The peer id you want to extract the InputPeer from.
            Can be a direct id (int), a username (str) or a phone number (str).

    Returns:
        On success, the resolved peer id is returned in form of an InputPeer
        object.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``KeyError`` in case the peer doesn't exist in the internal database.
    """
    # Fast path: peer already cached by id.
    try:
        return self.peers_by_id[peer_id]
    except KeyError:
        if type(peer_id) is str:
            if peer_id in ("self", "me"):
                return types.InputPeerSelf()
            # Normalize usernames/phone numbers: strip @, + and whitespace.
            peer_id = re.sub(r"[@+\s]", "", peer_id.lower())
            try:
                int(peer_id)
            except ValueError:
                # Not numeric -> treat as a username; resolve via the API
                # only on cache miss.
                if peer_id not in self.peers_by_username:
                    self.send(
                        functions.contacts.ResolveUsername(
                            username=peer_id
                        )
                    )
                return self.peers_by_username[peer_id]
            else:
                # Numeric string -> phone-number lookup (cache only).
                try:
                    return self.peers_by_phone[peer_id]
                except KeyError:
                    raise PeerIdInvalid
        # Integer ids: positive -> user, negative -> chat/channel.
        if peer_id > 0:
            self.fetch_peers(
                self.send(
                    functions.users.GetUsers(
                        id=[types.InputUser(user_id=peer_id, access_hash=0)]
                    )
                )
            )
        else:
            # "-100" prefix marks channel/supergroup ids; the remainder is
            # the bare channel id.
            if str(peer_id).startswith("-100"):
                self.send(
                    functions.channels.GetChannels(
                        id=[types.InputChannel(channel_id=int(str(peer_id)[4:]), access_hash=0)]
                    )
                )
            else:
                self.send(
                    functions.messages.GetChats(
                        id=[-peer_id]
                    )
                )
        # The fetches above populate the cache; a second miss is fatal.
        try:
            return self.peers_by_id[peer_id]
        except KeyError:
            raise PeerIdInvalid
java
/** Closes the Eclipse welcome/intro page if one is currently showing. */
protected static void closeWelcomePage() {
    final IIntroManager introManager = PlatformUI.getWorkbench().getIntroManager();
    if (introManager == null) {
        return;
    }
    final IIntroPart intro = introManager.getIntro();
    if (intro != null) {
        introManager.closeIntro(intro);
    }
}
java
/**
 * Returns the body node of a FUNCTION node (its last child).
 *
 * @throws IllegalArgumentException if {@code fn} is not a function node
 */
public static Node getFunctionBody(Node fn) {
    checkArgument(fn.isFunction(), fn);
    return fn.getLastChild();
}
java
public CmsSite matchSite(CmsSiteMatcher matcher) { CmsSite site = m_siteMatcherSites.get(matcher); if (site == null) { // return the default site (might be null as well) site = m_defaultSite; } return site; }
java
public boolean isTransientException (SQLException sqe) { // TODO: Add more error messages here as we encounter them. String msg = sqe.getMessage(); return (msg != null && msg.indexOf("An I/O error occured while sending to the backend") != -1); }
java
/**
 * Loads TZDB versions from the stream into the {@code versions} map,
 * keeping any version already present (first loader wins).
 *
 * @return true if at least one version entry was processed
 */
private boolean load(InputStream in) throws IOException, StreamCorruptedException {
    boolean updated = false;
    Iterable<Version> loadedVersions = loadData(in);
    for (Version loadedVersion : loadedVersions) {
        // see https://github.com/ThreeTen/threetenbp/pull/28 for issue wrt
        // multiple versions of lib on classpath
        Version existing = versions.putIfAbsent(loadedVersion.versionId, loadedVersion);
        // NOTE(review): since the map is keyed by versionId, an existing
        // entry under the same key should always have an equal versionId,
        // which appears to make this throw unreachable — confirm intent.
        if (existing != null && !existing.versionId.equals(loadedVersion.versionId)) {
            throw new ZoneRulesException("Data already loaded for TZDB time-zone rules version: " + loadedVersion.versionId);
        }
        updated = true;
    }
    return updated;
}
java
public static boolean sequenceEqualConstantTime(byte[] self, byte[] other) { if (self == null) { throw new IllegalArgumentException("self"); } if (other == null) { throw new IllegalArgumentException("other"); } // Constant time comparison of two byte arrays long difference = (self.length & 0xffffffffL) ^ (other.length & 0xffffffffL); for (int i = 0; i < self.length && i < other.length; i++) { difference |= (self[i] ^ other[i]) & 0xffffffffL; } return difference == 0; }
java
/**
 * POSTs a checkout for the given cart and maps the JSON reply to an
 * {@link OvhOrder}.
 */
public OvhOrder cart_cartId_checkout_POST(String cartId, Boolean autoPayWithPreferredPaymentMethod, Boolean waiveRetractationPeriod) throws IOException {
    String qPath = "/order/cart/{cartId}/checkout";
    StringBuilder sb = path(qPath, cartId);
    // Optional flags are only serialized when non-null (addBody contract).
    HashMap<String, Object> body = new HashMap<String, Object>();
    addBody(body, "autoPayWithPreferredPaymentMethod", autoPayWithPreferredPaymentMethod);
    addBody(body, "waiveRetractationPeriod", waiveRetractationPeriod);
    String resp = exec(qPath, "POST", sb.toString(), body);
    return convertTo(resp, OvhOrder.class);
}
java
/**
 * Checks whether a service description is registered by issuing a SPARQL
 * ASK against the graph derived from the service URI.
 *
 * @return true only when the service triple exists; false for invalid or
 *         unmanaged URIs
 */
@Override
public boolean serviceExists(URI serviceUri) throws ServiceException {
    // Only absolute URIs can be mapped to a named graph.
    if (serviceUri == null || !serviceUri.isAbsolute()) {
        log.warn("The Service URI is either absent or relative. Provide an absolute URI");
        return false;
    }
    URI graphUri;
    try {
        graphUri = getGraphUriForElement(serviceUri);
    } catch (URISyntaxException e) {
        log.warn("The namespace of the URI of the message content is incorrect.", e);
        return Boolean.FALSE;
    }
    if (graphUri == null) {
        log.warn("Could not obtain a graph URI for the element. The URI may not be managed by the server - " + serviceUri);
        return Boolean.FALSE;
    }
    // ASK whether <serviceUri> rdf:type msm:Service inside the graph.
    String queryStr = new StringBuilder()
            .append("ASK { \n")
            .append("GRAPH <").append(graphUri.toASCIIString()).append("> {")
            .append("<").append(serviceUri.toASCIIString()).append("> <").append(RDF.type.getURI()).append("> <").append(MSM.Service).append("> }\n}").toString();
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.graphStoreManager.getSparqlQueryEndpoint().toASCIIString(), query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);
    try {
        return qexec.execAsk();
    } finally {
        // Always release the query execution's resources.
        qexec.close();
    }
}
java
/**
 * Sets the label detections, taking a defensive copy of the given
 * collection; a null argument clears the field.
 */
public void setLabels(java.util.Collection<LabelDetection> labels) {
    this.labels = (labels == null)
            ? null
            : new java.util.ArrayList<LabelDetection>(labels);
}
java
/**
 * Builds the binding strategies for default repositories: resolves the
 * generic type parameters (and default qualifier, if any) for every
 * aggregate class, then emits one {@link GenericBindingStrategy} per
 * default repository implementation covering all of them.
 */
Collection<BindingStrategy> collectFromAggregates(Collection<Class<? extends AggregateRoot>> aggregateClasses) {
    Collection<BindingStrategy> bindingStrategies = new ArrayList<>();
    Map<Type[], Key<?>> allGenerics = new HashMap<>();
    // Superclasses are included so inherited aggregates get bindings too.
    for (Class<? extends AggregateRoot<?>> aggregateClass : BusinessUtils.includeSuperClasses(aggregateClasses)) {
        Type[] generics = getTypes(aggregateClass);
        TypeLiteral<?> genericInterface = TypeLiteral.get(newParameterizedType(Repository.class, generics));
        allGenerics.put(generics, resolveDefaultQualifier(
                bindings,
                application.getConfiguration(aggregateClass),
                DEFAULT_REPOSITORY_KEY,
                aggregateClass,
                genericInterface
        ).orElse(null)
        );
    }
    // Create a binding strategy for each default repository implementation
    for (Class<? extends Repository> defaultRepoImpl : defaultRepositoryImplementations) {
        bindingStrategies.add(new GenericBindingStrategy<>(
                Repository.class,
                defaultRepoImpl,
                allGenerics)
        );
    }
    return bindingStrategies;
}
java
/**
 * Sets the Bundle-Activator header, marking the manifest modified only
 * when the value actually changes.
 */
public void setBundleActivator(String bundleActivator) {
    String previous = mainAttributes.get(BUNDLE_ACTIVATOR);
    // NOTE(review): a null argument NPEs here; confirm callers never pass null.
    if (bundleActivator.equals(previous)) {
        return;
    }
    this.mainAttributes.put(BUNDLE_ACTIVATOR, bundleActivator);
    this.modified = true;
    this.bundleActivator = bundleActivator;
}
java
/** Lazily resolves and caches the "RemoteServiceCalled" EClass from the registered LogPackage. */
public EClass getRemoteServiceCalled() {
    if (remoteServiceCalledEClass == null) {
        // Classifier index 31 is fixed by the generated package layout.
        EPackage logPackage = EPackage.Registry.INSTANCE.getEPackage(LogPackage.eNS_URI);
        remoteServiceCalledEClass = (EClass) logPackage.getEClassifiers().get(31);
    }
    return remoteServiceCalledEClass;
}
java
/**
 * Sends an IoT "set" request to the given JID and blocks for the response
 * stanza (or throws on error/timeout).
 */
public IoTSetResponse setUsingIq(FullJid jid, Collection<? extends SetData> data) throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException {
    IoTSetRequest request = new IoTSetRequest(data);
    request.setTo(jid);
    return connection().createStanzaCollectorAndSend(request).nextResultOrThrow();
}
java
/**
 * Reads a string map from the wire: a varint entry count followed by that
 * many key/value string pairs, accumulated into {@code map}.
 */
public Map<String, String> getStringMap(final Map<String, String> map) {
    final int count = getVInt();
    for (int i = 0; i < count; i++) {
        // The key is always serialized before its value.
        final String key = getString();
        map.put(key, getString());
    }
    return map;
}
java
/**
 * Blocks until the element located by {@code by} is present in the DOM,
 * or {@code maximumSeconds} elapse (then a timeout exception propagates).
 */
public void waitForElementPresent(final By by, final int maximumSeconds) {
    new WebDriverWait(driver, maximumSeconds)
            .until(ExpectedConditions.presenceOfElementLocated(by));
}
java
/**
 * Eagerly fetches the given rows: resolves the first level lazily, then
 * recursively follows references up to {@code FETCH_DEPTH_MAX}.
 */
public HBeanRowCollector getEager(Set<HBeanRow> rows, FetchType... fetchType) throws HBeanNotFoundException {
    Set<HBeanRow> firstLevel = getLazy(rows, fetchType);
    HBeanRowCollector collector = new HBeanRowCollector(firstLevel);
    getEager(firstLevel, collector, FETCH_DEPTH_MAX, fetchType);
    return collector;
}
java
/**
 * Compares two time-based (version 1) UUIDs by their embedded timestamps.
 *
 * @return negative, zero, or positive as uuid1's timestamp is less than,
 *         equal to, or greater than uuid2's
 * @throws UnsupportedOperationException if either UUID is not time-based
 */
public static int compareTimestamps(UUID uuid1, UUID uuid2) {
    // Long.compare (JDK 7+) replaces the Guava Longs.compare dependency.
    return Long.compare(uuid1.timestamp(), uuid2.timestamp());
}
java
/**
 * Builds an executor filter that rejects executors which are null, have no
 * statistics available, or report zero remaining flow capacity.
 */
private static FactorFilter<Executor, ExecutableFlow> getStaticRemainingFlowSizeFilter() {
    return FactorFilter
            .create(STATICREMAININGFLOWSIZE_FILTER_NAME, (filteringTarget, referencingObject) -> {
                if (null == filteringTarget) {
                    // Defensive: a null executor can never host a flow.
                    logger.debug(String.format("%s : filtering out the target as it is null.",
                            STATICREMAININGFLOWSIZE_FILTER_NAME));
                    return false;
                }
                final ExecutorInfo stats = filteringTarget.getExecutorInfo();
                if (null == stats) {
                    // Without stats we cannot prove capacity, so reject.
                    logger.debug(String.format("%s : filtering out %s as it's stats is unavailable.",
                            STATICREMAININGFLOWSIZE_FILTER_NAME,
                            filteringTarget.toString()));
                    return false;
                }
                return stats.getRemainingFlowCapacity() > 0;
            });
}
java
/**
 * Registers every referenced tuple as a once-only combination and returns
 * this builder for chaining.
 */
public TupleCombinerBuilder once(Stream<TupleRef> tupleRefs) {
    tupleRefs.forEach(tupleCombiner_::addOnceTuple);
    return this;
}
java
/**
 * JAXB element factory for {@code gml:degrees}: wraps a
 * {@link DegreesType} value in its qualified JAXBElement.
 */
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "degrees")
public JAXBElement<DegreesType> createDegrees(DegreesType value) {
    return new JAXBElement<DegreesType>(_Degrees_QNAME, DegreesType.class, null, value);
}
java
/**
 * Packs an {@link UnfilledDependency} into a single long via
 * {@code marshalUnfilledDependency}. Filled subject/object slots encode
 * the head's value index offset by {@code MAX_ARG_NUM} (distinguishing
 * them from raw argument indices); unfilled slots store the argument
 * index directly with zeroed word/syntax fields.
 */
public long unfilledDependencyToLong(UnfilledDependency dep) {
    long argNum = dep.getArgumentIndex();
    long objectNum, objectWordInd, subjectNum, subjectWordInd, subjectSyntaxNum;
    if (dep.hasObject()) {
        // Filled object: encode head index (+ offset) and its word position.
        IndexedPredicate obj = dep.getObject();
        objectNum = dependencyHeadType.getValueIndex(obj.getHead()) + MAX_ARG_NUM;
        objectWordInd = obj.getHeadIndex();
    } else {
        objectNum = dep.getObjectIndex();
        objectWordInd = 0L;
    }
    if (dep.hasSubject()) {
        // Filled subject: also record its syntactic category index.
        IndexedPredicate sbj = dep.getSubject();
        subjectNum = dependencyHeadType.getValueIndex(sbj.getHead()) + MAX_ARG_NUM;
        subjectWordInd = sbj.getHeadIndex();
        subjectSyntaxNum = dependencySyntaxType.getValueIndex(dep.getSubjectSyntax());
    } else {
        subjectNum = dep.getSubjectIndex();
        subjectSyntaxNum = 0L;
        subjectWordInd = 0L;
    }
    return marshalUnfilledDependency(objectNum, argNum, subjectNum, subjectSyntaxNum, objectWordInd, subjectWordInd);
}
java
/**
 * Partitions this round's annotated elements (plus previously deferred
 * ones) into those that validate now, recording the rest by name in
 * {@code deferredElementNames} for a later round. Validation is performed
 * on the enclosing package/type so an element is accepted only once its
 * whole enclosing scope resolves.
 */
private ImmutableSetMultimap<Class<? extends Annotation>, Element> validElements(
        ImmutableMap<String, Optional<? extends Element>> deferredElements,
        RoundEnvironment roundEnv) {
    ImmutableSetMultimap.Builder<Class<? extends Annotation>, Element>
            deferredElementsByAnnotationBuilder = ImmutableSetMultimap.builder();
    // Re-collect annotated elements from the previous round's deferred set;
    // elements that no longer resolve stay deferred by name.
    for (Entry<String, Optional<? extends Element>> deferredTypeElementEntry :
            deferredElements.entrySet()) {
        Optional<? extends Element> deferredElement = deferredTypeElementEntry.getValue();
        if (deferredElement.isPresent()) {
            findAnnotatedElements(
                    deferredElement.get(),
                    getSupportedAnnotationClasses(),
                    deferredElementsByAnnotationBuilder);
        } else {
            deferredElementNames.add(ElementName.forTypeName(deferredTypeElementEntry.getKey()));
        }
    }
    ImmutableSetMultimap<Class<? extends Annotation>, Element> deferredElementsByAnnotation =
            deferredElementsByAnnotationBuilder.build();
    ImmutableSetMultimap.Builder<Class<? extends Annotation>, Element> validElements =
            ImmutableSetMultimap.builder();
    Set<ElementName> validElementNames = new LinkedHashSet<ElementName>();
    // Look at the elements we've found and the new elements from this round and validate them.
    for (Class<? extends Annotation> annotationClass : getSupportedAnnotationClasses()) {
        // This should just call roundEnv.getElementsAnnotatedWith(Class) directly, but there is a bug
        // in some versions of eclipse that cause that method to crash.
        TypeElement annotationType = elements.getTypeElement(annotationClass.getCanonicalName());
        Set<? extends Element> elementsAnnotatedWith = (annotationType == null)
                ? ImmutableSet.<Element>of()
                : roundEnv.getElementsAnnotatedWith(annotationType);
        for (Element annotatedElement :
                Sets.union(elementsAnnotatedWith, deferredElementsByAnnotation.get(annotationClass))) {
            if (annotatedElement.getKind().equals(PACKAGE)) {
                PackageElement annotatedPackageElement = (PackageElement) annotatedElement;
                ElementName annotatedPackageName =
                        ElementName.forPackageName(annotatedPackageElement.getQualifiedName().toString());
                // Valid if already proven valid, or not deferred and
                // freshly validating now.
                boolean validPackage = validElementNames.contains(annotatedPackageName)
                        || (!deferredElementNames.contains(annotatedPackageName)
                            && validateElement(annotatedPackageElement));
                if (validPackage) {
                    validElements.put(annotationClass, annotatedPackageElement);
                    validElementNames.add(annotatedPackageName);
                } else {
                    deferredElementNames.add(annotatedPackageName);
                }
            } else {
                // Non-package elements are validated via their enclosing type.
                TypeElement enclosingType = getEnclosingType(annotatedElement);
                ElementName enclosingTypeName =
                        ElementName.forTypeName(enclosingType.getQualifiedName().toString());
                boolean validEnclosingType = validElementNames.contains(enclosingTypeName)
                        || (!deferredElementNames.contains(enclosingTypeName)
                            && validateElement(enclosingType));
                if (validEnclosingType) {
                    validElements.put(annotationClass, annotatedElement);
                    validElementNames.add(enclosingTypeName);
                } else {
                    deferredElementNames.add(enclosingTypeName);
                }
            }
        }
    }
    return validElements.build();
}
python
def bindings(self, queue, virtual_host='/'):
    """Get Queue bindings.

    :param str queue: Queue name
    :param str virtual_host: Virtual host name

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: list
    """
    # The virtual host must be fully percent-encoded ('' = no safe chars).
    encoded_vhost = quote(virtual_host, '')
    return self.http_client.get(API_QUEUE_BINDINGS % (encoded_vhost, queue))
python
def tuples_to_coll(cls, generator, coerce=False):
    """*required virtual method* This class method, part of the sub-class
    API, converts a generator of ``(K, V)`` tuples (the *tuple protocol*)
    to one of the underlying collection type.
    """
    # The abstract base has nothing to build; a subclass that reaches this
    # implementation forgot to override it, which is a definition error.
    if cls == Collection:
        return None
    raise exc.CollectionDefinitionError(
        property='tuples_to_coll',
        coll='Collection',
    )
java
/**
 * Initializes this object with its delete-flag field and detail record, then
 * delegates the remaining setup to the superclass.
 *
 * @param record the main record this object is attached to
 * @param fldDeleteFlag the field that carries the delete flag
 * @param recDetail the detail record associated with this behavior
 */
public void init(Record record, BaseField fldDeleteFlag, Record recDetail)
{
    // Store the collaborators before super.init(), in case superclass
    // initialization triggers logic that reads them.
    m_fldDeleteFlag = fldDeleteFlag;
    m_recDetail = recDetail;
    super.init(record);
}
java
/**
 * Restores a trashed folder back to its original location.
 *
 * @param folderID the ID of the trashed folder
 * @return info about the restored folder
 */
public BoxFolder.Info restoreFolder(String folderID) {
    URL endpoint = RESTORE_FOLDER_URL_TEMPLATE.build(this.api.getBaseURL(), folderID);
    BoxAPIRequest restoreRequest = new BoxAPIRequest(this.api, endpoint, "POST");

    // The API expects a (deliberately) empty JSON body for a plain restore.
    restoreRequest.setBody(new JsonObject().add("", "").toString());

    BoxJSONResponse restoreResponse = (BoxJSONResponse) restoreRequest.send();
    JsonObject folderJSON = JsonObject.readFrom(restoreResponse.getJSON());

    BoxFolder restored = new BoxFolder(this.api, folderJSON.get("id").asString());
    return restored.new Info(folderJSON);
}
python
def save_attributes(self):
    """Write the tracked attributes to the attributes file without closing it.

    Should probably be called after the attribute holder sets/overrides the
    attributes in initialization, since ``__del__`` is not guaranteed to run.

    Raises
    ------
    AttributeError
        If there is no valid backing file, or the data is read-only.
    """
    if not self.saveable():
        raise AttributeError("Cannot save attribute file without a valid file")
    if self._read_only:
        raise AttributeError("Cannot save read-only data")
    if self._db_closed:
        return
    # Rewrite the backing store from scratch with the current target values.
    self._fd.clear()
    for name in self._saved_attrs:
        self._fd[name] = getattr(self._target, name)
python
def send_and_return_status(self,
                           send,
                           expect=None,
                           shutit_pexpect_child=None,
                           timeout=None,
                           fail_on_empty_before=True,
                           record_command=True,
                           exit_values=None,
                           echo=None,
                           escape=False,
                           retry=3,
                           note=None,
                           assume_gnu=True,
                           follow_on_commands=None,
                           loglevel=logging.INFO):
    """Send a command on the (current or given) pexpect session and report
    whether it exited successfully.

    The command is sent with check_exit=False and the exit status is then
    queried separately via check_last_exit_values(..., retbool=True).

    :param send: the command to send
    :param expect: optional expect pattern(s) forwarded to the session
    :param shutit_pexpect_child: pexpect child to use; defaults to the
        current session's child
    :param retry: number of retries, used both for sending and for the
        exit-value check
    :return: True if a good exit code was received (usually 0)
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's pexpect child when none is given.
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    # Send without checking the exit code here; it is checked just below.
    shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
                                               send=send,
                                               expect=expect,
                                               timeout=timeout,
                                               check_exit=False,
                                               fail_on_empty_before=fail_on_empty_before,
                                               record_command=record_command,
                                               exit_values=exit_values,
                                               echo=echo,
                                               escape=escape,
                                               retry=retry,
                                               note=note,
                                               assume_gnu=assume_gnu,
                                               loglevel=loglevel,
                                               follow_on_commands=follow_on_commands))
    return shutit_pexpect_session.check_last_exit_values(send,
                                                         check_exit=True,
                                                         expect=expect,
                                                         exit_values=exit_values,
                                                         retry=retry,
                                                         retbool=True)
python
def check_paths(self, paths, update=None):
    """Ensure every named path from a section is present in os.environ.

    Parameters
    ----------
    paths : OrderedDict
        Mapping of name -> filesystem path from a given section.
    update : bool
        If True, overwrite existing tree environment variables in the
        local environment (unless the name is excluded). Default is False.
    """
    # Normalise the exclusion setting into a plain list of names.
    if not self.exclude:
        excluded = []
    elif isinstance(self.exclude, list):
        excluded = self.exclude
    else:
        excluded = [self.exclude]

    for name, path in paths.items():
        env_key = name.upper()
        if update and env_key not in excluded:
            os.environ[env_key] = os.path.normpath(path)
        elif env_key not in os.environ:
            os.environ[env_key] = os.path.normpath(path)
java
/**
 * Returns the service reference for the named session.
 *
 * @param name the session name
 * @return the session's service reference
 */
@Override
public ServiceRefAmp session(String name)
{
    // Session addresses follow the "session:///<name>/" scheme.
    return sessionImpl("session:///" + name + "/");
}
python
def StreamMetrics(self, request_iterator, context):
    """Dispatches metrics streamed by collector.

    gRPC bidirectional-stream handler: reads the collect arguments from the
    first request, starts the plugin's collection in a background thread, and
    yields batched CollectReply messages until the client disconnects.
    """
    LOG.debug("StreamMetrics called")

    # set up arguments from the first request on the stream
    collect_args = (next(request_iterator))
    max_metrics_buffer = 0
    max_collect_duration = 0
    # Schedule parameters ride along in the config of the first metric.
    cfg = Metric(pb=collect_args.Metrics_Arg.metrics[0])
    try:
        max_metrics_buffer = int(cfg.config["max-metrics-buffer"])
    except Exception as ex:
        LOG.debug("Unable to get schedule parameters: {}".format(ex))
    try:
        max_collect_duration = int(cfg.config["max-collect-duration"])
    except Exception as ex:
        LOG.debug("Unable to get schedule parameters: {}".format(ex))
    if max_metrics_buffer > 0:
        self.max_metrics_buffer = max_metrics_buffer
    if max_collect_duration > 0:
        self.max_collect_duration = max_collect_duration

    # start collection thread; it feeds self.metrics_queue
    thread = threading.Thread(target=self._stream_wrapper,
                              args=(collect_args,),)
    thread.daemon = True
    thread.start()

    # stream metrics
    metrics = []
    metrics_to_stream = []
    stream_timeout = self.max_collect_duration
    while context.is_active():
        try:
            # wait for metrics until timeout is reached
            t_start = time.time()
            metrics = self.metrics_queue.get(block=True, timeout=stream_timeout)
            # shrink the remaining window by the time spent waiting
            elapsed = round(time.time() - t_start)
            stream_timeout -= elapsed
        except queue.Empty:
            # timeout hit: flush whatever has been buffered so far
            LOG.debug("Max collect duration exceeded. Streaming {} metrics".format(len(metrics_to_stream)))
            metrics_col = CollectReply(Metrics_Reply=MetricsReply(metrics=[m.pb for m in metrics_to_stream]))
            metrics_to_stream = []
            stream_timeout = self.max_collect_duration
            yield metrics_col
        else:
            for metric in metrics:
                metrics_to_stream.append(metric)
                if len(metrics_to_stream) == self.max_metrics_buffer:
                    LOG.debug("Max metrics buffer reached. Streaming {} metrics".format(len(metrics_to_stream)))
                    metrics_col = CollectReply(
                        Metrics_Reply=MetricsReply(metrics=[m.pb for m in metrics_to_stream]))
                    metrics_to_stream = []
                    stream_timeout = self.max_collect_duration
                    yield metrics_col
            # stream metrics if max_metrics_buffer is 0 or enough metrics has been collected
            if self.max_metrics_buffer == 0:
                LOG.debug("Max metrics buffer set to 0. Streaming {} metrics".format(len(metrics_to_stream)))
                metrics_col = CollectReply(Metrics_Reply=MetricsReply(metrics=[m.pb for m in metrics_to_stream]))
                metrics_to_stream = []
                stream_timeout = self.max_collect_duration
                yield metrics_col

    # sent notification if stream has been stopped
    self.done_queue.put(True)
java
/**
 * Removes the mapping for {@code key} and returns the previous value, or
 * {@code null} when the key was absent.
 *
 * <p>The key's hash selects a segment; the segment performs the removal and
 * returns the allocation index of the removed entry (a value greater than 0
 * means an entry was actually removed). The old value is read from that slot
 * before the slot is erased.</p>
 */
@Override
public final V remove(Object key) {
    long hash, allocIndex;
    Segment<K, V> segment;
    V oldValue;
    // Compute hash -> segment -> remove, in one expression so the segment
    // reference is available for reading/erasing the removed slot.
    if ((allocIndex = (segment = segment(segmentIndex(hash = keyHashCode(key))))
            .remove(this, hash, key, null, false)) > 0) {
        oldValue = segment.readValue(allocIndex);
        segment.eraseAlloc(allocIndex);
        return oldValue;
    }
    return null;
}
python
def get_path_and_qs(self):
    """
    Parse and obtain the path and query values.
    We don't care about fragments.

    Return {'path': ..., 'qs_values': ..., 'parts': ...} on success
    Return {'error': ...} on error
    """
    # Split off the query string (everything after the first '?').
    path_parts = self.path.split("?", 1)

    if len(path_parts) > 1:
        # Strip any fragment from the query string.
        qs = path_parts[1].split("#", 1)[0]
    else:
        qs = ""

    # Strip any fragment from the path too.
    path = path_parts[0].split("#", 1)[0]
    # NOTE(review): urllib.unquote is Python 2 API; on Python 3 this would
    # be urllib.parse.unquote -- assumed to run under Python 2.
    path = posixpath.normpath(urllib.unquote(path))

    qs_values = self.parse_qs( qs )
    if qs_values is None:
        return {'error': 'Failed to parse query string'}

    # Path components, without leading/trailing slashes.
    parts = path.strip('/').split('/')

    return {'path': path, 'qs_values': qs_values, 'parts': parts}
java
/**
 * Asserts that the given string is neither {@code null} nor empty and returns it.
 *
 * @param text the string to check
 * @param errorMsgTemplate message template used when the check fails
 * @param params parameters substituted into the template
 * @return {@code text}, unchanged, when it is non-empty
 * @throws IllegalArgumentException when {@code text} is null or empty
 */
public static String notEmpty(String text, String errorMsgTemplate, Object... params) throws IllegalArgumentException {
    if (!StrUtil.isEmpty(text)) {
        return text;
    }
    throw new IllegalArgumentException(StrUtil.format(errorMsgTemplate, params));
}
python
def copy(self):
    """Return a new Sequence object with the same fields as this one."""
    return Sequence(
        self.name,
        self.sequenceData,
        self.start,
        self.end,
        self.strand,
        self.remaining,
        self.meta_data,
        self.mutableString,
    )
python
def service_unavailable(cls, errors=None):
    """Shortcut API for HTTP 503 `Service Unavailable` response.

    Args:
        errors (list): Response key/value data.

    Returns:
        WSResponse Instance.
    """
    status_code = 503
    if cls.expose_status:  # pragma: no cover
        # Surface the status on the underlying response object as well.
        cls.response.content_type = 'application/json'
        cls.response._status_line = '{} Service Unavailable'.format(status_code)
    return cls(status_code, None, errors).to_json
python
def mark_stages(self, start_time, length, stage_name):
    """Mark stages, only add the new ones.

    Parameters
    ----------
    start_time : int
        start time in s of the epoch being scored.
    length : int
        duration in s of the epoch being scored.
    stage_name : str
        one of the stages defined in global stages.
    """
    y_pos = BARS['stage']['pos0']

    # Fall back to the 'Unknown' stage when the name is not recognized.
    current_stage = STAGES.get(stage_name, STAGES['Unknown'])

    # the -1 is really important, otherwise we stay on the edge of the rect
    old_score = self.scene.itemAt(start_time + length / 2,
                                  y_pos +
                                  current_stage['pos0'] +
                                  current_stage['pos1'] - 1,
                                  self.transform())

    # check we are not removing the black border
    if old_score is not None and old_score.pen() == NoPen:
        lg.debug('Removing old score at {}'.format(start_time))
        self.scene.removeItem(old_score)
        self.idx_annot.remove(old_score)

    # Draw the new stage rectangle (borderless, colored by stage).
    rect = QGraphicsRectItem(start_time,
                             y_pos + current_stage['pos0'],
                             length,
                             current_stage['pos1'])
    rect.setPen(NoPen)
    rect.setBrush(current_stage['color'])
    self.scene.addItem(rect)
    self.idx_annot.append(rect)
java
/**
 * Reads the whole content of the given reader into a single string, closing the
 * reader afterwards.
 *
 * @param aReader the reader to drain; may be {@code null}
 * @return the reader's full content, or {@code null} when the reader was {@code null}
 */
@Nullable
public static String getAllCharactersAsString (@Nullable @WillClose final Reader aReader)
{
    return aReader == null ? null : getCopy (aReader).getAsString ();
}
java
/**
 * Returns the {@code TileSize} EClass, lazily resolving it from the package
 * registry on first access.
 *
 * @return the meta object for class 'Tile Size'
 */
public EClass getTileSize() {
    if (tileSizeEClass != null) {
        return tileSizeEClass;
    }
    EPackage afplibPackage = EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI);
    tileSizeEClass = (EClass) afplibPackage.getEClassifiers().get(395);
    return tileSizeEClass;
}
python
def get_site_pattern(agent):
    """Construct a dictionary of Monomer site states from an Agent.

    This creates the mapping to the associated PySB monomer from an
    INDRA Agent object.
    """
    if not isinstance(agent, ist.Agent):
        return {}
    pattern = {}

    # Bound conditions: the binding site is assumed to be named after
    # the binding partner.
    for bound in agent.bound_conditions:
        pattern[get_binding_site_name(bound.agent)] = ANY if bound.is_bound else None

    # Modification states
    for mod in agent.mods:
        site_base = mod.residue if mod.residue is not None else abbrevs[mod.mod_type]
        position = mod.position if mod.position is not None else ''
        mod_site = '%s%s' % (site_base, position)
        site_states = states[mod.mod_type]
        state = site_states[1] if mod.is_modified else site_states[0]
        pattern[mod_site] = (state, WILD)

    # Mutations
    for mut in agent.mutations:
        res_from = mut.residue_from or 'mut'
        res_to = mut.residue_to or 'X'
        site_name = res_from if mut.position is None else res_from + mut.position
        pattern[site_name] = res_to

    # Cellular location
    if agent.location is not None:
        pattern['loc'] = _n(agent.location)

    # Activity state
    if agent.activity is not None:
        pattern[agent.activity.activity_type] = (
            'active' if agent.activity.is_active else 'inactive')

    return pattern
python
def _persist_inplace(self, **kwargs):
    """Persist all Dask arrays in memory (in-place)."""
    # Access ._data to find the variables backed by dask arrays.
    lazy_data = {}
    for name, variable in self.variables.items():
        if isinstance(variable._data, dask_array_type):
            lazy_data[name] = variable._data

    if lazy_data:
        import dask

        # Evaluate all the dask arrays simultaneously.
        computed = dask.persist(*lazy_data.values(), **kwargs)
        for name, data in zip(lazy_data, computed):
            self.variables[name].data = data

    return self
java
/**
 * Returns a {@link TriPredicate} that forwards to {@code predicate} while spying
 * on the second parameter of each invocation, storing it into {@code param2}.
 *
 * @param predicate the predicate to wrap
 * @param param2 the box that captures the second argument on each call
 * @param <T1> type of the first parameter
 * @param <T2> type of the second parameter
 * @param <T3> type of the third parameter
 * @return the spying tri-predicate
 */
public static <T1, T2, T3> TriPredicate<T1, T2, T3> spy2nd(TriPredicate<T1, T2, T3> predicate, Box<T2> param2) {
    // Only the second-parameter box is populated; the result box and the other
    // parameter boxes are passed as empty so nothing else is captured.
    return spy(predicate, Box.<Boolean>empty(), Box.<T1>empty(), param2, Box.<T3>empty());
}
java
/**
 * Builds usage statistics for the given target: one item per application version
 * that references it, marking whether the application is also actively using it.
 *
 * @param targetId the target's identifier
 * @return the usage items (possibly empty, never null)
 */
@Override
public List<TargetUsageItem> findUsageStatistics( String targetId ) {

    // Get usage first (under the lock: the usage data may change concurrently).
    List<String> appNames;
    synchronized( LOCK ) {
        appNames = applicationsThatUse( targetId );
    }

    // Now, let's build the result
    Set<TargetUsageItem> result = new HashSet<> ();
    for( Map.Entry<InstanceContext,String> entry : this.instanceToCachedId.entrySet()) {

        // Skip instances associated with a different target.
        if( ! entry.getValue().equals( targetId ))
            continue;

        String appName = entry.getKey().getName();
        TargetUsageItem item = new TargetUsageItem();
        item.setName( appName );
        item.setVersion( entry.getKey().getQualifier());
        item.setReferencing( true );
        item.setUsing( appNames.contains( appName ));
        result.add( item );
    }

    // The set removed duplicates; return it as a list.
    return new ArrayList<>( result );
}
python
def make_eps(asset_array, num_samples, seed, correlation):
    """
    :param asset_array: an array of assets
    :param int num_samples: the number of ruptures
    :param int seed: a random seed
    :param float correlation: the correlation coefficient
    :returns: epsilons matrix of shape (num_assets, num_samples)
    """
    eps = numpy.zeros((len(asset_array), num_samples), numpy.float32)
    # Epsilons are generated per taxonomy, so correlation is applied within
    # each taxonomy group.
    for taxonomy, assets in group_array(asset_array, 'taxonomy').items():
        shape = (len(assets), num_samples)
        logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
        epsilons = scientific.make_epsilons(numpy.zeros(shape), seed, correlation)
        for asset, eps_row in zip(assets, epsilons):
            eps[asset['ordinal']] = eps_row
    return eps
java
/**
 * Sets the response models and returns this instance, enabling fluent call chaining.
 *
 * @param responseModels the response models for the route response
 * @return this {@code RouteResponse}
 */
public RouteResponse withResponseModels(java.util.Map<String, String> responseModels) {
    setResponseModels(responseModels);
    return this;
}
python
def get_output_sensors(self):
    """Build the output using lm_sensors.

    Requires the ``sensors`` Python module (see docs).

    Returns
    -------
    dict
        With keys ``full_text``, ``urgent`` and ``color``.

    Raises
    ------
    RuntimeError
        If no sensors are detected.
    """
    found_sensors = get_sensors()
    # Idiomatic emptiness check; RuntimeError (an Exception subclass, so
    # backward-compatible for callers) instead of a bare Exception.
    if not found_sensors:
        raise RuntimeError("No sensors detected! "
                           "Ensure lm-sensors is installed and check the output of the `sensors` command.")

    data = dict()
    for sensor in found_sensors:
        data[sensor.name] = self.format_sensor(sensor)
        data["{}_bar".format(sensor.name)] = self.format_sensor_bar(sensor)
    # Overall temperature is the hottest current reading.
    data['temp'] = max((s.current for s in found_sensors))

    return {
        'full_text': self.format.format(**data),
        'urgent': self.get_urgent(found_sensors),
        'color': self.color if not self.dynamic_color else None,
    }
java
/**
 * Create a RoleCreator to execute a create request.
 *
 * @param pathServiceSid the SID of the service the role belongs to
 * @param friendlyName a human-readable name for the role
 * @param type the role type
 * @param permission the permissions granted to the role
 * @return a RoleCreator capable of executing the create
 */
public static RoleCreator creator(final String pathServiceSid,
                                  final String friendlyName,
                                  final Role.RoleType type,
                                  final List<String> permission) {
    return new RoleCreator(pathServiceSid, friendlyName, type, permission);
}
python
def cli(env):
    """Get price options to create a load balancer with."""
    mgr = SoftLayer.LoadBalancerManager(env.client)

    table = formatting.Table(['price_id', 'capacity', 'description', 'price'])
    table.sortby = 'price'
    # Right-align the numeric columns.
    for column in ('price', 'capacity', 'id'):
        table.align[column] = 'r'

    for package in mgr.get_lb_pkgs():
        first_price = package['prices'][0]
        table.add_row([
            first_price['id'],
            package.get('capacity'),
            package['description'],
            '%.2f' % float(first_price['recurringFee']),
        ])
    env.fout(table)
java
/**
 * Returns the string form of the attribute stored under {@code key}, or
 * {@code defaultValue} when no value is present.
 *
 * <p>Uses a single map lookup instead of a containsKey/get pair, and treats a
 * {@code null} stored value like a missing entry instead of throwing a
 * {@link NullPointerException}.</p>
 *
 * @param key the attribute name
 * @param defaultValue the value returned when the attribute is absent
 * @return the attribute's string representation, or {@code defaultValue}
 */
public synchronized String getAttribute(String key, String defaultValue) {
    Object value = attributes.get(key);
    return value != null ? value.toString() : defaultValue;
}
java
/**
 * Analyzes the image at {@code url}, unpacking the optional parameter container
 * (visual features, details, language) and delegating to the expanded overload.
 *
 * @param url the image URL to analyze
 * @param analyzeImageOptionalParameter optional settings; may be {@code null},
 *        in which case all optional values default to {@code null}
 * @return an observable emitting the service response with the analysis result
 * @throws IllegalArgumentException when the client endpoint or {@code url} is null
 */
public Observable<ServiceResponse<ImageAnalysis>> analyzeImageWithServiceResponseAsync(String url, AnalyzeImageOptionalParameter analyzeImageOptionalParameter) {
    if (this.client.endpoint() == null) {
        throw new IllegalArgumentException("Parameter this.client.endpoint() is required and cannot be null.");
    }
    if (url == null) {
        throw new IllegalArgumentException("Parameter url is required and cannot be null.");
    }
    // Unpack the optional parameter object, tolerating a null container.
    final List<VisualFeatureTypes> visualFeatures = analyzeImageOptionalParameter != null ? analyzeImageOptionalParameter.visualFeatures() : null;
    final List<Details> details = analyzeImageOptionalParameter != null ? analyzeImageOptionalParameter.details() : null;
    final String language = analyzeImageOptionalParameter != null ? analyzeImageOptionalParameter.language() : null;

    return analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language);
}
python
def download_list(user=None, pwd=None, limit=20, offset=0):
    """
    Lists the downloads created by a user.

    :param user: [str] A user name, look at env var ``GBIF_USER`` first
    :param pwd: [str] Your password, look at env var ``GBIF_PWD`` first
    :param limit: [int] Number of records to return. Default: ``20``
    :param offset: [int] Record number to start at. Default: ``0``

    Usage::

        from pygbif import occurrences as occ
        occ.download_list(user = "sckott")
        occ.download_list(user = "sckott", limit = 5)
        occ.download_list(user = "sckott", offset = 21)
    """
    # Fall back to environment variables for credentials.
    user = _check_environ('GBIF_USER', user)
    pwd = _check_environ('GBIF_PWD', pwd)

    res = gbif_GET(
        'http://api.gbif.org/v1/occurrence/download/user/' + user,
        {'limit': limit, 'offset': offset},
        auth=(user, pwd))

    meta = {
        'offset': res['offset'],
        'limit': res['limit'],
        'endofrecords': res['endOfRecords'],
        'count': res['count'],
    }
    return {'meta': meta, 'results': res['results']}
python
def on_rule(self, *args):
    """Attach the rule listener so updates happen when the rule changes."""
    rule = self.rule
    if rule is not None:
        rule.connect(self._listen_to_rule)
python
def _get_script(self):
    """Returns fixed commands script.

    If `settings.repeat` is `True`, appends command with second attempt
    of running fuck in case fixed command fails again.
    """
    if not settings.repeat:
        return self.script
    debug_flag = '--debug ' if settings.debug else ''
    repeat_fuck = '{} --repeat {}--force-command {}'.format(
        get_alias(), debug_flag, shell.quote(self.script))
    return shell.or_(self.script, repeat_fuck)
java
/**
 * Returns whether this node lies on the leftmost path of its tree, i.e. every
 * ancestor link from here up to the root is a left-child link.
 *
 * @return {@code true} when this node is reachable from the root by left
 *         children only
 */
protected boolean isLeftmostNode() {
    @SuppressWarnings("unchecked")
    K current = (K) this;
    // Walk towards the root; fail as soon as some ancestor hangs the current
    // node off anything other than its left child.
    while (current != null) {
        K parent = current.getParent();
        if (parent == null || parent.leftChild == current) {
            current = parent;
        } else {
            return false;
        }
    }
    return true;
}
python
def indent(txt, spacing=4):
    """Indent given text using custom spacing, default is set to 4.

    Parameters:
        txt: the object to indent; converted to ``str`` first.
        spacing (int): number of leading spaces added per line.

    Returns:
        The text prefixed via :func:`prefix`.
    """
    # String repetition is clearer and cheaper than joining a generated
    # list of single-space strings.
    return prefix(str(txt), ' ' * spacing)
java
/**
 * Returns a hex dump of {@code length} bytes of {@code array}, starting at
 * {@code offset}, encoded as two hex characters per byte.
 *
 * @param array the source bytes
 * @param offset index of the first byte to dump
 * @param length number of bytes to dump; must be non-negative
 * @return the hex string (empty when {@code length} is 0)
 * @throws IllegalArgumentException when {@code length} is negative
 */
public static String hexDump(byte[] array, int offset, int length) {
    if (length < 0) {
      throw new IllegalArgumentException("length: " + length);
    }
    if (length == 0) {
      return EMPTY_STRING;
    }

    int endIndex = offset + length;
    // Two output characters per input byte.
    char[] buf = new char[length << 1];

    int srcIdx = offset;
    int dstIdx = 0;
    for (; srcIdx < endIndex; srcIdx ++, dstIdx += 2) {
      // HEXDUMP_TABLE stores the 2-char encoding of every byte value, so the
      // table index is (byteValue * 2).
      System.arraycopy(
          HEXDUMP_TABLE, (array[srcIdx] & 0xFF) << 1,
          buf, dstIdx, 2);
    }

    return new String(buf);
}
java
/**
 * Registers an asynchronous consumer callback with the server for this session.
 *
 * <p>Flows a register-async-consumer request (or the stoppable variant when
 * {@code stoppable} is true) to the server, carrying the connection/session ids,
 * optional ordering context, proxy queue id and the delivery tuning parameters,
 * then checks the reply for any exception raised on the server side.</p>
 *
 * @param consumer the asynchronous consumer callback
 * @param maxActiveMessages maximum number of active messages
 * @param messageLockExpiry message lock expiry interval
 * @param maxBatchSize maximum delivery batch size
 * @param orderContext optional ordering context; may be {@code null}
 * @param maxSequentialFailures only flowed for stoppable consumers
 * @param hiddenMessageDelay only flowed for stoppable consumers
 * @param stoppable whether the callback is a stoppable consumer
 */
public void setAsynchConsumer(AsynchConsumerCallback consumer,
                              int maxActiveMessages,
                              long messageLockExpiry,
                              int maxBatchSize,
                              OrderingContext orderContext,
                              int maxSequentialFailures,                        //SIB0115d.comms
                              long hiddenMessageDelay,
                              boolean stoppable)                                //472879
    throws SISessionUnavailableException, SISessionDroppedException,
           SIConnectionUnavailableException, SIConnectionDroppedException,
           SIErrorException, SIIncorrectCallException
{
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "setAsynchConsumer",
                    new Object[]
                    {
                        consumer, maxActiveMessages, messageLockExpiry, maxBatchSize,
                        orderContext, maxSequentialFailures,                    //SIB0115d.comms
                        hiddenMessageDelay,
                        stoppable                                               //472879
                    });

    if (sessionId == 0)
    {
        // If the session Id = 0, then no one called setSessionId(). As such we are unable to flow
        // to the server as we do not know which session to instruct the server to use.
        SIErrorException e = new SIErrorException(
            nls.getFormattedMessage("SESSION_ID_HAS_NOT_BEEN_SET_SICO1043", null, null)
        );

        FFDCFilter.processException(e, CLASS_NAME + ".setAsyncConsumer",
                                    CommsConstants.CONVERSATIONHELPERIMPL_02, this);

        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "Session Id was 0", e);

        throw e;
    }

    CommsByteBuffer request = getCommsByteBuffer();

    // Connection object id
    request.putShort(connectionObjectId);
    // Consumer session id
    request.putShort(sessionId);
    // Now put the message order context id if we have one
    if (orderContext != null)
    {
        request.putShort(((OrderingContextProxy)orderContext).getId());
    }
    else
    {
        request.putShort(CommsConstants.NO_ORDER_CONTEXT);
    }
    // Client session id - this is the proxy queue ID
    request.putShort(proxyQueueId);
    // Max active messages
    request.putInt(maxActiveMessages);
    // Message lock expiry
    request.putLong(messageLockExpiry);
    // Max batch size
    request.putInt(maxBatchSize);

    // If callback is Stoppable then send maxSequentialFailures & hiddenMessageDelay then change the
    // Segment Id to Stoppable                                                  SIB0115d.comms
    int JFapSegmentId = JFapChannelConstants.SEG_REGISTER_ASYNC_CONSUMER;       //SIB0115d.comms
    if (stoppable)                                                              //SIB0115d.comms,472879
    {
        request.putInt(maxSequentialFailures);                                  //SIB0115d.comms
        request.putLong(hiddenMessageDelay);
        JFapSegmentId = JFapChannelConstants.SEG_REGISTER_STOPPABLE_ASYNC_CONSUMER; //SIB0115d.comms
    }                                                                           //SIB0115d.comms

    CommsByteBuffer reply = null;
    try
    {
        // Pass on call to server
        reply = jfapExchange(request,
                             JFapSegmentId,                                     //SIB0115d.comms
                             JFapChannelConstants.PRIORITY_MEDIUM,
                             true);
    }
    catch (SIConnectionLostException e)
    {
        // No FFDC Code needed
        // Converting this to a connection dropped as that is all we can throw
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "Connection was lost", e);
        throw new SIConnectionDroppedException(e.getMessage(), e);
    }

    // Confirm appropriate data returned
    try
    {
        short err = reply.getCommandCompletionCode(JFapChannelConstants.SEG_REGISTER_ASYNC_CONSUMER_R);
        if (err != CommsConstants.SI_NO_EXCEPTION)
        {
            checkFor_SISessionUnavailableException(reply, err);
            checkFor_SISessionDroppedException(reply, err);
            checkFor_SIConnectionUnavailableException(reply, err);
            checkFor_SIConnectionDroppedException(reply, err);
            checkFor_SIIncorrectCallException(reply, err);
            checkFor_SIErrorException(reply, err);
            defaultChecker(reply, err);
        }
    }
    finally
    {
        // Always release the reply buffer.
        if (reply != null) reply.release();
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "setAsynchConsumer");
}
java
/**
 * Removes all profiles from the drawer, detaching each one first, and refreshes
 * the profile display.
 *
 * @return this drawer view, for call chaining
 */
public DrawerView clearProfiles() {
    // Detach every profile before dropping them from the adapter.
    for (DrawerProfile attachedProfile : mProfileAdapter.getItems()) {
        attachedProfile.detach();
    }
    mProfileAdapter.clear();
    updateProfile();
    return this;
}
java
/**
 * Splits the query string of {@code url} into its individual parameters and
 * re-encodes each key and value with percent-encoding.
 *
 * <p>Fixes two defects of the previous implementation: a parameter without an
 * {@code '='} (e.g. {@code ?flag}) no longer throws
 * {@link ArrayIndexOutOfBoundsException}, and a value containing {@code '='}
 * is no longer truncated, because the split is limited to the first
 * {@code '='}.</p>
 *
 * @param url the URL whose query string is processed; it may have no query
 * @return the re-encoded {@code key=value} pairs in their original order;
 *         empty when the URL has no query string
 */
private static List<String> splitAndEncodeParams(URL url) {
    List<String> result = new ArrayList<>();
    if (url.getQuery() == null) {
        return result;
    }
    for (String param : url.getQuery().split("&")) {
        // Split only on the first '=' so values may themselves contain '='.
        String[] kv = param.split("=", 2);
        String key = PercentEncoding.encode(PercentEncoding.decode(kv[0]));
        if (kv.length == 1) {
            // Parameter without a value, e.g. "?flag".
            result.add(key);
        } else {
            String value = PercentEncoding.encode(PercentEncoding.decode(kv[1]));
            result.add(key + "=" + value);
        }
    }
    return result;
}
java
/**
 * Materializes this sequence into a newly allocated array with the given
 * component type.
 *
 * @param elementClass the runtime component type of the returned array
 * @return an array containing all elements, in iteration order
 */
public final T[] toArray(final Class<T> elementClass) {
    // Collect into a list first (initial capacity 256), then copy into a typed array.
    return Iterables.toArray(toCollection(Lists.<T>newArrayListWithCapacity(256)), elementClass);
}
java
/**
 * Returns the value mapped to {@code key}, or {@code null} when no such
 * mapping exists.
 *
 * @param key the key whose associated value is requested
 * @return the mapped value, or {@code null} when absent
 */
public V get(Object key) {
    TreeMapEntry<K,V> entry = getEntry(key);
    if (entry == null) {
        return null;
    }
    return entry.value;
}
python
def format_index(df):
    """Create a datetime index from day of year, and time columns.

    Parameters
    ----------
    df: pd.Dataframe
        The srml data to reindex.

    Returns
    -------
    df: pd.Dataframe
        The Dataframe with a DatetimeIndex localized to 'Etc/GMT+8'.
    """
    doy_col, time_col = df.columns[0], df.columns[1]
    # The name of the second column is the year; the column itself holds times.
    year = int(time_col)

    # Times are integers 1-2400; shift to 0-2359 by subtracting one and then
    # fixing the minutes at each former hour boundary (xx99 -> xx59).
    raw_times = df[time_col] - 1
    at_former_hour = raw_times % 100 == 99
    clock_times = raw_times.where(~at_former_hour, raw_times - 40)

    time_strings = clock_times.apply(lambda value: '{:04.0f}'.format(value))
    doy_strings = df[doy_col].apply(lambda value: '{:03.0f}'.format(value))

    stamps = pd.to_datetime(str(year) + '-' + doy_strings + '-' + time_strings,
                            format='%Y-%j-%H%M')
    df.index = stamps
    return df.tz_localize('Etc/GMT+8')
java
/**
 * Fetches the dbuser resources from the NITRO service, filtered by the given
 * arguments.
 *
 * @param service the NITRO service to query
 * @param args the filter arguments (serialized to an unquoted argument string)
 * @return the matching dbuser resources
 * @throws Exception if the NITRO request fails
 */
public static dbuser[] get(nitro_service service, dbuser_args args) throws Exception{
    dbuser obj = new dbuser();
    options option = new options();
    option.set_args(nitro_util.object_to_string_withoutquotes(args));
    dbuser[] response = (dbuser[])obj.get_resources(service, option);
    return response;
}
java
/**
 * Draws a string with an emphasis color behind the foreground text, delegating
 * to the overload that underlines a mnemonic character and passing -1 so that
 * no character is underlined.
 *
 * @param g the graphics context to draw into
 * @param foreground the color of the text itself
 * @param emphasis the color used for the emphasis effect
 * @param s the string to draw
 * @param x the x coordinate for the text
 * @param y the y coordinate for the text
 */
public void drawEmphasizedText(Graphics g, Color foreground, Color emphasis, String s, int x, int y) {
    drawEmphasizedText(g, foreground, emphasis, s, -1, x, y);
}
java
/**
 * Adds a column to the table schema, rejecting duplicate names and enforcing
 * that at most one identity column exists.
 *
 * @param column the column to add
 */
public void addColumn(ColumnSchema column) {
    String columnName = column.getName().name;

    // Duplicate column names are not allowed.
    if (findColumn(columnName) >= 0) {
        throw Error.error(ErrorCode.X_42504, columnName);
    }

    if (column.isIdentity()) {
        // Only a single identity column is permitted per table.
        if (identityColumn != -1) {
            throw Error.error(ErrorCode.X_42525, columnName);
        }
        identityColumn   = getColumnCount();
        identitySequence = column.getIdentitySequence();
    }

    addColumnNoCheck(column);
}
python
def education(self):
    """
    A list of structures describing the user's education history. Each
    structure has attributes ``school``, ``year``, ``concentration`` and
    ``type``. ``school``, ``year`` reference ``Page`` instances, while
    ``concentration`` is a list of ``Page`` instances. ``type`` is just a
    string that describes the education level.

    .. note:: ``concentration`` may be ``False`` if the user has not
       specified his/her concentration for the given school.
    """
    educations = []
    for entry in self.cache['education']:
        school = Page(**entry.get('school'))
        year = Page(**entry.get('year'))
        # Avoid shadowing the builtin `type`.
        education_type = entry.get('type')
        if 'concentration' in entry:
            # A list comprehension keeps this a real list on Python 3,
            # where map() would return a lazy iterator.
            concentration = [Page(**c) for c in entry.get('concentration')]
        else:
            concentration = False
        educations.append(Structure(
            school=school,
            year=year,
            concentration=concentration,
            type=education_type,
        ))
    return educations