Columns:
language: stringclasses, 2 values
func_code_string: stringlengths, 63 to 466k
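Each row below pairs a language tag (java or python) with a single func_code_string sample. As a minimal, hedged sketch of working with rows shaped like this, the snippet below assumes the records have already been loaded into a Python list of dicts named rows; the variable name, the toy values, and the absence of any particular loader are all assumptions for illustration, not part of the dataset itself.

# Hypothetical illustration only: `rows` stands in for the records described
# above (one dict per row with the two columns); no specific loader is implied.
rows = [
    {"language": "java", "func_code_string": "public int add(int a, int b) { return a + b; }"},
    {"language": "python", "func_code_string": "def add(a, b):\n    return a + b\n"},
]

by_language = {}
for row in rows:
    # Group the raw function strings by their language tag.
    by_language.setdefault(row["language"], []).append(row["func_code_string"])

for lang, funcs in by_language.items():
    # Report per-language counts and min/max string lengths, mirroring the
    # column statistics summarized in the header above.
    lengths = [len(code) for code in funcs]
    print(lang, len(funcs), "samples; length range:", min(lengths), "to", max(lengths))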
java
protected int _firstch(int identity)
{
    // Boiler-plate code for each of the _xxx functions, except for the array.
    int info = (identity >= m_size) ? NOTPROCESSED : m_firstch.elementAt(identity);

    // Check to see if the information requested has been processed, and,
    // if not, advance the iterator until the information has been processed.
    while (info == NOTPROCESSED)
    {
        boolean isMore = nextNode();

        if (identity >= m_size && !isMore)
            return NULL;
        else
        {
            info = m_firstch.elementAt(identity);
            if (info == NOTPROCESSED && !isMore)
                return NULL;
        }
    }

    return info;
}
java
public <T> T pick (Iterable<? extends T> iterable, T ifEmpty) { return pickPluck(iterable, ifEmpty, false); }
python
def gnomonicSphereToImage(lon, lat):
    """ Gnomonic projection (deg). """
    # Convert angle to [-180, 180] interval
    lon = lon - 360. * (lon > 180)
    lon = np.radians(lon)
    lat = np.radians(lat)
    r_theta = (180. / np.pi) / np.tan(lat)
    x = r_theta * np.cos(lon)
    y = r_theta * np.sin(lon)
    return x, y
java
public String toHeaderRow(Class<?> objectClass) {
    if (ClassPath.isPrimitive(objectClass) || Scheduler.isDateOrTime(objectClass))
        return new StringBuilder(objectClass.getSimpleName()).append("\n").toString();

    Method[] methodArray = objectClass.getMethods();
    Method m;
    String methodName = null;
    methods = new TreeMap<String, Method>();
    for (int i = 0; i < methodArray.length; i++) {
        m = methodArray[i];
        methodName = m.getName();
        if (!methodName.startsWith(GET_PREFIX) || m.getParameterCount() != 0
                || methodName.equals("getClass"))
            continue; // not properties
        methods.put(methodName, m);
    }

    StringBuilder csv = new StringBuilder();
    for (String keyMethodName : methods.keySet()) {
        if (csv.length() != 0)
            csv.append(SEPARATOR);
        csv.append(QUOTE).append(format(toFieldName(keyMethodName))).append(QUOTE);
    }
    csv.append(NEWLINE);
    return csv.toString();
}
python
def duration(self):
    """The duration of this stimulus

    :returns: float -- duration in seconds
    """
    durs = []
    for track in self._segments:
        durs.append(sum([comp.duration() for comp in track]))
    return max(durs)
java
public DescribeStacksResult withStacks(Stack... stacks) {
    if (this.stacks == null) {
        setStacks(new com.amazonaws.internal.SdkInternalList<Stack>(stacks.length));
    }
    for (Stack ele : stacks) {
        this.stacks.add(ele);
    }
    return this;
}
java
public static FileStatus replaceScheme(FileStatus st, String replace, String replacement) {
    if (replace != null && replace.equals(replacement)) {
        return st;
    }
    try {
        return new FileStatus(st.getLen(), st.isDir(), st.getReplication(), st.getBlockSize(),
                st.getModificationTime(), st.getAccessTime(), st.getPermission(), st.getOwner(),
                st.getGroup(), st.isSymlink() ? st.getSymlink() : null,
                replaceScheme(st.getPath(), replace, replacement));
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
}
python
def on_dict(self, node):    # ('keys', 'values')
    """Dictionary."""
    return dict([(self.run(k), self.run(v))
                 for k, v in zip(node.keys, node.values)])
java
public static long readLongBigEndian(InputStream io) throws IOException {
    long value = io.read();
    if (value < 0) throw new EOFException();
    value <<= 56;
    int i = io.read();
    if (i < 0) throw new EOFException();
    value |= ((long) i) << 48;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= ((long) i) << 40;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= ((long) i) << 32;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= ((long) i) << 24;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= i << 16;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= i << 8;
    i = io.read();
    if (i < 0) throw new EOFException();
    value |= i;
    return value;
}
python
def sun_rise_set_transit_spa(times, latitude, longitude, how='numpy',
                             delta_t=67.0, numthreads=4):
    """
    Calculate the sunrise, sunset, and sun transit times using the
    NREL SPA algorithm described in [1].

    If numba is installed, the functions can be compiled to machine code and
    the function can be multithreaded. Without numba, the function evaluates
    via numpy with a slight performance hit.

    Parameters
    ----------
    times : pandas.DatetimeIndex
        Must be localized to the timezone for ``latitude`` and ``longitude``.
    latitude : float
        Latitude in degrees, positive north of equator, negative to south
    longitude : float
        Longitude in degrees, positive east of prime meridian, negative to west
    delta_t : float, optional
        If delta_t is None, uses spa.calculate_deltat using times.year and
        times.month from pandas.DatetimeIndex. For most simulations specifying
        delta_t is sufficient. Difference between terrestrial time and UT1.
        delta_t = None will break code using nrel_numba, this will be fixed in
        a future version. By default, use USNO historical data and predictions
    how : str, optional, default 'numpy'
        Options are 'numpy' or 'numba'. If numba >= 0.17.0 is installed,
        how='numba' will compile the spa functions to machine code and run
        them multithreaded.
    numthreads : int, optional, default 4
        Number of threads to use if how == 'numba'.

    Returns
    -------
    pandas.DataFrame
        index is the same as input `times` argument
        columns are 'sunrise', 'sunset', and 'transit'

    References
    ----------
    [1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
    radiation applications. Technical report: NREL/TP-560-34302. Golden,
    USA, http://www.nrel.gov.
    """
    # Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015

    lat = latitude
    lon = longitude

    # times must be localized
    if times.tz:
        tzinfo = times.tz
    else:
        raise ValueError('times must be localized')

    # must convert to midnight UTC on day of interest
    utcday = pd.DatetimeIndex(times.date).tz_localize('UTC')
    unixtime = np.array(utcday.astype(np.int64) / 10**9)

    spa = _spa_python_import(how)

    delta_t = delta_t or spa.calculate_deltat(times.year, times.month)

    transit, sunrise, sunset = spa.transit_sunrise_sunset(
        unixtime, lat, lon, delta_t, numthreads)

    # arrays are in seconds since epoch format, need to convert to timestamps
    transit = pd.to_datetime(transit * 1e9, unit='ns', utc=True).tz_convert(
        tzinfo).tolist()
    sunrise = pd.to_datetime(sunrise * 1e9, unit='ns', utc=True).tz_convert(
        tzinfo).tolist()
    sunset = pd.to_datetime(sunset * 1e9, unit='ns', utc=True).tz_convert(
        tzinfo).tolist()

    return pd.DataFrame(index=times, data={'sunrise': sunrise,
                                           'sunset': sunset,
                                           'transit': transit})
java
public void setSupportedTimezones(java.util.Collection<Timezone> supportedTimezones) {
    if (supportedTimezones == null) {
        this.supportedTimezones = null;
        return;
    }
    this.supportedTimezones = new java.util.ArrayList<Timezone>(supportedTimezones);
}
java
@Override
public void writeShort(short v) throws IOException {
    outputStream.write(0xFF & v);
    outputStream.write(0xFF & (v >> 8));
    bytesWritten += 2;
}
java
public ServiceFuture<RecommendationRuleInner> getRuleDetailsByWebAppAsync(String resourceGroupName, String siteName, String name, Boolean updateSeen, String recommendationId, final ServiceCallback<RecommendationRuleInner> serviceCallback) { return ServiceFuture.fromResponse(getRuleDetailsByWebAppWithServiceResponseAsync(resourceGroupName, siteName, name, updateSeen, recommendationId), serviceCallback); }
python
def get_collection_datasets(collection_id, **kwargs):
    """
    Get all the datasets from the collection with the specified name
    """
    collection_datasets = db.DBSession.query(Dataset).filter(
        Dataset.id == DatasetCollectionItem.dataset_id,
        DatasetCollectionItem.collection_id == DatasetCollection.id,
        DatasetCollection.id == collection_id).all()

    return collection_datasets
java
public static IterableResult<Extension> search(ExtensionRepository repository, ExtensionQuery query,
    IterableResult<Extension> previousSearchResult) throws SearchException {
    IterableResult<Extension> result;

    if (repository instanceof Searchable) {
        if (repository instanceof AdvancedSearchable) {
            AdvancedSearchable searchableRepository = (AdvancedSearchable) repository;

            result = searchableRepository.search(query);
        } else {
            Searchable searchableRepository = (Searchable) repository;

            result = searchableRepository.search(query.getQuery(), query.getOffset(), query.getLimit());
        }

        if (previousSearchResult != null) {
            result = RepositoryUtils.appendSearchResults(previousSearchResult, result);
        }
    } else {
        result = previousSearchResult;
    }

    return result;
}
java
public void setDistanceFunction(DistanceFunction df) throws Exception {
    if (!(df instanceof EuclideanDistance))
        throw new Exception("KDTree currently only works with "
            + "EuclideanDistanceFunction.");
    m_DistanceFunction = m_EuclideanDistance = (EuclideanDistance) df;
}
java
public void autoReportZero(final Set<MetricIdentity> autoMetrics) {
    Preconditions.checkNotNull(autoMetrics);
    for (MetricIdentity identity : autoMetrics) {
        if (!aggregateExistsForCurrentMinute(identity)) {
            MetricAggregate aggregate = getAggregate(identity);
            aggregate.setCount(1);
            aggregate.setValue(0.0);
        }
    }
}
java
public DateRangeQuery end(Date end, boolean inclusive) { this.end = SearchUtils.toFtsUtcString(end); this.inclusiveEnd = inclusive; return this; }
python
def remove_resource(zone, resource_type, resource_key, resource_value):
    '''
    Remove a resource

    zone : string
        name of zone
    resource_type : string
        type of resource
    resource_key : string
        key for resource selection
    resource_value : string
        value for resource selection

    .. note::
        Set resource_selector to None for resource that do not require one.

    CLI Example:

    .. code-block:: bash

        salt '*' zonecfg.remove_resource tallgeese rctl name zone.max-locked-memory
    '''
    ret = {'status': True}

    # generate update script
    cfg_file = salt.utils.files.mkstemp()
    with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:
        if resource_key:
            fp_.write("remove {0} {1}={2}\n".format(resource_type, resource_key,
                                                    _sanitize_value(resource_value)))
        else:
            fp_.write("remove {0}\n".format(resource_type))

    # update property
    if cfg_file:
        _dump_cfg(cfg_file)
        res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {path}'.format(
            zone=zone,
            path=cfg_file,
        ))
        ret['status'] = res['retcode'] == 0
        ret['message'] = res['stdout'] if ret['status'] else res['stderr']
        if ret['message'] == '':
            del ret['message']
        else:
            ret['message'] = _clean_message(ret['message'])

        # cleanup config file
        if __salt__['file.file_exists'](cfg_file):
            __salt__['file.remove'](cfg_file)

    return ret
java
public static FLValue fromData(AllocSlice slice) { final long value = fromData(slice.handle); return value != 0 ? new FLValue(value) : null; }
java
@Deprecated
public ComponentMetadata findComponentMetadata(Class<?> componentType) {
    ComponentMetadata md = null;
    while (md == null) {
        String componentName = componentType.getName();
        md = findComponentMetadata(componentName);
        if (md == null) {
            // Test superclasses/superinterfaces.
            if (!componentType.equals(Object.class) && !componentType.isInterface())
                componentType = componentType.getSuperclass();
            else
                md = dependencyFreeComponent;
        }
    }
    return md;
}
python
def publisher(self):
    """Name of the publisher of the abstract.

    Note: Information provided in the FULL view of the article might be
    more complete.
    """
    # Return information from FULL view, fall back to other views
    full = chained_get(self._head, ['source', 'publisher', 'publishername'])
    if full is None:
        return self._json['coredata'].get('dc:publisher')
    else:
        return full
python
def get_image(self, id):
    """Return a Image object representing the image with the given id."""
    url = self._base_url + "/3/image/{0}".format(id)
    resp = self._send_request(url)
    return Image(resp, self)
python
def check_storage_controllers():
    """
    Check the status of the storage controllers
    Skip this check, if --noController is set
    """
    if ctrl_flag:
        ctrl = walk_data(sess, oid_ctrl, helper)[0]
        for x, data in enumerate(ctrl, 1):
            ctrl_summary_output, ctrl_long_output = state_summary(
                data, 'Controller %d' % x, normal_state, helper)
            add_output(ctrl_summary_output, ctrl_long_output, helper)
java
public void marshall(UpdatePullRequestTitleRequest updatePullRequestTitleRequest, ProtocolMarshaller protocolMarshaller) {

    if (updatePullRequestTitleRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(updatePullRequestTitleRequest.getPullRequestId(), PULLREQUESTID_BINDING);
        protocolMarshaller.marshall(updatePullRequestTitleRequest.getTitle(), TITLE_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public String get(final String path) throws IOException {
    final HttpRequest request = getRequest(path);
    final HttpResponse response = request.execute();
    return response.parseAsString();
}
python
def allowed(self, url, agent): '''Return true if the provided URL is allowed to agent.''' return self.get(url).allowed(url, agent)
python
def _make_middleware_stack(middleware, base):
    """
    Given a list of in-order middleware callables `middleware`
    and a base function `base`, chains them together so each middleware
    is fed the function below, and returns the top level ready to call.
    """
    for ware in reversed(middleware):
        base = ware(base)
    return base
java
public static Object deserialize(byte[] bytes, boolean gzipOnSerialize) {
    try {
        InputStream is = new ByteArrayInputStream(bytes);
        if (gzipOnSerialize) {
            is = new GZIPInputStream(is);
        }
        ObjectInputStream in = new ObjectInputStream(is);
        Object inObj = in.readObject();
        in.close();
        return inObj;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    }
}
python
def _iter_matches(self, input_match, subject_graph, one_match, level=0):
    """Given an onset for a match, iterate over all completions of that match

    This iterator works recursively. At each level the match is extended
    with a new set of relations based on vertices in the pattern graph
    that are at a distances 'level' from the starting vertex
    """
    self.print_debug("ENTERING _ITER_MATCHES", 1)
    self.print_debug("input_match: %s" % input_match)
    # A) collect the new edges in the pattern graph and the subject graph
    # to extend the match.
    #
    # Note that the edges are ordered. edge[0] is always in the match.
    # edge[1] is never in the match. The constraints contain information
    # about the end points of edges0. It is a list of two-tuples where
    # (a, b) means that a and b must be connected.
    #
    # Second note: suffix 0 indicates the pattern graph and suffix 1
    # is used for the subject graph.
    edges0, constraints0 = self.pattern.get_new_edges(level)
    edges1 = input_match.get_new_edges(subject_graph)
    self.print_debug("edges0: %s" % edges0)
    self.print_debug("constraints0: %s" % constraints0)
    self.print_debug("edges1: %s" % edges1)

    # B) iterate over the sets of new relations: [(vertex0[i], vertex1[j]),
    # ...] that contain all endpoints of edges0, that satisfy the
    # constraints0 and where (vertex0[i], vertex1[j]) only occurs if these
    # are end points of a edge0 and edge1 whose starting points are already
    # in init_match. These conditions are implemented in an iterator as to
    # separate concerns. This iterator also calls the routines that check
    # whether vertex1[j] also satisfies additional conditions inherent
    # vertex0[i].
    inr = self._iter_new_relations(input_match, subject_graph, edges0,
                                   constraints0, edges1)
    for new_relations in inr:
        # for each set of new_relations, construct a next_match and recurse
        next_match = input_match.copy_with_new_relations(new_relations)
        if not self.pattern.check_next_match(next_match, new_relations, subject_graph, one_match):
            continue
        if self.pattern.complete(next_match, subject_graph):
            yield next_match
        else:
            for match in self._iter_matches(next_match, subject_graph, one_match, level+1):
                yield match
    self.print_debug("LEAVING_ITER_MATCHES", -1)
python
def subSampleWholeColumn(spikeTrains, colIndices, cellsPerColumn, currentTS, timeWindow):
    """
    Obtains subsample from matrix of spike trains by considering the cells in columns
    specified by colIndices. Thus, it returns a matrix of spike trains of cells within
    the same column.

    @param spikeTrains (array) array containing the spike trains of cells in the TM
    @param colIndices (array) array containing the indices of columns whose spike trains
        should be sampled
    @param cellsPerColumn (int) number of cells per column in the TM
    @param currentTS (int) time-step upper bound of sample (sample will go from time-step 0
        up to currentTS)
    @param timeWindow (int) number of time-steps to sample from the spike trains

    @return subSpikeTrains (array) spike train matrix sampled from the total spike train matrix
    """
    numColumns = np.shape(colIndices)[0]
    numCells = numColumns * cellsPerColumn

    if currentTS > 0 and currentTS < timeWindow:
        subSpikeTrains = np.zeros((numCells, currentTS), dtype="uint32")
        for i in range(numColumns):
            currentCol = colIndices[i]
            initialCell = cellsPerColumn * currentCol
            for j in range(cellsPerColumn):
                subSpikeTrains[(cellsPerColumn * i) + j, :] = spikeTrains[initialCell + j, :]
    elif currentTS > 0 and currentTS >= timeWindow:
        subSpikeTrains = np.zeros((numCells, timeWindow), dtype="uint32")
        for i in range(numColumns):
            currentCol = colIndices[i]
            initialCell = cellsPerColumn * currentCol
            for j in range(cellsPerColumn):
                subSpikeTrains[(cellsPerColumn * i) + j, :] = \
                    spikeTrains[initialCell + j, (currentTS - timeWindow):currentTS]
    elif currentTS == 0:
        # This option takes the whole spike train history
        totalTS = np.shape(spikeTrains)[1]
        subSpikeTrains = np.zeros((numCells, totalTS), dtype="uint32")
        for i in range(numColumns):
            currentCol = colIndices[i]
            initialCell = cellsPerColumn * currentCol
            for j in range(cellsPerColumn):
                subSpikeTrains[(cellsPerColumn * i) + j, :] = spikeTrains[initialCell + j, :]
    elif currentTS < 0:
        totalTS = np.shape(spikeTrains)[1]
        subSpikeTrains = np.zeros((numCells, timeWindow), dtype="uint32")
        rnd = random.randrange(totalTS - timeWindow)
        print("Starting from timestep: " + str(rnd))
        for i in range(numColumns):
            currentCol = colIndices[i]
            initialCell = cellsPerColumn * currentCol
            for j in range(cellsPerColumn):
                subSpikeTrains[(cellsPerColumn * i) + j, :] = \
                    spikeTrains[initialCell + j, rnd:(rnd + timeWindow)]

    return subSpikeTrains
python
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: SyncListItemContext for this SyncListItemInstance
    :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
    """
    if self._context is None:
        self._context = SyncListItemContext(
            self._version,
            service_sid=self._solution['service_sid'],
            list_sid=self._solution['list_sid'],
            index=self._solution['index'],
        )
    return self._context
java
public boolean isBeforeRange(final Range<T> otherRange) {
    if (otherRange == null) {
        return false;
    }
    return isBefore(otherRange.minimum);
}
python
def get_percentage_bond_dist_changes(self, max_radius=3.0):
    """
    Returns the percentage bond distance changes for each site up to a
    maximum radius for nearest neighbors.

    Args:
        max_radius (float): Maximum radius to search for nearest
            neighbors. This radius is applied to the initial structure,
            not the final structure.

    Returns:
        Bond distance changes as a dict of dicts. E.g.,
        {index1: {index2: 0.011, ...}}. For economy of representation, the
        index1 is always less than index2, i.e., since bonding between
        site1 and siten is the same as bonding between siten and site1,
        there is no reason to duplicate the information or computation.
    """
    data = collections.defaultdict(dict)
    for inds in itertools.combinations(list(range(len(self.initial))), 2):
        (i, j) = sorted(inds)
        initial_dist = self.initial[i].distance(self.initial[j])
        if initial_dist < max_radius:
            final_dist = self.final[i].distance(self.final[j])
            data[i][j] = final_dist / initial_dist - 1
    return data
python
def get_hgp(p, k, N, K, n):
    """Calculate the hypergeometric p-value when p = f(k; N,K,n) is already known."""
    pval = p
    while k < min(K, n):
        p *= (float((n - k) * (K - k) / float((k + 1) * (N - K - n + k + 1))))
        pval += p
        k += 1
    return pval
java
public IStringBuffer append( char c )
{
    if ( count == buff.length ) {
        resizeTo( buff.length * 2 + 1 );
    }
    buff[count++] = c;
    return this;
}
java
public void marshall(UpdateCertificateOptionsRequest updateCertificateOptionsRequest, ProtocolMarshaller protocolMarshaller) {

    if (updateCertificateOptionsRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(updateCertificateOptionsRequest.getCertificateArn(), CERTIFICATEARN_BINDING);
        protocolMarshaller.marshall(updateCertificateOptionsRequest.getOptions(), OPTIONS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public ConstructorDeclaration getConstructor(String signature) {
    for (EntityDeclaration node : type.getMembers()) {
        if (node.getEntityType() == EntityType.CONSTRUCTOR) {
            ConstructorDeclaration cons = (ConstructorDeclaration) node;
            if (signature.equals(signature(cons))) {
                return cons;
            }
        }
    }
    return null;
}
python
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None,
                smtp_port=None, **kwargs):
    """
    Mail notification method taking a *title* and a string *message*. *recipient*, *sender*,
    *smtp_host* and *smtp_port* default to the configuration values in the [notifications]
    section.
    """
    cfg = Config.instance()

    if not recipient:
        recipient = cfg.get_expanded("notifications", "mail_recipient")
    if not sender:
        sender = cfg.get_expanded("notifications", "mail_sender")
    if not smtp_host:
        smtp_host = cfg.get_expanded("notifications", "mail_smtp_host")
    if not smtp_port:
        smtp_port = cfg.get_expanded("notifications", "mail_smtp_port")

    if not recipient or not sender:
        logger.warning("cannot send mail notification, recipient ({}) or sender ({}) empty".format(
            recipient, sender))
        return False

    return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
python
def authorize_security_group( self, group_name=None, group_id=None, source_group_name="", source_group_owner_id="", ip_protocol="", from_port="", to_port="", cidr_ip=""): """ There are two ways to use C{authorize_security_group}: 1) associate an existing group (source group) with the one that you are targeting (group_name) with an authorization update; or 2) associate a set of IP permissions with the group you are targeting with an authorization update. @param group_name: The group you will be modifying with a new authorization. @param group_id: The id of the group you will be modifying with a new authorization. Optionally, the following parameters: @param source_group_name: Name of security group to authorize access to when operating on a user/group pair. @param source_group_owner_id: Owner of security group to authorize access to when operating on a user/group pair. If those parameters are not specified, then the following must be: @param ip_protocol: IP protocol to authorize access to when operating on a CIDR IP. @param from_port: Bottom of port range to authorize access to when operating on a CIDR IP. This contains the ICMP type if ICMP is being authorized. @param to_port: Top of port range to authorize access to when operating on a CIDR IP. This contains the ICMP code if ICMP is being authorized. @param cidr_ip: CIDR IP range to authorize access to when operating on a CIDR IP. @return: A C{Deferred} that will fire with a truth value for the success of the operation. """ if source_group_name and source_group_owner_id: parameters = { "SourceSecurityGroupName": source_group_name, "SourceSecurityGroupOwnerId": source_group_owner_id, } elif ip_protocol and from_port and to_port and cidr_ip: parameters = { "IpProtocol": ip_protocol, "FromPort": from_port, "ToPort": to_port, "CidrIp": cidr_ip, } else: msg = ("You must specify either both group parameters or " "all the ip parameters.") raise ValueError(msg) if group_id: parameters["GroupId"] = group_id elif group_name: parameters["GroupName"] = group_name else: raise ValueError("You must specify either the group name of the group id.") query = self.query_factory( action="AuthorizeSecurityGroupIngress", creds=self.creds, endpoint=self.endpoint, other_params=parameters) d = query.submit() return d.addCallback(self.parser.truth_return)
python
def active():
    '''
    List existing device-mapper device details.
    '''
    ret = {}
    # TODO: This command should be extended to collect more information, such as UUID.
    devices = __salt__['cmd.run_stdout']('dmsetup ls --target crypt')
    out_regex = re.compile(r'(?P<devname>\w+)\W+\((?P<major>\d+), (?P<minor>\d+)\)')

    log.debug(devices)
    for line in devices.split('\n'):
        match = out_regex.match(line)
        if match:
            dev_info = match.groupdict()
            ret[dev_info['devname']] = dev_info
        else:
            log.warning('dmsetup output does not match expected format')

    return ret
python
def block_specification_to_number(block: BlockSpecification, web3: Web3) -> BlockNumber:
    """ Converts a block specification to an actual block number """
    if isinstance(block, str):
        msg = f"string block specification can't contain {block}"
        assert block in ('latest', 'pending'), msg
        number = web3.eth.getBlock(block)['number']
    elif isinstance(block, T_BlockHash):
        number = web3.eth.getBlock(block)['number']
    elif isinstance(block, T_BlockNumber):
        number = block
    else:
        if __debug__:
            raise AssertionError(f'Unknown type {type(block)} given for block specification')

    return BlockNumber(number)
java
@Override public DescribeNotebookInstanceLifecycleConfigResult describeNotebookInstanceLifecycleConfig(DescribeNotebookInstanceLifecycleConfigRequest request) { request = beforeClientExecution(request); return executeDescribeNotebookInstanceLifecycleConfig(request); }
java
protected <T> T handleGet(URL resourceUrl, Class<T> type) throws ExecutionException, InterruptedException, KubernetesClientException, IOException { return handleGet(resourceUrl, type, Collections.<String, String>emptyMap()); }
python
def connection_made(self, transport):
    """
    Called by the underlying transport when a connection is made.

    :param transport: The transport representing the connection.
    """
    # Save the underlying transport
    self._transport = transport

    # Call connection_made() on the client protocol, passing
    # ourself as the transport
    self._client.connection_made(self)
java
public <T> T getService(String serviceName) {
    T service = findService(serviceName, true);
    if (service == null) {
        throw new IllegalArgumentException(format("No service found named \"%s\"", serviceName));
    }
    return service;
}
java
@Override
public void execute(TridentTuple tuple, TridentCollector collector) {
    String receivedStr = tuple.getString(0);
    String[] splitedStr = StringUtils.split(receivedStr, this.delimeter);
    int dataNum = splitedStr.length;
    double[] points = new double[splitedStr.length];

    try {
        for (int index = 0; index < dataNum; index++) {
            points[index] = Double.parseDouble(splitedStr[index].trim());
        }
        KmeansPoint result = new KmeansPoint();
        result.setDataPoint(points);
        collector.emit(new Values(result));
    } catch (Exception ex) {
        logger.warn("Received data is invalid. skip this data. ReceivedData=" + receivedStr, ex);
    }
}
java
public static void multAddOuter( double alpha , DMatrix6x6 A , double beta , DMatrix6 u , DMatrix6 v , DMatrix6x6 C ) { C.a11 = alpha*A.a11 + beta*u.a1*v.a1; C.a12 = alpha*A.a12 + beta*u.a1*v.a2; C.a13 = alpha*A.a13 + beta*u.a1*v.a3; C.a14 = alpha*A.a14 + beta*u.a1*v.a4; C.a15 = alpha*A.a15 + beta*u.a1*v.a5; C.a16 = alpha*A.a16 + beta*u.a1*v.a6; C.a21 = alpha*A.a21 + beta*u.a2*v.a1; C.a22 = alpha*A.a22 + beta*u.a2*v.a2; C.a23 = alpha*A.a23 + beta*u.a2*v.a3; C.a24 = alpha*A.a24 + beta*u.a2*v.a4; C.a25 = alpha*A.a25 + beta*u.a2*v.a5; C.a26 = alpha*A.a26 + beta*u.a2*v.a6; C.a31 = alpha*A.a31 + beta*u.a3*v.a1; C.a32 = alpha*A.a32 + beta*u.a3*v.a2; C.a33 = alpha*A.a33 + beta*u.a3*v.a3; C.a34 = alpha*A.a34 + beta*u.a3*v.a4; C.a35 = alpha*A.a35 + beta*u.a3*v.a5; C.a36 = alpha*A.a36 + beta*u.a3*v.a6; C.a41 = alpha*A.a41 + beta*u.a4*v.a1; C.a42 = alpha*A.a42 + beta*u.a4*v.a2; C.a43 = alpha*A.a43 + beta*u.a4*v.a3; C.a44 = alpha*A.a44 + beta*u.a4*v.a4; C.a45 = alpha*A.a45 + beta*u.a4*v.a5; C.a46 = alpha*A.a46 + beta*u.a4*v.a6; C.a51 = alpha*A.a51 + beta*u.a5*v.a1; C.a52 = alpha*A.a52 + beta*u.a5*v.a2; C.a53 = alpha*A.a53 + beta*u.a5*v.a3; C.a54 = alpha*A.a54 + beta*u.a5*v.a4; C.a55 = alpha*A.a55 + beta*u.a5*v.a5; C.a56 = alpha*A.a56 + beta*u.a5*v.a6; C.a61 = alpha*A.a61 + beta*u.a6*v.a1; C.a62 = alpha*A.a62 + beta*u.a6*v.a2; C.a63 = alpha*A.a63 + beta*u.a6*v.a3; C.a64 = alpha*A.a64 + beta*u.a6*v.a4; C.a65 = alpha*A.a65 + beta*u.a6*v.a5; C.a66 = alpha*A.a66 + beta*u.a6*v.a6; }
java
@Override
public <E extends Exception> void add(Iteration<? extends Statement, E> statements, Resource... contexts)
        throws RepositoryException, E {
    while (statements.hasNext()) {
        Statement st = statements.next();
        add(st.getSubject(), st.getPredicate(), st.getObject(), mergeResource(st.getContext(), contexts));
    }
}
python
def PatchAt(cls, n, module, method_wrapper=None, module_alias=None,
            method_name_modifier=utils.identity, blacklist_predicate=_False,
            whitelist_predicate=_True, return_type_predicate=_None,
            getmembers_predicate=inspect.isfunction, admit_private=False,
            explanation=""):
    """
    This classmethod lets you easily patch all of the functions/callables from a module or class as methods of a Builder class.

    **Arguments**

    * **n** : the position the object being piped will take in the arguments when the function being patched is applied. See `RegisterMethod` and `ThenAt`.
    * **module** : a module or class from which the functions/methods/callables will be taken.
    * `module_alias = None` : an optional alias for the module used for documentation purposes.
    * `method_name_modifier = lambda f_name: None` : a function that can modify the name the method will take. If `None` the name of the function will be used.
    * `blacklist_predicate = lambda f_name: name[0] != "_"` : A predicate that determines which functions are banned given their name. By default it excludes all functions whose name starts with `'_'`. `blacklist_predicate` can also be of type list, in which case all names contained in this list will be banned.
    * `whitelist_predicate = lambda f_name: True` : A predicate that determines which functions are admitted given their name. By default it includes any function. `whitelist_predicate` can also be of type list, in which case only names contained in this list will be admitted. You can use both `blacklist_predicate` and `whitelist_predicate` at the same time.
    * `return_type_predicate = lambda f_name: None` : a predicate that determines the `_return_type` of the Builder. By default it will always return `None`. See `phi.builder.Builder.ThenAt`.
    * `getmembers_predicate = inspect.isfunction` : a predicate that determines what type of elements/members will be fetched by the `inspect` module, defaults to [inspect.isfunction](https://docs.python.org/2/library/inspect.html#inspect.isfunction). See [getmembers](https://docs.python.org/2/library/inspect.html#inspect.getmembers).

    **Examples**

    Let's patch ALL the main functions from numpy into a custom builder!

        from phi import PythonBuilder  # or Builder
        import numpy as np

        class NumpyBuilder(PythonBuilder):  # or Builder
            "A Builder for numpy functions!"
            pass

        NumpyBuilder.PatchAt(1, np)

        N = NumpyBuilder(lambda x: x)

    That's it! Although a serious patch would involve filtering out functions that don't take arrays. Another common task would be to use `NumpyBuilder.PatchAt(2, ...)` (`PatchAt(n, ..)` in general) when convenient to send the object being piped to the relevant argument of the function. The previous is usually done with a combination of `whitelist_predicate`s and `blacklist_predicate`s on `PatchAt(1, ...)` and `PatchAt(2, ...)` to filter or include the appropriate functions on each kind of patch.

    Given the previous code we could now do

        import numpy as np

        x = np.array([[1,2],[3,4]])
        y = np.array([[5,6],[7,8]])

        z = N.Pipe(
            x,
            N
            .dot(y)
            .add(x)
            .transpose()
            .sum(axis=1)
        )

    Which is strictly equivalent to

        import numpy as np

        x = np.array([[1,2],[3,4]])
        y = np.array([[5,6],[7,8]])

        z = np.dot(x, y)
        z = np.add(z, x)
        z = np.transpose(z)
        z = np.sum(z, axis=1)

    The thing to notice is that with the `NumpyBuilder` we avoid the repetitive and needless passing and reassignment of the `z` variable, this removes a lot of noise from our code.
    """
    _rtp = return_type_predicate
    return_type_predicate = (lambda x: _rtp) if inspect.isclass(_rtp) and issubclass(_rtp, Builder) else _rtp
    module_name = module_alias if module_alias else module.__name__ + '.'
    patch_members = _get_patch_members(
        module, blacklist_predicate=blacklist_predicate,
        whitelist_predicate=whitelist_predicate,
        getmembers_predicate=getmembers_predicate,
        admit_private=admit_private)

    for name, f in patch_members:
        wrapped = None

        if method_wrapper:
            g = method_wrapper(f)
            wrapped = f
        else:
            g = f

        cls.RegisterAt(n, g, module_name, wrapped=wrapped,
                       _return_type=return_type_predicate(name),
                       alias=method_name_modifier(name),
                       explanation=explanation)
python
def cross(self): """Return cross join""" self.query.do_join(Join(self.item, JoinType.cross)) return self.query
java
public static CPInstance fetchByCompanyId_First(long companyId, OrderByComparator<CPInstance> orderByComparator) { return getPersistence() .fetchByCompanyId_First(companyId, orderByComparator); }
java
@Override
public void recordVariableUpdate(VariableInstanceEntity variable) {
    if (isHistoryLevelAtLeast(HistoryLevel.ACTIVITY)) {
        HistoricVariableInstanceEntity historicProcessVariable =
                getEntityCache().findInCache(HistoricVariableInstanceEntity.class, variable.getId());
        if (historicProcessVariable == null) {
            historicProcessVariable = getHistoricVariableInstanceEntityManager()
                    .findHistoricVariableInstanceByVariableInstanceId(variable.getId());
        }

        if (historicProcessVariable != null) {
            getHistoricVariableInstanceEntityManager().copyVariableValue(historicProcessVariable, variable);
        } else {
            getHistoricVariableInstanceEntityManager().copyAndInsert(variable);
        }
    }
}
java
protected List<String> getTokensFromHeader(Request request, String headerName) {
    List<String> result = new ArrayList<>();

    Enumeration<String> headers = request.getHeaders(headerName);
    while (headers.hasMoreElements()) {
        String header = headers.nextElement();
        String[] tokens = header.split(",");
        for (String token : tokens) {
            result.add(token.trim());
        }
    }
    return result;
}
python
def get_meta_graph_def(saved_model_dir, tag_set):
    """
    Utility function to read a meta_graph_def from disk.

    From `saved_model_cli.py <https://github.com/tensorflow/tensorflow/blob/8e0e8d41a3a8f2d4a6100c2ea1dc9d6c6c4ad382/tensorflow/python/tools/saved_model_cli.py#L186>`_

    Args:
        :saved_model_dir: path to saved_model.
        :tag_set: list of string tags identifying the TensorFlow graph within the saved_model.

    Returns:
        A TensorFlow meta_graph_def, or raises an Exception otherwise.
    """
    saved_model = reader.read_saved_model(saved_model_dir)
    set_of_tags = set(tag_set.split(','))
    for meta_graph_def in saved_model.meta_graphs:
        if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
            return meta_graph_def
    raise RuntimeError("MetaGraphDef associated with tag-set {0} could not be found in SavedModel".format(tag_set))
java
public Where<T, ID> or(int numClauses) {
    if (numClauses == 0) {
        throw new IllegalArgumentException("Must have at least one clause in or(numClauses)");
    }
    Clause[] clauses = new Clause[numClauses];
    for (int i = numClauses - 1; i >= 0; i--) {
        clauses[i] = pop("OR");
    }
    addClause(new ManyClause(clauses, ManyClause.OR_OPERATION));
    return this;
}
python
def get_password(args):
    """Get password

    Argument:
        args: arguments object

    Return: password string
    """
    password = ''
    if args.__dict__.get('password'):
        # Specify password as argument (for -p option)
        password = args.password
    elif args.__dict__.get('P'):
        # Enter password interactively (for -P option)
        while True:
            # When setting password in $HOME/.tdclirc, using it.
            if password:
                break
            else:
                # Not set in $HOME/.tdclirc, attempt input prompt
                from getpass import getpass
                password = getpass(prompt='TonicDNS user password: ')
    return password
java
public Observable<Void> ignoreInput() {
    return getInput().map(new Func1<R, Void>() {
        @Override
        public Void call(R r) {
            ReferenceCountUtil.release(r);
            return null;
        }
    }).ignoreElements();
}
java
public void onCacheHit(String template, int locality) { final String methodName = "onCacheHit()"; CacheStatsModule csm = null; if ((csm = getCSM(template)) == null) { return; } if (tc.isDebugEnabled()) Tr.debug(tc, methodName + " cacheName=" + _sCacheName + " template=" + template + " locality=" + locality + " enable=" + csm._enable + " parentEnable=" + _enable + " " + this); switch (locality) { case REMOTE: if (csm._enable) { if (csm._remoteHitCount != null) { csm._remoteHitCount.increment(); } if (csm._inMemoryAndDiskCacheEntryCount != null) { csm._inMemoryAndDiskCacheEntryCount.increment(); } if (csm._remoteCreationCount != null) { csm._remoteCreationCount.increment(); } } break; case MEMORY: if (csm._enable && csm._hitsInMemoryCount != null) csm._hitsInMemoryCount.increment(); break; case DISK: if (_csmDisk != null && _csmDisk._enable && _csmDisk._hitsOnDisk != null) { _csmDisk._hitsOnDisk.increment(); } if (csm._enable && csm._hitsOnDiskCount != null) csm._hitsOnDiskCount.increment(); break; default: if (tc.isDebugEnabled()) Tr.debug(tc, methodName + " Error - Unrecognized locality " + locality + " cacheName=" + _sCacheName); break; } return; }
java
private void replaceModuleName() throws CmsException, UnsupportedEncodingException {
    CmsResourceFilter filter = CmsResourceFilter.ALL.addRequireFile().addExcludeState(
        CmsResource.STATE_DELETED).addRequireTimerange().addRequireVisible();
    List<CmsResource> resources = getCms().readResources(
        CmsWorkplace.VFS_PATH_MODULES + m_cloneInfo.getName() + "/",
        filter);
    for (CmsResource resource : resources) {
        CmsFile file = getCms().readFile(resource);
        if (CmsResourceTypeXmlContent.isXmlContent(file)) {
            CmsXmlContent xmlContent = CmsXmlContentFactory.unmarshal(getCms(), file);
            xmlContent.setAutoCorrectionEnabled(true);
            file = xmlContent.correctXmlStructure(getCms());
        }
        byte[] contents = file.getContents();
        String encoding = CmsLocaleManager.getResourceEncoding(getCms(), file);
        String content = new String(contents, encoding);
        Matcher matcher = Pattern.compile(m_cloneInfo.getSourceModuleName()).matcher(content);
        if (matcher.find()) {
            contents = matcher.replaceAll(m_cloneInfo.getName()).getBytes(encoding);
            if (lockResource(getCms(), file)) {
                file.setContents(contents);
                getCms().writeFile(file);
            }
        }
    }
}
java
public static Functor instanciateFunctorAsAnInstanceMethodWrapper(final Object instance, String methodName)
        throws Exception {
    if (null == instance) {
        throw new NullPointerException("Instance is null");
    }
    Method _method = instance.getClass().getMethod(methodName, (Class<?>[]) null);
    return instanciateFunctorAsAMethodWrapper(instance, _method);
}
python
def sparkify(series):
    u"""Converts <series> to a sparkline string.

    Example:
    >>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1,
    ... 1.9 ])
    u'β–β–β–‚β–„β–…β–‡β–‡β–ˆβ–ˆβ–†β–„β–‚'

    >>> sparkify([1, 1, -2, 3, -5, 8, -13])
    u'β–†β–†β–…β–†β–„β–ˆβ–'

    Raises ValueError if input data cannot be converted to float.
    Raises TypeError if series is not an iterable.
    """
    series = [float(i) for i in series]
    minimum = min(series)
    maximum = max(series)
    data_range = maximum - minimum
    if data_range == 0.0:
        # Graph a baseline if every input value is equal.
        return u''.join([spark_chars[0] for i in series])
    coefficient = (len(spark_chars) - 1.0) / data_range
    return u''.join([
        spark_chars[int(round((x - minimum) * coefficient))] for x in series
    ])
java
public ParallelTaskBuilder setTargetHostsFromJsonPath(String jsonPath, String sourcePath, HostsSourceType sourceType) throws TargetHostsLoadException { this.targetHosts = targetHostBuilder.setTargetHostsFromJsonPath(jsonPath, sourcePath, sourceType); return this; }
java
public static String getDefaultProjectId() {
    String projectId = System.getProperty(PROJECT_ENV_NAME, System.getenv(PROJECT_ENV_NAME));
    if (projectId == null) {
        projectId = System.getProperty(LEGACY_PROJECT_ENV_NAME, System.getenv(LEGACY_PROJECT_ENV_NAME));
    }
    if (projectId == null) {
        projectId = getAppEngineProjectId();
    }
    if (projectId == null) {
        projectId = getServiceAccountProjectId();
    }
    return projectId != null ? projectId : getGoogleCloudProjectId();
}
python
def run(self):
    """
    Runs the simulation.
    """
    self.init_run()
    if self.debug:
        self.dump("AfterInit: ")
    # print("++++++++++++++++ Time: %f" % self.current_time)
    while self.step():
        # self.dump("Time: %f" % self.current_time)
        # print("++++++++++++++++ Time: %f" % self.current_time)
        pass
python
def console_new(w: int, h: int) -> tcod.console.Console:
    """Return an offscreen console of size: w,h.

    .. deprecated:: 8.5
        Create new consoles using :any:`tcod.console.Console` instead of
        this function.
    """
    return tcod.console.Console(w, h)
java
public CheckRequest asCheckRequest(Clock clock) {
    Preconditions.checkState(!Strings.isNullOrEmpty(getServiceName()),
        "a service name must be set");
    Preconditions.checkState(!Strings.isNullOrEmpty(getOperationId()),
        "an operation ID must be set");
    Preconditions.checkState(!Strings.isNullOrEmpty(getOperationName()),
        "an operation name must be set");

    Operation.Builder b = super.asOperation(clock).toBuilder();
    b.putAllLabels(getSystemLabels());
    return CheckRequest.newBuilder().setServiceName(getServiceName()).setOperation(b).build();
}
python
def similar_title(post, parameter=None):
    '''Skip posts with fuzzy-matched (threshold = levenshtein distance / length) title.
        Parameters (comma-delimited): minimal threshold, at which values
            are considered similar (float, 0 < x < 1, default: {0});
            comparison timespan, seconds (int, 0 = inf, default: {1}).'''
    from feedjack.models import Post
    threshold, timespan = DEFAULT_SIMILARITY_THRESHOLD, DEFAULT_SIMILARITY_TIMESPAN
    if parameter:
        # list() keeps this working on Python 3, where map() returns an iterator
        parameter = list(map(op.methodcaller('strip'), parameter.split(',', 1)))
        threshold = parameter.pop()
        try:
            threshold, timespan = parameter.pop(), threshold
        except IndexError:
            pass
        threshold, timespan = float(threshold), int(timespan)
    similar = Post.objects.filtered(for_display=False)\
        .exclude(id=post.id).similar(threshold, title=post.title)
    if timespan:
        similar = similar.filter(date_updated__gt=timezone.now() - timedelta(seconds=timespan))
    return not bool(similar.exists())
java
public GroupOptions setOffset(@Nullable Integer offset) { this.offset = offset == null ? null : Math.max(0, offset); return this; }
java
private int completeRead(int b) throws IOException, StreamIntegrityException {
    if (b == END_OF_STREAM) {
        handleEndOfStream();
    } else {
        // Have we reached the end of the stream?
        int c = pushbackInputStream.read();
        if (c == END_OF_STREAM) {
            handleEndOfStream();
        } else {
            pushbackInputStream.unread(c);
        }
    }
    return b;
}
python
def node_vectors(node_id):
    """Get the vectors of a node.

    You must specify the node id in the url.
    You can pass direction (incoming/outgoing/all) and failed
    (True/False/all).
    """
    exp = Experiment(session)
    # get the parameters
    direction = request_parameter(parameter="direction", default="all")
    failed = request_parameter(parameter="failed", parameter_type="bool", default=False)
    for x in [direction, failed]:
        if type(x) == Response:
            return x

    # execute the request
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(error_type="/node/vectors, node does not exist")

    try:
        vectors = node.vectors(direction=direction, failed=failed)
        exp.vector_get_request(node=node, vectors=vectors)
        session.commit()
    except Exception:
        return error_response(
            error_type="/node/vectors GET server error",
            status=403,
            participant=node.participant,
        )

    # return the data
    return success_response(vectors=[v.__json__() for v in vectors])
python
def remote_space_available(self, search_pattern=r"(\d+) bytes free"):
    """Return space available on remote device."""
    remote_cmd = 'system "df {}"'.format(self.folder_name)
    remote_output = self.ssh_ctl_chan.send_command_expect(remote_cmd)
    for line in remote_output.splitlines():
        if self.folder_name in line:
            space_available = line.split()[-3]
            break
    return int(space_available)
python
def upsert(self, space, t, operations, **kwargs) -> _MethodRet: """ Update request coroutine. Performs either insert or update (depending of either tuple exists or not) Examples: .. code-block:: pycon # upsert does not return anything >>> await conn.upsert('tester', [0, 'hello'], ... [ ['=', 1, 'hi!'] ]) <Response sync=3 rowcount=0 data=[]> :param space: space id or space name. :param t: tuple to insert if it's not in space :param operations: Operations list to use for update if tuple is already in space. It has the same format as in update requets: [ [op_type, field_no, ...], ... ]. Please refer to https://tarantool.org/doc/book/box/box_space.html?highlight=update#lua-function.space_object.update You can use field numbers as well as their names in space format as a field_no (if only fetch_schema is True). If field is unknown then TarantoolSchemaError is raised. :param timeout: Request timeout :returns: :class:`asynctnt.Response` instance """ return self._db.upsert(space, t, operations, **kwargs)
java
protected long readFromFile(OutputStream stream, File file, long length, long position) throws IOException {
    FileInputStream in = null;
    try {
        if (channel == null || !channel.isOpen()) {
            in = PrivilegedFileHelper.fileInputStream(file);
            channel = in.getChannel();
        }

        length = validateAndAdjustLenght(length, position, channel.size());

        MappedByteBuffer bb = channel.map(FileChannel.MapMode.READ_ONLY, position, length);

        WritableByteChannel ch = Channels.newChannel(stream);
        ch.write(bb);
        ch.close();

        return length;
    } finally {
        if (in != null) {
            in.close();
            if (channel != null) {
                channel.close();
            }
        }
    }
}
java
@Override protected ParameterBinder createBinder(Object[] values) { return new SpelExpressionStringQueryParameterBinder((DefaultParameters) getQueryMethod().getParameters(), values, query, evaluationContextProvider, parser); }
python
def concat(self, target, sources, **kwargs):
    """Concat existing files together.

    For preconditions, see
    https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/filesystem/filesystem.html#void_concatPath_p_Path_sources

    :param target: the path to the target destination.
    :param sources: the paths to the sources to use for the concatenation.
    :type sources: list
    """
    if isinstance(sources, basestring):
        raise ValueError("sources should be a list")
    if any(',' in s for s in sources):
        raise NotImplementedError("WebHDFS does not support commas in concat")
    response = self._post(target, 'CONCAT', sources=','.join(sources), **kwargs)
    assert not response.content
python
def parse(content, *args, **kwargs):
    ''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
    global MECAB_PYTHON3
    if 'mecab_loc' not in kwargs and MECAB_PYTHON3 and 'MeCab' in globals():
        return MeCab.Tagger(*args).parse(content)
    else:
        return run_mecab_process(content, *args, **kwargs)
java
@Override
public void describeTo(Description description) {
    describeKeyTo(description);
    if (isRun) {
        if (hasResult()) {
            description.appendText(" = ")
                .appendValue(getResult());
        } else {
            description.appendText("-> ")
                .appendValue(getException());
        }
    }
}
python
def backwards(self, backwards):
    """Decorator to specify the ``backwards`` action."""
    if self._backwards is not None:
        raise ValueError('Backwards action already specified.')
    self._backwards = backwards
    return backwards
java
public void setupFields() { FieldInfo field = null; field = new FieldInfo(this, ID, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Integer.class); field.setHidden(true); field = new FieldInfo(this, LAST_CHANGED, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Date.class); field.setHidden(true); field = new FieldInfo(this, DELETED, 10, null, new Boolean(false)); field.setDataClass(Boolean.class); field.setHidden(true); field = new FieldInfo(this, START_ICON, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Object.class); field = new FieldInfo(this, END_ICON, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Object.class); field = new FieldInfo(this, START_PARENT_ICON, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Object.class); field = new FieldInfo(this, END_PARENT_ICON, Constants.DEFAULT_FIELD_LENGTH, null, null); field.setDataClass(Object.class); field = new FieldInfo(this, TASK_COLOR, 10, null, null); field.setDataClass(Integer.class); field = new FieldInfo(this, TASK_SELECT_COLOR, 10, null, null); field.setDataClass(Integer.class); field = new FieldInfo(this, PARENT_TASK_COLOR, 10, null, null); field.setDataClass(Integer.class); field = new FieldInfo(this, PARENT_TASK_SELECT_COLOR, 10, null, null); field.setDataClass(Integer.class); }
java
private void obtainPreviewSize(@NonNull final TypedArray typedArray) {
    int defaultValue = getContext().getResources()
        .getDimensionPixelSize(R.dimen.color_picker_preference_default_preview_size);
    setPreviewSize(typedArray
        .getDimensionPixelSize(R.styleable.AbstractColorPickerPreference_previewSize, defaultValue));
}
python
def sflow_polling_interval(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow")
    polling_interval = ET.SubElement(sflow, "polling-interval")
    polling_interval.text = kwargs.pop('polling_interval')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def write_sig(self):
    '''Write signature to sig store'''
    if not self._md5:
        self._md5 = fileMD5(self)
    with open(self.sig_file(), 'w') as sig:
        sig.write(
            f'{os.path.getmtime(self)}\t{os.path.getsize(self)}\t{self._md5}'
        )
java
public Optional<URL> getRoute() {
    Optional<Route> optionalRoute = getClient().routes().inNamespace(namespace)
        .list().getItems()
        .stream()
        .findFirst();

    return optionalRoute
        .map(OpenShiftRouteLocator::createUrlFromRoute);
}
python
def consume(self, tokens):
    """Consume tokens.

    Args:
        tokens (float): number of transport tokens to consume

    Returns:
        wait_time (float): waiting time for the consumer
    """
    wait_time = 0.
    self.tokens -= tokens
    if self.tokens < 0:
        self._get_tokens()
    if self.tokens < 0:
        wait_time = -self.tokens / self.fill_rate

    return wait_time
python
def checkResponse(request):
    '''
    Returns if a request has an okay error code, otherwise raises
    InvalidRequest.
    '''
    # Check the status code of the returned request
    if str(request.status_code)[0] not in ['2', '3']:
        w = str(request.text).split('\\r')[0][2:]
        raise InvalidRequest(w)
    return
python
def _init_metadata(self): """stub""" super(LabelOrthoFacesAnswerFormRecord, self)._init_metadata() self._face_values_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'face_values'), 'element_label': 'Orthographic Face Values', 'instructions': '', 'required': True, 'read_only': False, 'linked': True, 'array': False, 'default_object_values': [{}], 'syntax': 'OBJECT', 'object_set': [] }
python
def greater_than(self, greater_than):
    """Adds new `>` condition

    :param greater_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime)
    :raise:
        - QueryTypeError: if `greater_than` is of an unexpected type
    """
    if hasattr(greater_than, 'strftime'):
        greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S')
    elif isinstance(greater_than, six.string_types):
        raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than))

    return self._add_condition('>', greater_than, types=[int, str])
java
public void data_smd_smdId_DELETE(Long smdId) throws IOException {
    String qPath = "/domain/data/smd/{smdId}";
    StringBuilder sb = path(qPath, smdId);
    exec(qPath, "DELETE", sb.toString(), null);
}
java
public static StoredFile createLocalStoredFile(String sourceUrl, String localFilePath, String mimeType) {
    InputStream is = null;
    try {
        Context context = ApptentiveInternal.getInstance().getApplicationContext();
        if (URLUtil.isContentUrl(sourceUrl) && context != null) {
            Uri uri = Uri.parse(sourceUrl);
            is = context.getContentResolver().openInputStream(uri);
        } else {
            File file = new File(sourceUrl);
            is = new FileInputStream(file);
        }
        return createLocalStoredFile(is, sourceUrl, localFilePath, mimeType);
    } catch (FileNotFoundException e) {
        return null;
    } finally {
        ensureClosed(is);
    }
}
python
def parse_responses(self):
    """
    Parses the json response sent back by the server and tries to get out the important return variables

    Returns:
        dict: multicast_ids (list), success (int), failure (int), canonical_ids (int),
            results (list) and optional topic_message_id (str but None by default)

    Raises:
        FCMServerError: FCM is temporary not available
        AuthenticationError: error authenticating the sender account
        InvalidDataError: data passed to FCM was incorrectly structured
    """
    response_dict = {
        'multicast_ids': [],
        'success': 0,
        'failure': 0,
        'canonical_ids': 0,
        'results': [],
        'topic_message_id': None
    }

    for response in self.send_request_responses:
        if response.status_code == 200:
            if 'content-length' in response.headers and int(response.headers['content-length']) <= 0:
                raise FCMServerError("FCM server connection error, the response is empty")
            else:
                parsed_response = response.json()

                multicast_id = parsed_response.get('multicast_id', None)
                success = parsed_response.get('success', 0)
                failure = parsed_response.get('failure', 0)
                canonical_ids = parsed_response.get('canonical_ids', 0)
                results = parsed_response.get('results', [])
                message_id = parsed_response.get('message_id', None)  # for topic messages
                if message_id:
                    success = 1
                if multicast_id:
                    response_dict['multicast_ids'].append(multicast_id)
                response_dict['success'] += success
                response_dict['failure'] += failure
                response_dict['canonical_ids'] += canonical_ids
                response_dict['results'].extend(results)
                response_dict['topic_message_id'] = message_id

        elif response.status_code == 401:
            raise AuthenticationError("There was an error authenticating the sender account")
        elif response.status_code == 400:
            raise InvalidDataError(response.text)
        else:
            raise FCMServerError("FCM server is temporarily unavailable")
    return response_dict
python
def get_milestone(self, title):
    """
    given the title as str, looks for an existing milestone or create a new one,
    and return the object
    """
    if not title:
        return GithubObject.NotSet
    if not hasattr(self, '_milestones'):
        self._milestones = {m.title: m for m in self.repo.get_milestones()}
    milestone = self._milestones.get(title)
    if not milestone:
        milestone = self.repo.create_milestone(title=title)
    return milestone
python
def login_handler(self, config=None, prefix=None, **args):
    """OAuth starts here, redirect user to Google."""
    params = {
        'response_type': 'code',
        'client_id': self.google_api_client_id,
        'redirect_uri': self.scheme_host_port_prefix(
            'http', config.host, config.port, prefix) + '/home',
        'scope': self.google_api_scope,
        'state': self.request_args_get('next', default=''),
    }
    url = self.google_oauth2_url + 'auth?' + urlencode(params)
    return self.login_handler_redirect(url)
java
public java.rmi.Remote getPort(Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException { try { if (com.google.api.ads.adwords.axis.v201809.cm.BatchJobServiceInterface.class.isAssignableFrom(serviceEndpointInterface)) { com.google.api.ads.adwords.axis.v201809.cm.BatchJobServiceSoapBindingStub _stub = new com.google.api.ads.adwords.axis.v201809.cm.BatchJobServiceSoapBindingStub(new java.net.URL(BatchJobServiceInterfacePort_address), this); _stub.setPortName(getBatchJobServiceInterfacePortWSDDServiceName()); return _stub; } } catch (java.lang.Throwable t) { throw new javax.xml.rpc.ServiceException(t); } throw new javax.xml.rpc.ServiceException("There is no stub implementation for the interface: " + (serviceEndpointInterface == null ? "null" : serviceEndpointInterface.getName())); }
java
public boolean needsRefreshing() {
    // is there a new version available?
    long lastmodified = new File(pathResourceDirectory, Parameters.lexiconName).lastModified();
    boolean needsRefreshing = false;
    if (lastmodifiedLexicon != lastmodified) {
        needsRefreshing = true;
    }
    return needsRefreshing;
}
java
public static String getDependencyIssue(AddOn.BaseRunRequirements requirements) { if (!requirements.hasDependencyIssue()) { return null; } List<Object> issueDetails = requirements.getDependencyIssueDetails(); switch (requirements.getDependencyIssue()) { case CYCLIC: return "Cyclic dependency with: " + issueDetails.get(0); case OLDER_VERSION: return "Older version still installed: " + issueDetails.get(0); case MISSING: String addOnId = (String) issueDetails.get(0); return MessageFormat.format("Add-On with ID \"{0}\"", addOnId); case VERSION: AddOn addOn = (AddOn) issueDetails.get(0); return MessageFormat.format( "Add-on \"{0}\" with version matching {1} (found version {2})", addOn.getName(), issueDetails.get(1), addOn.getSemVer() != null ? addOn.getSemVer() : addOn.getVersion()); default: LOGGER.warn("Failed to handle dependency issue with name \"" + requirements.getDependencyIssue().name() + "\" and details: " + issueDetails); return null; } }
java
public void setSpecified (int index, boolean value)
{
    if (index < 0 || index >= getLength ())
        throw new ArrayIndexOutOfBoundsException (
            "No attribute at index: " + index);
    specified [index] = value;
}
java
public void offer(PrioritizedSplitRunner split)
{
    checkArgument(split != null, "split is null");

    split.setReady();
    int level = split.getPriority().getLevel();
    lock.lock();
    try {
        if (levelWaitingSplits.get(level).isEmpty()) {
            // Accesses to levelScheduledTime are not synchronized, so we have a data race
            // here - our level time math will be off. However, the staleness is bounded by
            // the fact that only running splits that complete during this computation
            // can update the level time. Therefore, this is benign.
            long level0Time = getLevel0TargetTime();
            long levelExpectedTime = (long) (level0Time / Math.pow(levelTimeMultiplier, level));
            long delta = levelExpectedTime - levelScheduledTime[level].get();
            levelScheduledTime[level].addAndGet(delta);
        }

        levelWaitingSplits.get(level).offer(split);
        notEmpty.signal();
    }
    finally {
        lock.unlock();
    }
}
java
public static Document asDocument(String html) throws IOException {
    DOMParser domParser = new DOMParser();
    try {
        domParser.setProperty(
            "http://cyberneko.org/html/properties/names/elems", "match");
        domParser.setFeature("http://xml.org/sax/features/namespaces", false);
        domParser.parse(new InputSource(new StringReader(html)));
    } catch (SAXException e) {
        throw new IOException("Error while reading HTML: " + html, e);
    }
    return domParser.getDocument();
}
python
def get_method_contents(self, method):
    """
    Returns the swagger contents of the given method. This checks to see if a conditional block
    has been used inside of the method, and, if so, returns the method contents that are inside of
    the conditional.

    :param dict method: method dictionary
    :return: list of swagger component dictionaries for the method
    """
    if self._CONDITIONAL_IF in method:
        return method[self._CONDITIONAL_IF][1:]

    return [method]