Dataset columns: language (string, 2 values: java / python) · func_code_string (string, length 63 to 466k characters)
java
public void copyDeepMask(BufferedImage pSrcBitmap, BufferedImage pMaskBitmap, Rectangle pSrcRect,
                         Rectangle pMaskRect, Rectangle pDstRect, int pSrcCopy, Shape pMaskRgn) {
    throw new UnsupportedOperationException("Method copyDeepMask not implemented"); // TODO: Implement
}
python
def delete_entity(self, table_name, partition_key, row_key,
                  if_match='*', timeout=None):
    '''
    Deletes an existing entity in a table. Throws if the entity does not exist.

    When an entity is successfully deleted, the entity is immediately marked
    for deletion and is no longer accessible to clients. The entity is later
    removed from the Table service during garbage collection.

    :param str table_name:
        The name of the table containing the entity to delete.
    :param str partition_key:
        The PartitionKey of the entity.
    :param str row_key:
        The RowKey of the entity.
    :param str if_match:
        The client may specify the ETag for the entity on the request in
        order to compare to the ETag maintained by the service for the
        purpose of optimistic concurrency. The delete operation will be
        performed only if the ETag sent by the client matches the value
        maintained by the server, indicating that the entity has not been
        modified since it was retrieved by the client. To force an
        unconditional delete, set If-Match to the wildcard character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)
    request = _delete_entity(partition_key, row_key, if_match)
    request.host = self._get_host()
    request.query += [('timeout', _int_to_str(timeout))]
    request.path = _get_entity_path(table_name, partition_key, row_key)

    self._perform_request(request)
java
public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {
    Objects.requireNonNull(e);
    long nanos = unit.toNanos(timeout);
    final ReentrantLock lock = this.lock;
    lock.lockInterruptibly();
    try {
        while (count == items.length) {
            if (nanos <= 0L)
                return false;
            nanos = notFull.awaitNanos(nanos);
        }
        enqueue(e);
        return true;
    } finally {
        lock.unlock();
    }
}
python
def to_dict(self):
    """
    Returns: itself as a dictionary
    """
    dictator = Script.to_dict(self)
    # the dynamically created ScriptIterator classes have a generic name;
    # replace this with ScriptIterator to indicate that this class is of type ScriptIterator
    dictator[self.name]['class'] = 'ScriptIterator'
    return dictator
python
def _get_data(self):
    """Process the IGRA2 text file for observations at site_id matching time.

    Return:
    -------
    :class: `pandas.DataFrame` containing the body data.
    :class: `pandas.DataFrame` containing the header data.
    """
    # Split the list of times into begin and end dates. If only
    # one date is supplied, set both begin and end dates equal to that date.
    body, header, dates_long, dates = self._get_data_raw()
    params = self._get_fwf_params()

    df_body = pd.read_fwf(StringIO(body), **params['body'])
    df_header = pd.read_fwf(StringIO(header), **params['header'])
    df_body['date'] = dates_long

    df_body = self._clean_body_df(df_body)
    df_header = self._clean_header_df(df_header)
    df_header['date'] = dates

    return df_body, df_header
java
public static <S extends Iterator<? extends T>, T> Iterator<T> iteratorOverIterators(Iterator<S> iteratorsIterator) {
    Objects.requireNonNull(iteratorsIterator, "The iteratorsIterator is null");
    return new CombiningIterator<T>(iteratorsIterator);
}
java
MACAddressSegment[] toEUISegments(boolean extended) {
    IPv6AddressSegment seg0, seg1, seg2, seg3;
    int start = addressSegmentIndex;
    int segmentCount = getSegmentCount();
    int segmentIndex;
    if(start < 4) {
        start = 0;
        segmentIndex = 4 - start;
    } else {
        start -= 4;
        segmentIndex = 0;
    }
    int originalSegmentIndex = segmentIndex;
    seg0 = (start == 0 && segmentIndex < segmentCount) ? getSegment(segmentIndex++) : null;
    seg1 = (start <= 1 && segmentIndex < segmentCount) ? getSegment(segmentIndex++) : null;
    seg2 = (start <= 2 && segmentIndex < segmentCount) ? getSegment(segmentIndex++) : null;
    seg3 = (start <= 3 && segmentIndex < segmentCount) ? getSegment(segmentIndex++) : null;
    int macSegCount = (segmentIndex - originalSegmentIndex) << 1;
    if(!extended) {
        macSegCount -= 2;
    }
    if((seg1 != null && !seg1.matchesWithMask(0xff, 0xff))
            || (seg2 != null && !seg2.matchesWithMask(0xfe00, 0xff00))
            || macSegCount == 0) {
        return null;
    }
    MACAddressCreator creator = getMACNetwork().getAddressCreator();
    MACAddressSegment ZERO_SEGMENT = creator.createSegment(0);
    MACAddressSegment newSegs[] = creator.createSegmentArray(macSegCount);
    int macStartIndex = 0;
    if(seg0 != null) {
        seg0.getSplitSegments(newSegs, macStartIndex, creator);
        //toggle the u/l bit
        MACAddressSegment macSegment0 = newSegs[0];
        int lower0 = macSegment0.getSegmentValue();
        int upper0 = macSegment0.getUpperSegmentValue();
        int mask2ndBit = 0x2;
        if(!macSegment0.matchesWithMask(mask2ndBit & lower0, mask2ndBit)) { //you can use matches with mask
            return null;
        }
        lower0 ^= mask2ndBit; //flip the universal/local bit
        upper0 ^= mask2ndBit;
        newSegs[0] = creator.createSegment(lower0, upper0, null);
        macStartIndex += 2;
    }
    if(seg1 != null) {
        seg1.getSplitSegments(newSegs, macStartIndex, creator); //a ff fe b
        if(!extended) {
            newSegs[macStartIndex + 1] = ZERO_SEGMENT;
        }
        macStartIndex += 2;
    }
    if(seg2 != null) {
        if(!extended) {
            if(seg1 != null) {
                macStartIndex -= 2;
                MACAddressSegment first = newSegs[macStartIndex];
                seg2.getSplitSegments(newSegs, macStartIndex, creator);
                newSegs[macStartIndex] = first;
            } else {
                seg2.getSplitSegments(newSegs, macStartIndex, creator);
                newSegs[macStartIndex] = ZERO_SEGMENT;
            }
        } else {
            seg2.getSplitSegments(newSegs, macStartIndex, creator);
        }
        macStartIndex += 2;
    }
    if(seg3 != null) {
        seg3.getSplitSegments(newSegs, macStartIndex, creator);
    }
    return newSegs;
}
java
public static void filterP12(File p12, String p12Password) throws IOException {
    if (!p12.exists())
        throw new IllegalArgumentException("p12 file does not exist: " + p12.getPath());

    final File pem;
    if (USE_GENERIC_TEMP_DIRECTORY)
        pem = File.createTempFile(UUID.randomUUID().toString(), "");
    else
        pem = new File(p12.getAbsolutePath() + ".pem.tmp");

    final String pemPassword = UUID.randomUUID().toString();
    try {
        P12toPEM(p12, p12Password, pem, pemPassword);
        PEMtoP12(pem, pemPassword, p12, p12Password);
    } finally {
        if (pem.exists())
            if (!pem.delete())
                log.warn("[OpenSSLPKCS12] {filterP12} Could not delete temporary PEM file " + pem);
    }
}
python
def project_path(*names):
    """Path to a file in the project."""
    return os.path.join(os.path.dirname(__file__), *names)
java
public void warn(String msg, Throwable e) {
    doLog(msg, LOG_WARNING, null, e);
}
java
public boolean contains(IPAddressString other) {
    if(isValid()) {
        if(other == this) {
            return true;
        }
        if(other.addressProvider.isUninitialized()) { // other not yet validated - if other is validated no need for this quick contains
            //do the quick check that uses only the String of the other
            Boolean directResult = addressProvider.contains(other.fullAddr);
            if(directResult != null) {
                return directResult.booleanValue();
            }
        }
        if(other.isValid()) {
            // note the quick result also handles the case of "all addresses"
            Boolean directResult = addressProvider.contains(other.addressProvider);
            if(directResult != null) {
                return directResult.booleanValue();
            }
            IPAddress addr = getAddress();
            if(addr != null) {
                IPAddress otherAddress = other.getAddress();
                if(otherAddress != null) {
                    return addr.contains(otherAddress);
                }
            }
        }
    }
    return false;
}
java
public static void initialize() {
    if (AppConfigHandler.get().isQueryCacheDeactivated()) {
        QueryCache.NOOP = new NoOpQueryCache();
    } else {
        if (InfinispanCache.get().exists(QueryCache.INDEXCACHE)) {
            InfinispanCache.get().<String, QueryKey>getCache(QueryCache.INDEXCACHE).clear();
        } else {
            final Cache<String, QueryKey> cache = InfinispanCache.get()
                    .<String, QueryKey>getCache(QueryCache.INDEXCACHE);
            cache.addListener(new CacheLogListener(QueryCache.LOG));
        }
        if (InfinispanCache.get().exists(QueryCache.SQLCACHE)) {
            InfinispanCache.get().<QueryKey, Object>getCache(QueryCache.SQLCACHE).clear();
        } else {
            final Cache<QueryKey, Object> cache = InfinispanCache.get()
                    .<QueryKey, Object>getCache(QueryCache.SQLCACHE);
            cache.addListener(new CacheLogListener(QueryCache.LOG));
            cache.addListener(new SqlCacheListener());
        }
    }
}
java
public static <B extends ConnectionConfiguration.Builder<B, ?>> B setTLSOnly(B builder) {
    builder.setEnabledSSLProtocols(new String[] { PROTO_TLSV1_2, PROTO_TLSV1_1, PROTO_TLSV1 });
    return builder;
}
java
@Override
public CPDefinition findByC_S_Last(long CProductId, int status,
        OrderByComparator<CPDefinition> orderByComparator)
    throws NoSuchCPDefinitionException {

    CPDefinition cpDefinition = fetchByC_S_Last(CProductId, status, orderByComparator);

    if (cpDefinition != null) {
        return cpDefinition;
    }

    StringBundler msg = new StringBundler(6);

    msg.append(_NO_SUCH_ENTITY_WITH_KEY);
    msg.append("CProductId=");
    msg.append(CProductId);
    msg.append(", status=");
    msg.append(status);
    msg.append("}");

    throw new NoSuchCPDefinitionException(msg.toString());
}
java
public final <S, T> void registerBinding(Class<S> source, Class<T> target, Binding<S, T> converter,
        Class<? extends Annotation> qualifier) {
    registerBinding(new ConverterKey<S, T>(source, target,
            qualifier == null ? DefaultBinding.class : qualifier), converter);
}
java
public int parse(List<String> words, List<String> postags, List<Integer> heads, List<String> deprels) {
    Instance inst = new Instance();

    inst.forms.add(SpecialOption.ROOT);
    inst.postags.add(SpecialOption.ROOT);

    for (int i = 0; i < words.size(); i++) {
        inst.forms.add(words.get(i));
        inst.postags.add(postags.get(i));
    }

    parser.predict(inst, heads, deprels);
    heads.remove(0);
    deprels.remove(0);

    return heads.size();
}
java
@Override
public void refresh(Object bean) {
    methodCalls.add(MethodCall.of("refresh").with("bean", bean));
    find.refresh(bean);
}
java
public XML deleteClass(Class<?> aClass) {
    boolean isRemoved = xmlJmapper.classes.remove(new XmlClass(aClass.getName()));
    if (!isRemoved) Error.xmlClassInexistent(this.xmlPath, aClass);
    return this;
}
python
def parse_directive_location(lexer: Lexer) -> NameNode:
    """DirectiveLocation"""
    start = lexer.token
    name = parse_name(lexer)
    if name.value in DirectiveLocation.__members__:
        return name
    raise unexpected(lexer, start)
java
private void createProcessAndConnections(Conversion cnv, ConversionDirectionType direction) {
    assert cnv.getConversionDirection() == null ||
        cnv.getConversionDirection().equals(direction) ||
        cnv.getConversionDirection().equals(ConversionDirectionType.REVERSIBLE);

    // create the process for the conversion in that direction
    Glyph process = factory.createGlyph();
    process.setClazz(GlyphClazz.PROCESS.getClazz());
    process.setId(convertID(cnv.getUri()) + "_" + direction.name().replaceAll("_", ""));
    glyphMap.put(process.getId(), process);

    // Determine input and output sets
    Set<PhysicalEntity> input = direction.equals(ConversionDirectionType.RIGHT_TO_LEFT)
        ? cnv.getRight() : cnv.getLeft();
    Set<PhysicalEntity> output = direction.equals(ConversionDirectionType.RIGHT_TO_LEFT)
        ? cnv.getLeft() : cnv.getRight();

    // Create input and outputs ports for the process
    addPorts(process);

    Map<PhysicalEntity, Stoichiometry> stoic = getStoichiometry(cnv);

    // Associate inputs to input port
    for (PhysicalEntity pe : input) {
        Glyph g = getGlyphToLink(pe, process.getId());
        createArc(g, process.getPort().get(0), direction == ConversionDirectionType.REVERSIBLE
            ? ArcClazz.PRODUCTION.getClazz() : ArcClazz.CONSUMPTION.getClazz(), stoic.get(pe));
    }

    // Associate outputs to output port
    for (PhysicalEntity pe : output) {
        Glyph g = getGlyphToLink(pe, process.getId());
        createArc(process.getPort().get(1), g, ArcClazz.PRODUCTION.getClazz(), stoic.get(pe));
    }

    processControllers(cnv.getControlledOf(), process);

    // Record mapping
    Set<String> uris = new HashSet<String>();
    uris.add(cnv.getUri());
    sbgn2BPMap.put(process.getId(), uris);
}
python
def _as_versioned_jar(self, internal_target):
    """Fetches the jar representation of the given target, and applies the latest pushdb version."""
    jar, _ = internal_target.get_artifact_info()
    pushdb_entry = self._get_db(internal_target).get_entry(internal_target)
    return jar.copy(rev=pushdb_entry.version().version())
python
def predict(self):
    """
    Returns
    -------
    proba : ndarray, shape=(n_clusters, )
        The probability of given cluster being label 1.
    """
    if self.w_ is not None:
        sigmoid = lambda t: 1. / (1. + np.exp(-t))
        return sigmoid(np.dot(self.centers, self.w_[:-1]) + self.w_[-1])
    else:
        # the model has not been trained yet; fail loudly instead of
        # silently returning None (this branch was a bare TODO/pass)
        raise ValueError("predict() called before the model was trained")
python
def read_sparse(cls, file_path: str):
    """Read a sparse representation from a tab-delimited text file.

    TODO: docstring"""
    with open(file_path) as fh:
        next(fh)  # skip header line
        genes = next(fh)[1:-1].split('\t')
        cells = next(fh)[1:-1].split('\t')
        next(fh)
        m, n, nnz = [int(s) for s in next(fh)[:-1].split(' ')]
        t = pd.read_csv(file_path, sep=' ', skiprows=5, header=None,
                        dtype={0: np.uint32, 1: np.uint32})
        i = t[0].values - 1
        j = t[1].values - 1
        data = t[2].values
        assert data.size == nnz
        X = sparse.coo_matrix((data, (i, j)), shape=[m, n]).todense()
    return cls(X=X, genes=genes, cells=cells)
python
def updateAndFlush(self, login, tableName, cells):
    """
    Parameters:
     - login
     - tableName
     - cells
    """
    self.send_updateAndFlush(login, tableName, cells)
    self.recv_updateAndFlush()
python
def mcc(y, z):
    """Matthews correlation coefficient
    """
    tp, tn, fp, fn = contingency_table(y, z)
    return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
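For intuition, here is a minimal plain-NumPy sketch of the same formula. This is an assumption-laden rewrite: the original relies on a contingency_table helper and a Keras-style backend K, which are replaced here by explicit confusion-matrix counts.

import numpy as np

def mcc_numpy(y, z):
    # y, z are 0/1 arrays of true and predicted labels
    tp = np.sum((y == 1) & (z == 1))
    tn = np.sum((y == 0) & (z == 0))
    fp = np.sum((y == 0) & (z == 1))
    fn = np.sum((y == 1) & (z == 0))
    return (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

y = np.array([1, 1, 0, 0])
z = np.array([1, 0, 0, 0])
print(mcc_numpy(y, z))  # ~0.577: one false negative, everything else correct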
java
public boolean isRoundingAvailable(String roundingId, String... providers) {
    return isRoundingAvailable(
            RoundingQueryBuilder.of().setProviderNames(providers).setRoundingName(roundingId).build());
}
python
def add_homogeneous_model(self, magnitude, phase=0, frequency=None):
    """Add homogeneous models to one or all tomodirs. Register those as
    forward models.

    Parameters
    ----------
    magnitude : float
        Value of homogeneous magnitude model
    phase : float, optional
        Value of homogeneous phase model. Default 0
    frequency : float, optional
        Frequency of the tomodir to use. If None, then apply to all
        tomodirs. Default is None.
    """
    if frequency is None:
        frequencies = self.frequencies
    else:
        assert isinstance(frequency, Number)
        frequencies = [frequency, ]

    for freq in frequencies:
        pidm, pidp = self.tds[freq].add_homogeneous_model(magnitude, phase)
        self.a['forward_rmag'][freq] = pidm
        self.a['forward_rpha'][freq] = pidp
python
def find_includes(filename):
    """
    Find user includes (no system includes) requested from given source
    file. All .h files will be given relative to the current folder, e.g.
    ["c/rowindex.h", "c/column.h"].
    """
    includes = []
    with open(filename, "r", encoding="utf-8") as inp:
        for line in inp:
            line = line.strip()
            if not line or line.startswith("//"):
                continue
            if line.startswith("#"):
                mm = re.match(rx_include, line)
                if mm:
                    includename = os.path.join("c", mm.group(1))
                    includes.append(includename)
    return includes
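The module-level rx_include pattern is not shown in this row. A plausible definition consistent with the loop above (an assumption for illustration, not the project's actual regex) would be:

import re

# Hypothetical pattern: matches lines such as `#include "rowindex.h"` and
# captures the quoted path; system includes like <stdio.h> do not match.
rx_include = re.compile(r'#include\s*"([^"]+)"')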
java
@Override
public final PArray getArray(final String key) {
    PArray result = optArray(key);
    if (result == null) {
        throw new ObjectMissingException(this, key);
    }
    return result;
}
python
def _get_block(self):
    """Just read a single block from your current location in _fh"""
    b = self._fh.read(4)  # get block size bytes
    #print self._fh.tell()
    if not b:
        raise StopIteration
    block_size = struct.unpack('<i', b)[0]
    return self._fh.read(block_size)
java
void genCatch(JCCatch tree, Env<GenContext> env, int startpc, int endpc, List<Integer> gaps) {
    if (startpc != endpc) {
        List<Pair<List<Attribute.TypeCompound>, JCExpression>> catchTypeExprs
                = catchTypesWithAnnotations(tree);
        while (gaps.nonEmpty()) {
            for (Pair<List<Attribute.TypeCompound>, JCExpression> subCatch1 : catchTypeExprs) {
                JCExpression subCatch = subCatch1.snd;
                int catchType = makeRef(tree.pos(), subCatch.type);
                int end = gaps.head.intValue();
                registerCatch(tree.pos(), startpc, end, code.curCP(), catchType);
                for (Attribute.TypeCompound tc : subCatch1.fst) {
                    tc.position.setCatchInfo(catchType, startpc);
                }
            }
            gaps = gaps.tail;
            startpc = gaps.head.intValue();
            gaps = gaps.tail;
        }
        if (startpc < endpc) {
            for (Pair<List<Attribute.TypeCompound>, JCExpression> subCatch1 : catchTypeExprs) {
                JCExpression subCatch = subCatch1.snd;
                int catchType = makeRef(tree.pos(), subCatch.type);
                registerCatch(tree.pos(), startpc, endpc, code.curCP(), catchType);
                for (Attribute.TypeCompound tc : subCatch1.fst) {
                    tc.position.setCatchInfo(catchType, startpc);
                }
            }
        }
        VarSymbol exparam = tree.param.sym;
        code.statBegin(tree.pos);
        code.markStatBegin();
        int limit = code.nextreg;
        code.newLocal(exparam);
        items.makeLocalItem(exparam).store();
        code.statBegin(TreeInfo.firstStatPos(tree.body));
        genStat(tree.body, env, CRT_BLOCK);
        code.endScopes(limit);
        code.statBegin(TreeInfo.endPos(tree.body));
    }
}
python
def delete_service_endpoint(self, project, endpoint_id, deep=None):
    """DeleteServiceEndpoint.

    [Preview API] Delete a service endpoint.
    :param str project: Project ID or project name
    :param str endpoint_id: Id of the service endpoint to delete.
    :param bool deep: Specific to AzureRM endpoint created in Automatic
        flow. When set to true, this will also delete corresponding AAD
        application in Azure. Default value is true.
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if endpoint_id is not None:
        route_values['endpointId'] = self._serialize.url('endpoint_id', endpoint_id, 'str')
    query_parameters = {}
    if deep is not None:
        query_parameters['deep'] = self._serialize.query('deep', deep, 'bool')
    self._send(http_method='DELETE',
               location_id='e85f1c62-adfc-4b74-b618-11a150fb195e',
               version='5.0-preview.2',
               route_values=route_values,
               query_parameters=query_parameters)
java
@Override
public void setPeerRecoveryLeaseTimeout(int leaseTimeout) {
    if (tc.isEntryEnabled())
        Tr.entry(tc, "setPeerRecoveryLeaseTimeout", leaseTimeout);

    // Store the Lease Timeout
    _leaseTimeout = leaseTimeout;

    if (tc.isEntryEnabled())
        Tr.exit(tc, "setPeerRecoveryLeaseTimeout", this);
}
python
def build_rrule(count=None, interval=None, bysecond=None, byminute=None, byhour=None,
                byweekno=None, bymonthday=None, byyearday=None, bymonth=None,
                until=None, bysetpos=None, wkst=None, byday=None, freq=None):
    """
    Build rrule dictionary for vRecur class.

    :param count: int
    :param interval: int
    :param bysecond: int
    :param byminute: int
    :param byhour: int
    :param byweekno: int
    :param bymonthday: int
    :param byyearday: int
    :param bymonth: int
    :param until: datetime
    :param bysetpos: int
    :param wkst: str, two-letter weekday
    :param byday: weekday
    :param freq: str, frequency name ('WEEK', 'MONTH', etc)
    :return: dict
    """
    result = {}

    if count is not None:
        result['COUNT'] = count
    if interval is not None:
        result['INTERVAL'] = interval
    if bysecond is not None:
        result['BYSECOND'] = bysecond
    if byminute is not None:
        result['BYMINUTE'] = byminute
    if byhour is not None:
        result['BYHOUR'] = byhour
    if byweekno is not None:
        result['BYWEEKNO'] = byweekno
    if bymonthday is not None:
        result['BYMONTHDAY'] = bymonthday
    if byyearday is not None:
        result['BYYEARDAY'] = byyearday
    if bymonth is not None:
        result['BYMONTH'] = bymonth
    if until is not None:
        result['UNTIL'] = until
    if bysetpos is not None:
        result['BYSETPOS'] = bysetpos
    if wkst is not None:
        result['WKST'] = wkst
    if byday is not None:
        result['BYDAY'] = byday
    if freq is not None:
        if freq not in vRecur.frequencies:
            raise ValueError('Frequency value should be one of: {0}'
                             .format(vRecur.frequencies))
        result['FREQ'] = freq

    return result
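A quick usage sketch, assuming build_rrule and icalendar's vRecur are importable as above; the keys follow RFC 5545 recurrence-rule naming:

# Every Monday, four occurrences in total.
rule = build_rrule(freq='WEEKLY', count=4, byday='MO')
print(rule)  # {'COUNT': 4, 'BYDAY': 'MO', 'FREQ': 'WEEKLY'}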
python
def insert_entity(self, table_name, entity, timeout=None):
    '''
    Inserts a new entity into the table. Throws if an entity with the same
    PartitionKey and RowKey already exists.

    When inserting an entity into a table, you must specify values for the
    PartitionKey and RowKey system properties. Together, these properties
    form the primary key and must be unique within the table. Both the
    PartitionKey and RowKey values must be string values; each key value may
    be up to 64 KB in size. If you are using an integer value for the key
    value, you should convert the integer to a fixed-width string, because
    they are canonically sorted. For example, you should convert the value
    1 to 0000001 to ensure proper sorting.

    :param str table_name:
        The name of the table to insert the entity into.
    :param entity:
        The entity to insert. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the inserted entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    request = _insert_entity(entity)
    request.host = self._get_host()
    request.path = '/' + _to_str(table_name)
    request.query += [('timeout', _int_to_str(timeout))]

    response = self._perform_request(request)
    return _extract_etag(response)
java
@Override
public boolean satisfies(Match match, int... ind) {
    assertIndLength(ind);

    // Collect values of the element group
    Set values = new HashSet();
    for (BioPAXElement gen : con1.generate(match, ind)) {
        values.addAll(pa1.getValueFromBean(gen));
    }

    // If emptiness is desired, check that
    if (value == EMPTY) return values.isEmpty();

    // If cannot be empty, check it
    if (oper == Operation.NOT_EMPTY_AND_NOT_INTERSECT && values.isEmpty()) return false;

    // If the second element is desired value, check that
    else if (value == USE_SECOND_ARG) {
        BioPAXElement q = match.get(ind[1]);
        return oper == Operation.INTERSECT ? values.contains(q) : !values.contains(q);
    }

    // If element group is compared to preset value, but the value is actually a collection,
    // then iterate the collection, see if any of them matches
    else if (value instanceof Collection) {
        Collection query = (Collection) value;
        values.retainAll(query);
        if (oper == Operation.INTERSECT) return !values.isEmpty();
        else return values.isEmpty();
    }

    // If two set of elements should share a field value, check that
    else if (pa2 != null) {
        // Collect values of the second group
        Set others = new HashSet();
        for (BioPAXElement gen : con2.generate(match, ind)) {
            others.addAll(pa2.getValueFromBean(gen));
        }

        switch (oper) {
            case INTERSECT:
                others.retainAll(values);
                return !others.isEmpty();
            case NOT_INTERSECT:
                others.retainAll(values);
                return others.isEmpty();
            case NOT_EMPTY_AND_NOT_INTERSECT:
                if (others.isEmpty()) return false;
                others.retainAll(values);
                return others.isEmpty();
            default:
                throw new RuntimeException("Unhandled operation: " + oper);
        }
    }

    // Check if the element field values contain the parameter value
    else if (oper == Operation.INTERSECT) return values.contains(value);
    else return !values.contains(value);
}
python
def dumps(obj, indent=None, default=None, sort_keys=False, **kw):
    """Dump string."""
    return YAMLEncoder(indent=indent, default=default, sort_keys=sort_keys, **kw).encode(obj)
java
private static boolean isArticleLink(String linkedArticleTitle) {
    String s = linkedArticleTitle.toLowerCase();
    return !(s.startsWith("image:")
            || s.startsWith("wikipedia:")
            || s.startsWith("template:")
            || s.startsWith("category:")
            || s.startsWith("portal:")
            || s.contains("(disambiguation)"));
}
python
def ctrl_x(self, x, to=None):
    """
    Sends a character to the currently active element with Ctrl pressed.
    This method takes care of pressing and releasing Ctrl.
    """
    seq = [Keys.CONTROL, x, Keys.CONTROL]
    # This works around a bug in Selenium that happens in FF on
    # Windows, and in Chrome on Linux.
    #
    # The bug was reported here:
    #
    # https://code.google.com/p/selenium/issues/detail?id=7303
    #
    if (self.firefox and self.windows) or (self.linux and self.chrome):
        seq.append(Keys.PAUSE)
    if to is None:
        ActionChains(self.driver) \
            .send_keys(seq) \
            .perform()
    else:
        self.send_keys(to, seq)
python
def set_range_value(self, data):
    """
    Validates date range by parsing into 2 datetime objects and
    validating them both.
    """
    dtfrom = data.pop('value_from')
    dtto = data.pop('value_to')
    if dtfrom is dtto is None:
        self.errors['value'] = ['Date range requires values']
        raise forms.ValidationError([])
    data['value'] = (dtfrom, dtto)
python
def __remove_index(self, ids):
    """remove affected ids from the index"""
    if not ids:
        return
    ids = ",".join((str(id) for id in ids))
    self.execute("DELETE FROM fact_index where id in (%s)" % ids)
java
@SuppressWarnings("WeakerAccess")
public CreateInstanceRequest setType(@Nonnull Instance.Type type) {
    Preconditions.checkNotNull(type);
    Preconditions.checkArgument(type != Instance.Type.UNRECOGNIZED, "Type is unrecognized");
    builder.getInstanceBuilder().setType(type.toProto());
    return this;
}
java
public synchronized void start() throws SocketException {
    if (!isRunning()) {
        socket.set(new DatagramSocket(BEAT_PORT));
        deliverLifecycleAnnouncement(logger, true);
        final byte[] buffer = new byte[512];
        final DatagramPacket packet = new DatagramPacket(buffer, buffer.length);
        Thread receiver = new Thread(null, new Runnable() {
            @Override
            public void run() {
                boolean received;
                while (isRunning()) {
                    try {
                        socket.get().receive(packet);
                        received = !DeviceFinder.getInstance().isAddressIgnored(packet.getAddress());
                    } catch (IOException e) {
                        // Don't log a warning if the exception was due to the socket closing at shutdown.
                        if (isRunning()) {
                            // We did not expect to have a problem; log a warning and shut down.
                            logger.warn("Problem reading from DeviceAnnouncement socket, stopping", e);
                            stop();
                        }
                        received = false;
                    }
                    try {
                        if (received) {
                            final Util.PacketType kind = Util.validateHeader(packet, BEAT_PORT);
                            if (kind != null) {
                                switch (kind) {
                                    case BEAT:
                                        if (isPacketLongEnough(packet, 96, "beat")) {
                                            deliverBeat(new Beat(packet));
                                        }
                                        break;
                                    case CHANNELS_ON_AIR:
                                        if (isPacketLongEnough(packet, 0x2d, "channels on-air")) {
                                            byte[] data = packet.getData();
                                            Set<Integer> audibleChannels = new TreeSet<Integer>();
                                            for (int channel = 1; channel <= 4; channel++) {
                                                if (data[0x23 + channel] != 0) {
                                                    audibleChannels.add(channel);
                                                }
                                            }
                                            audibleChannels = Collections.unmodifiableSet(audibleChannels);
                                            deliverOnAirUpdate(audibleChannels);
                                        }
                                        break;
                                    case SYNC_CONTROL:
                                        if (isPacketLongEnough(packet, 0x2c, "sync control command")) {
                                            deliverSyncCommand(packet.getData()[0x2b]);
                                        }
                                        break;
                                    case MASTER_HANDOFF_REQUEST:
                                        if (isPacketLongEnough(packet, 0x28, "tempo master handoff request")) {
                                            deliverMasterYieldCommand(packet.getData()[0x21]);
                                        }
                                        break;
                                    case MASTER_HANDOFF_RESPONSE:
                                        if (isPacketLongEnough(packet, 0x2c, "tempo master handoff response")) {
                                            byte[] data = packet.getData();
                                            deliverMasterYieldResponse(data[0x21], data[0x2b] == 1);
                                        }
                                        break;
                                    case FADER_START_COMMAND:
                                        if (isPacketLongEnough(packet, 0x28, "fader start command")) {
                                            byte[] data = packet.getData();
                                            Set<Integer> playersToStart = new TreeSet<Integer>();
                                            Set<Integer> playersToStop = new TreeSet<Integer>();
                                            for (int channel = 1; channel <= 4; channel++) {
                                                switch (data[0x23 + channel]) {
                                                    case 0:
                                                        playersToStart.add(channel);
                                                        break;
                                                    case 1:
                                                        playersToStop.add(channel);
                                                        break;
                                                    case 2:
                                                        // Leave this player alone
                                                        break;
                                                    default:
                                                        logger.warn("Ignoring unrecognized fader start command, " +
                                                                data[0x23 + channel] + ", for channel " + channel);
                                                }
                                            }
                                            playersToStart = Collections.unmodifiableSet(playersToStart);
                                            playersToStop = Collections.unmodifiableSet(playersToStop);
                                            deliverFaderStartCommand(playersToStart, playersToStop);
                                        }
                                        break;
                                    default:
                                        logger.warn("Ignoring packet received on beat port with unexpected type: " + kind);
                                }
                            }
                        }
                    } catch (Throwable t) {
                        logger.warn("Problem processing beat packet", t);
                    }
                }
            }
        }, "beat-link BeatFinder receiver");
        receiver.setDaemon(true);
        receiver.setPriority(Thread.MAX_PRIORITY);
        receiver.start();
    }
}
python
def lal(self):
    """ Returns a LAL Object that contains this data """
    lal_data = None
    if self._data.dtype == float32:
        lal_data = _lal.CreateREAL4Vector(len(self))
    elif self._data.dtype == float64:
        lal_data = _lal.CreateREAL8Vector(len(self))
    elif self._data.dtype == complex64:
        lal_data = _lal.CreateCOMPLEX8Vector(len(self))
    elif self._data.dtype == complex128:
        lal_data = _lal.CreateCOMPLEX16Vector(len(self))

    lal_data.data[:] = self.numpy()

    return lal_data
java
public void setExpressions(java.util.Collection<ExpressionStatus> expressions) {
    if (expressions == null) {
        this.expressions = null;
        return;
    }
    this.expressions = new com.amazonaws.internal.SdkInternalList<ExpressionStatus>(expressions);
}
python
def if_json_contain(left_json, right_json, op='strict'):
    """
    Check whether one JSON object contains the keys of another JSON object,
    with equal values.

    :param:
        * left_json: (dict) the JSON to check, referred to as "left"
        * right_json: (dict) the JSON to check against, referred to as
          "right"; currently we test whether left is contained in right
        * op: (string) comparison operator; only 'strict' is supported for
          now, kept as the default for backward compatibility
    :return:
        * result: (bool) True if right json contains every key of left json
          with an equal value, otherwise False

    Example::

        print('--- json contain demo ---')
        json1 = {"id": "0001"}
        json2 = {"id": "0001", "value": "File"}
        print(if_json_contain(json1, json2))
        print('---')

    Output::

        --- json contain demo ---
        True
        ---

    """
    key_list = left_json.keys()

    if op == 'strict':
        for key in key_list:
            if not right_json.get(key) == left_json.get(key):
                return False
        return True
python
def get_key(key_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a key exists. Returns fingerprint and name if
    it does and False if it doesn't.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_ec2.get_key mykey
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    try:
        key = conn.get_key_pair(key_name)
        log.debug("the key to return is : %s", key)
        if key is None:
            return False
        return key.name, key.fingerprint
    except boto.exception.BotoServerError as e:
        log.debug(e)
        return False
java
String getHost() {
    LOGGER.entering();
    String val = "";
    InstanceType type = getType();

    if (commands.contains(HOST_ARG)) {
        val = commands.get(commands.indexOf(HOST_ARG) + 1);
        LOGGER.exiting(val);
        return val;
    }

    try {
        if (type.equals(InstanceType.SELENIUM_NODE) || type.equals(InstanceType.SELENIUM_HUB)) {
            val = getSeleniumConfigAsJsonObject().get("host").getAsString();
        }
    } catch (JsonParseException | NullPointerException e) {
        // ignore
    }

    // return the value if it looks okay, otherwise return "localhost" as a last ditch effort
    val = (StringUtils.isNotEmpty(val) && !val.equalsIgnoreCase("ip")) ? val : "localhost";
    LOGGER.exiting(val);
    return val;
}
java
public Trace withSegments(Segment... segments) {
    if (this.segments == null) {
        setSegments(new java.util.ArrayList<Segment>(segments.length));
    }
    for (Segment ele : segments) {
        this.segments.add(ele);
    }
    return this;
}
python
def h_boiling_Yan_Lin(m, x, Dh, rhol, rhog, mul, kl, Hvap, Cpl, q, A_channel_flow):
    r'''Calculates the two-phase boiling heat transfer coefficient of a
    liquid and gas flowing inside a plate and frame heat exchanger, as
    developed in [1]_. Reviewed in [2]_, [3]_, [4]_, and [5]_.

    .. math::
        h = 1.926\left(\frac{k_l}{D_h}\right) Re_{eq} Pr_l^{1/3} Bo_{eq}^{0.3} Re^{-0.5}

        Re_{eq} = \frac{G_{eq} D_h}{\mu_l}

        Bo_{eq} = \frac{q}{G_{eq} H_{vap}}

        G_{eq} = \frac{m}{A_{flow}}\left[1 - x + x\left(\frac{\rho_l}{\rho_g}\right)^{1/2}\right]

        Re = \frac{G D_h}{\mu_l}

    Claimed to be valid for :math:`2000 < Re_{eq} < 10000`.

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    x : float
        Quality at the specific point in the plate exchanger []
    Dh : float
        Hydraulic diameter of the plate, :math:`D_h = \frac{4\lambda}{\phi}` [m]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]
    mul : float
        Viscosity of the liquid [Pa*s]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    Hvap : float
        Heat of vaporization of the fluid at the system pressure, [J/kg]
    Cpl : float
        Heat capacity of liquid [J/kg/K]
    q : float
        Heat flux, [W/m^2]
    A_channel_flow : float
        The flow area for the fluid, calculated as
        :math:`A_{ch} = 2\cdot \text{width} \cdot \text{amplitude}` [m]

    Returns
    -------
    h : float
        Boiling heat transfer coefficient [W/m^2/K]

    Notes
    -----
    Developed with R134a as the refrigerant in a PHD with 2 channels,
    chevron angle 60 degrees, quality from 0.1 to 0.8, heat flux
    11-15 kW/m^2, and mass fluxes of 55 and 70 kg/m^2/s.

    Examples
    --------
    >>> h_boiling_Yan_Lin(m=3E-5, x=.4, Dh=0.002, rhol=567., rhog=18.09,
    ... kl=0.086, Cpl=2200, mul=156E-6, Hvap=9E5, q=1E5, A_channel_flow=0.0003)
    318.7228565961241

    References
    ----------
    .. [1] Yan, Y.-Y., and T.-F. Lin. "Evaporation Heat Transfer and Pressure
       Drop of Refrigerant R-134a in a Plate Heat Exchanger." Journal of Heat
       Transfer 121, no. 1 (February 1, 1999): 118-27. doi:10.1115/1.2825924.
    .. [2] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
       "Flow Boiling and Frictional Pressure Gradients in Plate Heat
       Exchangers. Part 1: Review and Experimental Database." International
       Journal of Refrigeration 61 (January 2016): 166-84.
       doi:10.1016/j.ijrefrig.2015.07.010.
    .. [3] Eldeeb, Radia, Vikrant Aute, and Reinhard Radermacher. "A Survey of
       Correlations for Heat Transfer and Pressure Drop for Evaporation and
       Condensation in Plate Heat Exchangers." International Journal of
       Refrigeration 65 (May 2016): 12-26. doi:10.1016/j.ijrefrig.2015.11.013.
    .. [4] García-Cascales, J. R., F. Vera-García, J. M. Corberán-Salvador,
       and J. Gonzálvez-Maciá. "Assessment of Boiling and Condensation Heat
       Transfer Correlations in the Modelling of Plate Heat Exchangers."
       International Journal of Refrigeration 30, no. 6 (September 2007):
       1029-41. doi:10.1016/j.ijrefrig.2007.01.004.
    .. [5] Huang, Jianchang. "Performance Analysis of Plate Heat Exchangers
       Used as Refrigerant Evaporators," 2011. Thesis.
       http://wiredspace.wits.ac.za/handle/10539/9779
    '''
    G = m/A_channel_flow
    G_eq = G*((1. - x) + x*(rhol/rhog)**0.5)
    Re_eq = G_eq*Dh/mul
    # Not actually specified clearly, but it is in another paper by them
    Re = G*Dh/mul
    Bo_eq = q/(G_eq*Hvap)
    Pr_l = Prandtl(Cp=Cpl, k=kl, mu=mul)
    return 1.926*(kl/Dh)*Re_eq*Pr_l**(1/3.)*Bo_eq**0.3*Re**-0.5
java
public ServerBuilder port(int port, Iterable<SessionProtocol> protocols) {
    return port(new ServerPort(port, protocols));
}
java
protected void configureDetailedWeekView(DetailedWeekView newDetailedWeekView, boolean trimTimeBounds) {
    newDetailedWeekView.getWeekView().setShowToday(false);
    newDetailedWeekView.getWeekView().setTrimTimeBounds(trimTimeBounds);
}
python
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> IntegerSpec
    """ Make a IntegerSpec object from a dictionary containing its
        properties.

        :param dict json_dict: The properties dictionary. It may contain
            `'minimum'` and `'maximum'` keys. In addition, it must contain
            a `'hashing'` key, whose contents are passed to
            :class:`FieldHashingProperties`.
    """
    # noinspection PyCompatibility
    result = cast(IntegerSpec,  # For Mypy.
                  super().from_json_dict(json_dict))

    format_ = json_dict['format']
    result.minimum = format_.get('minimum')
    result.maximum = format_.get('maximum')

    return result
java
public EEnum getCPCPrtFlags() {
    if (cpcPrtFlagsEEnum == null) {
        cpcPrtFlagsEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI)
                .getEClassifiers().get(12);
    }
    return cpcPrtFlagsEEnum;
}
python
def parse_400_row(row: list) -> tuple:
    """ Interval event record (400) """
    return EventRecord(int(row[1]), int(row[2]), row[3], row[4], row[5])
python
def encode_binary_dict(array, buffers):
    ''' Send a numpy array as an unencoded binary buffer

    The encoded format is a dict with the following structure:

    .. code:: python

        {
            '__buffer__' : << an ID to locate the buffer >>,
            'shape'      : << array shape >>,
            'dtype'      : << dtype name >>,
            'order'      : << byte order at origin (little or big) >>
        }

    Args:
        array (np.ndarray) : an array to encode

        buffers (list) :
            List to add buffers to

            **This is an "out" parameter**. The values it contains will be
            modified in-place.

    Returns:
        dict

    '''
    buffer_id = make_id()
    buf = (dict(id=buffer_id), array.tobytes())
    buffers.append(buf)

    return {
        '__buffer__' : buffer_id,
        'shape'      : array.shape,
        'dtype'      : array.dtype.name,
        'order'      : sys.byteorder,
    }
python
def flatten(items, enter=lambda x: isinstance(x, list)):
    # http://stackoverflow.com/a/40857703
    # https://github.com/ctmakro/canton/blob/master/canton/misc.py
    """Yield items from any nested iterable; see REF."""
    for x in items:
        if enter(x):
            yield from flatten(x)
        else:
            yield x
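A usage example for the generator above; because only list instances are entered by default, tuples and strings are yielded as-is:

nested = [1, [2, [3, 4]], (5, 6), "seven"]
print(list(flatten(nested)))  # [1, 2, 3, 4, (5, 6), 'seven']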
java
public byte[] toBytes(final int padding) {
    // special case a single entry
    if (padding == 0 && segments.size() == 1) {
        BufferSegment seg = segments.get(0);
        if (seg.offset == 0 && seg.len == seg.buf.length) {
            return seg.buf;
        }
        return Arrays.copyOfRange(seg.buf, seg.offset, seg.offset + seg.len);
    }

    byte[] result = new byte[total_length + padding];
    int ofs = 0;
    for (BufferSegment seg : segments) {
        System.arraycopy(seg.buf, seg.offset, result, ofs, seg.len);
        ofs += seg.len;
    }
    return result;
}
java
public void service(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {
    if ((proxyURLPrefix == null) || (proxyURLPrefix.length() == 0)) {
        // No proxy specified
        super.service(req, res);
        return;
    }

    ServletOutputStream streamOut = res.getOutputStream();
    try {
        String proxyURLString = getProxyURLString(req);
        HttpRequestBase httpRequest = getHttpRequest(req, proxyURLString);
        addHeaders(req, httpRequest);
        this.getDataFromClient(httpRequest, streamOut);
    } catch (Exception e) {
        displayErrorInHtml(streamOut, e);
    }
}
python
def derivatives_factory(cls, coef, domain, kind, **kwargs):
    """
    Given some coefficients, return the derivative of a certain kind of
    orthogonal polynomial defined over a specific domain.
    """
    basis_polynomial = cls._basis_polynomial_factory(kind)
    return basis_polynomial(coef, domain).deriv()
java
public static List<String> extractResponseCodes(JavadocComment jdoc) {
    List<String> list = new ArrayList<>();
    list.addAll(extractDocAnnotation(DOC_RESPONSE_CODE, jdoc));
    return list;
}
java
void run(List<String> arguments, @Nullable Path workingDirectory)
        throws ProcessHandlerException, CloudSdkNotFoundException, CloudSdkOutOfDateException,
        CloudSdkVersionFileException, IOException {
    sdk.validateCloudSdk();

    List<String> command = new ArrayList<>();
    command.add(sdk.getGCloudPath().toAbsolutePath().toString());
    command.addAll(arguments);

    if (outputFormat != null) {
        command.addAll(GcloudArgs.get("format", outputFormat));
    }

    if (credentialFile != null) {
        command.addAll(GcloudArgs.get("credential-file-override", credentialFile));
    }

    logger.info("submitting command: " + Joiner.on(" ").join(command));

    ProcessBuilder processBuilder = processBuilderFactory.newProcessBuilder();
    processBuilder.command(command);
    if (workingDirectory != null) {
        processBuilder.directory(workingDirectory.toFile());
    }
    processBuilder.environment().putAll(getGcloudCommandEnvironment());

    Process process = processBuilder.start();
    processHandler.handleProcess(process);
}
java
public static <K, V> V putIfAbsent(final ConcurrentMap<K, V> map, final K key, final V value) {
    if (map == null) {
        return null;
    }
    final V result = map.putIfAbsent(key, value);
    return result != null ? result : value;
}
python
def to_output(self, value):
    """Convert value to process output format."""
    return {self.name: [self.inner.to_output(v)[self.name] for v in value]}
python
def _retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
                     max_retries=None, interval_start=2, interval_step=2,
                     interval_max=30):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep for a while before we try again; this interval
    is increased for every retry until the max seconds is reached.

    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping
        between retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between
        retries.
    """
    retries = 0
    interval_range = __fxrange(interval_start,
                               interval_max + interval_start,
                               interval_step, repeatlast=True)

    for retries in count():
        try:
            return fun(*args, **kwargs)
        except catch as exc:
            if max_retries and retries >= max_retries:
                raise
            tts = float(errback(exc, interval_range, retries)
                        if errback else next(interval_range))
            if tts:
                sleep(tts)
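A hypothetical usage sketch, assuming the helper and its module-level dependencies (the __fxrange generator, count, and sleep) are in scope; the names flaky and attempts are illustrative only:

attempts = {'n': 0}

def flaky():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ConnectionError('not ready yet')
    return 'ok'

# Sleeps 2s then 4s before the third call finally succeeds.
# result = _retry_over_time(flaky, ConnectionError, max_retries=5)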
python
def load_xml(self, filepath):
    """Loads the values of the configuration variables from an XML path."""
    from os import path
    import xml.etree.ElementTree as ET

    # Make sure the file exists and then import it as XML and read the values out.
    uxpath = path.expanduser(filepath)
    if path.isfile(uxpath):
        tree = ET.parse(uxpath)
        root = tree.getroot()
        if "symlink" in root.attrib:
            # Compare the attribute's value, not the attrib dict itself.
            self._vardict["symlink"] = root.attrib["symlink"].lower() == "true"
        for child in root:
            if child.tag == "codes":
                self._load_codes(child)
            elif child.tag == "mappings":
                self._load_mapping(child)
            elif child.tag == "ssh":
                self._load_ssh(child)
            elif child.tag == "isense":
                self._load_isense(child)
            elif child.tag == "libraries":
                self._load_includes(child)
            elif child.tag == "compilers":
                self._vardict["compilers"] = child.text
java
protected Element findAndReplaceXpp3DOM(Counter counter, Element parent, String name, Xpp3Dom dom) {
    boolean shouldExist = (dom != null) && ((dom.getChildCount() > 0) || (dom.getValue() != null));
    Element element = updateElement(counter, parent, name, shouldExist);
    if (shouldExist) {
        replaceXpp3DOM(element, dom, new Counter(counter.getDepth() + 1));
    }
    return element;
}
java
public Object invoke(Object self, Method thisMethod, Method proceed, Object[] args) throws Throwable {
    if (thisMethod == null) {
        BeanLogger.LOG.methodHandlerProcessingReturningBeanInstance(self.getClass());
        if (beanInstance == null) {
            throw BeanLogger.LOG.beanInstanceNotSetOnProxy(getBean());
        }
        return beanInstance.getInstance();
    }
    BeanLogger.LOG.methodHandlerProcessingCall(thisMethod, self.getClass());
    if (thisMethod.getDeclaringClass().equals(TargetInstanceProxy.class)) {
        if (beanInstance == null) {
            throw BeanLogger.LOG.beanInstanceNotSetOnProxy(getBean());
        }
        if (thisMethod.getName().equals("weld_getTargetInstance")) {
            return beanInstance.getInstance();
        } else if (thisMethod.getName().equals("weld_getTargetClass")) {
            return beanInstance.getInstanceType();
        } else {
            return null;
        }
    } else if (thisMethod.getName().equals("_initMH")) {
        BeanLogger.LOG.settingNewMethodHandler(args[0], self.getClass());
        return new ProxyMethodHandler(contextId, new TargetBeanInstance(args[0]), getBean());
    } else {
        if (beanInstance == null) {
            throw BeanLogger.LOG.beanInstanceNotSetOnProxy(getBean());
        }
        Object instance = beanInstance.getInstance();
        Object result = beanInstance.invoke(instance, thisMethod, args);
        // if the method returns this and the return type matches the proxy type, return the proxy
        // instead to prevent the bean instance escaping
        if (result != null && result == instance
                && (thisMethod.getReturnType().isAssignableFrom(self.getClass()))) {
            return self;
        }
        return result;
    }
}
java
public void seekToHoliday(String holidayString, String direction, String seekAmount) {
    Holiday holiday = Holiday.valueOf(holidayString);
    assert (holiday != null);

    seekToIcsEvent(HOLIDAY_ICS_FILE, holiday.getSummary(), direction, seekAmount);
}
python
def get_times(annot, evt_type=None, stage=None, cycle=None, chan=None,
              exclude=False, buffer=0):
    """Get start and end times for selected segments of data, bundled
    together with info.

    Parameters
    ----------
    annot: instance of Annotations
        The annotation file containing events and epochs
    evt_type: list of str, optional
        Enter a list of event types to get events; otherwise, epochs will
        be returned.
    stage: list of str, optional
        Stage(s) of interest. If None, stage is ignored.
    cycle: list of tuple of two float, optional
        Cycle(s) of interest, as start and end times in seconds from record
        start. If None, cycles are ignored.
    chan: list of str or tuple of None
        Channel(s) of interest. Channel format is 'chan_name (group_name)'.
        If None, channel is ignored.
    exclude: bool
        Exclude epochs by quality. If True, epochs marked as 'Poor' quality
        or staged as 'Artefact' will be rejected (and the signal segmented
        in consequence). Has no effect on event selection.
    buffer : float
        adds this many seconds of signal before and after each segment

    Returns
    -------
    list of dict
        Each dict has times (the start and end times of each segment, as
        list of tuple of float), stage, cycle, chan, name (event type,
        if applicable)

    Notes
    -----
    This function returns epoch or event start and end times, bundled
    together according to the specified parameters.

    Presently, setting exclude to True does not exclude events found in
    Poor signal epochs. The rationale is that events would never be marked
    in Poor signal epochs. If they were automatically detected, these
    epochs would have been left out during detection. If they were
    manually marked, then it must have been Good signal. At the moment,
    in the GUI, the exclude epoch option is disabled when analyzing
    events, but we could fix the code if we find a use case for rejecting
    events based on the quality of the epoch signal.
    """
    getter = annot.get_epochs
    last = annot.last_second

    if stage is None:
        stage = (None,)
    if cycle is None:
        cycle = (None,)
    if chan is None:
        chan = (None,)
    if evt_type is None:
        evt_type = (None,)
    elif isinstance(evt_type[0], str):
        getter = annot.get_events
        if chan != (None,):
            chan.append('')  # also retrieve events marked on all channels
    else:
        lg.error('Event type must be list/tuple of str or None')

    qual = None
    if exclude:
        qual = 'Good'

    bundles = []
    for et in evt_type:
        for ch in chan:
            for cyc in cycle:
                for ss in stage:
                    st_input = ss
                    if ss is not None:
                        st_input = (ss,)
                    evochs = getter(name=et, time=cyc, chan=(ch,),
                                    stage=st_input, qual=qual)
                    if evochs:
                        times = [(max(e['start'] - buffer, 0),
                                  min(e['end'] + buffer, last))
                                 for e in evochs]
                        times = sorted(times, key=lambda x: x[0])
                        one_bundle = {'times': times,
                                      'stage': ss,
                                      'cycle': cyc,
                                      'chan': ch,
                                      'name': et}
                        bundles.append(one_bundle)

    return bundles
java
private void initializeHideNavigation(final SharedPreferences sharedPreferences) {
    String key = getString(R.string.hide_navigation_preference_key);
    boolean defaultValue = Boolean.valueOf(getString(R.string.hide_navigation_preference_default_value));
    boolean hideNavigation = sharedPreferences.getBoolean(key, defaultValue);
    hideNavigation(hideNavigation);
}
java
private long resolveLongFwk(final String key, final String frameworkProp, final long defval) {
    long timeout = defval;
    String opt = resolve(key);
    if (opt == null && frameworkProp != null
            && framework.getPropertyLookup().hasProperty(frameworkProp)) {
        opt = framework.getProperty(frameworkProp);
    }
    if (opt != null) {
        try {
            timeout = Long.parseLong(opt);
        } catch (NumberFormatException ignored) {
        }
    }
    return timeout;
}
java
public ServiceFuture<OperationStatus> updatePublishSettingsAsync(UUID appId,
        PublishSettingUpdateObject publishSettingUpdateObject,
        final ServiceCallback<OperationStatus> serviceCallback) {
    return ServiceFuture.fromResponse(
            updatePublishSettingsWithServiceResponseAsync(appId, publishSettingUpdateObject),
            serviceCallback);
}
python
def write_json_to_file(json_data, filename="metadata"):
    """
    Write all JSON in python dictionary to a new json file.

    :param dict json_data: JSON data
    :param str filename: Target filename (defaults to 'metadata.jsonld')
    :return None:
    """
    logger_jsons.info("enter write_json_to_file")
    json_data = rm_empty_fields(json_data)
    # Use demjson to maintain unicode characters in output
    json_bin = demjson.encode(json_data, encoding='utf-8', compactly=False)
    # Write json to file
    try:
        open("{}.jsonld".format(filename), "wb").write(json_bin)
        logger_jsons.info("wrote data to json file")
    except FileNotFoundError as e:
        print("Error: Writing json to file: {}".format(filename))
        logger_jsons.debug("write_json_to_file: FileNotFound: {}, {}".format(filename, e))
    logger_jsons.info("exit write_json_to_file")
    return
java
protected static double computePi(int i, double[] dist_i, double[] pij_i, double perplexity, double logPerp) {
    // Relation to paper: beta == 1. / (2*sigma*sigma)
    double beta = estimateInitialBeta(dist_i, perplexity);
    double diff = computeH(i, dist_i, pij_i, -beta) - logPerp;
    double betaMin = 0.;
    double betaMax = Double.POSITIVE_INFINITY;
    for (int tries = 0; tries < PERPLEXITY_MAXITER && Math.abs(diff) > PERPLEXITY_ERROR; ++tries) {
        if (diff > 0) {
            betaMin = beta;
            beta += (betaMax == Double.POSITIVE_INFINITY) ? beta : ((betaMax - beta) * .5);
        } else {
            betaMax = beta;
            beta -= (beta - betaMin) * .5;
        }
        diff = computeH(i, dist_i, pij_i, -beta) - logPerp;
    }
    return beta;
}
python
def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER,
                require_new_password=True):
    """Create a user."""
    body = self._formdata({
        "customer_id": customer_id,
        "name": name,
        "login": login,
        "password": password,
        "role": role,
        "require_new_password": require_new_password,
    }, FastlyUser.FIELDS)
    content = self._fetch("/user", method="POST", body=body)
    return FastlyUser(self, content)
java
void store(OutputStream out) throws IOException {
    BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out));
    for (Entry<String, Point2d[]> e : templateMap.entries()) {
        bw.write(encodeEntry(e));
        bw.write('\n');
    }
    bw.close();
}
java
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "angle")
public JAXBElement<MeasureType> createAngle(MeasureType value) {
    return new JAXBElement<MeasureType>(_Angle_QNAME, MeasureType.class, null, value);
}
python
def coastal_coords():
    """
    A coastal coord is a 2-tuple: (tile id, direction).

    An edge is coastal if it is on the grid's border.

    :return: list( (tile_id, direction) )
    """
    coast = list()
    for tile_id in coastal_tile_ids():
        tile_coord = tile_id_to_coord(tile_id)
        for edge_coord in coastal_edges(tile_id):
            dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)
            if tile_id_in_direction(tile_id, dirn) is None:
                coast.append((tile_id, dirn))
    # logging.debug('coast={}'.format(coast))
    return coast
python
def get_ccle_mrna_levels():
    """Get CCLE mRNA amounts using cBioClient"""
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    gene_list = body.get('gene_list')
    cell_lines = body.get('cell_lines')
    mrna_amounts = cbio_client.get_ccle_mrna(gene_list, cell_lines)
    res = {'mrna_amounts': mrna_amounts}
    return res
python
def bytes_available(device):
    """
    Determines the number of bytes available for reading from an
    AlarmDecoder device

    :param device: the AlarmDecoder device
    :type device: :py:class:`~alarmdecoder.devices.Device`

    :returns: int
    """
    bytes_avail = 0

    if isinstance(device, alarmdecoder.devices.SerialDevice):
        if hasattr(device._device, "in_waiting"):
            bytes_avail = device._device.in_waiting
        else:
            bytes_avail = device._device.inWaiting()
    elif isinstance(device, alarmdecoder.devices.SocketDevice):
        bytes_avail = 4096

    return bytes_avail
python
def _send_bool(self, value):
    """
    Convert a boolean value into a bytes object. Uses 0 and 1 as output.
    """
    # Sanity check.
    if type(value) != bool and value not in [0, 1]:
        err = "{} is not boolean.".format(value)
        raise ValueError(err)

    return struct.pack("?", value)
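For reference, the underlying struct encoding the helper produces is a single byte:

import struct

print(struct.pack("?", True))   # b'\x01'
print(struct.pack("?", False))  # b'\x00'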
java
public void incEncodePB(final MiniSatStyleSolver s, final LNGIntVector lits, final LNGIntVector coeffs,
                        int rhs, final LNGIntVector assumptions, int size) {
    assert this.incrementalStrategy == IncrementalStrategy.ITERATIVE;
    switch (this.pbEncoding) {
        case SWC:
            this.swc.encode(s, lits, coeffs, rhs, assumptions, size);
            break;
        default:
            throw new IllegalStateException("Unknown pseudo-Boolean encoding: " + this.pbEncoding);
    }
}
java
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
    switch (featureID) {
        case AfplibPackage.BPG__PAGE_NAME:
            return getPageName();
        case AfplibPackage.BPG__TRIPLETS:
            return getTriplets();
    }
    return super.eGet(featureID, resolve, coreType);
}
java
@SuppressWarnings("unchecked")
private String encodeAttribute(String s, Charset charset) throws ErrorDataEncoderException {
    if (s == null) {
        return "";
    }
    try {
        String encoded = URLEncoder.encode(s, charset.name());
        if (encoderMode == EncoderMode.RFC3986) {
            for (Map.Entry<Pattern, String> entry : percentEncodings) {
                String replacement = entry.getValue();
                encoded = entry.getKey().matcher(encoded).replaceAll(replacement);
            }
        }
        return encoded;
    } catch (UnsupportedEncodingException e) {
        throw new ErrorDataEncoderException(charset.name(), e);
    }
}
python
def execute(self, cmd, fname, codes=[0, None]):
    '''
    Execute a command against the specified file.

    @cmd   - Command to execute.
    @fname - File to run command against.
    @codes - List of return codes indicating cmd success.

    Returns True on success, False on failure, or None if the external
    extraction utility could not be found.
    '''
    tmp = None
    rval = 0
    retval = True
    command_list = []

    binwalk.core.common.debug("Running extractor '%s'" % str(cmd))

    try:
        if callable(cmd):
            command_list.append(get_class_name_from_method(cmd))

            try:
                retval = cmd(fname)
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
                binwalk.core.common.warning("Internal extractor '%s' failed with exception: '%s'" % (str(cmd), str(e)))
        elif cmd:
            # If not in debug mode, create a temporary file to redirect
            # stdout and stderr to
            if not binwalk.core.common.DEBUG:
                tmp = tempfile.TemporaryFile()

            # Generate unique file paths for all paths in the current
            # command that are surrounded by UNIQUE_PATH_DELIMITER
            while self.UNIQUE_PATH_DELIMITER in cmd:
                need_unique_path = cmd.split(self.UNIQUE_PATH_DELIMITER)[1].split(self.UNIQUE_PATH_DELIMITER)[0]
                unique_path = binwalk.core.common.unique_file_name(need_unique_path)
                cmd = cmd.replace(self.UNIQUE_PATH_DELIMITER + need_unique_path + self.UNIQUE_PATH_DELIMITER, unique_path)

            # Execute.
            for command in cmd.split("&&"):
                # Replace all instances of FILE_NAME_PLACEHOLDER in the
                # command with fname
                command = command.strip().replace(self.FILE_NAME_PLACEHOLDER, fname)

                binwalk.core.common.debug("subprocess.call(%s, stdout=%s, stderr=%s)" % (command, str(tmp), str(tmp)))
                rval = subprocess.call(shlex.split(command), stdout=tmp, stderr=tmp)

                if rval in codes:
                    retval = True
                else:
                    retval = False

                binwalk.core.common.debug('External extractor command "%s" completed with return code %d (success: %s)' % (cmd, rval, str(retval)))
                command_list.append(command)

                # TODO: Should errors from all commands in a command string be checked? Currently we only support
                #       specifying one set of error codes, so at the moment, this is not done; it is up to the
                #       final command to return success or failure (which presumably it will if previous necessary
                #       commands were not successful, but this is an assumption).
                # if retval == False:
                #     break

    except KeyboardInterrupt as e:
        raise e
    except Exception as e:
        binwalk.core.common.warning("Extractor.execute failed to run external extractor '%s': %s, '%s' might not be installed correctly" % (str(cmd), str(e), str(cmd)))
        retval = None

    if tmp is not None:
        tmp.close()

    return (retval, '&&'.join(command_list))
python
def select_files(self, what="o"):
    """
    Helper function used to select the files of a task.

    Args:
        what: string with the list of characters selecting the file type
              Possible choices:
              i ==> input_file,
              o ==> output_file,
              f ==> files_file,
              j ==> job_file,
              l ==> log_file,
              e ==> stderr_file,
              q ==> qout_file,
              all ==> all files.
    """
    choices = collections.OrderedDict([
        ("i", self.input_file),
        ("o", self.output_file),
        ("f", self.files_file),
        ("j", self.job_file),
        ("l", self.log_file),
        ("e", self.stderr_file),
        ("q", self.qout_file),
    ])

    if what == "all":
        return [getattr(v, "path") for v in choices.values()]

    selected = []
    for c in what:
        try:
            selected.append(getattr(choices[c], "path"))
        except KeyError:
            logger.warning("Wrong keyword %s" % c)

    return selected
python
def source_analysis(
        source_path,
        group,
        encoding='automatic',
        fallback_encoding='cp1252',
        generated_regexes=pygount.common.regexes_from(DEFAULT_GENERATED_PATTERNS_TEXT),
        duplicate_pool=None):
    """
    Analysis for line counts in source code stored in ``source_path``.

    :param source_path:
    :param group: name of a logical group the source code belongs to,
        e.g. a package.
    :param encoding: encoding according to :func:`encoding_for`
    :param fallback_encoding: fallback encoding according to
        :func:`encoding_for`
    :return: a :class:`SourceAnalysis`
    """
    assert encoding is not None
    assert generated_regexes is not None

    result = None
    lexer = None
    source_code = None
    source_size = os.path.getsize(source_path)
    if source_size == 0:
        _log.info('%s: is empty', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.empty)
    elif is_binary_file(source_path):
        _log.info('%s: is binary', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.binary)
    elif not has_lexer(source_path):
        _log.info('%s: unknown language', source_path)
        result = pseudo_source_analysis(source_path, group, SourceState.unknown)
    elif duplicate_pool is not None:
        duplicate_path = duplicate_pool.duplicate_path(source_path)
        if duplicate_path is not None:
            _log.info('%s: is a duplicate of %s', source_path, duplicate_path)
            result = pseudo_source_analysis(source_path, group, SourceState.duplicate, duplicate_path)
    if result is None:
        if encoding in ('automatic', 'chardet'):
            encoding = encoding_for(source_path, encoding, fallback_encoding)
        try:
            with open(source_path, 'r', encoding=encoding) as source_file:
                source_code = source_file.read()
        except (LookupError, OSError, UnicodeError) as error:
            _log.warning('cannot read %s using encoding %s: %s', source_path, encoding, error)
            result = pseudo_source_analysis(source_path, group, SourceState.error, error)
        if result is None:
            lexer = guess_lexer(source_path, source_code)
            assert lexer is not None
    if (result is None) and (len(generated_regexes) != 0):
        number_line_and_regex = matching_number_line_and_regex(
            pygount.common.lines(source_code), generated_regexes
        )
        if number_line_and_regex is not None:
            number, _, regex = number_line_and_regex
            message = 'line {0} matches {1}'.format(number, regex)
            _log.info('%s: is generated code because %s', source_path, message)
            result = pseudo_source_analysis(source_path, group, SourceState.generated, message)
    if result is None:
        assert lexer is not None
        assert source_code is not None
        language = lexer.name
        if ('xml' in language.lower()) or (language == 'Genshi'):
            dialect = pygount.xmldialect.xml_dialect(source_path, source_code)
            if dialect is not None:
                language = dialect
        _log.info('%s: analyze as %s using encoding %s', source_path, language, encoding)
        mark_to_count_map = {'c': 0, 'd': 0, 'e': 0, 's': 0}
        for line_parts in _line_parts(lexer, source_code):
            mark_to_increment = 'e'
            for mark_to_check in ('d', 's', 'c'):
                if mark_to_check in line_parts:
                    mark_to_increment = mark_to_check
            mark_to_count_map[mark_to_increment] += 1
        result = SourceAnalysis(
            path=source_path,
            language=language,
            group=group,
            code=mark_to_count_map['c'],
            documentation=mark_to_count_map['d'],
            empty=mark_to_count_map['e'],
            string=mark_to_count_map['s'],
            state=SourceState.analyzed.name,
            state_info=None,
        )

    assert result is not None
    return result
python
def select_visible_page_image(infiles, output_file, log, context): """Selects a whole page image that we can show the user (if necessary)""" options = context.get_options() if options.clean_final: image_suffix = '.pp-clean.png' elif options.deskew: image_suffix = '.pp-deskew.png' elif options.remove_background: image_suffix = '.pp-background.png' else: image_suffix = '.page.png' image = next(ii for ii in infiles if ii.endswith(image_suffix)) pageinfo = get_pageinfo(image, context) if pageinfo.images and all(im.enc == 'jpeg' for im in pageinfo.images): log.debug(f'{page_number(image):4d}: JPEG input -> JPEG output') # If all images were JPEGs originally, produce a JPEG as output with Image.open(image) as im: # At this point the image should be a .png, but deskew, unpaper # might have removed the DPI information. In this case, fall back to # square DPI used to rasterize. When the preview image was # rasterized, it was also converted to square resolution, which is # what we want to give tesseract, so keep it square. fallback_dpi = get_page_square_dpi(pageinfo, options) dpi = im.info.get('dpi', (fallback_dpi, fallback_dpi)) # Pillow requires integer DPI dpi = round(dpi[0]), round(dpi[1]) im.save(output_file, format='JPEG', dpi=dpi) else: re_symlink(image, output_file, log)
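The DPI fallback is the subtle part of the JPEG branch; a standalone Pillow sketch of the same save path (the file names and the fallback value are illustrative):

from PIL import Image

fallback_dpi = 300  # assumed square rasterization DPI
with Image.open('page.png') as im:
    # Preserve the source DPI when present, otherwise fall back to square DPI.
    dpi = im.info.get('dpi', (fallback_dpi, fallback_dpi))
    dpi = round(dpi[0]), round(dpi[1])  # Pillow requires integer DPI
    im.save('page.jpg', format='JPEG', dpi=dpi)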
python
def power_chisq_bins_from_sigmasq_series(sigmasq_series, num_bins, kmin, kmax):
    """Returns bins of equal power for use with the chisq functions

    Parameters
    ----------
    sigmasq_series: FrequencySeries
        A frequency series containing the cumulative power of a filter template
        preweighted by a psd.
    num_bins: int
        The number of chisq bins to calculate.
    kmin: int
        The index of the first sample of ``sigmasq_series`` to use when
        constructing the bins.
    kmax: int
        The index marking the end of the series to use; the total power is
        read from ``sigmasq_series[kmax - 1]`` and ``kmax`` is appended as
        the final bin edge.

    Returns
    -------
    bins: List of ints
        A list of the edges of the chisq bins is returned.
    """
    sigmasq = sigmasq_series[kmax - 1]
    edge_vec = numpy.arange(0, num_bins) * sigmasq / num_bins
    bins = numpy.searchsorted(sigmasq_series[kmin:kmax], edge_vec, side='right')
    bins += kmin
    return numpy.append(bins, kmax)
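A quick numeric check, treating sigmasq_series as a plain numpy array (the real input is a FrequencySeries, but the function only uses indexing and searchsorted):

import numpy

# Cumulative power rising by 1 per sample: sigmasq_series[k] == k + 1.
sigmasq_series = numpy.cumsum(numpy.ones(100))
bins = power_chisq_bins_from_sigmasq_series(sigmasq_series, num_bins=4, kmin=10, kmax=100)
print(bins)  # [ 10  25  50  75 100] -- each bin spans an equal share of the total power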
java
public int uploadFile(String name, java.io.File file) {
    FileDataBodyPart filePart = new FileDataBodyPart("source", file);

    // Workaround for a bug in CherryPy
    FormDataContentDisposition.FormDataContentDispositionBuilder builder = FormDataContentDisposition
            .name(filePart.getName());
    builder.fileName(file.getName());
    builder.size(file.length());
    filePart.setFormDataContentDisposition(builder.build());

    FormDataMultiPart multiPart = new FormDataMultiPart();
    multiPart.bodyPart(filePart);
    multiPart.field("filename", name);

    Builder resource = getResourceFactory().getApiResource("/file/v2/")
            .entity(multiPart, new MediaType("multipart", "form-data",
                    Collections.singletonMap("boundary", "AaB03x")));
    return resource.post(File.class).getId();
}
java
public static PreprocessedRowsFlusher create(final XMLUtil xmlUtil, final List<TableRow> tableRows) throws IOException { return new PreprocessedRowsFlusher(xmlUtil, tableRows, new StringBuilder(STRING_BUILDER_SIZE)); }
java
private boolean needsToBeCreatedInitially(Entity ent) {
    boolean create = false;
    if(ent instanceof PhysicalEntity || ent instanceof Gene) {
        if(ubiqueDet != null && ubiqueDet.isUbique(ent))
            create = false; // ubiques will be created where they are actually used.
        else if (!ent.getParticipantOf().isEmpty())
            create = true;
        else if(ent instanceof Complex && ((Complex) ent).getComponentOf().isEmpty()
                && ((Complex) ent).getMemberPhysicalEntityOf().isEmpty())
            create = true; // still create a root/top-level complex even though it is dangling
    }
    return create;
}
java
public static void applyKVToBean(Object bean, String key, Object value) throws NoSuchMethodException, IllegalAccessException, IllegalArgumentException, InvocationTargetException {
    // Look up the getter first to discover the property type, then invoke the
    // matching setter with the supplied value.
    Method getterMethod = bean.getClass().getMethod(Util.getterMethodName(key));
    Method setterMethod = bean.getClass().getMethod(Util.setterMethodName(key), getterMethod.getReturnType());
    setterMethod.invoke(bean, value);
}
java
@Override public boolean documentExists(URI documentUri) throws DocumentException { URI fileUri = getDocumentInternalUri(documentUri); File file = new File(fileUri); return file.exists(); }
python
def w_diffuser_inner(sed_inputs=sed_dict):
    """Return the inner width of each diffuser in the sedimentation tank.

    Parameters
    ----------
    sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations; defaults can be found in sed.yaml.

    Returns
    -------
    float
        Inner width of each diffuser in the sedimentation tank.

    Examples
    --------
    >>> from aide_design.play import *

    """
    return ut.ceil_nearest(w_diffuser_inner_min(sed_inputs).magnitude,
                           (np.arange(1/16, 1/4, 1/16)*u.inch).magnitude)
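The helper ut.ceil_nearest is what snaps the minimum width up to the next 1/16 inch increment; a pure-numpy sketch of its assumed semantics (the real helper lives in the aide_design utilities):

import numpy as np

def ceil_nearest(x, options):
    # Assumed behavior: return the smallest element of `options` that is >= x.
    options = np.sort(np.asarray(options))
    return options[np.searchsorted(options, x, side='left')]

increments = np.arange(1/16, 1/4, 1/16)  # 0.0625, 0.125, 0.1875 inch
print(ceil_nearest(0.10, increments))    # 0.125 -> 0.10 in rounds up to 1/8 in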
python
def os_deployment_servers(self): """ Gets the Os Deployment Servers API client. Returns: OsDeploymentServers: """ if not self.__os_deployment_servers: self.__os_deployment_servers = OsDeploymentServers(self.__connection) return self.__os_deployment_servers
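The getter follows the lazy-initialization idiom used across this client; a generic Python sketch of the same pattern (the class and names are illustrative):

class Client:
    def __init__(self, connection):
        self._connection = connection
        self._servers = None

    @property
    def servers(self):
        # Build the sub-client on first access, then reuse the cached instance.
        if self._servers is None:
            self._servers = ('servers-client', self._connection)  # stand-in object
        return self._servers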
java
protected List<DbEntityOperation> sortByReferences(SortedSet<DbEntityOperation> preSorted) { // copy the pre-sorted set and apply final sorting to list List<DbEntityOperation> opList = new ArrayList<DbEntityOperation>(preSorted); for (int i = 0; i < opList.size(); i++) { DbEntityOperation currentOperation = opList.get(i); DbEntity currentEntity = currentOperation.getEntity(); Set<String> currentReferences = currentOperation.getFlushRelevantEntityReferences(); // check whether this operation must be placed after another operation int moveTo = i; for(int k = i+1; k < opList.size(); k++) { DbEntityOperation otherOperation = opList.get(k); DbEntity otherEntity = otherOperation.getEntity(); Set<String> otherReferences = otherOperation.getFlushRelevantEntityReferences(); if(currentOperation.getOperationType() == INSERT) { // if we reference the other entity, we need to be inserted after that entity if(currentReferences != null && currentReferences.contains(otherEntity.getId())) { moveTo = k; break; // we can only reference a single entity } } else { // UPDATE or DELETE // if the other entity has a reference to us, we must be placed after the other entity if(otherReferences != null && otherReferences.contains(currentEntity.getId())) { moveTo = k; // cannot break, there may be another entity further to the right which also references us } } } if(moveTo > i) { opList.remove(i); opList.add(moveTo, currentOperation); i--; } } return opList; }
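The reordering logic in a minimal Python sketch, using (op_type, entity_id, references) tuples in place of DbEntityOperation (the tuple layout is illustrative; the move rules mirror the Java above):

def sort_by_references(ops):
    # ops: list of (op_type, entity_id, references) tuples.
    ops = list(ops)
    i = 0
    while i < len(ops):
        op_type, entity_id, refs = ops[i]
        move_to = i
        for k in range(i + 1, len(ops)):
            _, other_id, other_refs = ops[k]
            if op_type == 'INSERT':
                # An INSERT must come after the entity it references.
                if other_id in refs:
                    move_to = k
                    break  # an operation references a single entity
            else:
                # UPDATE/DELETE must come after any entity that references it.
                if entity_id in other_refs:
                    move_to = k  # keep scanning: later ops may also reference it
        if move_to > i:
            ops.insert(move_to, ops.pop(i))  # re-examine the same index next pass
        else:
            i += 1
    return ops

# Example: the insert of 'child' (which references 'parent') moves after 'parent'.
ops = [('INSERT', 'child', {'parent'}), ('INSERT', 'parent', set())]
print(sort_by_references(ops))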
java
public void marshall(RetrieveTapeRecoveryPointRequest retrieveTapeRecoveryPointRequest, ProtocolMarshaller protocolMarshaller) { if (retrieveTapeRecoveryPointRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(retrieveTapeRecoveryPointRequest.getTapeARN(), TAPEARN_BINDING); protocolMarshaller.marshall(retrieveTapeRecoveryPointRequest.getGatewayARN(), GATEWAYARN_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def tag_residues_with_heptad_register(helices):
    """Tags residues in the input helices with their heptad register.

    (The helices are not required to be the same length.)

    Parameters
    ----------
    helices : [Polypeptide]
        Helices to process; each residue gets a 'register' entry in its tags.

    Returns
    -------
    None
    """
    base_reg = 'abcdefg'
    start, end = start_and_end_of_reference_axis(helices)
    for h in helices:
        ref_axis = gen_reference_primitive(h, start=start, end=end)
        crangles = crick_angles(h, reference_axis=ref_axis, tag=False)[:-1]
        reg_fit = fit_heptad_register(crangles)
        exp_base = base_reg * (len(h) // 7 + 2)
        hep_pos = reg_fit[0][0]
        register_string = exp_base[hep_pos:hep_pos + len(h)]
        for i, register in enumerate(register_string):
            h[i].tags['register'] = register
    return
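The register assignment reduces to slicing a repeated 'abcdefg' string at the fitted offset; a toy illustration (the offset and helix length are made up):

base_reg = 'abcdefg'
helix_len, hep_pos = 10, 3  # hypothetical values from fit_heptad_register
exp_base = base_reg * (helix_len // 7 + 2)
print(exp_base[hep_pos:hep_pos + helix_len])  # 'defgabcdef'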