language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
java
public java.util.List<String> getAdditionalSlaveSecurityGroups() { if (additionalSlaveSecurityGroups == null) { additionalSlaveSecurityGroups = new com.amazonaws.internal.SdkInternalList<String>(); } return additionalSlaveSecurityGroups; }
java
private static Object findValue(final OptionElement subroot, final String[] pathElements, int pos) { if (pos >= 0) { // Not at root. if (!pathElements[pos].equals(subroot.name)) { return null; } } if (subroot.isValue) { // pos must be last entry if ((pos + 1) != pathElements.length) { return null; } return subroot.val; } pos++; if (pos == pathElements.length) { return null; } /* look to children for some value */ Object singleRes = null; ArrayList<Object> multiRes = null; for (OptionElement optel: subroot.getChildren()) { Object res = findValue(optel, pathElements, pos); if (res != null) { if (singleRes != null) { multiRes = new ArrayList<Object>(); appendResult(singleRes, multiRes); singleRes = null; appendResult(res, multiRes); } else if (multiRes != null) { appendResult(res, multiRes); } else { singleRes = res; } } } if (multiRes != null) { return multiRes; } return singleRes; }
python
def _create_argument_value_pairs(func, *args, **kwargs): """ Create a dictionary with argument names as keys and their passed values as values. An empty dictionary is returned if an error is detected, such as more arguments than in the function definition, argument(s) defined by position and keyword, etc. """ # Capture parameters that have been explicitly specified in function call try: arg_dict = signature(func).bind_partial(*args, **kwargs).arguments except TypeError: return dict() # Capture parameters that have not been explicitly specified # but have default values arguments = signature(func).parameters for arg_name in arguments: if (arguments[arg_name].default is not Parameter.empty) and ( arguments[arg_name].name not in arg_dict ): arg_dict[arguments[arg_name].name] = arguments[arg_name].default return arg_dict
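A quick illustration of how the helper above behaves (a sketch, not part of the dataset; assumes `from inspect import signature, Parameter` is in scope, as the function itself requires):

from inspect import signature, Parameter  # required by _create_argument_value_pairs

def example(a, b=2, c=3):
    pass

# Explicit arguments are captured via bind_partial(); parameters that were
# not passed but have defaults are then merged in.
print(_create_argument_value_pairs(example, 1, b=20))  # {'a': 1, 'b': 20, 'c': 3}
# Too many positional arguments: bind_partial() raises TypeError, so {} is returned.
print(_create_argument_value_pairs(example, 1, 2, 3, 4))  # {}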
python
def check(self, diff): """Check that the new file introduced is a python source file""" path = diff.b_path assert any( path.endswith(ext) for ext in importlib.machinery.SOURCE_SUFFIXES )
python
def on_app_shutdown(self, app): '''Dump profile content to disk''' if self.filewatcher: self.filewatcher.stop() if self.profile: self.upload_page.on_destroy() self.download_page.on_destroy()
java
public void marshall(IpAddressRequest ipAddressRequest, ProtocolMarshaller protocolMarshaller) { if (ipAddressRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(ipAddressRequest.getSubnetId(), SUBNETID_BINDING); protocolMarshaller.marshall(ipAddressRequest.getIp(), IP_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
@Override public final JSONObject toJSONObject() { final JSONObject object = new JSONObject(); object.put("type", new JSONString(getNodeType().getValue())); if (hasMetaData()) { final MetaData meta = getMetaData(); if (false == meta.isEmpty()) { object.put("meta", new JSONObject(meta.getJSO())); } } object.put("attributes", new JSONObject(getAttributes().getJSO())); final JSONArray children = new JSONArray(); children.set(0, getScene().toJSONObject()); object.put("children", children); object.put("storage", getStorageEngine().toJSONObject()); return object; }
python
def get_battery_state(self, prop): """ Return the first line from the file located at battery_path/prop as a string. """ with open(os.path.join(self.options['battery_path'], prop), 'r') as f: return f.readline().strip()
java
public void setCondition( ICondition condition) { super.setCondition( condition); // Reset ancestry for all descendants. if( members_ != null) { for( IVarDef member : members_) { member.setParent( this); } } }
java
public static com.liferay.commerce.product.model.CPInstance createCPInstance( long CPInstanceId) { return getService().createCPInstance(CPInstanceId); }
python
def current_branch(self): """The name of the branch that's currently checked out in the working tree (a string or :data:`None`).""" output = self.context.capture('git', 'rev-parse', '--abbrev-ref', 'HEAD', check=False, silent=True) return output if output != 'HEAD' else None
java
@SuppressWarnings("unchecked") protected boolean isValid() { boolean res = true; res = res & m_description.isValid(); res = res & m_name.isValid(); if (!res) { return res; } for (I_CmsEditableGroupRow row : m_ouResources.getRows()) { if (!((AbstractField<String>)row.getComponent()).isValid()) { return false; } } return true; }
java
static int optimalNumOfHashFunctions(long n, long m) { // (m / n) * log(2), but avoid truncation due to division! return Math.max(1, (int) Math.round((double) m / n * Math.log(2))); }
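For reference, a minimal Python sketch of the same Bloom-filter formula, k = round((m / n) * ln 2), with hypothetical numbers (an illustration, not part of the dataset):

import math

def optimal_num_of_hash_functions(n: int, m: int) -> int:
    # k = (m / n) * ln(2), computed in floating point to avoid
    # truncation from integer division, then clamped to at least 1.
    return max(1, round(m / n * math.log(2)))

# e.g. 1,000 items in a 10,000-bit filter -> about 7 hash functions
print(optimal_num_of_hash_functions(1_000, 10_000))  # 7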
java
@Override public boolean cleanupLocalisations() throws SIResourceException { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "cleanupLocalisations"); boolean allCleanedUp = _pubSubRemoteSupport.cleanupLocalisations(_consumerDispatchersDurable); if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "cleanupLocalisations", Boolean.valueOf(allCleanedUp)); return allCleanedUp; }
python
def toProtocolElement(self): """ Returns the GA4GH protocol representation of this ReadGroup. """ # TODO this is very incomplete, but we don't have the # implementation to fill out the rest of the fields currently readGroup = protocol.ReadGroup() readGroup.id = self.getId() readGroup.created = self._creationTime readGroup.updated = self._updateTime dataset = self.getParentContainer().getParentContainer() readGroup.dataset_id = dataset.getId() readGroup.name = self.getLocalId() readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize()) referenceSet = self._parentContainer.getReferenceSet() readGroup.sample_name = pb.string(self.getSampleName()) readGroup.biosample_id = pb.string(self.getBiosampleId()) if referenceSet is not None: readGroup.reference_set_id = referenceSet.getId() readGroup.stats.CopyFrom(self.getStats()) readGroup.programs.extend(self.getPrograms()) readGroup.description = pb.string(self.getDescription()) readGroup.experiment.CopyFrom(self.getExperiment()) self.serializeAttributes(readGroup) return readGroup
java
public static void writeConfigFile(File configFile, Class<?>[] classes, boolean sortClasses) throws SQLException, IOException { System.out.println("Writing configurations to " + configFile.getAbsolutePath()); writeConfigFile(new FileOutputStream(configFile), classes, sortClasses); }
python
def asgray(im): """ Takes an image and returns its grayscale version by averaging the color channels. If an alpha channel is present, it will simply be ignored. If a grayscale image is given, the original image is returned. Parameters ---------- im : ndarray, ndim 2 or 3 RGB or grayscale image. Returns ------- gray_image : ndarray, ndim 2 Grayscale version of image. """ if im.ndim == 2: return im elif im.ndim == 3 and im.shape[2] in (3, 4): return im[..., :3].mean(axis=-1) else: raise ValueError('Invalid image format')
python
def chunk_fill(iterable, size, fillvalue=None): """ chunk_fill('ABCDEFG', 3, 'x') --> ABC DEF Gxx """ # TODO: not used args = [iter(iterable)] * size return itertools.zip_longest(*args, fillvalue=fillvalue)
java
public ServiceFuture<Void> enableKeyVaultAsync(String resourceGroupName, String accountName, final ServiceCallback<Void> serviceCallback) { return ServiceFuture.fromResponse(enableKeyVaultWithServiceResponseAsync(resourceGroupName, accountName), serviceCallback); }
java
public static <T> T unmarshal(ProjectionEntity nativeEntity, Class<T> entityClass) { return unmarshalBaseEntity(nativeEntity, entityClass); }
python
def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure that a pagerduty escalation policy exists. Will create or update as needed. This method accepts as args everything defined in https://developer.pagerduty.com/documentation/rest/escalation_policies/create. In addition, user and schedule ids will be translated from name (or email address) into PagerDuty unique ids. For example: .. code-block:: yaml pagerduty_escalation_policy.present: - name: bruce test escalation policy - escalation_rules: - targets: - type: schedule id: 'bruce test schedule level1' - type: user id: 'Bruce Sherrod' In this example, 'Bruce Sherrod' will be looked up and replaced with the PagerDuty id (usually a 7-digit all-caps string, e.g. PX6GQL7) ''' # for convenience, we accept id, name, or email for users # and we accept the id or name for schedules for escalation_rule in kwargs['escalation_rules']: for target in escalation_rule['targets']: target_id = None if target['type'] == 'user': user = __salt__['pagerduty_util.get_resource']('users', target['id'], ['email', 'name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if user: target_id = user['id'] elif target['type'] == 'schedule': schedule = __salt__['pagerduty_util.get_resource']('schedules', target['id'], ['name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if schedule: target_id = schedule['schedule']['id'] if target_id is None: raise Exception('unidentified target: {0}'.format(target)) target['id'] = target_id r = __salt__['pagerduty_util.resource_present']('escalation_policies', ['name', 'id'], _diff, profile, subdomain, api_key, **kwargs) return r
java
public static void assertUnchanged(String message, DataSource dataSource) throws DBAssertionError { DataSet emptyDataSet = empty(dataSource); DBAssert.deltaAssertion(CallInfo.create(message), emptyDataSet, emptyDataSet); }
java
public static SegmentType segmentTypeFromType(Class<?> cl) { if (Tenant.class.equals(cl)) { return Tenant.SEGMENT_TYPE; } else if (Environment.class.equals(cl)) { return Environment.SEGMENT_TYPE; } else if (Feed.class.equals(cl)) { return Feed.SEGMENT_TYPE; } else if (Metric.class.equals(cl)) { return Metric.SEGMENT_TYPE; } else if (MetricType.class.equals(cl)) { return MetricType.SEGMENT_TYPE; } else if (Resource.class.equals(cl)) { return Resource.SEGMENT_TYPE; } else if (ResourceType.class.equals(cl)) { return ResourceType.SEGMENT_TYPE; } else if (DataEntity.class.equals(cl)) { return DataEntity.SEGMENT_TYPE; } else if (OperationType.class.equals(cl)) { return OperationType.SEGMENT_TYPE; } else if (MetadataPack.class.equals(cl)) { return MetadataPack.SEGMENT_TYPE; } else if (Relationship.class.equals(cl)) { return Relationship.SEGMENT_TYPE; } else if (StructuredData.class.equals(cl)) { return StructuredData.SEGMENT_TYPE; } else if (RelativePath.Up.class.equals(cl)) { return RelativePath.Up.SEGMENT_TYPE; } else { throw new IllegalStateException("There is no " + SegmentType.class.getName() + " for type " + (cl == null ? "null" : cl.getName())); } }
python
def decimal_to_dms(value, precision): ''' Convert a decimal position to degrees, minutes, seconds in a format supported by EXIF ''' deg = math.floor(value) minutes = math.floor((value - deg) * 60)  # 'minutes' avoids shadowing the built-in min() seconds = math.floor((value - deg - minutes / 60) * 3600 * precision) return ((deg, 1), (minutes, 1), (seconds, precision))
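A worked example of the conversion above (an illustrative sketch; 37.7749 is an arbitrary latitude, and `math` must be imported, as the function assumes):

# deg = 37; minutes = floor(0.7749 * 60) = 46;
# seconds = floor(0.0082333... * 3600 * 100) = 2964, i.e. 29.64 sec at precision 100
print(decimal_to_dms(37.7749, 100))
# ((37, 1), (46, 1), (2964, 100))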
java
@SuppressWarnings("deprecation") private static int calculateNumberOfNetworkBuffers(Configuration configuration, long maxJvmHeapMemory) { final int numberOfNetworkBuffers; if (!hasNewNetworkConfig(configuration)) { // fallback: number of network buffers numberOfNetworkBuffers = configuration.getInteger(TaskManagerOptions.NETWORK_NUM_BUFFERS); checkOldNetworkConfig(numberOfNetworkBuffers); } else { if (configuration.contains(TaskManagerOptions.NETWORK_NUM_BUFFERS)) { LOG.info("Ignoring old (but still present) network buffer configuration via {}.", TaskManagerOptions.NETWORK_NUM_BUFFERS.key()); } final long networkMemorySize = calculateNewNetworkBufferMemory(configuration, maxJvmHeapMemory); // tolerate offcuts between intended and allocated memory due to segmentation (will be available to the user-space memory) long numberOfNetworkBuffersLong = networkMemorySize / getPageSize(configuration); if (numberOfNetworkBuffersLong > Integer.MAX_VALUE) { throw new IllegalArgumentException("The given number of memory bytes (" + networkMemorySize + ") corresponds to more than MAX_INT pages."); } numberOfNetworkBuffers = (int) numberOfNetworkBuffersLong; } return numberOfNetworkBuffers; }
java
public static int getDelimiterOffset(final String line, final int start, final char delimiter) { int idx = line.indexOf(delimiter, start); if (idx >= 0) { idx -= start - 1; } return idx; }
java
private Pair<ResourcePackage, PackageHeader> readPackage(PackageHeader packageHeader) { Pair<ResourcePackage, PackageHeader> pair = new Pair<>(); //read packageHeader ResourcePackage resourcePackage = new ResourcePackage(packageHeader); pair.setLeft(resourcePackage); long beginPos = buffer.position(); // read type string pool if (packageHeader.getTypeStrings() > 0) { Buffers.position(buffer, beginPos + packageHeader.getTypeStrings() - packageHeader.getHeaderSize()); resourcePackage.setTypeStringPool(ParseUtils.readStringPool(buffer, (StringPoolHeader) readChunkHeader())); } //read key string pool if (packageHeader.getKeyStrings() > 0) { Buffers.position(buffer, beginPos + packageHeader.getKeyStrings() - packageHeader.getHeaderSize()); resourcePackage.setKeyStringPool(ParseUtils.readStringPool(buffer, (StringPoolHeader) readChunkHeader())); } outer: while (buffer.hasRemaining()) { ChunkHeader chunkHeader = readChunkHeader(); long chunkBegin = buffer.position(); switch (chunkHeader.getChunkType()) { case ChunkType.TABLE_TYPE_SPEC: TypeSpecHeader typeSpecHeader = (TypeSpecHeader) chunkHeader; long[] entryFlags = new long[(int) typeSpecHeader.getEntryCount()]; for (int i = 0; i < typeSpecHeader.getEntryCount(); i++) { entryFlags[i] = Buffers.readUInt(buffer); } TypeSpec typeSpec = new TypeSpec(typeSpecHeader); typeSpec.setEntryFlags(entryFlags); //id start from 1 typeSpec.setName(resourcePackage.getTypeStringPool() .get(typeSpecHeader.getId() - 1)); resourcePackage.addTypeSpec(typeSpec); Buffers.position(buffer, chunkBegin + typeSpecHeader.getBodySize()); break; case ChunkType.TABLE_TYPE: TypeHeader typeHeader = (TypeHeader) chunkHeader; // read offsets table long[] offsets = new long[(int) typeHeader.getEntryCount()]; for (int i = 0; i < typeHeader.getEntryCount(); i++) { offsets[i] = Buffers.readUInt(buffer); } Type type = new Type(typeHeader); type.setName(resourcePackage.getTypeStringPool().get(typeHeader.getId() - 1)); long entryPos = chunkBegin + typeHeader.getEntriesStart() - typeHeader.getHeaderSize(); Buffers.position(buffer, entryPos); ByteBuffer b = buffer.slice(); b.order(byteOrder); type.setBuffer(b); type.setKeyStringPool(resourcePackage.getKeyStringPool()); type.setOffsets(offsets); type.setStringPool(stringPool); resourcePackage.addType(type); locales.add(type.getLocale()); Buffers.position(buffer, chunkBegin + typeHeader.getBodySize()); break; case ChunkType.TABLE_PACKAGE: // another package. we should read next package here pair.setRight((PackageHeader) chunkHeader); break outer; case ChunkType.TABLE_LIBRARY: // read entries LibraryHeader libraryHeader = (LibraryHeader) chunkHeader; for (long i = 0; i < libraryHeader.getCount(); i++) { int packageId = buffer.getInt(); String name = Buffers.readZeroTerminatedString(buffer, 128); LibraryEntry entry = new LibraryEntry(packageId, name); //TODO: now just skip it.. } Buffers.position(buffer, chunkBegin + chunkHeader.getBodySize()); break; case ChunkType.NULL: // Buffers.position(buffer, chunkBegin + chunkHeader.getBodySize()); Buffers.position(buffer, buffer.position() + buffer.remaining()); break; default: throw new ParserException("unexpected chunk type: 0x" + chunkHeader.getChunkType()); } } return pair; }
java
public FSArray getPubTypeList() { if (Header_Type.featOkTst && ((Header_Type)jcasType).casFeat_pubTypeList == null) jcasType.jcas.throwFeatMissing("pubTypeList", "de.julielab.jules.types.Header"); return (FSArray)(jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((Header_Type)jcasType).casFeatCode_pubTypeList)));}
java
private Set<URI> listEntitiesByDataModel(com.hp.hpl.jena.rdf.model.Resource entityType, Property dataPropertyType, URI modelReference) { if (modelReference == null) { return ImmutableSet.of(); } StringBuilder queryBuilder = new StringBuilder() .append("SELECT DISTINCT ?entity WHERE { \n"); // Deal with engines that store the inference in the default graph (e.g., OWLIM) queryBuilder.append("{").append("\n") .append(" ?entity <").append(RDF.type.getURI()).append("> <").append(entityType.getURI()).append("> .").append("\n"); // Deal with the difference between Services and Operations if (entityType.equals(MSM.Service)) { queryBuilder.append(" ?entity <").append(MSM.hasOperation.getURI()).append("> / ").append("\n") .append(" <").append(dataPropertyType.getURI()).append("> / ").append("\n"); } else { queryBuilder.append(" ?entity <").append(dataPropertyType.getURI()).append("> / ").append("\n"); } queryBuilder.append(" <").append(SAWSDL.modelReference.getURI()).append("> <").append(modelReference.toASCIIString()).append("> .").append("\n"); // UNION queryBuilder.append("\n } UNION { \n"); queryBuilder.append(" ?entity <").append(RDF.type.getURI()).append("> <").append(entityType.getURI()).append("> .").append("\n"); // Deal with the difference between Services and Operations if (entityType.equals(MSM.Service)) { queryBuilder.append(" ?entity <").append(MSM.hasOperation.getURI()).append("> / ").append("\n") .append(" <").append(dataPropertyType.getURI()).append("> ?message .").append("\n"); } else { queryBuilder.append(" ?entity <").append(dataPropertyType.getURI()).append("> ?message .").append("\n"); } queryBuilder.append("?message (<").append(MSM.hasOptionalPart.getURI()). append("> | <").append(MSM.hasMandatoryPart.getURI()).append(">)+ ?part . \n"). append(" ?part <").append(SAWSDL.modelReference.getURI()).append("> <").append(modelReference.toASCIIString()).append("> ."). append("}}"); return this.graphStoreManager.listResourcesByQuery(queryBuilder.toString(), "entity"); }
java
public static boolean isRequestEntityTooLargeException(SdkBaseException exception) { return isAse(exception) && toAse(exception).getStatusCode() == HttpStatus.SC_REQUEST_TOO_LONG; }
python
def add_package( self, target=None, package_manager=None, package=None, type_option=None, version_option=None, node_paths=None, workunit_name=None, workunit_labels=None): """Add an additional package using requested package_manager.""" package_manager = package_manager or self.get_package_manager(target=target) command = package_manager.add_package( package, type_option=type_option, version_option=version_option, node_paths=node_paths, ) return self._execute_command( command, workunit_name=workunit_name, workunit_labels=workunit_labels)
python
def _deserialize_list(cls, type_item, list_): """ :type type_item: T|type :type list_: list :rtype: list[T] """ list_deserialized = [] for item in list_: item_deserialized = cls.deserialize(type_item, item) list_deserialized.append(item_deserialized) return list_deserialized
java
public final AtomEntry<Node> readAtomEntry(final InputStream in) { final Document doc = parseDocument(createDocumentBuilder(), in); final XPath xPath = createXPath("atom", "http://www.w3.org/2005/Atom"); final String eventStreamId = findContentText(doc, xPath, "/atom:entry/atom:content/eventStreamId"); final Integer eventNumber = findContentInteger(doc, xPath, "/atom:entry/atom:content/eventNumber"); final String eventType = findContentText(doc, xPath, "/atom:entry/atom:content/eventType"); final String eventId = findContentText(doc, xPath, "/atom:entry/atom:content/eventId"); final Node escMetaNode = findNode(doc, xPath, "/atom:entry/atom:content/metadata/esc-meta"); final String dataContextTypeStr = findContentText(escMetaNode, xPath, "data-content-type"); final EnhancedMimeType dataContentType = EnhancedMimeType.create(dataContextTypeStr); final Node data = findNode(doc, xPath, "/atom:entry/atom:content/data"); final EnhancedMimeType metaContentType; final String metaTypeStr; final Node meta; if (hasMetaData(escMetaNode)) { final String metaContentTypeStr = findContentText(escMetaNode, xPath, "meta-content-type"); metaContentType = EnhancedMimeType.create(metaContentTypeStr); metaTypeStr = findContentText(escMetaNode, xPath, "meta-type"); meta = escMetaNode; } else { metaContentType = null; metaTypeStr = null; meta = null; } return new AtomEntry<Node>(eventStreamId, eventNumber, eventType, eventId, dataContentType, metaContentType, metaTypeStr, data, meta); }
python
def decrypt_cbc(self, data, init_vector): """ Return an iterator that decrypts `data` using the Cipher-Block Chaining (CBC) mode of operation. CBC mode can only operate on `data` that is a multiple of the block-size in length. Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes) containing the decrypted bytes of the corresponding block in `data`. `init_vector` is the initialization vector and should be a :obj:`bytes`-like object with exactly 8 bytes. If it is not, a :exc:`ValueError` exception is raised. `data` should be a :obj:`bytes`-like object that is a multiple of the block-size in length (i.e. 8, 16, 32, etc.). If it is not, a :exc:`ValueError` exception is raised. """ S1, S2, S3, S4 = self.S P = self.P u4_1_pack = self._u4_1_pack u1_4_unpack = self._u1_4_unpack decrypt = self._decrypt u4_2_pack = self._u4_2_pack try: prev_cipher_L, prev_cipher_R = self._u4_2_unpack(init_vector) except struct_error: raise ValueError("initialization vector is not 8 bytes in length") try: LR_iter = self._u4_2_iter_unpack(data) except struct_error: raise ValueError("data is not a multiple of the block-size in length") for cipher_L, cipher_R in LR_iter: L, R = decrypt( cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack ) yield u4_2_pack(prev_cipher_L ^ L, prev_cipher_R ^ R) prev_cipher_L = cipher_L prev_cipher_R = cipher_R
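For context, the XOR step in the generator's loop above is the textbook CBC decryption relation, with the initialization vector acting as the "previous ciphertext" for the first block:

P_i = D_K(C_i) XOR C_{i-1}, with C_0 = IV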
java
public JobOutput withWatermarks(JobWatermark... watermarks) { if (this.watermarks == null) { setWatermarks(new com.amazonaws.internal.SdkInternalList<JobWatermark>(watermarks.length)); } for (JobWatermark ele : watermarks) { this.watermarks.add(ele); } return this; }
python
def get_timestats_str(unixtime_list, newlines=1, full=True, isutc=True): r""" Args: unixtime_list (list): newlines (bool): Returns: str: timestat_str CommandLine: python -m utool.util_time --test-get_timestats_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5] >>> newlines = 1 >>> full = False >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 04:03:42', 'min' : '1970/01/01 00:00:00', 'range': '5:16:40', 'std' : '2:02:01', } Example2: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5, float('nan'), 0] >>> newlines = 1 >>> timestat_str = get_timestats_str(unixtime_list, newlines, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 03:23:05', 'min' : '1970/01/01 00:00:00', 'nMax' : 1, 'nMin' : 2, 'num_nan': 1, 'range' : '5:16:40', 'shape' : (7,), 'std' : '2:23:43', } """ import utool as ut datetime_stats = get_timestats_dict(unixtime_list, full=full, isutc=isutc) timestat_str = ut.repr4(datetime_stats, newlines=newlines) return timestat_str
python
def push(self, filename, data): """Push a chunk of a file to the streaming endpoint. Args: filename: Name of the file this chunk belongs to. data: File data for this chunk. """ self._queue.put(Chunk(filename, data))
python
def assignUserCredits(self, usernames, credits): """ Assigns credits to users. Inputs: usernames - list of users credits - number of credits to assign to the users Output: dictionary """ userAssignments = [] for name in usernames: userAssignments.append( { "username" : name, "credits" : credits } ) params = { "userAssignments" : userAssignments, "f" : "json" } url = self.root + "/assignUserCredits" return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
java
private void notifyOfEntityEvent(Collection<ITypedReferenceableInstance> entityDefinitions, EntityNotification.OperationType operationType) throws AtlasException { List<EntityNotification> messages = new LinkedList<>(); for (IReferenceableInstance entityDefinition : entityDefinitions) { Referenceable entity = new Referenceable(entityDefinition); Map<String, Object> attributesMap = entity.getValuesMap(); List<String> entityNotificationAttrs = getNotificationAttributes(entity.getTypeName()); if (MapUtils.isNotEmpty(attributesMap) && CollectionUtils.isNotEmpty(entityNotificationAttrs)) { for (String entityAttr : attributesMap.keySet()) { if (!entityNotificationAttrs.contains(entityAttr)) { entity.setNull(entityAttr); } } } EntityNotificationImpl notification = new EntityNotificationImpl(entity, operationType, getAllTraits(entity, typeSystem)); messages.add(notification); } notificationInterface.send(NotificationInterface.NotificationType.ENTITIES, messages); }
python
def list_datastore_full(service_instance, datastore): ''' Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore. ''' datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore) if not datastore_object: raise salt.exceptions.VMwareObjectRetrievalError( 'Datastore \'{0}\' does not exist.'.format(datastore) ) items = {} items['name'] = str(datastore_object.summary.name).replace("'", "") items['type'] = str(datastore_object.summary.type).replace("'", "") items['url'] = str(datastore_object.summary.url).replace("'", "") items['capacity'] = datastore_object.summary.capacity / 1024 / 1024 items['free'] = datastore_object.summary.freeSpace / 1024 / 1024 items['used'] = items['capacity'] - items['free'] items['usage'] = (float(items['used']) / float(items['capacity'])) * 100 items['hosts'] = [] for host in datastore_object.host: host_key = str(host.key).replace("'", "").split(":", 1)[1] host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key) items['hosts'].append(host_object.name) return items
java
@Override public boolean isWsocRequest(ServletRequest request) throws ServletException { ServletContext context = request.getServletContext(); Object wsocContainer = null; if (context != null) { wsocContainer = context.getAttribute(WebSocketContainerManager.SERVER_CONTAINER_ATTRIBUTE); } if (wsocContainer != null) { //check if this is a IBM's implementation of WebSocketContainer. If this is a tomcat or some other server's WebSocket //implementation of WebSocketContainer then, this is not a valid WebSocket request for us and Web Container should not call //WsocHandler.handleRequest() subsequently. if (!isValidContainer(wsocContainer)) { return false; } } else { //not a WebSocket request if ServerContainer attribute is missing. return false; } //call setNoMoreAddEndPoints only once per webapp if (!noMoreAddsCalled) { ((ServerContainerExt) wsocContainer).setNoMoreAddEndpointsAllowed(); noMoreAddsCalled = true; } return checkWebSocketRequest(request, wsocContainer); }
python
def extrapolate_reciprocal(xs, ys, n, noise): """ return the parameters such that a + b / x^n hits the last two data points """ if len(xs) > 4 and noise: y1 = (ys[-3] + ys[-4]) / 2 y2 = (ys[-1] + ys[-2]) / 2 x1 = (xs[-3] + xs[-4]) / 2 x2 = (xs[-1] + xs[-2]) / 2 try: b = (y1 - y2) / (1/x1**n - 1/x2**n) a = y2 - b / x2**n except IndexError: print_and_raise_error(xs, ys, 'extrapolate_reciprocal') else: try: b = (ys[-2] - ys[-1]) / (1/(xs[-2])**n - 1/(xs[-1])**n) a = ys[-1] - b / (xs[-1])**n except IndexError: print_and_raise_error(xs, ys, 'extrapolate_reciprocal') return [a, b, n]
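To make the two-point fit above concrete, here is a small self-check with synthetic, noiseless data (a sketch; assumes the function is in scope):

# Fit y = a + b / x**n through the last two points:
#   b = (y1 - y2) / (1/x1**n - 1/x2**n),  a = y2 - b / x2**n
a_true, b_true, n = 2.0, 5.0, 1
xs = [1, 2, 4, 8]
ys = [a_true + b_true / x**n for x in xs]
a, b, _ = extrapolate_reciprocal(xs, ys, n, noise=False)
assert abs(a - a_true) < 1e-12 and abs(b - b_true) < 1e-12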
java
public static String normalise(String ensName) { try { return IDN.toASCII(ensName, IDN.USE_STD3_ASCII_RULES) .toLowerCase(); } catch (IllegalArgumentException e) { throw new EnsResolutionException("Invalid ENS name provided: " + ensName); } }
java
public static boolean appearsToBeValidDDLBatch(String batch) { BufferedReader reader = new BufferedReader(new StringReader(batch)); String line; try { while ((line = reader.readLine()) != null) { if (isWholeLineComment(line)) { continue; } line = line.trim(); if (line.equals("")) continue; // we have a non-blank line that contains more than just a comment. return queryIsDDL(line); } } catch (IOException e) { // This should never happen for a StringReader assert(false); } // trivial empty batch: no lines are non-blank or non-comments return true; }
python
def get_choice_status(self): """ Returns a message field, which indicates whether choices are statically or dynamically defined, and a flag indicating whether a dynamic file selection loading error occurred. Throws an error if this is not a choice parameter. """ if 'choiceInfo' not in self.dto[self.name]: raise GPException('not a choice parameter') status = self.dto[self.name]['choiceInfo']['status'] return status['message'], status['flag']
java
public void setRows(com.google.api.ads.admanager.axis.v201808.Row[] rows) { this.rows = rows; }
python
def envcontext(patch, _env=None): """In this context, `os.environ` is modified according to `patch`. `patch` is an iterable of 2-tuples (key, value): `key`: string `value`: - string: `environ[key] == value` inside the context. - UNSET: `key not in environ` inside the context. - template: A template is a tuple of strings and Var which will be replaced with the previous environment """ env = os.environ if _env is None else _env before = env.copy() for k, v in patch: if v is UNSET: env.pop(k, None) elif isinstance(v, tuple): env[k] = format_env(v, before) else: env[k] = v try: yield finally: env.clear() env.update(before)
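The generator above is presumably wrapped with `contextlib.contextmanager` in its original module; a hedged usage sketch (the decorator call below is an assumption, and `UNSET`/`format_env` come from that same module):

import contextlib
import os

env_ctx = contextlib.contextmanager(envcontext)  # assumption: applied upstream

with env_ctx([('GIT_DIR', '/tmp/repo')]):
    assert os.environ['GIT_DIR'] == '/tmp/repo'
# on exit, os.environ is restored to its previous contents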
python
def stop(self): """Stop listening.""" self._running = False if self._sleep_task: self._sleep_task.cancel() self._sleep_task = None
python
def get(self, key=None, view=None): """Return a model by key, or all registered models""" self.set_header("Access-Control-Allow-Origin", "*") self.set_header("Content-Type", "application/json") if key is not None: value = {} value.update(self.database[key]) if view is not None: # generate a context with the relevant variables context = {} context["value"] = value context["ctx"] = self.ctx result = json.dumps(getattr(views, view)(context)) else: result = json.dumps(value) else: result = json.dumps(list(self.database.values())) self.write(result)
java
public AVIMConversation getConversation(String conversationId, int convType) { AVIMConversation result = null; switch (convType) { case Conversation.CONV_TYPE_SYSTEM: result = getServiceConversation(conversationId); break; case Conversation.CONV_TYPE_TEMPORARY: result = getTemporaryConversation(conversationId); break; case Conversation.CONV_TYPE_TRANSIENT: result = getChatRoom(conversationId); break; default: result = getConversation(conversationId); break; } return result; }
python
def read_frame_losc(channels, start_time, end_time): """ Read channels from losc data Parameters ---------- channels: str or list The channel name to read or list of channel names. start_time: int The gps time in GPS seconds end_time: int The end time in GPS seconds Returns ------- ts: TimeSeries Returns a timeseries or list of timeseries with the requested data. """ from pycbc.frame import read_frame if not isinstance(channels, list): channels = [channels] ifos = [c[0:2] for c in channels] urls = {} for ifo in ifos: urls[ifo] = losc_frame_urls(ifo, start_time, end_time) if len(urls[ifo]) == 0: raise ValueError("No data found for %s so we " "can't produce a time series" % ifo) fnames = {ifo:[] for ifo in ifos} for ifo in ifos: for url in urls[ifo]: fname = download_file(url, cache=True) fnames[ifo].append(fname) ts = [read_frame(fnames[channel[0:2]], channel, start_time=start_time, end_time=end_time) for channel in channels] if len(ts) == 1: return ts[0] else: return ts
java
protected KdTree.Node computeChild(List<P> points , GrowQueue_I32 indexes ) { if( points.size() == 0 ) return null; if( points.size() == 1 ) { return createLeaf(points,indexes); } else { return computeBranch(points,indexes); } }
python
def check_typ(helper, typ): """ check if typ parameter is TCP or UDP """ if typ not in ("tcp", "udp"): helper.exit(summary="Type (-t) must be udp or tcp.", exit_code=unknown, perfdata='')
java
public SVGPath smoothCubicTo(double c2x, double c2y, double x, double y) { return append(PATH_SMOOTH_CUBIC_TO).append(c2x).append(c2y).append(x).append(y); }
python
def _add_ephemeral_service(config, onion, progress, version, auth=None, await_all_uploads=None): """ Internal Helper. This uses ADD_ONION to add the given service to Tor. The Deferred this returns will callback when the ADD_ONION call has succeeded, *and* when at least one descriptor has been uploaded to a Hidden Service Directory. :param config: a TorConfig instance :param onion: an EphemeralOnionService instance :param progress: a callable taking 3 arguments (percent, tag, description) that is called some number of times to tell you of progress. :param version: 2 or 3, which kind of service to create :param auth: if not None, create an authenticated service ("basic" is the only kind supported currently so an AuthBasic instance should be passed) """ if onion not in config.EphemeralOnionServices: config.EphemeralOnionServices.append(onion) # we have to keep this as a Deferred for now so that HS_DESC # listener gets added before we issue ADD_ONION assert version in (2, 3) uploaded_d = _await_descriptor_upload(config.tor_protocol, onion, progress, await_all_uploads) # we allow a key to be passed that *doesn't* start with # "RSA1024:" because having to escape the ":" for endpoint # string syntax (which uses ":" as delimiters) is annoying # XXX rethink ^^? what do we do when the type is upgraded? # maybe just a magic-character that's different from ":", or # force people to escape them? if onion.private_key: if onion.private_key is not DISCARD and ':' not in onion.private_key: if version == 2: if not onion.private_key.startswith("RSA1024:"): onion._private_key = "RSA1024:" + onion.private_key elif version == 3: if not onion.private_key.startswith("ED25519-V3:"): onion._private_key = "ED25519-V3:" + onion.private_key # okay, we're set up to listen, and now we issue the ADD_ONION # command. this will set ._hostname and ._private_key properly keystring = 'NEW:BEST' if onion.private_key not in (None, DISCARD): keystring = onion.private_key elif version == 3: keystring = 'NEW:ED25519-V3' if version == 3: if 'V3' not in keystring: raise ValueError( "version=3 but private key isn't 'ED25519-V3'" ) # hmm, is it better to validate keyblob args in the create # methods? "Feels nicer" to see it here when building ADD_ONION # though? if '\r' in keystring or '\n' in keystring: raise ValueError( "No newline or return characters allowed in key blobs" ) cmd = 'ADD_ONION {}'.format(keystring) for port in onion._ports: cmd += ' Port={},{}'.format(*port.split(' ', 1)) flags = [] if onion._detach: flags.append('Detach') if onion.private_key is DISCARD: flags.append('DiscardPK') if auth is not None: assert isinstance(auth, AuthBasic) # don't support AuthStealth yet if isinstance(auth, AuthBasic): flags.append('BasicAuth') if onion._single_hop: flags.append('NonAnonymous') # depends on some Tor options, too if flags: cmd += ' Flags={}'.format(','.join(flags)) if auth is not None: for client_name in auth.client_names(): keyblob = auth.keyblob_for(client_name) if keyblob is None: cmd += ' ClientAuth={}'.format(client_name) else: cmd += ' ClientAuth={}:{}'.format(client_name, keyblob) onion._add_client(client_name, keyblob) raw_res = yield config.tor_protocol.queue_command(cmd) res = find_keywords(raw_res.split('\n')) try: onion._hostname = res['ServiceID'] + '.onion' if onion.private_key is DISCARD: onion._private_key = None else: # if we specified a private key, it's not echoed back if not onion.private_key: onion._private_key = res['PrivateKey'].strip() except KeyError: raise RuntimeError( "Expected ADD_ONION to return ServiceID= and PrivateKey= args. " "Got: {}".format(res) ) if auth is not None: for line in raw_res.split('\n'): if line.startswith("ClientAuth="): name, blob = line[11:].split(':', 1) onion._add_client(name, blob) log.msg("{}: waiting for descriptor uploads.".format(onion.hostname)) yield uploaded_d
java
private void handleNoBreadcrumbsIntent(@NonNull final Bundle extras) { if (extras.containsKey(EXTRA_NO_BREAD_CRUMBS)) { hideBreadCrumb(extras.getBoolean(EXTRA_NO_BREAD_CRUMBS)); } }
python
def validate_frequencies(frequencies, max_freq, min_freq, allow_negatives=False): """Checks that a 1-d frequency ndarray is well-formed, and raises errors if not. Parameters ---------- frequencies : np.ndarray, shape=(n,) Array of frequency values max_freq : float If a frequency is found above this pitch, a ValueError will be raised. (Default value = 5000.) min_freq : float If a frequency is found below this pitch, a ValueError will be raised. (Default value = 20.) allow_negatives : bool Whether or not to allow negative frequency values. """ # If flag is true, map frequencies to their absolute value. if allow_negatives: frequencies = np.abs(frequencies) # Make sure no frequency values are huge if (np.abs(frequencies) > max_freq).any(): raise ValueError('A frequency of {} was found which is greater than ' 'the maximum allowable value of max_freq = {} (did ' 'you supply frequency values in ' 'Hz?)'.format(frequencies.max(), max_freq)) # Make sure no frequency values are tiny if (np.abs(frequencies) < min_freq).any(): raise ValueError('A frequency of {} was found which is less than the ' 'minimum allowable value of min_freq = {} (did you ' 'supply frequency values in ' 'Hz?)'.format(frequencies.min(), min_freq)) # Make sure frequency values are 1-d np ndarrays if frequencies.ndim != 1: raise ValueError('Frequencies should be 1-d numpy ndarray, ' 'but shape={}'.format(frequencies.shape))
java
public static TimeSpan toTimespan(Object o, TimeSpan defaultValue) { if (o instanceof TimeSpan) return (TimeSpan) o; else if (o instanceof String) { String[] arr = o.toString().split(","); if (arr.length == 4) { int[] values = new int[4]; try { for (int i = 0; i < arr.length; i++) { values[i] = toIntValue(arr[i]); } return new TimeSpanImpl(values[0], values[1], values[2], values[3]); } catch (ExpressionException e) {} } } else if (o instanceof ObjectWrap) { Object embeded = ((ObjectWrap) o).getEmbededObject(DEFAULT); if (embeded == DEFAULT) return defaultValue; return toTimespan(embeded, defaultValue); } double dbl = toDoubleValue(o, true, Double.NaN); if (!Double.isNaN(dbl)) return TimeSpanImpl.fromDays(dbl); return defaultValue; }
java
private ItemStack handleHotbar(MalisisInventory inventory, MalisisSlot hoveredSlot, int num) { MalisisSlot hotbarSlot = getPlayerInventory().getSlot(num); // slot from player's inventory, swap itemStacks if (inventory == getPlayerInventory() || hoveredSlot.getItemStack().isEmpty()) { if (hoveredSlot.isState(PLAYER_INSERT)) { ItemStack dest = hotbarSlot.extract(ItemUtils.FULL_STACK); ItemStack src = hoveredSlot.extract(ItemUtils.FULL_STACK); dest = hoveredSlot.insert(dest); //couldn't fit all into the slot, put back what's left in hotbar if (!dest.isEmpty()) { hotbarSlot.insert(dest); //src should be empty but better safe than sorry inventory.transferInto(src); } else src = hotbarSlot.insert(src); } } // merge itemStack in slot into hotbar. If already holding an itemStack, move elsewhere inside player inventory else { if (hoveredSlot.isState(PLAYER_EXTRACT)) { ItemStack dest = hoveredSlot.extract(ItemUtils.FULL_STACK); ItemStack left = hotbarSlot.insert(dest, ItemUtils.FULL_STACK, true); getPlayerInventory().transferInto(left, false); } } return hotbarSlot.getItemStack(); }
python
def p_case_def(self, p): '''case_def : CASE term COLON expr | DEFAULT COLON expr''' if len(p) == 5: p[0] = ('case', (p[2], p[4])) elif len(p) == 4: p[0] = ('default', p[3])
python
def lookup(self, name: str): '''Look up a symbol by fully qualified name.''' # <module> if name in self._moduleMap: return self._moduleMap[name] # <module>.<Symbol> (module_name, type_name, fragment_name) = self.split_typename(name) if not module_name and type_name: click.secho('not able to look up symbol: {0}'.format(name), fg='red') return None module = self._moduleMap[module_name] return module.lookup(type_name, fragment_name)
java
@Override public List<ForeignKey<Record, ?>> getReferences() { return Arrays.<ForeignKey<Record, ?>>asList(Keys.FK_BOOK_AUTHOR, Keys.FK_BOOK_LANGUAGE); }
python
def from_clause(cls, clause): """ Factory method """ [_, field, operator, val] = clause return cls(field, operator, resolve(val))
java
public static void rotate(List<?> list, int distance) { if (list instanceof RandomAccess || list.size() < ROTATE_THRESHOLD) rotate1(list, distance); else rotate2(list, distance); }
java
public Observable<ServiceResponse<List<KeyVaultKeyInner>>> listKeyVaultKeysWithServiceResponseAsync(String resourceGroupName, String integrationAccountName, ListKeyVaultKeysDefinition listKeyVaultKeys) { if (this.client.subscriptionId() == null) { throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null."); } if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } if (integrationAccountName == null) { throw new IllegalArgumentException("Parameter integrationAccountName is required and cannot be null."); } if (this.client.apiVersion() == null) { throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null."); } if (listKeyVaultKeys == null) { throw new IllegalArgumentException("Parameter listKeyVaultKeys is required and cannot be null."); } Validator.validate(listKeyVaultKeys); return service.listKeyVaultKeys(this.client.subscriptionId(), resourceGroupName, integrationAccountName, this.client.apiVersion(), listKeyVaultKeys, this.client.acceptLanguage(), this.client.userAgent()) .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<List<KeyVaultKeyInner>>>>() { @Override public Observable<ServiceResponse<List<KeyVaultKeyInner>>> call(Response<ResponseBody> response) { try { ServiceResponse<PageImpl2<KeyVaultKeyInner>> result = listKeyVaultKeysDelegate(response); List<KeyVaultKeyInner> items = null; if (result.body() != null) { items = result.body().items(); } ServiceResponse<List<KeyVaultKeyInner>> clientResponse = new ServiceResponse<List<KeyVaultKeyInner>>(items, result.response()); return Observable.just(clientResponse); } catch (Throwable t) { return Observable.error(t); } } }); }
python
def F_calc(TP, FP, FN, beta): """ Calculate F-score. :param TP: true positive :type TP : int :param FP: false positive :type FP : int :param FN: false negative :type FN : int :param beta : beta coefficient :type beta : float :return: F score as float """ try: result = ((1 + (beta)**2) * TP) / \ ((1 + (beta)**2) * TP + FP + (beta**2) * FN) return result except ZeroDivisionError: return "None"
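A worked check of the F-beta formula above; with beta = 1 it reduces to the familiar F1 = 2PR / (P + R) (hypothetical counts, shown for illustration):

# TP=8, FP=2, FN=4 -> precision = 0.8, recall = 2/3
# F1 = ((1+1)*8) / ((1+1)*8 + 2 + 4) = 16/22 ~ 0.727
print(F_calc(TP=8, FP=2, FN=4, beta=1))   # 0.7272...
print(F_calc(TP=0, FP=0, FN=0, beta=1))   # "None" (zero-division guard)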
java
public static boolean hasField( Class clazz, String fieldName ) { try { clazz.getDeclaredField( fieldName ); return true; } catch( NoSuchFieldException nfe ) { if( clazz.getSuperclass() != null ) { return hasField( clazz.getSuperclass(), fieldName ); } else { return false; } } }
java
public static MappedMemory allocate(File file, FileChannel.MapMode mode, int size) { if (size > MAX_SIZE) { throw new IllegalArgumentException("size cannot be greater than " + MAX_SIZE); } return new MappedMemoryAllocator(file, mode).allocate(size); }
python
def getserialnum(flist): """ This function assumes the serial number of the camera is in a particular place in the filename. Yes, this is a little lame, but it's how the original 2011 image-writing program worked, and I've carried over the scheme rather than appending bits to dozens of TB of files. """ sn = [] for f in flist: tmp = search(r'(?<=CamSer)\d{3,6}', f) if tmp: ser = int(tmp.group()) else: ser = None sn.append(ser) return sn
java
public void beginDeleteById(String resourceId, String apiVersion) { beginDeleteByIdWithServiceResponseAsync(resourceId, apiVersion).toBlocking().single().body(); }
java
@Override public String getString(final FieldCase c) { boolean success = false; String numberString; do { double number = getDouble(c); numberString = format.format(number); if (notZero && ZERO_PATTERN.matcher(numberString).matches()) { log.info(XMLTags.NOTZERO + " is true and a zero value was generated: value=" + numberString + " - generating new value"); } else { success = true; } } while (!success); if (c.isBad()) { numberString = expression.negateString(numberString, c.getBad()); } return numberString; }
python
def update(self, section, val, data): """Add a setting to the config, but take no action if it is the same as the default or None. This saves `.save` from writing out the defaults. `section` (mandatory) (string) the section name in the config E.g. `"agent"` `val` (mandatory) (string) the setting name within the section E.g. `"host"` `data` (mandatory) (as appropriate) the new value for `val` """ k = self.get(section, val) # logger.debug('update %s %s from: %s to: %s', section, val, k, data) if data is not None and k != data: self.set(section, val, data)
java
public void remove(final ObjectSinkNode node) { if ( (this.firstNode != node) && (this.lastNode != node) ) { node.getPreviousObjectSinkNode().setNextObjectSinkNode( node.getNextObjectSinkNode() ); node.getNextObjectSinkNode().setPreviousObjectSinkNode( node.getPreviousObjectSinkNode() ); this.size--; node.setPreviousObjectSinkNode( null ); node.setNextObjectSinkNode( null ); } else { if ( this.firstNode == node ) { removeFirst(); } else { removeLast(); } } }
python
def get_corrections_dict(self, entry): """ Returns the corrections applied to a particular entry. Args: entry: A ComputedEntry object. Returns: ({correction_name: value}) """ corrections = {} for c in self.corrections: val = c.get_correction(entry) if val != 0: corrections[str(c)] = val return corrections
java
protected void createServletWrappers() throws Exception { // NOTE: namespace preinvoke/postinvoke not necessary as the only // external // code being run is the servlet's init() and that is handled in the // ServletWrapper // check if an extensionFactory is present for *.jsp: // We do this by constructing an arbitrary mapping which // will only match the *.xxx extension pattern // if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) logger.entering(CLASS_NAME, "createServletWrappers"); WebExtensionProcessor jspProcessor = (WebExtensionProcessor) requestMapper.map("/dummyPath.jsp"); if (jspProcessor == null) { // No extension processor present to handle this kind of // target. Hence warn, skip. // pk435011 // LIBERTY: changing log level to debug as this is valid if there's no JSP support configured if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) logger.logp(Level.FINE, CLASS_NAME, "createServletWrappers", "No Extension Processor found for handling JSPs"); //logger.logp(Level.WARNING, CLASS_NAME, "createServletWrappers", "no.jsp.extension.handler.found"); } Iterator<IServletConfig> sortedServletConfigIterator = sortNamesByStartUpWeight(config.getServletInfos()); Map<String, List<String>> mappings = config.getServletMappings(); String path = null; IServletConfig servletConfig; IServletWrapper wrapper = null; while (sortedServletConfigIterator.hasNext() && !com.ibm.ws.webcontainer.osgi.WebContainer.isServerStopping()) { wrapper = null; // 248871: reset wrapper to null servletConfig = sortedServletConfigIterator.next(); String servletName = servletConfig.getServletName(); List<String> mapList = mappings.get(servletConfig.getServletName()); servletConfig.setServletContext(this.getFacade()); // Begin 650884 // WARNING!!! We shouldn't map by name only as there is // no way to configure a security constraint // for a dynamically added path. //Consolidate the code to setup a single entry map list when its mapped by name only // if (mapList==null){ // //WARNING!!! We shouldn't map by name only as there is // //no way to configure a security constraint // //for a dynamically added path. // //Also, if there was nothing mapped to the servlet // //in web.xml, we would have never called WebAppConfiguration.addServletMapping // //which sets the mappings on sconfig. Adding the list directly to the hashMap short // //circuits that logic so future calls to addMapping on the ServletConfig wouldn't work // //unless there was at least one mapping in web.xml // // // // hardcode the path, since it had no mappings // String byNamePath = BY_NAME_ONLY + servletName; // // // Add this to the config, because we will be looking at // // the mappings in order to get to the servlet through the // // mappings in the config. // mapList = new ArrayList<String>(); // mapList.add(byNamePath); // mappings.put(servletName, mapList); // } // End 650884 if (mapList == null || mapList.isEmpty()) { wrapper = jspAwareCreateServletWrapper(jspProcessor, servletConfig, servletName); } else { for (String urlPattern : mapList) { path = urlPattern; if (path == null) { // shouldn't happen since there is a mapping specified // but too bad the user can never hit the servlet // pk435011 // Begin 650884 logger.logp(Level.SEVERE, CLASS_NAME, "createServletWrappers", "illegal.servlet.mapping", servletName); // PK33511 // path = "/" + BY_NAME_ONLY + "/" + servletName; // End 650884 } else if (path.equals("/")) { path = "/*"; } if (wrapper == null) { // 248871: Check to see if we've // already found wrapper for // servletName wrapper = jspAwareCreateServletWrapper(jspProcessor, servletConfig, servletName); if (wrapper == null) continue; } try { // Begin:248871: Check to see if we found the wrapper // before adding if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) logger.logp(Level.FINE, CLASS_NAME, "createServletWrappers" , "determine whether to add mapping for path[{0}] wrapper[{1}] isEnabled[{2}]" , new Object[] { path, wrapper, servletConfig.isEnabled() }); if (path != null && servletConfig.isEnabled()) { requestMapper.addMapping(path, wrapper); } // End:248871 } catch (Exception e) { //TODO: ???? extension processor used to call addMappingTarget after the wrappers had been added. //Now it is done before, and you can get this exception here in the case they call addMappingTarget //and add the mapping to the servletConfig because we'll try to add it again, but it will //already be mapped. You could add a list of paths to ignore and not try to add again. So any //path added via addMappingTarget will be recorded and addMapping can be skipped. It is preferable //to just have them not call addMappingTarget any more instead of adding the extra check. // pk435011 logger.logp(Level.WARNING, CLASS_NAME, "createServletWrappers", "error.while.adding.servlet.mapping.for.path", new Object[] { path, wrapper, getApplicationName() }); if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) { com.ibm.wsspi.webcontainer.util.FFDCWrapper.processException(e, CLASS_NAME + ".createServletWrappers", "455", this); // pk435011 logger.logp(Level.SEVERE, CLASS_NAME, "createServletWrappers", "error.adding.servlet.mapping.for.servlet", new Object[] { servletName, getApplicationName(), e }); // PK33511 } } } } servletConfig.setServletWrapper(wrapper); this.initializeNonDDRepresentableAnnotation(servletConfig); // set the servlet wrapper on the // servlet config so // ServletConfig.addMapping // can put it in the // requestMapper } }
java
@SuppressWarnings("unchecked") @NotNull public <TYPE> TYPE create(@NotNull @FileExists @IsFile final File file, @NotNull final JAXBContext jaxbContext) throws UnmarshalObjectException { Contract.requireArgNotNull("file", file); FileExistsValidator.requireArgValid("file", file); IsFileValidator.requireArgValid("file", file); Contract.requireArgNotNull("jaxbContext", jaxbContext); try { final FileReader fr = new FileReader(file); try { return (TYPE) create(fr, jaxbContext); } finally { fr.close(); } } catch (final IOException ex) { throw new UnmarshalObjectException("Unable to parse XML from file: " + file, ex); } }
python
def disambiguate(self, symclasses): """Use the connection to the atoms around a given vertex as a multiplication function to disambiguate a vertex""" offsets = self.offsets result = symclasses[:] for index in self.range: try: val = 1 for offset, bondtype in offsets[index]: val *= symclasses[offset] * bondtype except OverflowError: # Hmm, how often does this occur? (never on Python 3, where ints don't overflow) val = 1 for offset, bondtype in offsets[index]: val *= symclasses[offset] * bondtype result[index] = val return result
java
public void setAttribute(final String name, final Attribute attribute) { if (name.equals(MAP_KEY)) { this.mapAttribute = (MapAttribute) attribute; } }
python
def describe(self, name=None): """ Cleanly show what the four displayed distribution moments are: - Mean - Variance - Standardized Skewness Coefficient - Standardized Kurtosis Coefficient For a standard Normal distribution, these are [0, 1, 0, 3]. If the object has an associated tag, this is presented. If the optional ``name`` kwarg is utilized, this is presented as with the moments. Otherwise, no unique name is presented. Example ======= :: >>> x = N(0, 1, 'x') >>> x.describe() # print tag since assigned MCERP Uncertain Value (x): ... >>> x.describe('foobar') # 'name' kwarg takes precedence MCERP Uncertain Value (foobar): ... >>> y = x**2 >>> y.describe('y') # print name since assigned MCERP Uncertain Value (y): ... >>> y.describe() # print nothing since no tag MCERP Uncertain Value: ... """ mn, vr, sk, kt = self.stats if name is not None: s = "MCERP Uncertain Value (" + name + "):\n" elif self.tag is not None: s = "MCERP Uncertain Value (" + self.tag + "):\n" else: s = "MCERP Uncertain Value:\n" s += " > Mean................... {: }\n".format(mn) s += " > Variance............... {: }\n".format(vr) s += " > Skewness Coefficient... {: }\n".format(sk) s += " > Kurtosis Coefficient... {: }\n".format(kt) print(s)
java
public QueryBuilder<T, ID> queryBuilder() throws SQLException { if (statementBuilder instanceof QueryBuilder) { return (QueryBuilder<T, ID>) statementBuilder; } else { throw new SQLException("Cannot cast " + statementBuilder.getType() + " to QueryBuilder"); } }
python
def load(fp, **kwargs) -> BioCCollection: """ Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to a BioCCollection object Args: fp: a file containing a JSON document **kwargs: Returns: BioCCollection: a collection """ obj = json.load(fp, **kwargs) return parse_collection(obj)
java
public void stop() { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "stop"); // Lock exclusively for start operations mpioLockManager.lockExclusive(); started = false; mpioLockManager.unlockExclusive(); if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "stop"); }
java
private Node createCompilerDefaultValueOverridesVarNode( Node sourceInformationNode) { Node objNode = IR.objectlit().srcref(sourceInformationNode); for (Entry<String, Node> entry : compilerDefaultValueOverrides.entrySet()) { Node objKeyNode = IR.stringKey(entry.getKey()) .useSourceInfoIfMissingFrom(sourceInformationNode); Node objValueNode = entry.getValue().cloneNode() .useSourceInfoIfMissingFrom(sourceInformationNode); objKeyNode.addChildToBack(objValueNode); objNode.addChildToBack(objKeyNode); } return objNode; }
java
protected SemanticSpace getSpace() { // Ensure that the configured DependencyExtractor is in place prior to // constructing the SVS setupDependencyExtractor(); DependencyPathAcceptor acceptor; if (argOptions.hasOption("pathAcceptor")) acceptor = ReflectionUtil.getObjectInstance( argOptions.getStringOption("pathAcceptor")); else acceptor = new UniversalPathAcceptor(); VectorCombinor combinor; if (argOptions.hasOption("vectorCombinor")) combinor = ReflectionUtil.getObjectInstance( argOptions.getStringOption("vectorCombinor")); else combinor = new PointWiseCombinor(); DependencyExtractor extractor = DependencyExtractorManager.getDefaultExtractor(); return new StructuredVectorSpace(extractor, acceptor, combinor); }
java
protected MailMessage createMailRequest(Map<String, Object> messageHeaders, BodyPart bodyPart, MailEndpointConfiguration endpointConfiguration) { return MailMessage.request(messageHeaders) .marshaller(endpointConfiguration.getMarshaller()) .from(messageHeaders.get(CitrusMailMessageHeaders.MAIL_FROM).toString()) .to(messageHeaders.get(CitrusMailMessageHeaders.MAIL_TO).toString()) .cc(messageHeaders.get(CitrusMailMessageHeaders.MAIL_CC).toString()) .bcc(messageHeaders.get(CitrusMailMessageHeaders.MAIL_BCC).toString()) .subject(messageHeaders.get(CitrusMailMessageHeaders.MAIL_SUBJECT).toString()) .body(bodyPart); }
java
public String getUuid() { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "getUuid"); String uuid = null; if(_outputHandler!=null) { uuid = _outputHandler.getTopicSpaceUuid().toString(); } else { uuid = _anycastInputHandler.getBaseDestinationHandler(). getUuid().toString(); } if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "getUuid", uuid); return uuid; }
python
def group_path(cls, project, group): """Return a fully-qualified group string.""" return google.api_core.path_template.expand( "projects/{project}/groups/{group}", project=project, group=group )
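For illustration, the expansion is plain template substitution; the client class name and the project/group values below are made up:

# 'Client' stands in for whatever class defines this classmethod.
path = Client.group_path('my-project', 'my-group')
# path == 'projects/my-project/groups/my-group'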
python
def download_on_demand_class(session, args, class_name):
    """
    Download all requested resources from the on-demand class given
    in class_name.

    @return: Tuple of (bool, bool), where the first bool indicates whether
        errors occurred while parsing syllabus, the second bool indicates
        whether the course appears to be completed.
    @rtype: (bool, bool)
    """
    error_occurred = False
    extractor = CourseraExtractor(session)

    cached_syllabus_filename = '%s-syllabus-parsed.json' % class_name
    if args.cache_syllabus and os.path.isfile(cached_syllabus_filename):
        modules = slurp_json(cached_syllabus_filename)
    else:
        error_occurred, modules = extractor.get_modules(
            class_name,
            args.reverse,
            args.unrestricted_filenames,
            args.subtitle_language,
            args.video_resolution,
            args.download_quizzes,
            args.mathjax_cdn_url,
            args.download_notebooks
        )

        if is_debug_run() or args.cache_syllabus:
            spit_json(modules, cached_syllabus_filename)

    if args.only_syllabus:
        return error_occurred, False

    downloader = get_downloader(session, class_name, args)
    downloader_wrapper = ParallelDownloader(downloader, args.jobs) \
        if args.jobs > 1 else ConsecutiveDownloader(downloader)

    # obtain the resources
    ignored_formats = []
    if args.ignore_formats:
        ignored_formats = args.ignore_formats.split(",")

    course_downloader = CourseraDownloader(
        downloader_wrapper,
        commandline_args=args,
        class_name=class_name,
        path=args.path,
        ignored_formats=ignored_formats,
        disable_url_skipping=args.disable_url_skipping
    )

    completed = course_downloader.download_modules(modules)

    # Print skipped URLs if any
    if course_downloader.skipped_urls:
        print_skipped_urls(course_downloader.skipped_urls)

    # Print failed URLs if any
    # FIXME: should we set non-zero exit code if we have failed URLs?
    if course_downloader.failed_urls:
        print_failed_urls(course_downloader.failed_urls)

    return error_occurred, completed
python
def key(self, direction, mechanism, purviews=False, _prefix=None): """Cache key. This is the call signature of |Subsystem.find_mice()|.""" return (_prefix, direction, mechanism, purviews)
java
public DescribeClassicLinkInstancesResult withInstances(ClassicLinkInstance... instances) { if (this.instances == null) { setInstances(new com.amazonaws.internal.SdkInternalList<ClassicLinkInstance>(instances.length)); } for (ClassicLinkInstance ele : instances) { this.instances.add(ele); } return this; }
python
def db_temp_from_wb_rh(wet_bulb, rel_humid, b_press=101325):
    """Dry bulb temperature (C) and humidity ratio at wet_bulb (C),
    rel_humid (%) and pressure b_press (Pa).

    Formula is only valid for rel_humid == 0 or rel_humid == 100.
    """
    assert rel_humid == 0 or rel_humid == 100, 'formula is only valid for' \
        ' rel_humid == 0 or rel_humid == 100'
    humidity_ratio = humid_ratio_from_db_rh(wet_bulb, rel_humid, b_press)
    hr_saturation = humid_ratio_from_db_rh(wet_bulb, 100, b_press)
    db_temp = wet_bulb + (((hr_saturation - humidity_ratio) * 2260000) /
                          1005)
    return db_temp, humidity_ratio
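As a quick sanity check (assuming humid_ratio_from_db_rh is importable from the same module): at rel_humid == 100 the two humidity ratios coincide, so the latent-heat term vanishes and the returned dry bulb equals the wet bulb.

# Sanity-check sketch for saturated air.
db, hr = db_temp_from_wb_rh(20.0, 100)
assert abs(db - 20.0) < 1e-9  # hr_saturation == humidity_ratio, so db == wet_bulb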
python
def validateExtractOptions(options):
    ''' Check the validity of the option combinations for barcode extraction'''

    if not options.pattern and not options.pattern2:
        if not options.read2_in:
            U.error("Must supply --bc-pattern for single-end")
        else:
            U.error("Must supply --bc-pattern and/or --bc-pattern2 "
                    "if paired-end")

    if options.pattern2:
        if not options.read2_in:
            U.error("must specify a paired fastq ``--read2-in``")

    if not options.pattern2:
        options.pattern2 = options.pattern

    extract_cell = False
    extract_umi = False

    # If the pattern is a regex we can compile the regex(es) prior to
    # ExtractFilterAndUpdate instantiation
    if options.extract_method == "regex":
        if options.pattern:
            try:
                options.pattern = regex.compile(options.pattern)
            except regex.error:
                U.error("--bc-pattern '%s' is not a "
                        "valid regex" % options.pattern)
        if options.pattern2:
            try:
                options.pattern2 = regex.compile(options.pattern2)
            except regex.error:
                U.error("--bc-pattern2 '%s' is not a "
                        "valid regex" % options.pattern2)

    # check whether the regex(es) contain any umi and/or cell group(s)
    if options.extract_method == "regex":
        if options.pattern:
            for group in options.pattern.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True
        if options.pattern2:
            for group in options.pattern2.groupindex:
                if group.startswith("cell_"):
                    extract_cell = True
                elif group.startswith("umi_"):
                    extract_umi = True

    # check whether the pattern string contains umi/cell bases
    elif options.extract_method == "string":
        if options.pattern:
            if "C" in options.pattern:
                extract_cell = True
            if "N" in options.pattern:
                extract_umi = True
        if options.pattern2:
            if "C" in options.pattern2:
                extract_cell = True
            if "N" in options.pattern2:
                extract_umi = True

    if not extract_umi:
        if options.extract_method == "string":
            U.error("barcode pattern(s) do not include any umi bases "
                    "(marked with 'Ns') %s, %s" % (
                        options.pattern, options.pattern2))
        elif options.extract_method == "regex":
            U.error("barcode regex(es) do not include any umi groups "
                    "(starting with 'umi_') %s, %s" % (
                        options.pattern, options.pattern2))

    return extract_cell, extract_umi
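For reference, here are patterns that would set both extract_cell and extract_umi; the barcode lengths are illustrative only:

# String mode: 'C' marks cell-barcode bases, 'N' marks UMI bases.
string_pattern = 'CCCCCCCCNNNNNNNN'  # 8 cell bases followed by 8 UMI bases
# Regex mode: named groups must start with 'cell_' or 'umi_'.
regex_pattern = '(?P<cell_1>.{8})(?P<umi_1>.{8})'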
java
@SuppressWarnings("unchecked") public <T> BehaviorTree<T> createBehaviorTree (String treeReference, T blackboard) { BehaviorTree<T> bt = (BehaviorTree<T>)retrieveArchetypeTree(treeReference).cloneTask(); bt.setObject(blackboard); return bt; }
python
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False, item_show_func=None,
                fill_char='#', empty_char='-',
                bar_template='%(label)s [%(bar)s] %(info)s',
                info_sep=' ', width=36, file=None, color=None):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar.  It will
    either iterate over the `iterable` or `length` items (that are counted
    up).  While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more.  By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar.  When the context
    manager is entered the progress bar is already displayed.  With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated.  When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update
    the progress bar through the `update()` method instead of directly
    iterating over the progress bar.  The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter.  Added an `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over.  If not provided the
                     length is required.
    :param length: the number of items to iterate over.  By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work.  If an iterable
                   is also provided this parameter can be used to override
                   the length.  If an iterable is not provided the progress
                   bar will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display.  This
                     is automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display.  The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display.
                     The default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar.  Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means
                  full terminal width
    :param file: the file to write to.  If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not.
                  The default is autodetection.  This is only needed if
                  ANSI codes are included anywhere in the progress bar
                  output which is not the case by default.
    """
    from ._termui_impl import ProgressBar
    color = resolve_color_default(color)
    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
                       show_percent=show_percent, show_pos=show_pos,
                       item_show_func=item_show_func, fill_char=fill_char,
                       empty_char=empty_char, bar_template=bar_template,
                       info_sep=info_sep, file=file, label=label,
                       width=width, color=color)
python
def parse_numpy_doc(doc): """ Extract the text from the various sections of a numpy-formatted docstring. Parameters ---------- doc: Union[str, None] Returns ------- OrderedDict[str, Union[None,str]] The extracted numpy-styled docstring sections.""" doc_sections = OrderedDict([("Short Summary", None), ("Deprecation Warning", None), ("Attributes", None), ("Extended Summary", None), ("Parameters", None), ("Returns", None), ("Yields", None), ("Other Parameters", None), ("Raises", None), ("See Also", None), ("Notes", None), ("References", None), ("Examples", None)]) if not doc: return doc_sections doc = cleandoc(doc) lines = iter(doc.splitlines()) key = "Short Summary" body = [] while True: try: line = next(lines).rstrip() if line in doc_sections: doc_sections[key] = "\n".join(body).rstrip() if body else None body = [] key = line next(lines) # skip section delimiter else: body.append(line) except StopIteration: doc_sections[key] = "\n".join(body) break return doc_sections
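A small sketch of the section splitting on a made-up numpy-style docstring:

# Hypothetical input; headers are recognized by name, and the dashed
# underline after each header is skipped by the parser.
sections = parse_numpy_doc(
    "Add two numbers.\n\n"
    "Parameters\n----------\na : int\nb : int\n\n"
    "Returns\n-------\nint"
)
# sections["Short Summary"] == "Add two numbers."
# sections["Parameters"]    == "a : int\nb : int"
# sections["Returns"]       == "int"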
java
private BeanDefinitionBuilder parseSqlQueryAction(Element element, Element scriptValidationElement, List<Element> validateElements, List<Element> extractElements) { BeanDefinitionBuilder beanDefinition = BeanDefinitionBuilder.rootBeanDefinition(ExecuteSQLQueryAction.class); // check for script validation if (scriptValidationElement != null) { beanDefinition.addPropertyValue("scriptValidationContext", getScriptValidationContext(scriptValidationElement)); } Map<String, List<String>> controlResultSet = new HashMap<String, List<String>>(); for (Iterator<?> iter = validateElements.iterator(); iter.hasNext();) { Element validateElement = (Element) iter.next(); Element valueListElement = DomUtils.getChildElementByTagName(validateElement, "values"); if (valueListElement != null) { List<String> valueList = new ArrayList<String>(); List<?> valueElements = DomUtils.getChildElementsByTagName(valueListElement, "value"); for (Iterator<?> valueElementsIt = valueElements.iterator(); valueElementsIt.hasNext();) { Element valueElement = (Element) valueElementsIt.next(); valueList.add(DomUtils.getTextValue(valueElement)); } controlResultSet.put(validateElement.getAttribute("column"), valueList); } else if (validateElement.hasAttribute("value")) { controlResultSet.put(validateElement.getAttribute("column"), Collections.singletonList(validateElement.getAttribute("value"))); } else { throw new BeanCreationException(element.getLocalName(), "Neither value attribute nor value list is set for column validation: " + validateElement.getAttribute("column")); } } beanDefinition.addPropertyValue("controlResultSet", controlResultSet); Map<String, String> extractVariables = new HashMap<String, String>(); for (Iterator<?> iter = extractElements.iterator(); iter.hasNext();) { Element validate = (Element) iter.next(); extractVariables.put(validate.getAttribute("column"), validate.getAttribute("variable")); } beanDefinition.addPropertyValue("extractVariables", extractVariables); return beanDefinition; }
python
def get_activities_by_objective_banks(self, objective_bank_ids): """Gets the list of ``Activities`` corresponding to a list of ``ObjectiveBanks``. arg: objective_bank_ids (osid.id.IdList): list of objective bank ``Ids`` return: (osid.learning.ActivityList) - list of activities raise: NullArgument - ``objective_bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bins activity_list = [] for objective_bank_id in objective_bank_ids: activity_list += list( self.get_activities_by_objective_bank(objective_bank_id)) return objects.ActivityList(activity_list)
python
def bbox_vert_aligned_left(box1, box2): """ Returns true if the left boundary of both boxes is within 2 pts """ if not (box1 and box2): return False return abs(box1.left - box2.left) <= 2
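A minimal illustration; Box is a hypothetical stand-in for whatever bounding-box type supplies the left attribute:

from collections import namedtuple

Box = namedtuple('Box', ['left'])  # stand-in bounding-box type
assert bbox_vert_aligned_left(Box(left=10.0), Box(left=11.5))      # 1.5 pts apart
assert not bbox_vert_aligned_left(Box(left=10.0), Box(left=13.0))  # 3 pts apart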
python
def sample_assignment_probs(qubits, nsamples, cxn): """ Sample the assignment probabilities of qubits using nsamples per measurement, and then compute the estimated assignment probability matrix. See the docstring for estimate_assignment_probs for more information. :param list qubits: Qubits to sample the assignment probabilities for. :param int nsamples: The number of samples to use in each measurement. :param QPUConnection|QVMConnection cxn: The Connection object to connect to Forest. :return: The assignment probability matrix. :rtype: numpy.ndarray """ num_qubits = len(qubits) dimension = 2 ** num_qubits hists = [] preps = basis_state_preps(*qubits) jobs = [] _log.info('Submitting jobs...') for jj, p in izip(TRANGE(dimension), preps): jobs.append(cxn.run_and_measure_async(p, qubits, nsamples)) _log.info('Waiting for results...') for jj, job_id in izip(TRANGE(dimension), jobs): job = cxn.wait_for_job(job_id) results = job.result() idxs = list(map(bitlist_to_int, results)) hists.append(make_histogram(idxs, dimension)) return estimate_assignment_probs(hists)
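An illustrative call pattern, left commented because it needs a live Forest connection; the qubit indices are arbitrary and the connection class reflects typical pyquil usage:

# Sketch only; obtaining a connection is environment-specific.
# from pyquil.api import QVMConnection
# cxn = QVMConnection()
# probs = sample_assignment_probs([0, 1], 10000, cxn)
# probs is a 4x4 matrix here (dimension 2**n per axis for n qubits)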
python
def sce2c(sc, et): """ Convert ephemeris seconds past J2000 (ET) to continuous encoded spacecraft clock "ticks". Non-integral tick values may be returned. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sce2c_c.html :param sc: NAIF spacecraft ID code. :type sc: int :param et: Ephemeris time, seconds past J2000. :type et: float :return: SCLK, encoded as ticks since spacecraft clock start. sclkdp need not be integral. :rtype: float """ sc = ctypes.c_int(sc) et = ctypes.c_double(et) sclkdp = ctypes.c_double() libspice.sce2c_c(sc, et, ctypes.byref(sclkdp)) return sclkdp.value
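A typical call shape, left commented because sce2c needs the spacecraft's SCLK kernel and a leapseconds kernel furnished beforehand; the ID -82 (Cassini) is used purely for illustration:

# Sketch only; furnish the required kernels first (e.g. via furnsh).
# ticks = sce2c(-82, 0.0)  # encoded SCLK ticks for Cassini at J2000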