Dataset schema:
  language: stringclasses (2 values: python, java)
  func_code_string: stringlengths (63 to 466k characters)
python
def show(ctx, component):
    """Show the stored, active configuration of a component."""
    col = ctx.obj['col']

    if col.count({'name': component}) > 1:
        log('More than one component configuration of this name! Try '
            'one of the uuids as argument. Get a list with "config '
            'list"')
        return

    if component is None:
        configurations = col.find()
        for configuration in configurations:
            log("%-15s : %s" % (configuration.name, configuration.uuid),
                emitter='MANAGE')
    else:
        configuration = col.find_one({'name': component})
        if configuration is None:
            configuration = col.find_one({'uuid': component})
        if configuration is None:
            log('No component with that name or uuid found.')
            return
        print(json.dumps(configuration.serializablefields(), indent=4))
java
public List<EventSubscriptionInner> listRegionalBySubscriptionForTopicType(String location, String topicTypeName) {
    return listRegionalBySubscriptionForTopicTypeWithServiceResponseAsync(location, topicTypeName).toBlocking().single().body();
}
python
def _get_config_instance(group_or_term, session, **kwargs):
    """ Finds appropriate config instance and returns it.

    Args:
        group_or_term (Group or Term):
        session (Sqlalchemy session):
        kwargs (dict): kwargs to pass to get_or_create.

    Returns:
        tuple of (Config, bool):
    """
    path = group_or_term._get_path()
    cached = group_or_term._top._cached_configs.get(path)

    if cached:
        config = cached
        created = False
    else:
        # does not exist or not yet cached
        config, created = get_or_create(session, Config, **kwargs)

    return config, created
java
public static Expectations jwtCookieExists(String testAction, WebClient webClient, String jwtCookieName) {
    Expectations expectations = new Expectations();
    expectations.addExpectation(new CookieExpectation(testAction, webClient, jwtCookieName, JwtFatConstants.JWT_REGEX,
                                                      JwtFatConstants.NOT_SECURE, JwtFatConstants.HTTPONLY));
    return expectations;
}
python
def center_crop(im, min_sz=None):
    """ Return a center crop of an image """
    r, c, *_ = im.shape
    if min_sz is None:
        min_sz = min(r, c)
    start_r = math.ceil((r - min_sz) / 2)
    start_c = math.ceil((c - min_sz) / 2)
    return crop(im, start_r, start_c, min_sz)
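For illustration, a minimal usage sketch (an editor addition, not part of the source): it assumes the center_crop above is in scope alongside numpy and math, and supplies an obvious slicing crop helper whose name is taken from the call above.

import math
import numpy as np

def crop(im, start_r, start_c, sz):
    # assumed helper: slice a square window out of the image
    return im[start_r:start_r + sz, start_c:start_c + sz]

im = np.zeros((100, 150, 3))   # a 100x150 RGB image
out = center_crop(im)          # min side is 100, so a 100x100 crop
print(out.shape)               # (100, 100, 3)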
java
public int[] getChildIndexes(int index) {
    if (index == 0) {
        if (isConstant()) {
            return new int[0];
        } else {
            int[] result = new int[subexpressions.size()];
            int startIndex = 1;
            for (int i = 0; i < subexpressions.size(); i++) {
                result[i] = startIndex;
                startIndex += subexpressions.get(i).size();
            }
            return result;
        }
    } else {
        int[] parts = findSubexpression(index);
        int[] result = subexpressions.get(parts[0]).getChildIndexes(parts[1]);
        for (int i = 0; i < result.length; i++) {
            result[i] += parts[2];
        }
        return result;
    }
}
python
def send_all_waypoints(self):
    '''send all waypoints to vehicle'''
    self.master.waypoint_clear_all_send()
    if self.wploader.count() == 0:
        return
    self.loading_waypoints = True
    self.loading_waypoint_lasttime = time.time()
    self.master.waypoint_count_send(self.wploader.count())
java
public alluxio.grpc.CancelPOptions getOptions() {
    return options_ == null ? alluxio.grpc.CancelPOptions.getDefaultInstance() : options_;
}
python
def _download_project(self, activity, project, temp_directory, path_filter):
    """
    Download the project with project_name to temp_directory.
    :param activity: CopyActivity: info about the copy activity we are downloading for
    :param project: remotestore.RemoteProject project to download
    :param temp_directory: str path to directory we can download into
    :param path_filter: PathFilter: filters what files are shared
    """
    self.print_func("Downloading a copy of '{}'.".format(project.name))
    project_download = ProjectDownload(self.remote_store, project, temp_directory, path_filter,
                                       file_download_pre_processor=DownloadedFileRelations(activity))
    project_download.run()
java
private int beatOffset(int beatNumber) {
    if (beatCount == 0) {
        throw new IllegalStateException("There are no beats in this beat grid.");
    }
    if (beatNumber < 1 || beatNumber > beatCount) {
        throw new IndexOutOfBoundsException("beatNumber (" + beatNumber + ") must be between 1 and " + beatCount);
    }
    return beatNumber - 1;
}
java
public Search search(String partitionKey, String searchIndexId) {
    return new Search(client, this, partitionKey, searchIndexId);
}
java
protected void addEnumList(String key, List<? extends Enum> list) {
    optionsMap.put(key, list.stream().map(i -> i.toString()).collect(Collectors.toList()));
}
java
public static FirmwareVersionInfo getWatchFWVersion(final Context context) {
    Cursor c = null;
    try {
        c = queryProvider(context);
        if (c == null || !c.moveToNext()) {
            return null;
        }
        int majorVersion = c.getInt(KIT_STATE_COLUMN_VERSION_MAJOR);
        int minorVersion = c.getInt(KIT_STATE_COLUMN_VERSION_MINOR);
        int pointVersion = c.getInt(KIT_STATE_COLUMN_VERSION_POINT);
        String versionTag = c.getString(KIT_STATE_COLUMN_VERSION_TAG);
        return new FirmwareVersionInfo(majorVersion, minorVersion, pointVersion, versionTag);
    } finally {
        if (c != null) {
            c.close();
        }
    }
}
python
def set_dimmer(self, dimmer):
    """Set final dimmer value for task."""
    command = {
        ATTR_START_ACTION: {
            ATTR_DEVICE_STATE: self.state,
            ROOT_START_ACTION: [{
                ATTR_ID: self.raw[ATTR_ID],
                ATTR_LIGHT_DIMMER: dimmer,
                ATTR_TRANSITION_TIME: self.raw[ATTR_TRANSITION_TIME]
            }, self.devices_dict]
        }
    }
    return self.set_values(command)
java
public Deserializer getDeserializer(Class cl) throws HessianProtocolException {
    if (ObjectName.class.equals(cl)) {
        return new StringValueDeserializer(cl);
    } else if (ObjectInstance.class.equals(cl)) {
        return new ObjectInstanceDeserializer();
    } else if (MBeanAttributeInfo.class.isAssignableFrom(cl)) {
        return new MBeanAttributeInfoDeserializer();
    } else if (MBeanConstructorInfo.class.isAssignableFrom(cl)) {
        return new MBeanConstructorInfoDeserializer();
    } else if (MBeanOperationInfo.class.isAssignableFrom(cl)) {
        return new MBeanOperationInfoDeserializer();
    } else if (MBeanParameterInfo.class.isAssignableFrom(cl)) {
        return new MBeanParameterInfoDeserializer();
    } else if (MBeanNotificationInfo.class.isAssignableFrom(cl)) {
        return new MBeanNotificationInfoDeserializer();
    }
    /* else if (MBeanInfo.class.equals(cl)) {
        return new MBeanInfoDeserializer();
    } */
    return null;
}
java
private void addActivationHandler() {
    if (m_activationHandlerRegistration == null) {
        m_activationHandlerRegistration = addMouseDownHandler(new MouseDownHandler() {
            public void onMouseDown(MouseDownEvent event) {
                // only act on click if not inside the button bar
                if (!m_buttonBar.getElement().isOrHasChild((Node)event.getNativeEvent().getEventTarget().cast())) {
                    activateWidget();
                }
            }
        });
    }
}
java
public static int getPageCount(int size, int pageSize, int page) {
    int totalPage = size / pageSize;
    if (size > pageSize * totalPage) {
        totalPage += 1;
    }
    return totalPage;
}
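The ceiling-division logic above can be sanity-checked with a short Python sketch (an editor addition; the unused page parameter is dropped here):

def get_page_count(size, page_size):
    # integer division, rounded up when a partial page remains
    total = size // page_size
    if size > page_size * total:
        total += 1
    return total

assert get_page_count(10, 5) == 2   # exact fit
assert get_page_count(11, 5) == 3   # one extra partial page
assert get_page_count(0, 5) == 0    # empty input yields zero pages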
python
def _split_ns_by_scatter(cls, shard_count, namespace, raw_entity_kind, filters, app):
  """Split a namespace by scatter index into key_range.KeyRange.

  TODO(user): Power this with key_range.KeyRange.compute_split_points.

  Args:
    shard_count: number of shards.
    namespace: namespace name to split. str.
    raw_entity_kind: low level datastore API entity kind.
    filters: optional list of (property, operator, value) filter tuples.
    app: app id in str.

  Returns:
    A list of key_range.KeyRange objects. If there are not enough entities to
    split into requested shards, the returned list will contain KeyRanges
    ordered lexicographically with any Nones appearing at the end.
  """
  if shard_count == 1:
    # With one shard we don't need to calculate any split points at all.
    return [key_range.KeyRange(namespace=namespace, _app=app)]

  ds_query = datastore.Query(kind=raw_entity_kind,
                             namespace=namespace,
                             _app=app,
                             keys_only=True)
  ds_query.Order("__scatter__")
  oversampling_factor = 32
  random_keys = None
  if filters:
    ds_query_with_filters = copy.copy(ds_query)
    for (key, op, value) in filters:
      ds_query_with_filters.update({'%s %s' % (key, op): value})
    try:
      random_keys = ds_query_with_filters.Get(shard_count * oversampling_factor)
    except db.NeedIndexError, why:
      logging.warning('Need to add an index for optimal mapreduce-input'
                      ' splitting:\n%s' % why)
      # We'll try again without the filter. We hope the filter
      # will filter keys uniformly across the key-name space!

  if not random_keys:
    random_keys = ds_query.Get(shard_count * oversampling_factor)

  if not random_keys:
    # There are no entities with scatter property. We have no idea
    # how to split.
    return ([key_range.KeyRange(namespace=namespace, _app=app)] +
            [None] * (shard_count - 1))

  random_keys.sort()

  if len(random_keys) >= shard_count:
    # We've got a lot of scatter values. Sample them down.
    random_keys = cls._choose_split_points(random_keys, shard_count)

  k_ranges = []

  k_ranges.append(key_range.KeyRange(
      key_start=None,
      key_end=random_keys[0],
      direction=key_range.KeyRange.ASC,
      include_start=False,
      include_end=False,
      namespace=namespace,
      _app=app))

  for i in range(0, len(random_keys) - 1):
    k_ranges.append(key_range.KeyRange(
        key_start=random_keys[i],
        key_end=random_keys[i + 1],
        direction=key_range.KeyRange.ASC,
        include_start=True,
        include_end=False,
        namespace=namespace,
        _app=app))

  k_ranges.append(key_range.KeyRange(
      key_start=random_keys[-1],
      key_end=None,
      direction=key_range.KeyRange.ASC,
      include_start=True,
      include_end=False,
      namespace=namespace,
      _app=app))

  if len(k_ranges) < shard_count:
    # We need to have as many shards as it was requested. Add some Nones.
    k_ranges += [None] * (shard_count - len(k_ranges))
  return k_ranges
java
public final void ruleXNumberLiteral() throws RecognitionException {
    int stackSize = keepStackSize();
    try {
        // InternalXbase.g:1346:2: ( ( ( rule__XNumberLiteral__Group__0 ) ) )
        // InternalXbase.g:1347:2: ( ( rule__XNumberLiteral__Group__0 ) )
        {
            // InternalXbase.g:1347:2: ( ( rule__XNumberLiteral__Group__0 ) )
            // InternalXbase.g:1348:3: ( rule__XNumberLiteral__Group__0 )
            {
                if ( state.backtracking==0 ) {
                    before(grammarAccess.getXNumberLiteralAccess().getGroup());
                }
                // InternalXbase.g:1349:3: ( rule__XNumberLiteral__Group__0 )
                // InternalXbase.g:1349:4: rule__XNumberLiteral__Group__0
                {
                    pushFollow(FOLLOW_2);
                    rule__XNumberLiteral__Group__0();
                    state._fsp--;
                    if (state.failed) return;
                }
                if ( state.backtracking==0 ) {
                    after(grammarAccess.getXNumberLiteralAccess().getGroup());
                }
            }
        }
    } catch (RecognitionException re) {
        reportError(re);
        recover(input, re);
    } finally {
        restoreStackSize(stackSize);
    }
    return;
}
java
public static RecoveryDirector recoveryDirector() throws InternalLogException {
    if (tc.isEntryEnabled())
        Tr.entry(tc, "recoveryDirector");

    // If the recovery director is null it's an error in JET
    if (_recoveryDirector == null) {
        final InternalLogException ile = new InternalLogException();
        if (tc.isEntryEnabled())
            Tr.exit(tc, "recoveryDirector", ile);
        throw ile;
    }

    if (tc.isEntryEnabled())
        Tr.exit(tc, "recoveryDirector", _recoveryDirector);
    return _recoveryDirector;
}
java
public Type asSuper(Type t, Symbol sym) {
    /* Some examples:
     *
     * (Enum<E>, Comparable) => Comparable<E>
     * (c.s.s.d.AttributeTree.ValueKind, Enum) => Enum<c.s.s.d.AttributeTree.ValueKind>
     * (c.s.s.t.ExpressionTree, c.s.s.t.Tree) => c.s.s.t.Tree
     * (j.u.List<capture#160 of ? extends c.s.s.d.DocTree>, Iterable) =>
     *     Iterable<capture#160 of ? extends c.s.s.d.DocTree>
     */
    if (sym.type == syms.objectType) { //optimization
        return syms.objectType;
    }
    return asSuper.visit(t, sym);
}
java
public WriteResult<T, K> updateById(K id, DBUpdate.Builder update) throws MongoException {
    return this.update(createIdQuery(id), update.serialiseAndGet(objectMapper));
}
java
public void setLogFileSize(long newSize) throws ObjectManagerException {
    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.entry(this, cclass, "setLogFileSize");

    objectManagerState.logOutput.setLogFileSize(newSize);

    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.exit(this, cclass, "setLogFileSize");
}
java
public Matrix4x3f rotateTowards(float dirX, float dirY, float dirZ, float upX, float upY, float upZ) {
    return rotateTowards(dirX, dirY, dirZ, upX, upY, upZ, this);
}
java
private void writeHeader(ESRIBounds box, ShapeElementType type, Collection<? extends E> elements) throws IOException {
    if (!this.headerWasWritten) {
        initializeContentBuffer();
        box.ensureMinMax();
        //Byte 0 : File Code (9994)
        writeBEInt(SHAPE_FILE_CODE);
        //Byte 4 : Unused (0)
        writeBEInt(0);
        //Byte 8 : Unused (0)
        writeBEInt(0);
        //Byte 12 : Unused (0)
        writeBEInt(0);
        //Byte 16 : Unused (0)
        writeBEInt(0);
        //Byte 20 : Unused (0)
        writeBEInt(0);
        //Byte 24 : File Length, fill later
        writeBEInt(0);
        //Byte 28 : Version(1000)
        writeLEInt(SHAPE_FILE_VERSION);
        //Byte 32 : ShapeType
        writeLEInt(type.shapeType);
        //Byte 36 : Xmin
        writeLEDouble(toESRI_x(box.getMinX()));
        //Byte 44 : Ymin
        writeLEDouble(toESRI_y(box.getMinY()));
        //Byte 52 : Xmax
        writeLEDouble(toESRI_x(box.getMaxX()));
        //Byte 60 : Ymax
        writeLEDouble(toESRI_y(box.getMaxY()));
        //Byte 68 : Zmin
        writeLEDouble(toESRI_z(box.getMinZ()));
        //Byte 76 : Zmax
        writeLEDouble(toESRI_z(box.getMaxZ()));
        //Byte 84 : Mmin
        writeLEDouble(toESRI_m(box.getMinM()));
        //Byte 92 : Mmax
        writeLEDouble(toESRI_m(box.getMaxM()));
        this.headerWasWritten = true;
        this.recordIndex = 0;
        onHeaderWritten(box, type, elements);
    }
}
python
def ResolveForCreate(self, document):
    """Resolves the collection for creating the document based on the partition key.

    :param dict document:
        The document to be created.
    :return:
        Collection Self link or Name based link which should handle the Create operation.
    :rtype:
        str
    """
    if document is None:
        raise ValueError("document is None.")

    partition_key = self.partition_key_extractor(document)
    containing_range = self._GetContainingRange(partition_key)

    if containing_range is None:
        raise ValueError("A containing range for " + str(partition_key) +
                         " doesn't exist in the partition map.")

    return self.partition_map.get(containing_range)
java
public ResultList<MovieBasic> getFavoriteMovies(String sessionId, int accountId) throws MovieDbException {
    return tmdbAccount.getFavoriteMovies(sessionId, accountId);
}
java
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
    conf.setIfUnset("hbase.client.connection.impl", BigtableConfiguration.getConnectionClass().getName());
    conf.setIfUnset(BigtableOptionsFactory.BIGTABLE_RPC_TIMEOUT_MS_KEY, "60000");
    TableName tableName = TableName.valueOf(args[0]);
    conf.set(TABLE_NAME, tableName.getNameAsString());
    Path inputDir = new Path(args[1]);
    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
    job.setJarByClass(Importer.class);
    FileInputFormat.setInputPaths(job, inputDir);

    // Randomize the splits to avoid hot spotting a single tablet server
    job.setInputFormatClass(ShuffledSequenceFileInputFormat.class);
    // Give the mappers enough work to do otherwise each split will be dominated by spinup time
    ShuffledSequenceFileInputFormat.setMinInputSplitSize(job, 1L * 1024 * 1024 * 1024);

    String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);

    // make sure we get the filter in the jars
    try {
        Class<? extends Filter> filter = conf.getClass(FILTER_CLASS_CONF_KEY, null, Filter.class);
        if (filter != null) {
            TableMapReduceUtil.addDependencyJars(conf, filter);
        }
    } catch (Exception e) {
        throw new IOException(e);
    }

    if (hfileOutPath != null) {
        job.setMapperClass(KeyValueImporter.class);
        try (Connection conn = ConnectionFactory.createConnection(conf);
                Table table = conn.getTable(tableName);
                RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
            job.setReducerClass(KeyValueSortReducer.class);
            Path outputDir = new Path(hfileOutPath);
            FileOutputFormat.setOutputPath(job, outputDir);
            job.setMapOutputKeyClass(ImmutableBytesWritable.class);
            job.setMapOutputValueClass(KeyValue.class);
            HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);
            TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
                    com.google.common.base.Preconditions.class);
        }
    } else {
        // No reducers. Just write straight to table. Call initTableReducerJob
        // because it sets up the TableOutputFormat.
        job.setMapperClass(Importer.class);
        //TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job);
        TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job, null, null, null, null, false);
        job.setNumReduceTasks(0);
    }
    return job;
}
java
public LabInner update(String resourceGroupName, String labAccountName, String labName, LabFragment lab) {
    return updateWithServiceResponseAsync(resourceGroupName, labAccountName, labName, lab).toBlocking().single().body();
}
java
public static long decodeLong(byte[] src, int srcOffset) throws CorruptEncodingException {
    try {
        return ((((long) (((src[srcOffset]) << 24) |
                          ((src[srcOffset + 1] & 0xff) << 16) |
                          ((src[srcOffset + 2] & 0xff) << 8) |
                          ((src[srcOffset + 3] & 0xff)))) ^ 0x80000000) << 32)
                | (((long) (((src[srcOffset + 4]) << 24) |
                            ((src[srcOffset + 5] & 0xff) << 16) |
                            ((src[srcOffset + 6] & 0xff) << 8) |
                            ((src[srcOffset + 7] & 0xff)))) & 0xffffffffL);
    } catch (IndexOutOfBoundsException e) {
        throw new CorruptEncodingException(null, e);
    }
}
java
public TextObject append(Reader reader) throws IOException {
    char[] buf = new char[1024];
    while (reader.ready() && (maxTextLength == 0 || stringBuilder.length() < maxTextLength)) {
        int length = reader.read(buf);
        append(String.valueOf(buf, 0, length));
    }
    return this;
}
python
def remove_brackets(name):
    """Removes brackets from input

    :param name: path to fix
    :return: input with no brackets
    """
    name = re.sub(
        r"([(\[]).*?([)\]])", r"\g<1>\g<2>", name
    )  # remove anything in between brackets
    brackets = "()[]{}"  # list of brackets
    for bracket in brackets:
        name = name.replace(bracket, "")
    return name
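A self-contained trace of the two-step behaviour (restating the function so the snippet runs on its own): the regex first empties ()- and []-delimited spans, then all bracket characters are stripped, so {}-delimited content survives with only its braces removed.

import re

def remove_brackets(name):
    name = re.sub(r"([(\[]).*?([)\]])", r"\g<1>\g<2>", name)
    for bracket in "()[]{}":
        name = name.replace(bracket, "")
    return name

print(remove_brackets("movie (2019) [x264].mkv"))  # 'movie  .mkv'
print(remove_brackets("{draft} notes"))            # 'draft notes'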
java
public Observable<ServiceResponse<SubscriptionMediaServiceInner>> getBySubscriptionWithServiceResponseAsync(String accountName) {
    if (this.client.subscriptionId() == null) {
        throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
    }
    if (accountName == null) {
        throw new IllegalArgumentException("Parameter accountName is required and cannot be null.");
    }
    if (this.client.apiVersion() == null) {
        throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
    }
    return service.getBySubscription(this.client.subscriptionId(), accountName, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
        .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<SubscriptionMediaServiceInner>>>() {
            @Override
            public Observable<ServiceResponse<SubscriptionMediaServiceInner>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<SubscriptionMediaServiceInner> clientResponse = getBySubscriptionDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
}
java
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "operationParameterGroupRef") public JAXBElement<OperationParameterRefType> createOperationParameterGroupRef(OperationParameterRefType value) { return new JAXBElement<OperationParameterRefType>(_OperationParameterGroupRef_QNAME, OperationParameterRefType.class, null, value); }
java
private synchronized void updateRecordsWrittenMeter() {
    if (this.recordsWrittenMeter.isPresent()) {
        this.recordsWrittenMeter.get().mark(recordsWritten() - this.recordsWrittenMeter.get().getCount());
    }
}
python
def generate_method_deprecation_message(to_be_removed_in_version, old_method_name,
                                        method_name=None, module_name=None):
    """Generate a message to be used when warning about the use of deprecated methods.

    :param to_be_removed_in_version: Version of this module the deprecated method will be removed in.
    :type to_be_removed_in_version: str
    :param old_method_name: Deprecated method name.
    :type old_method_name: str
    :param method_name: Method intended to replace the deprecated method indicated. This method's
        docstrings are included in the decorated method's docstring.
    :type method_name: str
    :param module_name: Name of the module containing the new method to use.
    :type module_name: str
    :return: Full deprecation warning message for the indicated method.
    :rtype: str
    """
    message = "Call to deprecated function '{old_method_name}'. This method will be removed in version '{version}'".format(
        old_method_name=old_method_name,
        version=to_be_removed_in_version,
    )
    if method_name is not None and module_name is not None:
        message += " Please use the '{method_name}' method on the '{module_name}' class moving forward.".format(
            method_name=method_name,
            module_name=module_name,
        )
    return message
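Calling the generator (assumed in scope; the method and module names below are hypothetical) produces messages like:

msg = generate_method_deprecation_message(
    to_be_removed_in_version='2.0.0',
    old_method_name='read_secret',
    method_name='read',
    module_name='KvV1',
)
print(msg)
# Call to deprecated function 'read_secret'. This method will be removed in
# version '2.0.0' Please use the 'read' method on the 'KvV1' class moving forward.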
java
public Version<Attributes<T>> getOrCreateVersion() {
    List<Node> nodeList = childNode.get("version");
    if (nodeList != null && nodeList.size() > 0) {
        return new VersionImpl<Attributes<T>>(this, "version", childNode, nodeList.get(0));
    }
    return createVersion();
}
python
def _load_recursive(self, shape, gen):
    """Recursively create a multidimensional array (as lists of lists)
    from a bit generator.
    """
    if len(shape) > 0:
        ans = []
        for i in range(shape[0]):
            ans.append(self._load_recursive(shape[1:], gen))
    else:
        fields = []
        for code, length in self.format:
            field = None
            raw = gen.send(length)
            if code == 'u':
                field = raw
            elif code == 'i':
                field = raw
                # Interpret as 2's complement
                if field >= 1 << (length - 1):
                    field -= 1 << length
            elif code == 'b':
                field = bool(raw)
            elif code == 'c':
                field = six.int2byte(raw)
            elif code == 'f':
                if length == 32:
                    field = _np.uint32(raw).view(_np.float32)
                elif length == 64:
                    field = _np.uint64(raw).view(_np.float64)
                else:
                    raise ValueError('unhandled float length {0}'.format((code, length)))
            else:
                raise ValueError('unhandled format {0}'.format((code, length)))
            fields.append(field)
        if len(fields) == 1:
            ans = fields[0]
        else:
            ans = tuple(fields)
    return ans
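The 'i' branch above is plain two's-complement sign extension; in isolation it behaves like this (standalone sketch, an editor addition):

def sign_extend(raw, length):
    # interpret an unsigned 'length'-bit value as a signed integer
    if raw >= 1 << (length - 1):
        raw -= 1 << length
    return raw

assert sign_extend(0xFF, 8) == -1
assert sign_extend(0x7F, 8) == 127
assert sign_extend(0x8000, 16) == -32768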
python
def io_size_kb(prev, curr, counters):
    """ calculate the io size based on bandwidth and throughput

    formula: average_io_size = bandwidth / throughput

    :param prev: prev resource, not used
    :param curr: current resource
    :param counters: two stats, bandwidth in MB and throughput count
    :return: value, NaN if invalid
    """
    bw_stats, io_stats = counters
    size_mb = div(getattr(curr, bw_stats), getattr(curr, io_stats))
    return mul(size_mb, 1024)
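A self-contained sketch of the formula with stand-in helpers: the real div presumably guards against division by zero, and the attribute names here are invented for the demonstration.

from types import SimpleNamespace

def div(a, b):
    # assumed helper: NaN-safe division
    return float('nan') if not b else a / b

def mul(a, b):
    return a * b

curr = SimpleNamespace(read_mb=512.0, read_iops=4096.0)  # hypothetical resource
size_mb = div(curr.read_mb, curr.read_iops)              # average MB per IO
print(mul(size_mb, 1024))                                # 128.0 KB per IO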
python
def mousePress(self, button):
    """ Send a mouse click at the last set position

        button: int: [1-n]
    """
    log.debug('mousePress %s', button)
    buttons = self.buttons | (1 << (button - 1))
    self.mouseDown(button)
    self.mouseUp(button)
    return self
java
protected double sqDist(double[] v1, double[] v2) {
    assert (v1.length == v2.length) : "Lengths do not agree: " + v1.length + " " + v2.length;
    double sum = 0;
    for (int i = 0; i < v1.length; i++) {
        final double diff = v1[i] - v2[i];
        sum += diff * diff;
    }
    ++projectedDistances;
    return sum;
}
java
public ApiSuccessResponse switchToBargeIn(String id, MonitoringScopeData monitoringScopeData) throws ApiException {
    ApiResponse<ApiSuccessResponse> resp = switchToBargeInWithHttpInfo(id, monitoringScopeData);
    return resp.getData();
}
java
@Override
public SubmitContainerStateChangeResult submitContainerStateChange(SubmitContainerStateChangeRequest request) {
    request = beforeClientExecution(request);
    return executeSubmitContainerStateChange(request);
}
python
def get_times_from_cli(cli_token):
    """Convert a CLI token to a datetime tuple.

    Argument:
        cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
            or a special value among:
                * thisday
                * thisweek
                * thismonth
                * thisyear

    Returns:
        tuple: a couple of datetime.date objects, where the first item is
            the start of a time frame and the second item the end of the
            time frame. Both elements can also be None, if no date was
            provided.

    Raises:
        ValueError: when the CLI token is not in the right format
            (no colon in the token, not one of the special values, dates
            are not in proper ISO-8601 format.)

    See Also:
        `ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
    """
    today = datetime.date.today()
    if cli_token == "thisday":
        return today, today
    elif cli_token == "thisweek":
        return today, today - dateutil.relativedelta.relativedelta(days=7)
    elif cli_token == "thismonth":
        return today, today - dateutil.relativedelta.relativedelta(months=1)
    elif cli_token == "thisyear":
        return today, today - dateutil.relativedelta.relativedelta(years=1)
    else:
        try:
            start_date, stop_date = cli_token.split(':')
        except ValueError:
            raise ValueError("--time parameter must contain a colon (:)")
        if not start_date and not stop_date:  # ':', no start date, no stop date
            return None, None
        try:
            start_date = date_from_isoformat(start_date) if start_date else None
            stop_date = date_from_isoformat(stop_date) if stop_date else None
        except ValueError:
            raise ValueError("--time parameter was not provided ISO formatted dates")
        return start_date, stop_date
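Assuming date_from_isoformat is a thin wrapper around datetime.date.fromisoformat (an assumption; the helper is not shown in the source), the colon form parses like this:

import datetime

def date_from_isoformat(s):
    # assumed equivalent of the helper used above (Python 3.7+)
    return datetime.date.fromisoformat(s)

start, stop = "2020-01-01:2020-06-30".split(':')
print(date_from_isoformat(start), date_from_isoformat(stop))
# 2020-01-01 2020-06-30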
python
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
    """
    Class method for adding a site at a specified point in a slab.
    Will add the corresponding site on the other side of the
    slab to maintain equivalent surfaces. The slab is modified in place.

    Arg:
        specie (str): The specie to add
        point (coords): The coordinate of the site in the slab to add.
        coords_are_cartesian (bool): Is the point in cartesian coordinates
    """

    # For now just use the species of the
    # surface atom as the element to add

    # Get the index of the corresponding site at the bottom
    point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)

    self.append(specie, point, coords_are_cartesian=coords_are_cartesian)
    self.append(specie, point2, coords_are_cartesian=coords_are_cartesian)
python
def install_mesos_single_box_mode(distribution):
    """ install mesos (all of it) on a single node"""
    if 'ubuntu' in distribution:
        log_green('adding mesosphere apt-key')
        apt_add_key(keyid='E56151BF')

        os = lsb_release()
        apt_string = 'deb http://repos.mesosphere.io/%s %s main' % (
            os['DISTRIB_ID'], os['DISTRIB_CODENAME'])

        log_green('adding mesosphere apt repository')
        apt_add_repository_from_apt_string(apt_string, 'mesosphere.list')

        log_green('installing ubuntu development tools')
        install_ubuntu_development_tools()

        install_oracle_java(distribution, '8')

        log_green('installing mesos and marathon')
        apt_install(packages=['mesos', 'marathon'])

        if not file_contains('/etc/default/mesos-master',
                             'MESOS_QUORUM=1', use_sudo=True):
            file_append('/etc/default/mesos-master',
                        'MESOS_QUORUM=1', use_sudo=True)
            log_green('restarting services...')
            for svc in ['zookeeper', 'mesos-master', 'mesos-slave', 'marathon']:
                restart_service(svc)

        if not file_contains('/etc/mesos-slave/work_dir',
                             '/data/mesos', use_sudo=True):
            file_append('/etc/mesos-slave/work_dir',
                        '/data/mesos', use_sudo=True)
            log_green('restarting services...')
            for svc in ['mesos-slave']:
                restart_service(svc)

        log_green('enabling nginx autoindex on /...')
        with quiet():
            cmd = 'cat /etc/nginx/sites-available/default'
            contents = sudo(cmd).replace('\n', ' ').replace('\r', '')

        if not bool(re.search('.*#*location \/ {.*autoindex on;.*', contents)):
            insert_line_in_file_after_regex(
                path='/etc/nginx/sites-available/default',
                line='                autoindex on;',
                after_regex='^[^#]*location \/ {',
                use_sudo=True)
        log_green('restarting nginx')
        restart_service('nginx')
python
def set_display_label(self, display_label):
    """Sets a display label.

    arg:    display_label (string): the new display label
    raise:  InvalidArgument - ``display_label`` is invalid
    raise:  NoAccess - ``display_label`` cannot be modified
    raise:  NullArgument - ``display_label`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    if self.get_display_label_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_string(display_label,
                                 self.get_display_label_metadata()):
        raise errors.InvalidArgument()
    self._my_map['displayLabel']['text'] = display_label
java
private ProtoNetwork failProteinFamilies(final ProtoNetwork pn, final StringBuilder bldr,
        String pfLocation, String errorMessage) {
    bldr.append("PROTEIN FAMILY RESOLUTION FAILURE in ");
    bldr.append(pfLocation);
    bldr.append("\n\treason: ");
    bldr.append(errorMessage);
    stageWarning(bldr.toString());

    // could not resolve protein family resource so return original
    // proto network.
    return pn;
}
java
public void marshall(DeregisterManagedInstanceRequest deregisterManagedInstanceRequest, ProtocolMarshaller protocolMarshaller) {
    if (deregisterManagedInstanceRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(deregisterManagedInstanceRequest.getInstanceId(), INSTANCEID_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def prior_predictive_to_xarray(self):
    """Convert prior_predictive samples to xarray."""
    data = self.prior_predictive
    if not isinstance(data, dict):
        raise TypeError("DictConverter.prior_predictive is not a dictionary")
    return dict_to_dataset(data, library=None, coords=self.coords, dims=self.dims)
python
def lease(self, ttl=DEFAULT_TIMEOUT):
    """Create a Lease object given a timeout

    :param ttl: timeout
    :return: Lease object
    """
    result = self.post(self.get_url("/lease/grant"),
                       json={"TTL": ttl, "ID": 0})
    return Lease(int(result['ID']), client=self)
java
public static Set<AtlasResourceTypes> getAtlasResourceType(String contextPath) {
    Set<AtlasResourceTypes> resourceTypes = new HashSet<>();
    if (isDebugEnabled) {
        LOG.debug("==> getAtlasResourceType for {}", contextPath);
    }
    String api = getApi(contextPath);

    if (api.startsWith("types")) {
        resourceTypes.add(AtlasResourceTypes.TYPE);
    } else if (api.startsWith("admin") && (contextPath.contains("/session") || contextPath.contains("/version"))) {
        resourceTypes.add(AtlasResourceTypes.UNKNOWN);
    } else if ((api.startsWith("discovery") && contextPath.contains("/gremlin")) || api.startsWith("admin")
            || api.startsWith("graph")) {
        resourceTypes.add(AtlasResourceTypes.OPERATION);
    } else if (api.startsWith("entities") || api.startsWith("lineage") || api.startsWith("discovery")
            || api.startsWith("entity") || api.startsWith("search")) {
        resourceTypes.add(AtlasResourceTypes.ENTITY);
    } else if (api.startsWith("taxonomies")) {
        resourceTypes.add(AtlasResourceTypes.TAXONOMY);
        // taxonomies are modeled as entities
        resourceTypes.add(AtlasResourceTypes.ENTITY);
        if (contextPath.contains("/terms")) {
            resourceTypes.add(AtlasResourceTypes.TERM);
        }
    } else if (api.startsWith("relationship")) {
        resourceTypes.add(AtlasResourceTypes.RELATIONSHIP);
    } else {
        LOG.error("Unable to find Atlas Resource corresponding to : {}\nSetting {}",
                api, AtlasResourceTypes.UNKNOWN.name());
        resourceTypes.add(AtlasResourceTypes.UNKNOWN);
    }

    if (isDebugEnabled) {
        LOG.debug("<== Returning AtlasResources {} for api {}", resourceTypes, api);
    }
    return resourceTypes;
}
python
def get_share_acl(self, share_name, timeout=None):
    '''
    Gets the permissions for the specified share.

    :param str share_name:
        Name of existing share.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: A dictionary of access policies associated with the share.
    :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`)
    '''
    _validate_not_none('share_name', share_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(share_name)
    request.query = {
        'restype': 'share',
        'comp': 'acl',
        'timeout': _int_to_str(timeout),
    }

    return self._perform_request(request, _convert_xml_to_signed_identifiers)
java
public Serializer registerClassLoader(String className, ClassLoader classLoader) {
    classLoaders.put(className, classLoader);
    return this;
}
python
def cleanup_lines(lines, **kwargs):
    ''' Cleans up annotation after syntactic pre-processing and processing:
        -- Removes embedded clause boundaries "<{>" and "<}>";
        -- Removes CLBC markings from analysis;
        -- Removes additional information between < and > from analysis;
        -- Removes additional information between " and " from analysis;
        -- If remove_caps==True, removes 'cap' annotations from analysis;
        -- If remove_clo==True, removes CLO CLC CLB markings from analysis;
        -- If double_quotes=='esc' then " will be overwritten with \\";
           and if double_quotes=='unesc' then \\" will be overwritten with ";
        -- If fix_sent_tags=True, then sentence tags (<s> and </s>) will be
           checked for mistakenly added analysis, and found analysis will be
           removed;
        Returns the input list, which has been cleaned from additional information;
    '''
    if not isinstance(lines, list):
        raise Exception('(!) Unexpected type of input argument! Expected a list of strings.')
    remove_caps = False
    remove_clo = False
    double_quotes = None
    fix_sent_tags = False
    for argName, argVal in kwargs.items():
        if argName in ['remove_caps', 'remove_cap']:
            remove_caps = bool(argVal)
        if argName == 'remove_clo':
            remove_clo = bool(argVal)
        if argName == 'fix_sent_tags':
            fix_sent_tags = bool(argVal)
        if argName in ['double_quotes', 'quotes'] and argVal and \
           argVal.lower() in ['esc', 'escape', 'unesc', 'unescape']:
            double_quotes = argVal.lower()
    pat_token_line = re.compile('^"<(.+)>"\s*$')
    pat_analysis_start = re.compile('^(\s+)"(.+)"(\s[LZT].*)$')
    i = 0
    to_delete = []
    while i < len(lines):
        line = lines[i]
        isAnalysisLine = line.startswith(' ') or line.startswith('\t')
        if not isAnalysisLine:
            removeCurrentTokenAndAnalysis = False
            # 1) Remove embedded clause boundaries "<{>" and "<}>"
            if line.startswith('"<{>"'):
                if i+1 == len(lines) or (i+1 < len(lines) and not '"{"' in lines[i+1]):
                    removeCurrentTokenAndAnalysis = True
            if line.startswith('"<}>"'):
                if i+1 == len(lines) or (i+1 < len(lines) and not '"}"' in lines[i+1]):
                    removeCurrentTokenAndAnalysis = True
            if removeCurrentTokenAndAnalysis:
                # Remove the current token and all the subsequent analyses
                del lines[i]
                j = i
                while j < len(lines):
                    line2 = lines[j]
                    if line2.startswith(' ') or line2.startswith('\t'):
                        del lines[j]
                    else:
                        break
                continue
            # 2) Convert double quotes (if required)
            if double_quotes:
                # '^"<(.+)>"\s*$'
                if pat_token_line.match(lines[i]):
                    token_cleaned = (pat_token_line.match(lines[i])).group(1)
                    # Escape or unescape double quotes
                    if double_quotes in ['esc', 'escape']:
                        token_cleaned = token_cleaned.replace('"', '\\"')
                        lines[i] = '"<' + token_cleaned + '>"'
                    elif double_quotes in ['unesc', 'unescape']:
                        token_cleaned = token_cleaned.replace('\\"', '"')
                        lines[i] = '"<' + token_cleaned + '>"'
        else:
            # Normalize analysis line
            lines[i] = re.sub('^\s{4,}', '\t', lines[i])
            # Remove clause boundary markings
            lines[i] = re.sub('(.*)" ([LZT].*) CLBC (.*)', '\\1" \\2 \\3', lines[i])
            # Remove additional information that was added during the analysis
            lines[i] = re.sub('(.*)" L([^"<]*) ["<]([^@]*) (@.*)', '\\1" L\\2 \\4', lines[i])
            # Remove 'cap' tags
            if remove_caps:
                lines[i] = lines[i].replace(' cap ', ' ')
            # Convert double quotes (if required)
            if double_quotes and double_quotes in ['unesc', 'unescape']:
                lines[i] = lines[i].replace('\\"', '"')
            elif double_quotes and double_quotes in ['esc', 'escape']:
                m = pat_analysis_start.match(lines[i])
                if m:
                    # '^(\s+)"(.+)"(\s[LZT].*)$'
                    start = m.group(1)
                    content = m.group(2)
                    end = m.group(3)
                    content = content.replace('"', '\\"')
                    lines[i] = ''.join([start, '"', content, '"', end])
            # Remove CLO CLC CLB markings
            if remove_clo and 'CL' in lines[i]:
                lines[i] = re.sub('\sCL[OCB]', ' ', lines[i])
                lines[i] = re.sub('\s{2,}', ' ', lines[i])
            # Fix sentence tags that mistakenly could have analysis (in EDT corpus)
            if fix_sent_tags:
                if i-1 > -1 and ('"</s>"' in lines[i-1] or '"<s>"' in lines[i-1]):
                    lines[i] = ''
        i += 1
    return lines
java
public static void destroyAll() {
    for (Map.Entry<ServerConfig, ProtocolConfig> entry : SERVER_MAP.entrySet()) {
        entry.getValue().destory();
    }
    try {
        ProtocolConfig.destroyAll();
    } catch (Exception e) { // NOPMD
    }
}
python
def WriteCronJobRun(self, run_object, cursor=None):
    """Stores a cron job run object in the database."""
    query = ("INSERT INTO cron_job_runs "
             "(job_id, run_id, write_time, run) "
             "VALUES (%s, %s, FROM_UNIXTIME(%s), %s) "
             "ON DUPLICATE KEY UPDATE "
             "run=VALUES(run), write_time=VALUES(write_time)")

    write_time_str = mysql_utils.RDFDatetimeToTimestamp(rdfvalue.RDFDatetime.Now())
    try:
        cursor.execute(query, [
            run_object.cron_job_id,
            db_utils.CronJobRunIDToInt(run_object.run_id),
            write_time_str,
            run_object.SerializeToString(),
        ])
    except MySQLdb.IntegrityError as e:
        raise db.UnknownCronJobError(
            "CronJob with id %s not found." % run_object.cron_job_id, cause=e)
python
def get_identifier(self):
    """
    For methods this is the return type, the name and the (non-pretty)
    argument descriptor. For fields it is simply the name.

    The return-type of methods is attached to the identifier when it is
    a bridge method, which can technically allow two methods with the
    same name and argument type list, but with different return type.
    """
    ident = self.get_name()
    if self.is_method:
        args = ",".join(self.get_arg_type_descriptors())
        if self.is_bridge():
            ident = "%s(%s):%s" % (ident, args, self.get_descriptor())
        else:
            ident = "%s(%s)" % (ident, args)
    return ident
python
def resolve(tor_endpoint, hostname):
    """
    This is easier to use via :meth:`txtorcon.Tor.dns_resolve`

    :param tor_endpoint: the Tor SOCKS endpoint to use.

    :param hostname: the hostname to look up.
    """
    if six.PY2 and isinstance(hostname, str):
        hostname = unicode(hostname)  # noqa
    elif six.PY3 and isinstance(hostname, bytes):
        hostname = hostname.decode('ascii')
    factory = _TorSocksFactory(
        hostname, 0, 'RESOLVE', None,
    )
    proto = yield tor_endpoint.connect(factory)
    result = yield proto.when_done()
    returnValue(result)
java
public static EntityGetOperation<LocatorInfo> get(String locatorId) {
    return new DefaultGetOperation<LocatorInfo>(ENTITY_SET, locatorId, LocatorInfo.class);
}
java
protected void definingClass(String className) {
    String baseName = Name.suffix(className, 1);
    int i = baseName.indexOf("$");
    if (i != -1)
        baseName = baseName.substring(i + 1);
    String cur = definingClassesBaseNames.get(baseName);
    if (cur != null)
        throw new InterpreterError("Defining class problem: " + className
            + ": BeanShell cannot yet simultaneously define two or more "
            + "dependent classes of the same name. Attempt to define: " + className
            + " while defining: " + cur);
    definingClasses.add(className);
    definingClassesBaseNames.put(baseName, className);
}
java
public String getSmallIconPath(CmsObject cms, CmsUser user) {
    return getIconPath(cms, user, IconSize.Small);
}
java
public synchronized void deleteShareRequests(Destination destination) {
    SQLiteDatabase db = null;
    try {
        db = getWritableDatabase();
        int recordsDeleted = db.delete(ShareRequestTable.NAME, WHERE_CLAUSE_BY_DESTINATION,
                new String[]{String.valueOf(destination.getHash())});
        sLogger.log(WingsDbHelper.class, "deleteShareRequests",
                "destination=" + destination.getHash() + " rowsDeleted=" + recordsDeleted);
    } catch (SQLException e) {
        // Do nothing.
    } finally {
        db.close();
    }
}
java
public EClass getIfcFurnishingElement() {
    if (ifcFurnishingElementEClass == null) {
        ifcFurnishingElementEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
                .getEClassifiers().get(259);
    }
    return ifcFurnishingElementEClass;
}
java
private String getPart(int pos) {
    String value = this.getValue();
    if (value == null) {
        return null;
    }
    String[] parts = value.split("/");
    return parts.length >= pos + 1 ? parts[pos] : null;
}
java
@Override
protected void shutDown() throws Exception {
    this.scheduledExecutorPool.shutdown();
    this.scheduledExecutorPool.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.SECONDS);
    this.jobStatusMonitor.shutDown();
}
python
def fixed_width_binning(data=None, bin_width: Union[float, int] = 1, *, range=None,
                        includes_right_edge=False, **kwargs) -> FixedWidthBinning:
    """Construct fixed-width binning schema.

    Parameters
    ----------
    bin_width: float
    range: Optional[tuple]
        (min, max)
    align: Optional[float]
        Must be multiple of bin_width
    """
    result = FixedWidthBinning(bin_width=bin_width,
                               includes_right_edge=includes_right_edge, **kwargs)
    if range:
        result._force_bin_existence(range[0])
        result._force_bin_existence(range[1], includes_right_edge=True)
        if not kwargs.get("adaptive"):
            return result  # Otherwise we want to adapt to data
    if data is not None and data.shape[0]:
        result._force_bin_existence([np.min(data), np.max(data)],
                                    includes_right_edge=includes_right_edge)
    return result
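A hypothetical call, assuming the physt-style FixedWidthBinning machinery above is importable and accepts the adaptive flag that the kwargs.get("adaptive") check implies:

import numpy as np

data = np.random.normal(size=1000)
binning = fixed_width_binning(data=data, bin_width=0.5, adaptive=True)
# with adaptive=True the schema grows to cover [min(data), max(data)]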
python
def validate_variable_type(var_name, var_type, value):
    """Ensures the value is the correct variable type.

    Args:
        var_name (str): The name of the defined variable on a blueprint.
        var_type (type): The type that the value should be.
        value (obj): The object representing the value provided for the
            variable

    Returns:
        object: Returns the appropriate value object. If the original value
            was of CFNType, the returned value will be wrapped in
            CFNParameter.

    Raises:
        ValueError: If the `value` isn't of `var_type` and can't be cast as
            that type, this is raised.
    """
    if isinstance(var_type, CFNType):
        value = CFNParameter(name=var_name, value=value)
    elif isinstance(var_type, TroposphereType):
        try:
            value = var_type.create(value)
        except Exception as exc:
            name = "{}.create".format(var_type.resource_name)
            raise ValidatorError(var_name, name, value, exc)
    else:
        if not isinstance(value, var_type):
            raise ValueError(
                "Value for variable %s must be of type %s. Actual "
                "type: %s." % (var_name, var_type, type(value))
            )
    return value
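The plain-type branch can be exercised on its own, leaving the CFNType and TroposphereType cases aside (a sketch mirroring the final branch above):

def check_plain(var_name, var_type, value):
    # mirrors the last branch of validate_variable_type
    if not isinstance(value, var_type):
        raise ValueError(
            "Value for variable %s must be of type %s. Actual "
            "type: %s." % (var_name, var_type, type(value)))
    return value

assert check_plain("count", int, 3) == 3
try:
    check_plain("count", int, "3")
except ValueError as exc:
    print(exc)  # Value for variable count must be of type <class 'int'>. ...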
java
public static void getNestedConcats(StringBuilder stb, ImmutableTerm term1, ImmutableTerm term2) {
    if (term1 instanceof ImmutableFunctionalTerm) {
        ImmutableFunctionalTerm f = (ImmutableFunctionalTerm) term1;
        getNestedConcats(stb, f.getTerms().get(0), f.getTerms().get(1));
    } else {
        stb.append(appendTerms(term1));
    }
    if (term2 instanceof ImmutableFunctionalTerm) {
        ImmutableFunctionalTerm f = (ImmutableFunctionalTerm) term2;
        getNestedConcats(stb, f.getTerms().get(0), f.getTerms().get(1));
    } else {
        stb.append(appendTerms(term2));
    }
}
java
public static String simplifyMolecularFormula(String formula) {
    String newFormula = formula;
    char thisChar;

    if (formula.contains(" ")) {
        newFormula = newFormula.replace(" ", "");
    }
    if (!formula.contains("."))
        return breakExtractor(formula);

    List<String> listMF = new ArrayList<String>();
    while (newFormula.contains(".")) {
        int pos = newFormula.indexOf('.');
        String thisFormula = newFormula.substring(0, pos);
        if (thisFormula.charAt(0) >= '0' && thisFormula.charAt(0) <= '9')
            thisFormula = multipleExtractor(thisFormula);
        if (thisFormula.contains("("))
            thisFormula = breakExtractor(thisFormula);
        listMF.add(thisFormula);

        thisFormula = newFormula.substring(pos + 1, newFormula.length());
        if (!thisFormula.contains(".")) {
            if (thisFormula.charAt(0) >= '0' && thisFormula.charAt(0) <= '9')
                thisFormula = multipleExtractor(thisFormula);
            if (thisFormula.contains("("))
                thisFormula = breakExtractor(thisFormula);
            listMF.add(thisFormula);
        }
        newFormula = thisFormula;
    }
    if (newFormula.contains("("))
        newFormula = breakExtractor(newFormula);

    String recentElementSymbol = "";
    String recentElementCountString = "0";

    List<String> eleSymb = new ArrayList<String>();
    List<Integer> eleCount = new ArrayList<Integer>();
    for (int i = 0; i < listMF.size(); i++) {
        String thisFormula = listMF.get(i);
        for (int f = 0; f < thisFormula.length(); f++) {
            thisChar = thisFormula.charAt(f);
            if (f < thisFormula.length()) {
                if (thisChar >= 'A' && thisChar <= 'Z') {
                    recentElementSymbol = String.valueOf(thisChar);
                    recentElementCountString = "0";
                }
                if (thisChar >= 'a' && thisChar <= 'z') {
                    recentElementSymbol += thisChar;
                }
                if (thisChar >= '0' && thisChar <= '9') {
                    recentElementCountString += thisChar;
                }
            }
            if (f == thisFormula.length() - 1
                    || (thisFormula.charAt(f + 1) >= 'A' && thisFormula.charAt(f + 1) <= 'Z')) {
                int posit = eleSymb.indexOf(recentElementSymbol);
                int count = Integer.valueOf(recentElementCountString);
                if (posit == -1) {
                    eleSymb.add(recentElementSymbol);
                    eleCount.add(count);
                } else {
                    int countP = Integer.valueOf(recentElementCountString);
                    if (countP == 0)
                        countP = 1;
                    int countA = eleCount.get(posit);
                    if (countA == 0)
                        countA = 1;
                    int value = countP + countA;
                    eleCount.remove(posit);
                    eleCount.add(posit, value);
                }
            }
        }
    }

    String newF = "";
    for (int i = 0; i < eleCount.size(); i++) {
        String element = eleSymb.get(i);
        int num = eleCount.get(i);
        if (num == 0)
            newF += element;
        else
            newF += element + num;
    }
    return newF;
}
python
def ChangePassword(self, password_old, password_new):
    """
    Change the password used to protect the private key.

    Args:
        password_old (str): the current password used to encrypt the private key.
        password_new (str): the new to be used password to encrypt the private key.

    Returns:
        bool: whether the password has been changed
    """
    if not self.ValidatePassword(password_old):
        return False

    if isinstance(password_new, str):
        password_new = password_new.encode('utf-8')

    password_key = hashlib.sha256(password_new)
    self.SaveStoredData("PasswordHash", password_key)
    self.SaveStoredData("MasterKey", AES.new(self._master_key, AES.MODE_CBC, self._iv))
    return True
java
public static void setLookAtM(float[] rm, int rmOffset,
        float eyeX, float eyeY, float eyeZ,
        float centerX, float centerY, float centerZ,
        float upX, float upY, float upZ) {
    // See the OpenGL GLUT documentation for gluLookAt for a description
    // of the algorithm. We implement it in a straightforward way:
    float fx = centerX - eyeX;
    float fy = centerY - eyeY;
    float fz = centerZ - eyeZ;

    // Normalize f
    float rlf = 1.0f / Matrix.length(fx, fy, fz);
    fx *= rlf;
    fy *= rlf;
    fz *= rlf;

    // compute s = f x up (x means "cross product")
    float sx = fy * upZ - fz * upY;
    float sy = fz * upX - fx * upZ;
    float sz = fx * upY - fy * upX;

    // and normalize s
    float rls = 1.0f / Matrix.length(sx, sy, sz);
    sx *= rls;
    sy *= rls;
    sz *= rls;

    // compute u = s x f
    float ux = sy * fz - sz * fy;
    float uy = sz * fx - sx * fz;
    float uz = sx * fy - sy * fx;

    rm[rmOffset + 0] = sx;
    rm[rmOffset + 1] = ux;
    rm[rmOffset + 2] = -fx;
    rm[rmOffset + 3] = 0.0f;

    rm[rmOffset + 4] = sy;
    rm[rmOffset + 5] = uy;
    rm[rmOffset + 6] = -fy;
    rm[rmOffset + 7] = 0.0f;

    rm[rmOffset + 8] = sz;
    rm[rmOffset + 9] = uz;
    rm[rmOffset + 10] = -fz;
    rm[rmOffset + 11] = 0.0f;

    rm[rmOffset + 12] = 0.0f;
    rm[rmOffset + 13] = 0.0f;
    rm[rmOffset + 14] = 0.0f;
    rm[rmOffset + 15] = 1.0f;

    translateM(rm, rmOffset, -eyeX, -eyeY, -eyeZ);
}
java
public Properties parsePropertiesString(String propertiesAsString) {
    final Properties p = new Properties();
    try (StringReader reader = new StringReader(propertiesAsString)) {
        p.load(reader);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to parse .properties: " + propertiesAsString, e);
    }
    return p;
}
java
private static <V, R extends Serializable> Accumulator<V, R> mergeSingle(Accumulator<?, ?> target,
        Accumulator<?, ?> toMerge) {
    @SuppressWarnings("unchecked")
    Accumulator<V, R> typedTarget = (Accumulator<V, R>) target;

    @SuppressWarnings("unchecked")
    Accumulator<V, R> typedToMerge = (Accumulator<V, R>) toMerge;

    typedTarget.merge(typedToMerge);

    return typedTarget;
}
python
def fan_speed(self, value):
    """Verifies the value is between 1 and 9 inclusively."""
    if value not in range(1, 10):
        raise exceptions.RoasterValueError

    self._fan_speed.value = value
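Note that range(1, 10) covers exactly 1 through 9, matching the docstring; a quick check:

valid = range(1, 10)
assert 1 in valid and 9 in valid
assert 0 not in valid and 10 not in valid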
java
public Collection<Deployment> findByName(String name) {
    Collection<Deployment> result = new ArrayList<Deployment>();
    for (Deployment d : deployments) {
        if (d.getName() != null && d.getName().equals(name))
            result.add(d);
    }
    return Collections.unmodifiableCollection(result);
}
python
def _run_vardict_caller(align_bams, items, ref_file, assoc_files,
                        region=None, out_file=None):
    """Detect SNPs and indels with VarDict.

    var2vcf_valid uses the -A flag, which reports all alleles and improves sensitivity:
    https://github.com/AstraZeneca-NGS/VarDict/issues/35#issuecomment-276738191
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            vrs = bedutils.population_variant_regions(items)
            target = shared.subset_variant_regions(
                vrs, region, out_file, items=items, do_merge=False)
            num_bams = len(align_bams)
            sample_vcf_names = []  # for individual sample names, given batch calling may be required
            for bamfile, item in zip(align_bams, items):
                # prepare commands
                sample = dd.get_sample_name(item)
                vardict = get_vardict_command(items[0])
                opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target)
                vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
                compress_cmd = "| bgzip -c" if tx_out_file.endswith("gz") else ""
                fix_ambig_ref = vcfutils.fix_ambiguous_cl()
                fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
                remove_dup = vcfutils.remove_dup_cl()
                py_cl = os.path.join(utils.get_bcbio_bin(), "py")
                jvm_opts = _get_jvm_opts(items[0], tx_out_file)
                setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports())
                contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file)
                lowfreq_filter = _lowfreq_linear_filter(0, False)
                cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} "
                       "-N {sample} -b {bamfile} {opts} "
                       "| teststrandbias.R "
                       "| var2vcf_valid.pl -A -N {sample} -E {var2vcf_opts} "
                       "| {contig_cl} | bcftools filter -i 'QUAL >= 0' | {lowfreq_filter} "
                       "| {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} {compress_cmd}")
                if num_bams > 1:
                    temp_file_prefix = out_file.replace(".gz", "").replace(".vcf", "") + item["name"][1]
                    tmp_out = temp_file_prefix + ".temp.vcf"
                    tmp_out += ".gz" if out_file.endswith("gz") else ""
                    sample_vcf_names.append(tmp_out)
                    with file_transaction(item, tmp_out) as tx_tmp_file:
                        if not _is_bed_file(target):
                            vcfutils.write_empty_vcf(tx_tmp_file, config, samples=[sample])
                        else:
                            cmd += " > {tx_tmp_file}"
                            do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
                else:
                    if not _is_bed_file(target):
                        vcfutils.write_empty_vcf(tx_out_file, config, samples=[sample])
                    else:
                        cmd += " > {tx_out_file}"
                        do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {})
            if num_bams > 1:
                # N.B. merge_variant_files wants region in 1-based end-inclusive
                # coordinates. Thus use bamprep.region_to_gatk
                vcfutils.merge_variant_files(orig_files=sample_vcf_names,
                                             out_file=tx_out_file, ref_file=ref_file,
                                             config=config, region=bamprep.region_to_gatk(region))
    return out_file
java
public void insert(final String argin) {
    final String[] values = new String[1];
    values[0] = argin;
    attrval.r_dim.dim_x = 1;
    attrval.r_dim.dim_y = 0;
    DevVarStringArrayHelper.insert(attrval.value, values);
}
java
public static ClassLoadInfo classLoadManager() {
    ClassLoadingMXBean classLoadingMXBean = ManagementFactory.getClassLoadingMXBean();
    int nowLoadedClassCount = classLoadingMXBean.getLoadedClassCount();
    long totalLoadedClassCount = classLoadingMXBean.getTotalLoadedClassCount();
    long unloadedClassCount = classLoadingMXBean.getUnloadedClassCount();
    return new ClassLoadInfo(nowLoadedClassCount, totalLoadedClassCount, unloadedClassCount);
}
python
def send_request(self, request):
    """
    Handles the Blocks option in an outgoing request.

    :type request: Request
    :param request: the outgoing request
    :return: the edited request
    """
    assert isinstance(request, Request)
    if request.block1 or (request.payload is not None and len(request.payload) > defines.MAX_PAYLOAD):
        host, port = request.destination
        key_token = hash(str(host) + str(port) + str(request.token))
        if request.block1:
            num, m, size = request.block1
        else:
            num = 0
            m = 1
            size = defines.MAX_PAYLOAD
        self._block1_sent[key_token] = BlockItem(size, num, m, size, request.payload,
                                                 request.content_type)
        request.payload = request.payload[0:size]
        del request.block1
        request.block1 = (num, m, size)
    elif request.block2:
        host, port = request.destination
        key_token = hash(str(host) + str(port) + str(request.token))
        num, m, size = request.block2
        item = BlockItem(size, num, m, size, "", None)
        self._block2_sent[key_token] = item
        return request
    return request
java
public static PdfPTable newPdfPTable(int numColumns, List<String> headerNames) {
    PdfPTable table = new PdfPTable(numColumns);
    headerNames.stream().forEach(columnHeaderName -> {
        PdfPCell header = new PdfPCell();
        header.setBackgroundColor(BaseColor.LIGHT_GRAY);
        header.setBorderWidth(2);
        header.setPhrase(new Phrase(columnHeaderName));
        table.addCell(header);
    });
    return table;
}
java
public AwsSecurityFindingFilters withRelatedFindingsId(StringFilter... relatedFindingsId) {
    if (this.relatedFindingsId == null) {
        setRelatedFindingsId(new java.util.ArrayList<StringFilter>(relatedFindingsId.length));
    }
    for (StringFilter ele : relatedFindingsId) {
        this.relatedFindingsId.add(ele);
    }
    return this;
}
java
@XmlElementDecl(namespace = "", name = "log") public JAXBElement<LogType> createLog(LogType value) { return new JAXBElement<>(_Log_QNAME, LogType.class, null, value); }
java
public Protein getCauses_protein(int i) {
    if (Event_Type.featOkTst && ((Event_Type) jcasType).casFeat_causes_protein == null)
        jcasType.jcas.throwFeatMissing("causes_protein", "ch.epfl.bbp.uima.genia.Event");
    jcasType.jcas.checkArrayBounds(
            jcasType.ll_cas.ll_getRefValue(addr, ((Event_Type) jcasType).casFeatCode_causes_protein), i);
    return (Protein) (jcasType.ll_cas.ll_getFSForRef(
            jcasType.ll_cas.ll_getRefArrayValue(
                    jcasType.ll_cas.ll_getRefValue(addr, ((Event_Type) jcasType).casFeatCode_causes_protein), i)));
}
python
def _localize_df(schema_fields, df):
    """Localize any TIMESTAMP columns to tz-aware type.

    In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
    dtype in Series/DataFrame construction, so localize those columns after
    the DataFrame is constructed.
    """
    for field in schema_fields:
        column = str(field["name"])
        if field["mode"].upper() == "REPEATED":
            continue

        if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
            df[column] = df[column].dt.tz_localize("UTC")

    return df
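A minimal pandas demonstration (the schema dict shape is modelled on the BigQuery field dicts this function expects, and _localize_df above is assumed in scope):

import pandas as pd

schema_fields = [{"name": "ts", "mode": "NULLABLE", "type": "TIMESTAMP"}]
df = pd.DataFrame({"ts": pd.to_datetime(["2021-01-01 12:00:00"])})
print(df["ts"].dt.tz)                  # None: timestamps are naive
df = _localize_df(schema_fields, df)
print(df["ts"].dt.tz)                  # UTC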
java
public void addDependency(Area main, Area dependent) {
    List<Area> set = areasDependency.get(main);
    if (set == null) {
        set = new ArrayList<Area>();
        areasDependency.put(main, set);
    }
    set.add(dependent);
}
java
@Override
public GeometryColumns createFeatureTableWithMetadata(GeometryColumns geometryColumns,
        BoundingBox boundingBox, long srsId) {
    return createFeatureTableWithMetadata(geometryColumns, null, null, boundingBox, srsId);
}
java
@Override
public void shutdown(ShutdownModeAmp mode) {
    // jamp/3210
    //getReadMailbox().close();

    try {
        getServiceRefOut().shutdown(mode);
    } catch (Throwable e) {
        log.log(Level.FINER, e.toString(), e);
    }

    for (ServiceRefAmp service : _linkServiceMap.values()) {
        service.shutdown(mode);
    }

    for (StubAmp actor : _closeList) {
        actor.onShutdown(mode);
    }
}
python
def list_x(self, key=None):
    """
    :param key: Which key to return, one of "cid", "type", and "extension"
    :return: A list of x 'key'
    """
    if key in ("cid", "type"):
        return sorted(set(operator.methodcaller(key)(p)
                          for p in self._processors.values()))
    if key == "extension":
        return sorted(k for k, _v in self.list_by_x("extensions"))
    raise ValueError("keyword argument 'key' must be one of "
                     "None, 'cid', 'type' and 'extension' "
                     "but it was '%s'" % key)
python
def calc_datastore(request, job_id):
    """
    Download a full datastore file.

    :param request:
        `django.http.HttpRequest` object.
    :param job_id:
        The id of the requested datastore
    :returns:
        A `django.http.HttpResponse` containing the content
        of the requested artifact, if present, else throws a 404
    """
    job = logs.dbcmd('get_job', int(job_id))
    if job is None:
        return HttpResponseNotFound()
    if not utils.user_has_permission(request, job.user_name):
        return HttpResponseForbidden()

    fname = job.ds_calc_dir + '.hdf5'
    response = FileResponse(
        FileWrapper(open(fname, 'rb')), content_type=HDF5)
    response['Content-Disposition'] = (
        'attachment; filename=%s' % os.path.basename(fname))
    response['Content-Length'] = str(os.path.getsize(fname))
    return response
java
public static void makeDraggable(HasAllMouseHandlers dragHandle, PopupPanel target) {
    DragHandler dragger = new DragHandler(target);
    dragHandle.addMouseDownHandler(dragger);
    dragHandle.addMouseUpHandler(dragger);
    dragHandle.addMouseMoveHandler(dragger);
}
java
public static List<String> get(BeanFactory beanFactory) {
    try {
        return beanFactory.getBean(BEAN, BasePackages.class).get();
    } catch (NoSuchBeanDefinitionException ex) {
        throw new IllegalStateException(
                "Unable to retrieve @EnableAutoConfiguration base packages");
    }
}
python
def with_args(self, **kwargs):
    """Send these keyword-arguments to the phase when called."""
    # Make a copy so we can have multiple of the same phase with different args
    # in the same test.
    new_info = mutablerecords.CopyRecord(self)
    new_info.options = new_info.options.format_strings(**kwargs)
    new_info.extra_kwargs.update(kwargs)
    new_info.measurements = [m.with_args(**kwargs) for m in self.measurements]
    return new_info
python
def top_result(self):
    """Return top `blastn` result

    Try to find a 100% identity and coverage result (perfect match).
    If one does not exist, then retrieve the result with the highest bitscore.

    Returns:
        Ordered dict of BLASTN results or None if no BLASTN results generated
    """
    if self.is_missing:
        return None

    df_perfect_matches = self.df[(self.df['coverage'] == 1.0) & (self.df['pident'] == 100.0)]
    if df_perfect_matches.shape[0]:
        self.is_perfect_match = True
        return BlastReader.df_first_row_to_dict(df_perfect_matches)

    # Return the result with the highest bitscore.
    # This is the first result in the dataframe since the df is ordered by
    # bitscore in descending order.
    result_dict = BlastReader.df_first_row_to_dict(self.df)
    result_trunc = BlastReader.is_blast_result_trunc(qstart=result_dict['qstart'],
                                                     qend=result_dict['qend'],
                                                     sstart=result_dict['sstart'],
                                                     send=result_dict['send'],
                                                     qlen=result_dict['qlen'],
                                                     slen=result_dict['slen'])
    self.is_trunc = result_trunc
    return result_dict
python
def set_name(self, name, create_dir=False):
    """
    Sets the name of the file to be saved.

    @params
        name - the name of the file
        create_dir - whether or not to create the directory at the
            resulting file path if it does not already exist
    """
    # check if the file exists
    exists = os.path.exists(os.path.join(self._path, name))
    self.filepath = os.path.join(self._path, name)
    if exists is False and create_dir is True:
        os.makedirs(self.filepath)
    self.name = name
    return self.name
python
def get_result(self):
    """
    Returns resulting catalogue

    @rtype: MessageCatalogue
    """
    for domain in self.domains:
        if domain not in self.messages:
            self._process_domain(domain)
    return self.result
java
public boolean writerObtained() {
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE))  // 306998.15
        logger.logp(Level.FINE, CLASS_NAME, "writerObtained",
                " " + String.valueOf(_gotWriter), "[" + this + "]");
    return _gotWriter;
}
java
public JaversType getJaversType(Type javaType) {
    argumentIsNotNull(javaType);

    if (javaType == Object.class) {
        return OBJECT_TYPE;
    }

    return engine.computeIfAbsent(javaType, j -> typeFactory.infer(j, findPrototype(j)));
}
java
private Connector findSipConnector(String ipAddress, int port, String transport) {
    Connector connectorToRemove = null;
    for (Connector connector : connectors) {
        final ProtocolHandler protocolHandler = connector.getProtocolHandler();
        if (protocolHandler instanceof SipProtocolHandler) {
            final SipProtocolHandler sipProtocolHandler = (SipProtocolHandler) protocolHandler;
            if (sipProtocolHandler.getIpAddress().equals(ipAddress)
                    && sipProtocolHandler.getPort() == port
                    && sipProtocolHandler.getSignalingTransport().equalsIgnoreCase(transport)) {
                // connector.destroy();
                connectorToRemove = connector;
                break;
            }
        }
    }
    return connectorToRemove;
}
python
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, index):
    """Creates a method descriptor from a MethodDescriptorProto.

    Args:
      method_proto: The proto describing the method.
      service_name: The name of the containing service.
      package: Optional package name to look up for types.
      scope: Scope containing available types.
      index: Index of the method in the service.

    Returns:
      An initialized MethodDescriptor object.
    """
    full_name = '.'.join((service_name, method_proto.name))
    input_type = self._GetTypeFromScope(package, method_proto.input_type, scope)
    output_type = self._GetTypeFromScope(package, method_proto.output_type, scope)
    return descriptor.MethodDescriptor(name=method_proto.name,
                                       full_name=full_name,
                                       index=index,
                                       containing_service=None,
                                       input_type=input_type,
                                       output_type=output_type,
                                       options=_OptionsOrNone(method_proto))