Columns:
language: string (2 classes: python, java)
func_code_string: string (length 63 to 466k)
python
def summary_df(df_in, **kwargs):
    """Make a panda data frame of the mean and std devs of an array of
    results, including the uncertainties on the values.

    This is similar to pandas.DataFrame.describe but also includes estimates
    of the numerical uncertainties.

    The output DataFrame has multiindex levels:

    'calculation type': mean and standard deviations of the data.
    'result type': value and uncertainty for each quantity.

    calculation type    result type    column_1 column_2 ...
    mean                value
    mean                uncertainty
    std                 value
    std                 uncertainty

    Parameters
    ----------
    df_in: pandas DataFrame
    true_values: array
        Analytical values if known for comparison with mean. Used to
        calculate root mean squared errors (RMSE).
    include_true_values: bool, optional
        Whether or not to include true values in the output DataFrame.
    include_rmse: bool, optional
        Whether or not to include root-mean-squared-errors in the output
        DataFrame.

    Returns
    -------
    df: MultiIndex DataFrame
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if true_values is not None:
        assert true_values.shape[0] == df_in.shape[1], (
            'There should be one true value for every column! '
            'true_values.shape=' + str(true_values.shape) + ', '
            'df_in.shape=' + str(df_in.shape))
    # make the data frame
    df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
                      index=['mean', 'std'])
    if include_true_values:
        assert true_values is not None
        df.loc['true values'] = true_values
    # Make index categorical to allow sorting
    df.index = pd.CategoricalIndex(df.index.values, ordered=True,
                                   categories=['true values', 'mean', 'std', 'rmse'],
                                   name='calculation type')
    # add uncertainties
    num_cals = df_in.shape[0]
    mean_unc = df.loc['std'] / np.sqrt(num_cals)
    std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))
    df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=True,
                                       categories=['value', 'uncertainty'])
    df.set_index(['result type'], drop=True, append=True, inplace=True)
    df.loc[('mean', 'uncertainty'), :] = mean_unc.values
    df.loc[('std', 'uncertainty'), :] = std_unc.values
    if include_rmse:
        assert true_values is not None, \
            'Need to input true values for RMSE!'
        rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
        df.loc[('rmse', 'value'), :] = rmse
        df.loc[('rmse', 'uncertainty'), :] = rmse_unc
    # Ensure correct row order by sorting
    df.sort_index(inplace=True)
    # Cast calculation type index back from categorical to string to allow
    # adding new calculation types
    df.set_index(
        [df.index.get_level_values('calculation type').astype(str),
         df.index.get_level_values('result type')],
        inplace=True)
    return df
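A minimal usage sketch for summary_df (hypothetical, not part of the source); it assumes numpy and pandas are importable as np/pd, which the function itself already requires.

# Hypothetical usage sketch; the input data below is made up.
import numpy as np
import pandas as pd

results = pd.DataFrame(np.random.randn(100, 2), columns=['col_a', 'col_b'])
summary = summary_df(results, true_values=np.zeros(2), include_true_values=True)
print(summary)
# Rows are indexed by ('calculation type', 'result type'), e.g.
# ('mean', 'value'), ('mean', 'uncertainty'), ('std', 'value'), ...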
java
protected void read() throws IOException {
    buffer.rewind();
    buffer.limit(buffer.capacity());
    device.read(offset, buffer);
    this.dirty = false;
}
java
public Object getArrayElement(Object o, int index) throws IndexOutOfBoundsException {
    if (o instanceof List)
        return ((List) o).get(index);
    return Array.get(o, index);
}
python
def convert_value(value, field):
    """Given a :class:`.fields.Field` and a value, ensure that the value
    matches the given type, otherwise attempt to convert it.

    :param value: field value.
    :param field: :class:`.fields.Field` instance.
    :return: Result value.
    """
    clz = field.field_type
    if clz is Boolean:
        if not isinstance(value, bool):
            return bool(value)
    elif clz is Date:
        if not isinstance(value, str):
            value = str(value)
        return parser.parse(value)
    elif clz is Number:
        if not isinstance(value, int):
            return int(value)
    elif clz is Object:
        if not isinstance(value, dict):
            return ast.literal_eval(value)
    elif clz is Text or clz is Symbol:
        if not isinstance(value, str):
            return str(value)
    elif clz is List or clz is MultipleAssets or clz is MultipleEntries:
        if not isinstance(value, list):
            return [value]

    # No need to convert :class:`.fields.Link` types as the expected value
    # should be of type :class:`.resources.ResourceLink` for links.
    return value
java
public static Optional<CopyableFileWatermarkGenerator> getCopyableFileWatermarkGenerator(State state) throws IOException { try { if (state.contains(WATERMARK_CREATOR)) { Class<?> watermarkCreatorClass = Class.forName(state.getProp(WATERMARK_CREATOR)); return Optional.of((CopyableFileWatermarkGenerator) watermarkCreatorClass.newInstance()); } else { return Optional.absent(); } } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) { throw new IOException("Failed to instantiate watermarkCreator."); } }
python
def get_newest(blocks, layout_blocks):
    """Filter out old layout blocks from list

    Arguments:
    List:blocks        -- List of block objects
    List:layout_blocks -- List of layout block indexes

    Returns:
    List -- Newest layout blocks in list
    """
    layout_temp = list(layout_blocks)

    for i in range(0, len(layout_temp)):
        for k in range(0, len(layout_blocks)):
            if blocks[layout_temp[i]].ec_hdr.image_seq != blocks[layout_blocks[k]].ec_hdr.image_seq:
                continue

            if blocks[layout_temp[i]].leb_num != blocks[layout_blocks[k]].leb_num:
                continue

            if blocks[layout_temp[i]].vid_hdr.sqnum > blocks[layout_blocks[k]].vid_hdr.sqnum:
                del layout_blocks[k]
                break

    return layout_blocks
java
public static void setResultDisplayDurationInMs(Intent intent, long duration) {
    intent.putExtra(Intents.Scan.RESULT_DISPLAY_DURATION_MS, duration);
}
python
def parse_buckets(self, bucket, params): """ Parse a single S3 bucket TODO: - CORS - Lifecycle - Notification ? - Get bucket's policy :param bucket: :param params: :return: """ bucket['name'] = bucket.pop('Name') api_client = params['api_clients'][get_s3_list_region(list(params['api_clients'].keys())[0])] bucket['CreationDate'] = str(bucket['CreationDate']) bucket['region'] = get_s3_bucket_location(api_client, bucket['name']) # h4ck :: fix issue #59, location constraint can be EU or eu-west-1 for Ireland... if bucket['region'] == 'EU': bucket['region'] = 'eu-west-1' # h4ck :: S3 is global but region-aware... if bucket['region'] not in params['api_clients']: printInfo('Skipping bucket %s (region %s outside of scope)' % (bucket['name'], bucket['region'])) self.buckets_count -= 1 return api_client = params['api_clients'][bucket['region']] get_s3_bucket_logging(api_client, bucket['name'], bucket) get_s3_bucket_versioning(api_client, bucket['name'], bucket) get_s3_bucket_webhosting(api_client, bucket['name'], bucket) get_s3_bucket_default_encryption(api_client, bucket['name'], bucket) bucket['grantees'] = get_s3_acls(api_client, bucket['name'], bucket) get_s3_bucket_policy(api_client, bucket['name'], bucket) get_s3_bucket_secure_transport(api_client, bucket['name'], bucket) # If requested, get key properties #if params['check_encryption'] or params['check_acls']: # get_s3_bucket_keys(api_client, bucket['name'], bucket, params['check_encryption'], # params['check_acls']) bucket['id'] = self.get_non_aws_id(bucket['name']) self.buckets[bucket['id']] = bucket
java
public Observable<Void> beginRefreshHubSchemaAsync(String resourceGroupName, String serverName, String databaseName, String syncGroupName) { return beginRefreshHubSchemaWithServiceResponseAsync(resourceGroupName, serverName, databaseName, syncGroupName).map(new Func1<ServiceResponse<Void>, Void>() { @Override public Void call(ServiceResponse<Void> response) { return response.body(); } }); }
java
public static int validateZone(CharSequence zone) {
    for (int i = 0; i < zone.length(); i++) {
        char c = zone.charAt(i);
        if (c == IPAddress.PREFIX_LEN_SEPARATOR) {
            return i;
        }
        if (c == IPv6Address.SEGMENT_SEPARATOR) {
            return i;
        }
    }
    return -1;
}
java
public static void addFull(JSONWriter writer, String field, Date value) throws JSONException {
    final SimpleDateFormat format = RedmineDateUtils.FULL_DATE_FORMAT.get();
    JsonOutput.add(writer, field, value, format);
}
java
@Override
public void body(String namespace, String name, String bodyText) throws Exception {
    if (m_paramCount == 0) {
        m_bodyText = bodyText.trim();
    }
}
python
def cudnnActivationBackward(handle, mode, alpha, srcDesc, srcData, srcDiffDesc, srcDiffData, destDesc, destData, beta, destDiffDesc, destDiffData): """" Gradient of activation function. This routine computes the gradient of a neuron activation function. In-place operation is allowed for this routine; i.e., srcData and destData pointers may be equal and srcDiffData and destDiffData pointers may be equal. However, this requires the corresponding tensor descriptors to be identical (particularly, the strides of the input and output must match for in-place operation to be allowed). Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. mode : cudnnActivationMode Enumerant to specify the activation mode. alpha: float Scaling factor with which every element of the input tensor is multiplied. srcDesc : cudnnTensorDescriptor Handle to the previously initialized input tensor descriptor. srcData : void_p Data pointer to GPU memory associated with the tensor descriptor srcDesc. srcDiffDesc : cudnnTensorDescriptor Handle to the previously initialized input differential tensor descriptor. srcDiffData : void_p Data pointer to GPU memory associated with the tensor descriptor srcDiffData. destDesc : cudnnTensorDescriptor Handle to the previously initialized output tensor descriptor. destData : void_p Data pointer to GPU memory associated with the output tensor descriptor destDesc. beta: float Scaling factor which is applied on every element of the output tensor prior to adding the result of the activation gradient. Note that if beta is zero, the output is not read and can contain any uninitialized data (including Nan numbers). destDiffDesc : cudnnTensorDescriptor Handle to the previously initialized output differential tensor descriptor. destDiffData : void_p Data pointer to GPU memory associated with the output tensor descriptor destDiffDesc. """ dataType = cudnnGetTensor4dDescriptor(destDesc)[0] if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']: alphaRef = ctypes.byref(ctypes.c_double(alpha)) betaRef = ctypes.byref(ctypes.c_double(beta)) else: alphaRef = ctypes.byref(ctypes.c_float(alpha)) betaRef = ctypes.byref(ctypes.c_float(beta)) status = _libcudnn.cudnnActivationBackward(handle, mode, alphaRef, srcDesc, srcData, srcDiffDesc, srcDiffData, destDesc, destData, betaRef, destDiffDesc, destDiffData) cudnnCheckStatus(status)
python
def _setup_stats_plugins(self): ''' Sets up the plugin stats collectors ''' self.stats_dict['plugins'] = {} for key in self.plugins_dict: plugin_name = self.plugins_dict[key]['instance'].__class__.__name__ temp_key = 'stats:redis-monitor:{p}'.format(p=plugin_name) self.stats_dict['plugins'][plugin_name] = {} for item in self.settings['STATS_TIMES']: try: time = getattr(StatsCollector, item) self.stats_dict['plugins'][plugin_name][time] = StatsCollector \ .get_rolling_time_window( redis_conn=self.redis_conn, key='{k}:{t}'.format(k=temp_key, t=time), window=time, cycle_time=self.settings['STATS_CYCLE']) self.logger.debug("Set up {p} plugin Stats Collector '{i}'"\ .format(p=plugin_name, i=item)) except AttributeError as e: self.logger.warning("Unable to find Stats Time '{s}'"\ .format(s=item)) total = StatsCollector.get_hll_counter(redis_conn=self.redis_conn, key='{k}:lifetime'.format(k=temp_key), cycle_time=self.settings['STATS_CYCLE'], roll=False) self.logger.debug("Set up {p} plugin Stats Collector 'lifetime'"\ .format(p=plugin_name)) self.stats_dict['plugins'][plugin_name]['lifetime'] = total
java
public IPv4AddressSection[] spanWithSequentialBlocks(IPv4AddressSection other) { return getSpanningSequentialBlocks( this, other, IPv4AddressSection::getLower, IPv4AddressSection::getUpper, Address.ADDRESS_LOW_VALUE_COMPARATOR::compare, IPv4AddressSection::withoutPrefixLength, getAddressCreator()); }
java
private void previewButtonClicked() {
    if (!tagPreviewBtnClicked) {
        colorPickerLayout
                .setSelectedColor(ColorPickerHelper.rgbToColorConverter(ColorPickerConstants.DEFAULT_COLOR));
    }
    tagPreviewBtnClicked = !tagPreviewBtnClicked;
    colorPickerLayout.setVisible(tagPreviewBtnClicked);
}
java
private int compareScheme(URI other) {
    String scheme = getScheme();
    String otherScheme = other.getScheme();

    if (scheme == null && otherScheme == null) {
        return 0;
    }
    if (scheme != null) {
        if (otherScheme != null) {
            return scheme.compareToIgnoreCase(otherScheme);
        }
        // not null is greater than 'null'.
        return 1;
    }
    // 'null' is less than not null.
    return -1;
}
python
def export_items(elastic_url, in_index, out_index, elastic_url_out=None, search_after=False, search_after_value=None, limit=None, copy=False): """ Export items from in_index to out_index using the correct mapping """ if not limit: limit = DEFAULT_LIMIT if search_after_value: search_after_value_timestamp = int(search_after_value[0]) search_after_value_uuid = search_after_value[1] search_after_value = [search_after_value_timestamp, search_after_value_uuid] logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index) count_res = requests.get('%s/%s/_count' % (elastic_url, in_index)) try: count_res.raise_for_status() except requests.exceptions.HTTPError: if count_res.status_code == 404: logging.error("The index does not exists: %s", in_index) else: logging.error(count_res.text) sys.exit(1) logging.info("Total items to copy: %i", count_res.json()['count']) # Time to upload the items with the correct mapping elastic_in = ElasticSearch(elastic_url, in_index) if not copy: # Create the correct mapping for the data sources detected from in_index ds_mapping = find_mapping(elastic_url, in_index) else: logging.debug('Using the input index mapping') ds_mapping = extract_mapping(elastic_url, in_index) if not elastic_url_out: elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping) else: elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping) # Time to just copy from in_index to our_index uid_field = find_uuid(elastic_url, in_index) backend = find_perceval_backend(elastic_url, in_index) if search_after: total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit, search_after_value, scroll=False), uid_field) else: total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field) logging.info("Total items copied: %i", total)
python
def groups_close(self, room_id, **kwargs):
    """Removes the private group from the user’s list of groups, only if you’re part of the group."""
    return self.__call_api_post('groups.close', roomId=room_id, kwargs=kwargs)
java
@SuppressWarnings("unused") private void allocateBuffers(int numVertices) { /* * the allocated buffers are native order direct byte buffers, so they * can be passed directly to LWJGL or similar graphics APIs */ m_numVertices = numVertices; /* allocate for each vertex 3 floats */ if (m_numVertices > 0) { m_vertices = ByteBuffer.allocateDirect(numVertices * 3 * SIZEOF_FLOAT); m_vertices.order(ByteOrder.nativeOrder()); } }
java
public void delete(DbSession dbSession, String permission, String organizationUuid,
                   @Nullable Integer groupId, @Nullable Long rootComponentId) {
    mapper(dbSession).delete(permission, organizationUuid, groupId, rootComponentId);
}
python
def GetParserFromFilename(self, path):
    """Returns the appropriate parser class from the filename."""
    # Find the configuration parser.
    handler_name = path.split("://")[0]
    for parser_cls in itervalues(GRRConfigParser.classes):
        if parser_cls.name == handler_name:
            return parser_cls

    # Handle the filename.
    extension = os.path.splitext(path)[1]
    if extension in [".yaml", ".yml"]:
        return YamlParser

    return ConfigFileParser
java
public static <T extends Savepoint> void storeCheckpointMetadata( T checkpointMetadata, OutputStream out) throws IOException { DataOutputStream dos = new DataOutputStream(out); storeCheckpointMetadata(checkpointMetadata, dos); }
python
def getOrCreate(cls, sc):
    """
    Get the existing SQLContext or create a new one with given SparkContext.

    :param sc: SparkContext
    """
    if cls._instantiatedContext is None:
        jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
        sparkSession = SparkSession(sc, jsqlContext.sparkSession())
        cls(sc, sparkSession, jsqlContext)
    return cls._instantiatedContext
java
public List<Type> randomSplit(Random rand, double... splits) { if(splits.length < 1) throw new IllegalArgumentException("Input array of split fractions must be non-empty"); IntList randOrder = new IntList(size()); ListUtils.addRange(randOrder, 0, size(), 1); Collections.shuffle(randOrder, rand); int[] stops = new int[splits.length]; double sum = 0; for(int i = 0; i < splits.length; i++) { sum += splits[i]; if(sum >= 1.001/*some flex room for numeric issues*/) throw new IllegalArgumentException("Input splits sum is greater than 1 by index " + i + " reaching a sum of " + sum); stops[i] = (int) Math.round(sum*randOrder.size()); } List<Type> datasets = new ArrayList<>(splits.length); int prev = 0; for(int i = 0; i < stops.length; i++) { datasets.add(getSubset(randOrder.subList(prev, stops[i]))); prev = stops[i]; } return datasets; }
java
@Override final public XAResource getXAResource() throws ResourceException { if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) { SibTr.entry(this, TRACE, "getXAResource"); } if (_coreConnection == null) { throw new ResourceAdapterInternalException(NLS.getFormattedMessage( "EXCEPTION_RECEIVED_CWSJR1106", new Object[] { "getXAResource" }, null)); } if (_xaResource == null) { try { _xaResource = _coreConnection.getSIXAResource(); } catch (final SIException exception) { FFDCFilter.processException(exception, CLASS_NAME + "getXAResource", "1:649:1.91", this); throw new ResourceException(NLS.getFormattedMessage( "EXCEPTION_RECEIVED_CWSJR1111", new Object[] { exception, "getXAResource" }, null), exception); } catch (final SIErrorException exception) { FFDCFilter.processException(exception, CLASS_NAME + "getXAResource", "1:658:1.91", this); throw new ResourceException(NLS.getFormattedMessage( "EXCEPTION_RECEIVED_CWSJR1111", new Object[] { exception, "getXAResource" }, null), exception); } } if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) { SibTr.exit(this, TRACE, "getXAResource", _xaResource); } return _xaResource; }
java
public <T extends D6Model> T[] execSelectTable(String preparedSql, Object[] searchKeys, Class<T> modelClazz) { @SuppressWarnings("unchecked") final Map<Class<?>, List<Object>> result = execSelectTableWithJoin(preparedSql, searchKeys, modelClazz); final List<Object> rowList = result.get(modelClazz); return toArray(rowList, modelClazz); }
python
def expand(self, flm, nmax=None): """ Return the Slepian expansion coefficients of the input function. Usage ----- s = x.expand(flm, [nmax]) Returns ------- s : SlepianCoeff class instance The Slepian expansion coefficients of the input function. Parameters ---------- flm : SHCoeffs class instance The input function to expand in Slepian functions. nmax : int, optional, default = (x.lmax+1)**2 The number of Slepian expansion coefficients to compute. Description ----------- The global function f is input using its spherical harmonic expansion coefficients flm. The expansion coefficients of the function f using Slepian functions g is given by f_alpha = sum_{lm}^{lmax} f_lm g(alpha)_lm """ if nmax is None: nmax = (self.lmax+1)**2 elif nmax is not None and nmax > (self.lmax+1)**2: raise ValueError( "nmax must be less than or equal to (lmax+1)**2 " + "where lmax is {:s}. Input value is {:s}" .format(repr(self.lmax), repr(nmax)) ) coeffsin = flm.to_array(normalization='4pi', csphase=1, lmax=self.lmax) return self._expand(coeffsin, nmax)
python
def trap_exceptions(results, handler, exceptions=Exception):
    """
    Iterate through the results, but if an exception occurs, stop
    processing the results and instead replace
    the results with the output from the exception handler.
    """
    try:
        for result in results:
            yield result
    except exceptions as exc:
        for result in always_iterable(handler(exc)):
            yield result
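A short, self-contained sketch of how trap_exceptions behaves; the failing() generator and handler below are hypothetical, and always_iterable is assumed to come from the same module as the function above.

# Hypothetical example, for illustration only.
def failing():
    yield 1
    yield 2
    raise ValueError('boom')

def handler(exc):
    return ['error: {}'.format(exc)]

print(list(trap_exceptions(failing(), handler)))
# -> [1, 2, 'error: boom']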
java
private boolean familyHalogen(IAtom atom) {
    String symbol = atom.getSymbol();
    return symbol.equals("F") || symbol.equals("Cl") || symbol.equals("Br") || symbol.equals("I");
}
java
protected HttpURLConnection openConnection(String url) throws IOException {
    HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
    connection.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS);
    connection.setReadTimeout(DEFAULT_READ_TIMEOUT_MILLIS);
    connection.setRequestProperty("Content-Type", "application/json");
    connection.setRequestProperty("User-Agent", USER_AGENT);
    connection.setDoInput(true);
    return connection;
}
python
def add_primary_relationship(parent, childrel, child, parentrel, parentcol): """ When a parent-child relationship is defined as one-to-many, :func:`add_primary_relationship` lets the parent refer to one child as the primary, by creating a secondary table to hold the reference. Under PostgreSQL, a trigger is added as well to ensure foreign key integrity. A SQLAlchemy relationship named ``parent.childrel`` is added that makes usage seamless within SQLAlchemy. The secondary table is named after the parent and child tables, with ``_primary`` appended, in the form ``parent_child_primary``. This table can be found in the metadata in the ``parent.metadata.tables`` dictionary. Multi-column primary keys on either parent or child are unsupported at this time. :param parent: The parent model (on which this relationship will be added) :param childrel: The name of the relationship to the child that will be added :param child: The child model :param str parentrel: Name of the existing relationship on the child model that refers back to the parent model :param str parentcol: Name of the existing table column on the child model that refers back to the parent model :return: None """ parent_table_name = parent.__tablename__ child_table_name = child.__tablename__ primary_table_name = parent_table_name + '_' + child_table_name + '_primary' parent_id_columns = [c.name for c in inspect(parent).primary_key] child_id_columns = [c.name for c in inspect(child).primary_key] primary_table_columns = ( [Column( parent_table_name + '_' + name, None, ForeignKey(parent_table_name + '.' + name, ondelete='CASCADE'), primary_key=True, nullable=False) for name in parent_id_columns] + [Column( child_table_name + '_' + name, None, ForeignKey(child_table_name + '.' + name, ondelete='CASCADE'), nullable=False) for name in child_id_columns] + list(make_timestamp_columns()) ) primary_table = Table(primary_table_name, parent.metadata, *primary_table_columns) rel = relationship(child, uselist=False, secondary=primary_table) setattr(parent, childrel, rel) @event.listens_for(rel, 'set') def _validate_child(target, value, oldvalue, initiator): if value and getattr(value, parentrel) != target: raise ValueError("The target is not affiliated with this parent") # XXX: To support multi-column primary keys, update this SQL function event.listen(primary_table, 'after_create', DDL(''' CREATE FUNCTION %(function)s() RETURNS TRIGGER AS $$ DECLARE target RECORD; BEGIN IF (NEW.%(rhs)s IS NOT NULL) THEN SELECT %(parentcol)s INTO target FROM %(child_table_name)s WHERE %(child_id_column)s = NEW.%(rhs)s; IF (target.%(parentcol)s != NEW.%(lhs)s) THEN RAISE foreign_key_violation USING MESSAGE = 'The target is not affiliated with this parent'; END IF; END IF; RETURN NEW; END; $$ LANGUAGE plpgsql; CREATE TRIGGER %(trigger)s BEFORE INSERT OR UPDATE ON %(table)s FOR EACH ROW EXECUTE PROCEDURE %(function)s(); ''', context={ 'table': primary_table_name, 'function': '%s_validate' % primary_table_name, 'trigger': '%s_trigger' % primary_table_name, 'parentcol': parentcol, 'child_table_name': child_table_name, 'child_id_column': child_id_columns[0], 'lhs': '%s_%s' % (parent_table_name, parent_id_columns[0]), 'rhs': '%s_%s' % (child_table_name, child_id_columns[0]), } ).execute_if(dialect='postgresql') ) event.listen(primary_table, 'before_drop', DDL(''' DROP TRIGGER %(trigger)s ON %(table)s; DROP FUNCTION %(function)s(); ''', context={ 'table': primary_table_name, 'trigger': '%s_trigger' % primary_table_name, 'function': '%s_validate' % primary_table_name, } 
).execute_if(dialect='postgresql') )
python
def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True): """ Returns git style diff between given ``filenode_old`` and ``filenode_new``. :param ignore_whitespace: ignore whitespaces in diff """ for filenode in (filenode_old, filenode_new): if not isinstance(filenode, FileNode): raise VCSError("Given object should be FileNode object, not %s" % filenode.__class__) old_raw_id = getattr(filenode_old.changeset, 'raw_id', '0' * 40) new_raw_id = getattr(filenode_new.changeset, 'raw_id', '0' * 40) repo = filenode_new.changeset.repository vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path, ignore_whitespace) return vcs_gitdiff
python
def updatepLvlGrid(self): ''' Update the grid of persistent income levels. Currently only works for infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not clear what to do about cycles>1 because the distribution of persistent income will be different within a period depending on how many cycles have elapsed. This method uses a simulation approach to generate the pLvlGrid at each period of the cycle, drawing on the initial distribution of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles. Parameters ---------- None Returns ------- None ''' orig_time = self.time_flow self.timeFwd() LivPrbAll = np.array(self.LivPrb) # Simulate the distribution of persistent income levels by t_cycle in a lifecycle model if self.cycles == 1: pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382) pLvlGrid = [] # empty list of time-varying persistent income grids # Calculate distribution of persistent income in each period of lifecycle for t in range(len(self.PermShkStd)): if t > 0: PermShkNow = drawDiscrete(N=self.AgentCount,P=self.PermShkDstn[t-1][0],X=self.PermShkDstn[t-1][1],exact_match=False,seed=t) pLvlNow = self.pLvlNextFunc[t-1](pLvlNow)*PermShkNow pLvlGrid.append(getPercentiles(pLvlNow,percentiles=self.pLvlPctiles)) # Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle) elif self.cycles == 0: T_long = 1000 # Number of periods to simulate to get to "stationary" distribution pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382) t_cycle = np.zeros(self.AgentCount,dtype=int) for t in range(T_long): LivPrb = LivPrbAll[t_cycle] # Determine who dies and replace them with newborns draws = drawUniform(self.AgentCount,seed=t) who_dies = draws > LivPrb pLvlNow[who_dies] = drawLognormal(np.sum(who_dies),mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=t+92615) t_cycle[who_dies] = 0 for j in range(self.T_cycle): # Update persistent income these = t_cycle == j PermShkTemp = drawDiscrete(N=np.sum(these),P=self.PermShkDstn[j][0],X=self.PermShkDstn[j][1],exact_match=False,seed=t+13*j) pLvlNow[these] = self.pLvlNextFunc[j](pLvlNow[these])*PermShkTemp t_cycle = t_cycle + 1 t_cycle[t_cycle == self.T_cycle] = 0 # We now have a "long run stationary distribution", extract percentiles pLvlGrid = [] # empty list of time-varying persistent income grids for t in range(self.T_cycle): these = t_cycle == t pLvlGrid.append(getPercentiles(pLvlNow[these],percentiles=self.pLvlPctiles)) # Throw an error if cycles>1 else: assert False, "Can only handle cycles=0 or cycles=1!" # Store the result and add attribute to time_vary self.pLvlGrid = pLvlGrid self.addToTimeVary('pLvlGrid') if not orig_time: self.timeRev()
java
private void updateEvseStatus(ChargingStation chargingStation, String componentId, ComponentStatus status) {
    for (Evse evse : chargingStation.getEvses()) {
        if (evse.getEvseId().equals(componentId)) {
            evse.setStatus(status);
        }
    }
}
java
public static <A, B> Multiset<B> mutableTransformedCopy(Multiset<A> ms, Function<A, B> func) {
    final Multiset<B> ret = HashMultiset.create();
    for (final Multiset.Entry<A> entry : ms.entrySet()) {
        final B transformedElement = func.apply(entry.getElement());
        ret.add(transformedElement, entry.getCount());
    }
    return ret;
}
python
def convert_coordinates(q, conversion, axisorder):
    """
    Convert a 3-tuple in data coordinates into simplex data
    coordinates for plotting.

    Parameters
    ----------
    q: 3-tuple
        the point to be plotted in data coordinates
    conversion: dict
        keys = ['b','l','r']
        values = lambda function giving the conversion
    axisorder: String giving the order of the axes for the coordinate tuple
        e.g. 'blr' for bottom, left, right coordinates.

    Returns
    -------
    p: 3-tuple
        The point converted to simplex coordinates.
    """
    p = []
    for k in range(3):
        p.append(conversion[axisorder[k]](q[k]))
    return tuple(p)
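A hedged usage sketch: the conversion mapping below is made up for illustration; in practice it is supplied by the plotting code.

# Hypothetical conversion functions for a simplex whose axes run 0-100.
conversion = {'b': lambda x: x / 100.0,
              'l': lambda x: x / 100.0,
              'r': lambda x: x / 100.0}
print(convert_coordinates((25, 25, 50), conversion, 'blr'))
# -> (0.25, 0.25, 0.5)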
python
def chunk_encoding(chunks, chunk):
    '''Write a chunk::

        chunk-size(hex) CRLF
        chunk-data CRLF

    If the size is 0, this is the last chunk, and an extra CRLF is appended.
    '''
    chunks.extend(("%X\r\n" % len(chunk)).encode('utf-8'))
    chunks.extend(chunk)
    chunks.extend(CRLF)
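A sketch of how chunk_encoding builds an HTTP chunked body; the CRLF constant is not shown in the snippet and is assumed to be b'\r\n'.

CRLF = b'\r\n'  # assumed module-level constant

chunks = bytearray()
chunk_encoding(chunks, b'hello')
chunk_encoding(chunks, b'')   # zero-length chunk terminates the body
print(bytes(chunks))
# -> b'5\r\nhello\r\n0\r\n\r\n'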
python
def init_dense_weight(layer):
    '''Initialize dense layer weight.
    '''
    units = layer.units
    weight = np.eye(units)
    bias = np.zeros(units)
    layer.set_weights(
        (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
    )
java
private HttpSession2 buildSession(boolean create) { if (create) { session = buildSession(sessionManager.getSessionIdGenerator().generate(request), true); log.debug("Build new session[{}].", session.getId()); return session; } else { return null; } }
python
def scrape_metrics(self, scraper_config):
    """
    Poll the data from prometheus and return the metrics as a generator.
    """
    response = self.poll(scraper_config)
    try:
        # no dry run if no label joins
        if not scraper_config['label_joins']:
            scraper_config['_dry_run'] = False
        elif not scraper_config['_watched_labels']:
            # build the _watched_labels set
            for val in itervalues(scraper_config['label_joins']):
                scraper_config['_watched_labels'].add(val['label_to_match'])

        for metric in self.parse_metric_family(response, scraper_config):
            yield metric

        # Set dry run off
        scraper_config['_dry_run'] = False
        # Garbage collect unused mapping and reset active labels
        for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
            for key in list(mapping):
                if key not in scraper_config['_active_label_mapping'][metric]:
                    del scraper_config['_label_mapping'][metric][key]
        scraper_config['_active_label_mapping'] = {}
    finally:
        response.close()
java
public static void incrLogMetrics(Map<String, Long> incrMetrics) { if (incrMetrics == null || incrMetrics.size() == 0) { return; } MetricsRegistry registry = RaidNodeMetrics.getInstance( RaidNodeMetrics.DEFAULT_NAMESPACE_ID).getMetricsRegistry(); Map<String, MetricsTimeVaryingLong> logMetrics = RaidNodeMetrics.getInstance( RaidNodeMetrics.DEFAULT_NAMESPACE_ID).logMetrics; synchronized(logMetrics) { for (String key : incrMetrics.keySet()) { if (!logMetrics.containsKey(key)) { logMetrics.put(key, new MetricsTimeVaryingLong(key, registry)); } ((MetricsTimeVaryingLong)logMetrics.get(key)).inc(incrMetrics.get(key)); } } }
java
static public IParserInput getInstance( final char commandPrefix, final boolean allowEmbeddedCommandPrefix, final File args) throws IOException { final CommandLineParser parser = new CommandLineParser(); parser.commandPrefix = commandPrefix; parser.allowEmbeddedCommandPrefix = allowEmbeddedCommandPrefix; parser.commandLine = convertToString(args).toString(); return parser; }
python
def get_block_start(lines, lineno, maximum_indents=80):
    """Approximate block start"""
    pattern = get_block_start_patterns()
    for i in range(lineno, 0, -1):
        match = pattern.search(lines.get_line(i))
        if match is not None and \
           count_line_indents(lines.get_line(i)) <= maximum_indents:
            striped = match.string.lstrip()
            # Maybe we're in a list comprehension or generator expression
            if i > 1 and striped.startswith('if') or striped.startswith('for'):
                bracs = 0
                for j in range(i, min(i + 5, lines.length() + 1)):
                    for c in lines.get_line(j):
                        if c == '#':
                            break
                        if c in '[(':
                            bracs += 1
                        if c in ')]':
                            bracs -= 1
                            if bracs < 0:
                                break
                    if bracs < 0:
                        break
                if bracs < 0:
                    continue
            return i
    return 1
python
def remove(self, payload):
    """Remove specified entries from the queue."""
    succeeded = []
    failed = []
    for key in payload['keys']:
        running = self.process_handler.is_running(key)
        if not running:
            removed = self.queue.remove(key)
            if removed:
                succeeded.append(str(key))
            else:
                failed.append(str(key))
        else:
            failed.append(str(key))

    message = ''
    if len(succeeded) > 0:
        message += 'Removed entries: {}.'.format(', '.join(succeeded))
        status = 'success'
    if len(failed) > 0:
        message += '\nRunning or non-existing entry for keys: {}'.format(', '.join(failed))
        status = 'error'

    answer = {'message': message.strip(), 'status': status}
    return answer
java
private void initialize(Locale locale) {
    // start with a new instance of the widgets and unique widgets
    m_widgets = new HashMap<String, I_CmsWidget>(25);
    m_uniqueWidgets = new ArrayList<I_CmsWidget>(12);
    m_values = new HashMap<String, I_CmsXmlContentValue>(25);

    // store Locale to use when collecting the widgets
    m_locale = locale;
}
python
def wrap(self, message):
    """
    NTM GSSwrap()

    :param message: The message to be encrypted
    :return: A Tuple containing the signature and the encrypted messaging
    """
    cipher_text = _Ntlm2Session.encrypt(self, message)
    signature = _Ntlm2Session.sign(self, message)
    return cipher_text, signature
python
def make_cache_key(request):
    """
    Generate a cache key from request object data
    """
    headers = frozenset(request._p['header'].items())
    path = frozenset(request._p['path'].items())
    query = frozenset(request._p['query'])
    return (request.url, headers, path, query)
java
private void callLifecycleInterceptors(Class<?> mbClass, Object mbInstance) throws InjectionException { Collection<MethodInfo> methods = MethodMap.getAllDeclaredMethods(mbClass); for (MethodInfo methodInfo : methods) { Method method = methodInfo.getMethod(); PostConstruct postConstruct = method.getAnnotation(PostConstruct.class); if (postConstruct != null) { method.setAccessible(true); validateLifeCycleSignature(PC_NAME, method, true); try { method.invoke(mbInstance, (Object[]) null); } catch (InvocationTargetException ex) { Throwable cause = ex.getCause() != null ? ex.getCause() : ex; cause.printStackTrace(); throw new InjectionException(PC_NAME + " interceptor \"" + method + "\" failed with the following error : " + cause); } catch (Throwable ex) { ex.printStackTrace(); throw new InjectionException(PC_NAME + " interceptor \"" + method + "\" failed with the following error : " + ex); } } } }
java
public ComplexDouble subi(ComplexDouble c, ComplexDouble result) {
    if (this == result) {
        r -= c.r;
        i -= c.i;
    } else {
        result.r = r - c.r;
        result.i = i - c.i;
    }
    return this;
}
python
def translate_to_stackdriver(self, trace): """Translate the spans json to Stackdriver format. See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/ projects.traces/batchWrite :type trace: dict :param trace: Trace dictionary :rtype: dict :returns: Spans in Google Cloud StackDriver Trace format. """ set_attributes(trace) spans_json = trace.get('spans') trace_id = trace.get('traceId') for span in spans_json: span_name = 'projects/{}/traces/{}/spans/{}'.format( self.project_id, trace_id, span.get('spanId')) span_json = { 'name': span_name, 'displayName': span.get('displayName'), 'startTime': span.get('startTime'), 'endTime': span.get('endTime'), 'spanId': str(span.get('spanId')), 'attributes': self.map_attributes(span.get('attributes')), 'links': span.get('links'), 'status': span.get('status'), 'stackTrace': span.get('stackTrace'), 'timeEvents': span.get('timeEvents'), 'sameProcessAsParentSpan': span.get('sameProcessAsParentSpan'), 'childSpanCount': span.get('childSpanCount') } if span.get('parentSpanId') is not None: parent_span_id = str(span.get('parentSpanId')) span_json['parentSpanId'] = parent_span_id yield span_json
java
public void activateVersionControl(final Package converterPackage) throws CouldNotPerformException { try { String entryType; try { entryType = getDatabaseName(); } catch (Exception ex) { throw new CouldNotPerformException("Could not detect entry type!", ex); } super.activateVersionControl(entryType, converterPackage); } catch (CouldNotPerformException ex) { throw new CouldNotPerformException("Could not activate version control!", ex); } }
python
def approve( self, allowed_address: Address, allowance: TokenAmount, ): """ Aprove `allowed_address` to transfer up to `deposit` amount of token. Note: For channel deposit please use the channel proxy, since it does additional validations. """ # Note that given_block_identifier is not used here as there # are no preconditions to check before sending the transaction log_details = { 'node': pex(self.node_address), 'contract': pex(self.address), 'allowed_address': pex(allowed_address), 'allowance': allowance, } checking_block = self.client.get_checking_block() error_prefix = 'Call to approve will fail' gas_limit = self.proxy.estimate_gas( checking_block, 'approve', to_checksum_address(allowed_address), allowance, ) if gas_limit: error_prefix = 'Call to approve failed' log.debug('approve called', **log_details) transaction_hash = self.proxy.transact( 'approve', safe_gas_limit(gas_limit), to_checksum_address(allowed_address), allowance, ) self.client.poll(transaction_hash) receipt_or_none = check_transaction_threw(self.client, transaction_hash) transaction_executed = gas_limit is not None if not transaction_executed or receipt_or_none: if transaction_executed: block = receipt_or_none['blockNumber'] else: block = checking_block self.proxy.jsonrpc_client.check_for_insufficient_eth( transaction_name='approve', transaction_executed=transaction_executed, required_gas=GAS_REQUIRED_FOR_APPROVE, block_identifier=block, ) msg = self._check_why_approved_failed(allowance, block) error_msg = f'{error_prefix}. {msg}' log.critical(error_msg, **log_details) raise RaidenUnrecoverableError(error_msg) log.info('approve successful', **log_details)
python
def idempotency_key(self, idempotency_key): """ Sets the idempotency_key of this BatchUpsertCatalogObjectsRequest. A value you specify that uniquely identifies this request among all your requests. A common way to create a valid idempotency key is to use a Universally unique identifier (UUID). If you're unsure whether a particular request was successful, you can reattempt it with the same idempotency key without worrying about creating duplicate objects. See [Idempotency](/basics/api101/idempotency) for more information. :param idempotency_key: The idempotency_key of this BatchUpsertCatalogObjectsRequest. :type: str """ if idempotency_key is None: raise ValueError("Invalid value for `idempotency_key`, must not be `None`") if len(idempotency_key) < 1: raise ValueError("Invalid value for `idempotency_key`, length must be greater than or equal to `1`") self._idempotency_key = idempotency_key
java
public BaasResult<BaasUser> loginSync(String registrationId) { BaasBox box = BaasBox.getDefault(); if (password == null) throw new IllegalStateException("password cannot be null"); NetworkTask<BaasUser> task = new LoginRequest(box, this, registrationId, RequestOptions.DEFAULT, null); return box.submitSync(task); }
java
public static Builder measurementByPOJO(final Class<?> clazz) { Objects.requireNonNull(clazz, "clazz"); throwExceptionIfMissingAnnotation(clazz, Measurement.class); String measurementName = findMeasurementName(clazz); return new Builder(measurementName); }
java
public String processInterfaceTypes(TDefinitions descriptionType, boolean addAddress) { String allAddress = DBConstants.BLANK; for (TDocumented nextElement : descriptionType.getAnyTopLevelOptionalElement()) { // Create the service type if (nextElement instanceof TPortType) { String address = this.processPortType(descriptionType, (TPortType)nextElement, addAddress); if (allAddress != null) { if (allAddress == DBConstants.BLANK) allAddress = address; else if (!allAddress.equalsIgnoreCase(address)) allAddress = null; // null = not all address are the same } } } return allAddress; }
java
public static Pair<INDArray, INDArray> mergeTimeSeries(INDArray[][] arrays, INDArray[][] masks, int inOutIdx) { Pair<INDArray[], INDArray[]> p = selectColumnFromMDSData(arrays, masks, inOutIdx); return mergeTimeSeries(p.getFirst(), p.getSecond()); }
python
def data_not_in(db_data, user_data):
    """Validate data not in user data.

    Args:
        db_data (str): The data store in Redis.
        user_data (list): The user provided data.

    Returns:
        bool: True if the data passed validation.
    """
    if isinstance(user_data, list):
        if db_data not in user_data:
            return True
    return False
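A trivial illustration of the validation rule, with made-up values:

print(data_not_in('alpha', ['beta', 'gamma']))   # True: 'alpha' is not in the list
print(data_not_in('beta', ['beta', 'gamma']))    # False: the value is present
print(data_not_in('beta', 'beta'))               # False: user_data is not a list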
python
def get_context_files(data):
    """Retrieve pre-installed annotation files for annotating genome context.
    """
    ref_file = dd.get_ref_file(data)
    all_files = []
    for ext in [".bed.gz"]:
        all_files += sorted(glob.glob(os.path.normpath(os.path.join(os.path.dirname(ref_file), os.pardir,
                                                                    "coverage", "problem_regions", "*",
                                                                    "*%s" % ext))))
    return sorted(all_files)
python
def to_string(interval, conv=repr, disj=' | ', sep=',', left_open='(', left_closed='[', right_open=')', right_closed=']', pinf='+inf', ninf='-inf'): """ Export given interval (or atomic interval) to string. :param interval: an Interval or AtomicInterval instance. :param conv: function that is used to represent a bound (default is `repr`). :param disj: string representing disjunctive operator (default is ' | '). :param sep: string representing bound separator (default is ','). :param left_open: string representing left open boundary (default is '('). :param left_closed: string representing left closed boundary (default is '['). :param right_open: string representing right open boundary (default is ')'). :param right_closed: string representing right closed boundary (default is ']'). :param pinf: string representing a positive infinity (default is '+inf'). :param ninf: string representing a negative infinity (default is '-inf'). :return: a string representation for given interval. """ interval = Interval(interval) if isinstance(interval, AtomicInterval) else interval if interval.is_empty(): return '{}{}'.format(left_open, right_open) def _convert(bound): if bound == inf: return pinf elif bound == -inf: return ninf else: return conv(bound) exported_intervals = [] for item in interval: left = left_open if item.left == OPEN else left_closed right = right_open if item.right == OPEN else right_closed lower = _convert(item.lower) upper = _convert(item.upper) if item.lower == item.upper: exported_intervals.append('{}{}{}'.format(left, lower, right)) else: exported_intervals.append('{}{}{}{}{}'.format(left, lower, sep, upper, right)) return disj.join(exported_intervals)
java
public List<DomainTopicInner> listByDomain(String resourceGroupName, String domainName) { return listByDomainWithServiceResponseAsync(resourceGroupName, domainName).toBlocking().single().body(); }
java
public void marshall(DeleteSizeConstraintSetRequest deleteSizeConstraintSetRequest, ProtocolMarshaller protocolMarshaller) { if (deleteSizeConstraintSetRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(deleteSizeConstraintSetRequest.getSizeConstraintSetId(), SIZECONSTRAINTSETID_BINDING); protocolMarshaller.marshall(deleteSizeConstraintSetRequest.getChangeToken(), CHANGETOKEN_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def format_distance(kilometers, fmt=DISTANCE_FORMAT, unit='km'):
    """
    TODO docs.
    """
    magnitude = DISTANCE_UNITS[unit](kilometers)
    return fmt % {'magnitude': magnitude, 'unit': unit}
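The DISTANCE_UNITS and DISTANCE_FORMAT constants are not shown in the snippet; the sketch below assumes plausible definitions purely to illustrate the call.

# Assumed definitions, for illustration only.
DISTANCE_FORMAT = '%(magnitude).1f %(unit)s'
DISTANCE_UNITS = {
    'km': lambda km: km,
    'mi': lambda km: km * 0.621371,
}

print(format_distance(5.0))              # -> '5.0 km'
print(format_distance(5.0, unit='mi'))   # -> '3.1 mi'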
java
public Object constantValue() { Object result = sym.getConstValue(); if (result != null && sym.type.hasTag(BOOLEAN)) // javac represents false and true as Integers 0 and 1 result = Boolean.valueOf(((Integer)result).intValue() != 0); return result; }
java
public static ProxyInfo setProxyCfg(String host, String port, String userName, String password) { return new ProxyInfo(host, port, userName, password); }
java
private boolean isExcludedClass(Class<?> clazz) {
    for (Class<?> c : excludedClasses) {
        if (c.isAssignableFrom(clazz)) {
            return true;
        }
    }
    return false;
}
java
protected Queue<AsyncResourceRequest<V>> getRequestQueueForExistingKey(K key) {
    Queue<AsyncResourceRequest<V>> requestQueue = requestQueueMap.get(key);
    return requestQueue;
}
python
def convertDict2Attrs(self, *args, **kwargs): """Each element on the atttrs attribute gest converted to a proper python object, depending on type. Some default constantFields are left as is (strings), because they are better treated as strings. """ constantFields = ['id', 'groupName', 'name', 'homePhone', 'mobilePhone1', 'phoneNumber', 'postcode', 'emailAddress'] def convierte(data): """Recursively convert the fields on the data given to a python object.""" # Iterators, lists and dictionaries # Here comes the recursive calls! try: it = iter(data) if type(it) == type(iter({})): d = {} for k in it: if k in constantFields: d[k] = data[k] else: d[k] = convierte(data[k]) data = d if type(it) == type(iter([])): l = [] for e in it: l.append(convierte(e)) data = l except TypeError as terr: pass except Exception as ex: raise ex # Python built-in types: ints, floats, or even datetimes. If it # cannot convert it to a built-in type, leave it as string, or # as-is. There may be nested Mambu objects here! # This are the recursion base cases! try: d = int(data) if str(d) != data: # if string has trailing 0's, leave it as string, to not lose them return data return d except (TypeError, ValueError) as tverr: try: return float(data) except (TypeError, ValueError) as tverr: try: return self.util_dateFormat(data) except (TypeError, ValueError) as tverr: return data return data self.attrs = convierte(self.attrs)
python
def Upload(self,directory,filename): """Uploads/Updates/Replaces files""" db = self._loadDB(directory) logger.debug("wp: Attempting upload of %s"%(filename)) # See if this already exists in our DB if db.has_key(filename): pid=db[filename] logger.debug('wp: Found %s in DB with post id %s'%(filename,pid)) else: pid=None fullfile=os.path.join(directory,filename) fid=open(fullfile,'r'); # Read meta data and content into dictionary post=self._readMetaAndContent(fid) #Connect to WP self._connectToWP() # If no pid, it means post is fresh off the press # and not uploaded yet! if not pid: # Get a PID by uploading pid=self.wp.call(NewPost(post)) if pid: logger.debug("wp: Uploaded post with pid %s",pid) db[filename]=pid self._saveDB(directory,db) return True else: logger.error("wp: Couldn't upload post") return False else: # Already has PID, replace post logger.debug("wp: Replacing post with pid %s",pid) #FIXME: Check return value?! self.wp.call(EditPost(pid,post)) return True return False
python
def unlock(self): '''Unlock card with SCardEndTransaction.''' component = self.component while True: if isinstance( component, smartcard.pcsc.PCSCCardConnection.PCSCCardConnection): hresult = SCardEndTransaction(component.hcard, SCARD_LEAVE_CARD) if 0 != hresult: raise CardConnectionException( 'Failed to unlock with SCardEndTransaction: ' + SCardGetErrorMessage(hresult)) else: # print('unlocked') pass break if hasattr(component, 'component'): component = component.component else: break
python
def insrtc(item, inset):
    """
    Insert an item into a character set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtc_c.html

    :param item: Item to be inserted.
    :type item: str or list of str
    :param inset: Insertion set.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    if isinstance(item, list):
        for c in item:
            libspice.insrtc_c(stypes.stringToCharP(c), ctypes.byref(inset))
    else:
        item = stypes.stringToCharP(item)
        libspice.insrtc_c(item, ctypes.byref(inset))
python
def bam_conversion(job, samfile, sample_type, univ_options, samtools_options): """ Convert a sam to a bam. :param dict samfile: The input sam file :param str sample_type: Description of the sample to inject into the filename :param dict univ_options: Dict of universal options used by almost all tools :param dict samtools_options: Options specific to samtools :return: fsID for the generated bam :rtype: toil.fileStore.FileID """ work_dir = os.getcwd() input_files = { sample_type + '.sam': samfile} input_files = get_files_from_filestore(job, input_files, work_dir, docker=True) bamfile = '/'.join([work_dir, sample_type + '.bam']) parameters = ['view', '-bS', '-o', docker_path(bamfile), input_files[sample_type + '.sam'] ] docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version']) output_file = job.fileStore.writeGlobalFile(bamfile) # The samfile is no longer useful so delete it job.fileStore.deleteGlobalFile(samfile) job.fileStore.logToMaster('Ran sam2bam on %s:%s successfully' % (univ_options['patient'], sample_type)) return output_file
java
@Override public void put(List<Put> puts) throws IOException { LOG.trace("put(List<Put>)"); if (puts == null || puts.isEmpty()) { return; } else if (puts.size() == 1) { try { put(puts.get(0)); } catch (IOException e) { throw createRetriesExhaustedWithDetailsException(e, puts.get(0)); } } else { getBatchExecutor().batch(puts); } }
python
def _cleanup_api(self): ''' Helper method to clean up resources and models if we detected a change in the swagger file for a stage ''' resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args) if resources.get('resources'): res = resources.get('resources')[1:] res.reverse() for resource in res: delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args) if not delres.get('deleted'): return delres models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args) if models.get('models'): for model in models.get('models'): delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args) if not delres.get('deleted'): return delres return {'deleted': True}
python
def add_library_search_paths(self, paths, recursive=True, escape=False, target_name=None, configuration_name=None): """ Adds paths to the LIBRARY_SEARCH_PATHS configuration. :param paths: A string or array of strings :param recursive: Add the paths as recursive ones :param escape: Escape the path in case it contains spaces :param target_name: Target name or list of target names to add the flag to or None for every target :param configuration_name: Configuration name to add the flag to or None for every configuration :return: void """ self.add_search_paths(XCBuildConfigurationFlags.LIBRARY_SEARCH_PATHS, paths, recursive, escape, target_name, configuration_name)
python
def configure(self, binder): # type: (Binder) -> None """Initializer of the cache - creates the Redis cache module as the default cache infrastructure. The module is bound to `RedisCacheModule` and `CacheModule` keys. The initializer also creates `RedisIdHelper` and bounds it to `RedisIdHelper` and `IdHelper` keys. :param Binder binder: The binder object holding the binding context, we\ add cache to the binder. """ redis_cache_module = RedisCacheModule() binder.bind( RedisCacheModule, to=redis_cache_module, scope=singleton ) binder.bind( CacheModule, to=redis_cache_module, scope=singleton ) redis_id_helper = RedisIdHelper() binder.bind( RedisIdHelper, to=redis_id_helper, scope=singleton ) binder.bind( IdHelper, to=redis_id_helper, scope=singleton ) logging.debug("Created RedisCache binding.")
python
def _quantize(x, params, randomize=True): """Quantize x according to params, optionally randomizing the rounding.""" if not params.quantize: return x if not randomize: return tf.bitcast( tf.cast(x / params.quantization_scale, tf.int16), tf.float16) abs_x = tf.abs(x) sign_x = tf.sign(x) y = abs_x / params.quantization_scale y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x))) y = tf.minimum(y, tf.int16.max) * sign_x q = tf.bitcast(tf.cast(y, tf.int16), tf.float16) return q
python
def _scopes_registered(self):
    """
    Return a list that contains all the scopes registered
    in the class.
    """
    scopes = []
    for name in dir(self.__class__):
        if name.startswith('scope_'):
            scope = name.split('scope_')[1]
            scopes.append(scope)

    return scopes
java
public void addMBeanAttribute(String mbean, MBeanAttribute attr) throws Exception { MBeanHolder mbeanHolder = mbeanMap.get(mbean); if (mbeanHolder == null) { mbeanHolder = new MBeanHolder(this, process, mbean); mbeanMap.put(mbean, mbeanHolder); } mbeanHolder.addAttribute(attr); log.info("Added attribute " + attr + " to " + mbean); }
java
public static String buildCommand(YarnContainerType containerType, Map<String, String> args) { CommandBuilder commandBuilder = new CommandBuilder("./" + ALLUXIO_SETUP_SCRIPT).addArg(containerType.getName()); for (Entry<String, String> argsEntry : args.entrySet()) { commandBuilder.addArg(argsEntry.getKey(), argsEntry.getValue()); } // Redirect stdout and stderr to yarn log files commandBuilder.addArg("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"); commandBuilder.addArg("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"); return commandBuilder.toString(); }
java
public INDArray inferVector(String text, double learningRate, double minLearningRate, int iterations) { if (tokenizerFactory == null) throw new IllegalStateException("TokenizerFactory should be defined, prior to predict() call"); if (this.vocab == null || this.vocab.numWords() == 0) reassignExistingModel(); List<String> tokens = tokenizerFactory.create(text).getTokens(); List<VocabWord> document = new ArrayList<>(); for (String token : tokens) { if (vocab.containsWord(token)) { document.add(vocab.wordFor(token)); } } if (document.isEmpty()) throw new ND4JIllegalStateException("Text passed for inference has no matches in model vocabulary."); return inferVector(document, learningRate, minLearningRate, iterations); }
java
private static List<CharSequence> splitHeader(CharSequence header) {
    final StringBuilder builder = new StringBuilder(header.length());
    final List<CharSequence> protocols = new ArrayList<CharSequence>(4);
    for (int i = 0; i < header.length(); ++i) {
        char c = header.charAt(i);
        if (Character.isWhitespace(c)) {
            // Don't include any whitespace.
            continue;
        }
        if (c == ',') {
            // Add the string and reset the builder for the next protocol.
            protocols.add(builder.toString());
            builder.setLength(0);
        } else {
            builder.append(c);
        }
    }

    // Add the last protocol
    if (builder.length() > 0) {
        protocols.add(builder.toString());
    }

    return protocols;
}
java
@NotNull
public Exceptional<T> ifPresent(@NotNull Consumer<? super T> consumer) {
    if (throwable == null) {
        consumer.accept(value);
    }
    return this;
}
java
public Object getKey(Key key, String[] actualReturn) { return getKey(key, actualReturn, null); }
java
@Benchmark @BenchmarkMode(Mode.AverageTime) public void byteArrayRandomAccessFile(final Configuration configuration) throws IOException { byte[] buffer = new byte[BUFFER_CAPACITY]; int position = 0; try (RandomAccessFile file = new RandomAccessFile(configuration.file, "rw")) { for (long i = 0; i < LINES; ++i) { if (BUFFER_CAPACITY - position < DATA.length) { file.write(buffer, 0, position); position = 0; } if (BUFFER_CAPACITY < DATA.length) { file.write(DATA); } else { System.arraycopy(DATA, 0, buffer, position, DATA.length); position += DATA.length; } } if (position > 0) { file.write(buffer, 0, position); } } }
java
public static Interval plus(final Interval interval, final Period period) {
    return new Interval(interval.getStart().plus(period), interval.getEnd().plus(period));
}
python
def loads(self, param): ''' Checks the return parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host. ''' if isinstance(param, ProxyRef): try: return self.lookup_url(param.url, param.klass, param.module) except HostError: print "Can't lookup for the actor received with the call. \ It does not exist or the url is unreachable.", param raise HostError(param) elif isinstance(param, list): return [self.loads(elem) for elem in param] elif isinstance(param, tuple): return tuple([self.loads(elem) for elem in param]) elif isinstance(param, dict): new_dict = param for key in new_dict.keys(): new_dict[key] = self.loads(new_dict[key]) return new_dict else: return param
python
def collect_manifest_dependencies(manifest_data, lockfile_data):
    """Convert the manifest format to the dependencies schema"""
    output = {}
    for dependencyName, dependencyConstraint in manifest_data.items():
        output[dependencyName] = {
            # identifies where this dependency is installed from
            'source': 'example-package-manager',

            # the constraint that the user is using (i.e. "> 1.0.0")
            'constraint': dependencyConstraint,

            # all available versions above and outside of their constraint
            # - usually you would need to use the package manager lib or API
            #   to get this information (we just fake it here)
            'available': [
                {'name': '2.0.0'},
            ],
        }
    return output
python
def _walk_factory(self, dep_predicate):
    """Construct the right context object for managing state during a transitive walk."""
    walk = None
    if dep_predicate:
        walk = self.DepPredicateWalk(dep_predicate)
    else:
        walk = self.NoDepPredicateWalk()
    return walk
java
public JvmTypeReference inferredType() {
    XComputedTypeReference result = xtypesFactory.createXComputedTypeReference();
    result.setTypeProvider(new InferredTypeIndicator(null));
    return result;
}
python
def filesizeformat(bytes, decimals=1):
    """
    Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
    102 bytes, etc).

    Based on django.template.defaultfilters.filesizeformat
    """
    try:
        bytes = float(bytes)
    except (TypeError, ValueError, UnicodeDecodeError):
        raise ValueError

    def filesize_number_format(value):
        return formats.number_format(round(value, decimals), decimals)

    units_list = sorted(six.iteritems(FILESIZE_UNITS), key=operator.itemgetter(1))
    value = unit = None
    len_units_list = len(units_list)
    for i in xrange(1, len_units_list):
        if bytes < units_list[i][1]:
            prev_unit = units_list[i - 1]
            value = filesize_number_format(bytes / prev_unit[1])
            unit = prev_unit[0]
            break
    if value is None:
        value = filesize_number_format(bytes / units_list[-1][1])
        unit = units_list[-1][0]

    return SIZEFIELD_FORMAT.format(value=value, unit=unit)
python
def get_cookie(self, key, default=None, secret=None):
    """ Return the content of a cookie. To read a `Signed Cookie`, the
        `secret` must match the one used to create the cookie (see
        :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
        cookie or wrong signature), return a default value. """
    value = self.cookies.get(key)
    if secret and value:
        dec = cookie_decode(value, secret)  # (key, value) tuple or None
        return dec[1] if dec and dec[0] == key else default
    return value or default
python
def remove_entry(data, entry):
    ''' Remove an entry in place. '''
    file_field = entry['fields'].get('file')
    if file_field:
        try:
            os.remove(file_field)
        except IOError:
            click.echo('This entry\'s file was missing')

    data.remove(entry)
java
public NumberExpression<Integer> numInteriorRing() {
    if (numInteriorRing == null) {
        numInteriorRing = Expressions.numberOperation(Integer.class, SpatialOps.NUM_INTERIOR_RING, mixin);
    }
    return numInteriorRing;
}
java
public static Table setPadding (final Padding padding, final Table table) {
    table.pad(padding.getTop(), padding.getLeft(), padding.getBottom(), padding.getRight());
    return table;
}
java
public static final boolean xor(boolean b1, boolean b2) {
    if (b1 == false && b2 == false) {
        return false;
    } else if (b1 == false && b2 == true) {
        return true;
    } else if (b1 == true && b2 == false) {
        return true;
    } else {
        return false;
    }
}
python
def process(self, response): """ Returns HTTP backend agnostic ``Response`` data. """ try: code = response.status_code # 204 - No Content if code == 204: body = None # add an error message to 402 errors elif code == 402: body = { "message": "Payment Required", "status": "error" } else: body = response.json() return Response(code, body, response.content, response) except ValueError: raise ZencoderResponseError(response, response.content)
java
public LoadBalancerTlsCertificateRenewalSummary withDomainValidationOptions(LoadBalancerTlsCertificateDomainValidationOption... domainValidationOptions) {
    if (this.domainValidationOptions == null) {
        setDomainValidationOptions(new java.util.ArrayList<LoadBalancerTlsCertificateDomainValidationOption>(domainValidationOptions.length));
    }
    for (LoadBalancerTlsCertificateDomainValidationOption ele : domainValidationOptions) {
        this.domainValidationOptions.add(ele);
    }
    return this;
}
python
def get_source_scanner(self, node): """Fetch the source scanner for the specified node NOTE: "self" is the target being built, "node" is the source file for which we want to fetch the scanner. Implies self.has_builder() is true; again, expect to only be called from locations where this is already verified. This function may be called very often; it attempts to cache the scanner found to improve performance. """ scanner = None try: scanner = self.builder.source_scanner except AttributeError: pass if not scanner: # The builder didn't have an explicit scanner, so go look up # a scanner from env['SCANNERS'] based on the node's scanner # key (usually the file extension). scanner = self.get_env_scanner(self.get_build_env()) if scanner: scanner = scanner.select(node) return scanner