Dataset columns: language (stringclasses, 2 values) and func_code_string (stringlengths, 63 to 466k)
python
def remove(self, *widgets):
    '''
    Remove @widgets from the blitting hand of the Container(). Each arg
    must be a Widget(), a fellow Container(), or an iterable. Else, things
    get ugly...
    '''
    for w in widgets:
        if w in self.widgets:
            self.widgets.remove(w)
            w.remove_internal(self)
        elif w in self.containers:
            self.containers.remove(w)
            w.remove_internal(self)
        else:
            # If it isn't an iterable, we'll get an error here.
            # Desired effect.
            self.remove(*w)
java
public void marshall(CancelWorkflowExecutionDecisionAttributes cancelWorkflowExecutionDecisionAttributes, ProtocolMarshaller protocolMarshaller) {
    if (cancelWorkflowExecutionDecisionAttributes == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(cancelWorkflowExecutionDecisionAttributes.getDetails(), DETAILS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def get_controversial(self, *args, **kwargs):
    """Return a get_content generator for controversial submissions.

    Corresponds to submissions provided by
    ``https://www.reddit.com/controversial/`` for the session.

    The additional parameters are passed directly into
    :meth:`.get_content`. Note: the `url` parameter cannot be altered.

    """
    return self.get_content(self.config['controversial'], *args, **kwargs)
java
public TaskDefinition withRequiresCompatibilities(Compatibility... requiresCompatibilities) {
    com.amazonaws.internal.SdkInternalList<String> requiresCompatibilitiesCopy =
            new com.amazonaws.internal.SdkInternalList<String>(requiresCompatibilities.length);
    for (Compatibility value : requiresCompatibilities) {
        requiresCompatibilitiesCopy.add(value.toString());
    }
    if (getRequiresCompatibilities() == null) {
        setRequiresCompatibilities(requiresCompatibilitiesCopy);
    } else {
        getRequiresCompatibilities().addAll(requiresCompatibilitiesCopy);
    }
    return this;
}
python
def reversed_blocks(handle, blocksize=4096):
    """Generate blocks of file's contents in reverse order."""
    handle.seek(0, os.SEEK_END)
    here = handle.tell()
    while 0 < here:
        delta = min(blocksize, here)
        here -= delta
        handle.seek(here, os.SEEK_SET)
        yield handle.read(delta)
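A natural companion built on top of this generator is a reversed line iterator; the sketch below carries the partial line across block boundaries (a hedged example, assuming the file is opened in text mode; 'example.log' is a made-up path):

import os

def reversed_lines(handle):
    """Yield lines of a file in reverse order, using reversed_blocks."""
    tail = ''  # partial line carried across block boundaries
    for block in reversed_blocks(handle):
        lines = (block + tail).splitlines(True)
        tail = lines[0]  # may be incomplete; its start is in an earlier block
        for line in reversed(lines[1:]):
            yield line
    if tail:
        yield tail

with open('example.log') as handle:
    for line in reversed_lines(handle):
        print(line, end='')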
python
def prepare_metadata(metadata, source_metadata=None, append=False, append_list=False):
    """Prepare a metadata dict for an
    :class:`S3PreparedRequest <S3PreparedRequest>` or
    :class:`MetadataPreparedRequest <MetadataPreparedRequest>` object.

    :type metadata: dict
    :param metadata: The metadata dict to be prepared.

    :type source_metadata: dict
    :param source_metadata: (optional) The source metadata for the item
                            being modified.

    :rtype: dict
    :returns: A filtered metadata dict to be used for generating IA
              S3 and Metadata API requests.
    """
    # Make a deepcopy of source_metadata if it exists. A deepcopy is
    # necessary to avoid modifying the original dict.
    source_metadata = {} if not source_metadata else copy.deepcopy(source_metadata)
    prepared_metadata = {}

    # Functions for dealing with metadata keys containing indexes.
    def get_index(key):
        match = re.search(r'(?<=\[)\d+(?=\])', key)
        if match is not None:
            return int(match.group())

    def rm_index(key):
        return key.split('[')[0]

    # Create indexed_keys counter dict. i.e.: {'subject': 3} -- subject
    # (with the index removed) appears 3 times in the metadata dict.
    indexed_keys = {}
    for key in metadata:
        # Convert number values to strings!
        if isinstance(metadata[key], (six.integer_types, float, complex)):
            metadata[key] = str(metadata[key])
        if get_index(key) is None:
            continue
        count = len([x for x in metadata if rm_index(x) == rm_index(key)])
        indexed_keys[rm_index(key)] = count

    # Initialize the values for all indexed_keys.
    for key in indexed_keys:
        # Increment the counter so we know how many values the final
        # value in prepared_metadata should have.
        indexed_keys[key] += len(source_metadata.get(key, []))
        # Initialize the value in the prepared_metadata dict.
        prepared_metadata[key] = source_metadata.get(key, [])
        if not isinstance(prepared_metadata[key], list):
            prepared_metadata[key] = [prepared_metadata[key]]
        # Fill the value of the prepared_metadata key with None values
        # so all indexed items can be indexed in order.
        while len(prepared_metadata[key]) < indexed_keys[key]:
            prepared_metadata[key].append(None)

    # Index all items which contain an index.
    for key in metadata:
        # Insert values from indexed keys into prepared_metadata dict.
        if (rm_index(key) in indexed_keys):
            try:
                prepared_metadata[rm_index(key)][get_index(key)] = metadata[key]
            except IndexError:
                prepared_metadata[rm_index(key)].append(metadata[key])
        # If append_list is True, append value to source_metadata value.
        elif append_list and source_metadata.get(key):
            if not isinstance(metadata[key], list):
                metadata[key] = [metadata[key]]
            for v in metadata[key]:
                if not isinstance(source_metadata[key], list):
                    if v in [source_metadata[key]]:
                        continue
                else:
                    if v in source_metadata[key]:
                        continue
                if not isinstance(source_metadata[key], list):
                    prepared_metadata[key] = [source_metadata[key]]
                else:
                    prepared_metadata[key] = source_metadata[key]
                prepared_metadata[key].append(v)
        # If append is True, append value to source_metadata value.
        elif append and source_metadata.get(key):
            prepared_metadata[key] = '{0} {1}'.format(
                source_metadata[key], metadata[key])
        else:
            prepared_metadata[key] = metadata[key]

    # Remove values from metadata if value is REMOVE_TAG.
    _done = []
    for key in indexed_keys:
        # Filter None values from items with arrays as values.
        prepared_metadata[key] = [v for v in prepared_metadata[key] if v]
        # Only filter the given indexed key if it has not already been
        # filtered.
        if key not in _done:
            indexes = []
            for k in metadata:
                if not get_index(k):
                    continue
                elif not rm_index(k) == key:
                    continue
                elif not metadata[k] == 'REMOVE_TAG':
                    continue
                else:
                    indexes.append(get_index(k))
            # Delete indexed values in reverse to not throw off the
            # subsequent indexes.
            for i in sorted(indexes, reverse=True):
                del prepared_metadata[key][i]
            _done.append(key)

    return prepared_metadata
java
public BufferedImage createImage(File file) throws IOException {
    resetAvailabilityFlags();
    this.data = PELoader.loadPE(file);
    image = new BufferedImage(fileWidth, height, IMAGE_TYPE);
    drawSections();

    Overlay overlay = new Overlay(data);
    if (overlay.exists()) {
        long overlayOffset = overlay.getOffset();
        drawPixels(colorMap.get(OVERLAY), overlayOffset, withMinLength(overlay.getSize()));
        overlayAvailable = true;
    }

    drawPEHeaders();
    drawSpecials();
    drawResourceTypes();

    assert image != null;
    assert image.getWidth() == fileWidth;
    assert image.getHeight() == height;

    return image;
}
java
@Override
public void connect() throws Exception {
    String host = sysConfig.getProperty(RestCommunication.SYSPROP_HOST, "localhost");
    int port = sysConfig.getIntProperty(RestCommunication.SYSPROP_PORT, 443);
    RestAssured.useRelaxedHTTPSValidation();
    this.reqSpec = RestAssured.given();
    if (port % 1000 == 443) {
        this.reqSpec = this.reqSpec.baseUri("https://" + host + ":" + port);
    } else {
        this.reqSpec = this.reqSpec.baseUri("http://" + host + ":" + port);
    }

    String user = sysConfig.getProperty(RestCommunication.SYSPROP_USER);
    String pass = sysConfig.getProperty(RestCommunication.SYSPROP_PASS);
    if (null != user && null != pass) {
        this.reqSpec = this.reqSpec.auth().preemptive().basic(user, pass);
    }

    String clientCert = sysConfig.getProperty(RestCommunication.SYSPROP_CLIENT_CERT);
    String clientCertPass = sysConfig.getProperty(RestCommunication.SYSPROP_CLIENT_CERT_PASS);
    if (null != clientCert && null != clientCertPass) {
        this.reqSpec = this.reqSpec.auth().certificate(clientCert, clientCertPass);
    }
}
java
public static String evaluate(CharSequence text) {
    if (text == null) return null;
    Matcher m = JS_PATTERN.matcher(text);
    StringBuffer ret = new StringBuffer();
    final boolean isNas = scriptEngine.getFactory().getEngineName().toLowerCase().contains("nashorn");
    while (m.find()) {
        String source = (isNas
                ? "load(\"nashorn:mozilla_compat.js\");\nimportPackage(java.lang); "
                : "\nimportPackage(java.lang); ") + m.group(1);
        try {
            Object obj = scriptEngine.eval(source, bindings);
            if (obj != null) {
                //log("Evaled [%s] --> [%s]", source, obj);
                m.appendReplacement(ret, obj.toString());
            } else {
                m.appendReplacement(ret, "");
            }
        } catch (Exception ex) {
            ex.printStackTrace(System.err);
            throw new IllegalArgumentException("Failed to evaluate expression [" + text + "]");
        }
    }
    m.appendTail(ret);
    return ret.toString();
}
java
public static boolean isInstalled(int major, int minor) {
    try {
        // see http://support.microsoft.com/?scid=kb;en-us;315291 for the basic algorithm
        // observation in my registry shows that the actual key name can be things like "v2.0 SP1"
        // or "v2.0.50727", so the regexp is written to accommodate this.
        RegistryKey key = RegistryKey.LOCAL_MACHINE.openReadonly("SOFTWARE\\Microsoft\\.NETFramework");
        try {
            for (String keyName : key.getSubKeys()) {
                if (matches(keyName, major, minor))
                    return true;
            }
            return false;
        } finally {
            key.dispose();
        }
    } catch (JnaException e) {
        if (e.getErrorCode() == 2) // thrown when openReadonly fails because the key doesn't exist.
            return false;
        throw e;
    }
}
java
public final BaseDescr equalityExpression() throws RecognitionException {
    BaseDescr result = null;

    Token op = null;
    BaseDescr left = null;
    BaseDescr right = null;

    try {
        // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:300:3: (left= instanceOfExpression ( (op= EQUALS |op= NOT_EQUALS ) right= instanceOfExpression )* )
        // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:300:5: left= instanceOfExpression ( (op= EQUALS |op= NOT_EQUALS ) right= instanceOfExpression )*
        {
            pushFollow(FOLLOW_instanceOfExpression_in_equalityExpression1477);
            left = instanceOfExpression();
            state._fsp--;
            if (state.failed) return result;
            if (state.backtracking == 0) { if (buildDescr) { result = left; } }

            // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:301:3: ( (op= EQUALS |op= NOT_EQUALS ) right= instanceOfExpression )*
            loop32:
            while (true) {
                int alt32 = 2;
                int LA32_0 = input.LA(1);
                if ((LA32_0 == EQUALS || LA32_0 == NOT_EQUALS)) {
                    alt32 = 1;
                }

                switch (alt32) {
                case 1:
                    // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:301:5: (op= EQUALS |op= NOT_EQUALS ) right= instanceOfExpression
                    {
                        // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:301:5: (op= EQUALS |op= NOT_EQUALS )
                        int alt31 = 2;
                        int LA31_0 = input.LA(1);
                        if ((LA31_0 == EQUALS)) {
                            alt31 = 1;
                        } else if ((LA31_0 == NOT_EQUALS)) {
                            alt31 = 2;
                        } else {
                            if (state.backtracking > 0) { state.failed = true; return result; }
                            NoViableAltException nvae = new NoViableAltException("", 31, 0, input);
                            throw nvae;
                        }

                        switch (alt31) {
                        case 1:
                            // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:301:7: op= EQUALS
                            {
                                op = (Token) match(input, EQUALS, FOLLOW_EQUALS_in_equalityExpression1489);
                                if (state.failed) return result;
                            }
                            break;
                        case 2:
                            // src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:301:19: op= NOT_EQUALS
                            {
                                op = (Token) match(input, NOT_EQUALS, FOLLOW_NOT_EQUALS_in_equalityExpression1495);
                                if (state.failed) return result;
                            }
                            break;
                        }

                        if (state.backtracking == 0) {
                            helper.setHasOperator(true);
                            if (input.LA(1) != DRL6Lexer.EOF)
                                helper.emit(Location.LOCATION_LHS_INSIDE_CONDITION_ARGUMENT);
                        }
                        pushFollow(FOLLOW_instanceOfExpression_in_equalityExpression1511);
                        right = instanceOfExpression();
                        state._fsp--;
                        if (state.failed) return result;
                        if (state.backtracking == 0) {
                            if (buildDescr) {
                                result = new RelationalExprDescr((op != null ? op.getText() : null), false, null, left, right);
                            }
                        }
                    }
                    break;

                default:
                    break loop32;
                }
            }
        }
    } catch (RecognitionException re) {
        throw re;
    } finally {
        // do for sure before leaving
    }
    return result;
}
python
def plot_learning_curve(clf, X, y, title='Learning Curve', cv=None,
                        shuffle=False, random_state=None,
                        train_sizes=None, n_jobs=1, scoring=None,
                        ax=None, figsize=None, title_fontsize="large",
                        text_fontsize="medium"):
    """Generates a plot of the train and test learning curves for a classifier.

    Args:
        clf: Classifier instance that implements ``fit`` and ``predict``
            methods.

        X (array-like, shape (n_samples, n_features)):
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y (array-like, shape (n_samples) or (n_samples, n_features)):
            Target relative to X for classification or regression;
            None for unsupervised learning.

        title (string, optional): Title of the generated plot. Defaults to
            "Learning Curve"

        cv (int, cross-validation generator, iterable, optional): Determines
            the cross-validation strategy to be used for splitting.

            Possible inputs for cv are:
              - None, to use the default 3-fold cross-validation,
              - integer, to specify the number of folds.
              - An object to be used as a cross-validation generator.
              - An iterable yielding train/test splits.

            For integer/None inputs, if ``y`` is binary or multiclass,
            :class:`StratifiedKFold` is used. If the estimator is not a
            classifier or if ``y`` is neither binary nor multiclass,
            :class:`KFold` is used.

        shuffle (bool, optional): Determines whether to shuffle the training
            data before splitting using cross-validation. Defaults to False.

        random_state (int :class:`RandomState`): Pseudo-random number
            generator state used for random sampling.

        train_sizes (iterable, optional): Determines the training sizes used
            to plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)``
            is used.

        n_jobs (int, optional): Number of jobs to run in parallel.
            Defaults to 1.

        scoring (string, callable or None, optional): default: None
            A string (see scikit-learn model evaluation documentation) or a
            scorer callable object / function with signature
            scorer(estimator, X, y).

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> rf = RandomForestClassifier()
        >>> skplt.estimators.plot_learning_curve(rf, X, y)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_learning_curve.png
           :align: center
           :alt: Learning Curve
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    if train_sizes is None:
        train_sizes = np.linspace(.1, 1.0, 5)

    ax.set_title(title, fontsize=title_fontsize)
    ax.set_xlabel("Training examples", fontsize=text_fontsize)
    ax.set_ylabel("Score", fontsize=text_fontsize)
    train_sizes, train_scores, test_scores = learning_curve(
        clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,
        scoring=scoring, shuffle=shuffle, random_state=random_state)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    ax.grid()
    ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
                    train_scores_mean + train_scores_std, alpha=0.1, color="r")
    ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
                    test_scores_mean + test_scores_std, alpha=0.1, color="g")
    ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
            label="Training score")
    ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
            label="Cross-validation score")
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc="best", fontsize=text_fontsize)
    return ax
python
def evaluate_expression(expression, vars):
    '''evaluate an expression'''
    try:
        v = eval(expression, globals(), vars)
    except NameError:
        return None
    except ZeroDivisionError:
        return None
    return v
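A brief usage sketch with made-up variable names, showing the deliberate None-on-error behaviour:

vars = {'altitude': 150.0, 'speed': 30.0}
print(evaluate_expression('altitude / speed', vars))  # 5.0
print(evaluate_expression('altitude / rate', vars))   # None (NameError swallowed)
print(evaluate_expression('altitude / 0', vars))      # None (ZeroDivisionError swallowed)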
python
def temp_land(self, pcps, water):
    """Derive high/low percentiles of land temperature

    Equations 12 and 13 (Zhu and Woodcock, 2012)

    Parameters
    ----------
    pcps: ndarray
        potential cloud pixels, boolean
    water: ndarray
        water mask, boolean
    tirs1: ndarray

    Output
    ------
    tuple:
        17.5 and 82.5 percentile temperature over clearsky land
    """
    # eq 12
    clearsky_land = ~(pcps | water)

    # use clearsky_land to mask tirs1
    clear_land_temp = self.tirs1.copy()
    clear_land_temp[~clearsky_land] = np.nan
    clear_land_temp[~self.mask] = np.nan

    # take 17.5 and 82.5 percentile, eq 13
    low, high = np.nanpercentile(clear_land_temp, (17.5, 82.5))

    return low, high
python
def write(self, source=None, rows=None, **kwargs):
    '''
    Transform structural metadata, i.e. codelists, concept-schemes,
    lists of dataflow definitions or category-schemes
    from a :class:`pandasdmx.model.StructureMessage` instance into a pandas DataFrame.
    This method is called by :meth:`pandasdmx.api.Response.write`. It is not
    part of the public-facing API. Yet, certain kwargs are propagated from
    there.

    Args:
        source(pandasdmx.model.StructureMessage): a
            :class:`pandasdmx.model.StructureMessage` instance.
        rows(str): sets the desired content to be extracted from the
            StructureMessage. Must be a name of an attribute of the
            StructureMessage. The attribute must be an instance of `dict`
            whose keys are strings. These will be interpreted as ID's and
            used for the MultiIndex of the DataFrame to be returned. Values
            can be either instances of `dict` such as for codelists and
            categoryscheme, or simple nameable objects such as for
            dataflows. In the latter case, the DataFrame will have a flat
            index. (default: depends on content found in Message. Common
            is 'codelist')
        columns(str, list): if str, it denotes the attribute of attributes
            of the values (nameable SDMX objects such as Code or
            ConceptScheme) that will be stored in the DataFrame. If a list,
            it must contain strings that are valid attribute values.
            Defaults to: ['name', 'description']
        lang(str): locale identifier. Specifies the preferred language for
            international strings such as names. Default is 'en'.
    '''
    # Set convenient default values for args
    # is rows a string?
    if rows is not None and not isinstance(rows, (list, tuple)):
        rows = [rows]
        return_df = True
    elif isinstance(rows, (list, tuple)) and len(rows) == 1:
        return_df = True
    else:
        return_df = False
    if rows is None:
        rows = [i for i in self._row_content if hasattr(source, i)]
    # Generate the DataFrame or -Frames and store them in a DictLike with
    # content-type names as keys
    frames = DictLike(
        {r: self._make_dataframe(source, r, **kwargs) for r in rows})
    if return_df:
        # There is only one item. So return the only value.
        return frames.any()
    else:
        return frames
java
public static <V extends FeatureVector<?>> VectorFieldTypeInformation<V> assumeVectorField(Relation<V> relation) {
    try {
        return ((VectorFieldTypeInformation<V>) relation.getDataTypeInformation());
    } catch (Exception e) {
        throw new UnsupportedOperationException("Expected a vector field, got type information: " + relation.getDataTypeInformation().toString(), e);
    }
}
python
def fetch_git(repo, filename, branch="master", hash=None):
    """Fetch a gist and return the contents as a string."""
    url = git_url(repo, filename, branch, hash)
    response = requests.get(url)
    if response.status_code != 200:
        raise Exception('Got a bad status looking up gist.')
    body = response.text
    if not body:
        raise Exception('Unable to get the gist contents.')
    return body
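A hedged usage sketch; git_url is the snippet's own URL-building helper, so the repo and filename below are purely illustrative:

readme = fetch_git('octocat/Hello-World', 'README.md', branch='master')
print(readme[:80])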
java
public Observable<WorkflowRunInner> getAsync(String resourceGroupName, String workflowName, String runName) {
    return getWithServiceResponseAsync(resourceGroupName, workflowName, runName).map(new Func1<ServiceResponse<WorkflowRunInner>, WorkflowRunInner>() {
        @Override
        public WorkflowRunInner call(ServiceResponse<WorkflowRunInner> response) {
            return response.body();
        }
    });
}
python
def angle(self, vector):
    """Return the angle between two vectors in degrees."""
    return math.degrees(
        math.acos(
            self.dot(vector) /
            (self.magnitude() * vector.magnitude())
        )
    )
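One caveat worth a sketch: floating-point rounding can push the cosine fractionally outside [-1, 1] for near-parallel vectors, making math.acos raise ValueError. A defensive standalone variant (angle_between is a hypothetical name; plain tuples are used so it is independent of the snippet's vector class):

import math

def angle_between(u, v):
    """Angle between two vectors in degrees, clamped against rounding error."""
    dot = sum(a * b for a, b in zip(u, v))
    mag = math.sqrt(sum(a * a for a in u)) * math.sqrt(sum(b * b for b in v))
    cos_theta = max(-1.0, min(1.0, dot / mag))  # clamp into acos's domain
    return math.degrees(math.acos(cos_theta))

print(angle_between((1, 0), (0, 1)))  # 90.0
print(angle_between((1, 0), (1, 0)))  # 0.0, no domain error from rounding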
python
def _load_folder(folder_entry, corpus):
    """ Load the given subfolder into the corpus (e.g. bed, one, ...) """
    for wav_path in glob.glob(os.path.join(folder_entry.path, '*.wav')):
        wav_name = os.path.basename(wav_path)
        basename, __ = os.path.splitext(wav_name)
        command = folder_entry.name

        file_idx = '{}_{}'.format(basename, command)
        issuer_idx = str(basename).split('_', maxsplit=1)[0]

        corpus.new_file(wav_path, file_idx)

        if issuer_idx not in corpus.issuers.keys():
            corpus.import_issuers(issuers.Speaker(
                issuer_idx
            ))

        utt = corpus.new_utterance(file_idx, file_idx, issuer_idx)
        labels = annotations.LabelList.create_single(command, idx=audiomate.corpus.LL_WORD_TRANSCRIPT)
        utt.set_label_list(labels)
java
@Override
public void setNameMap(Map<java.util.Locale, String> nameMap, java.util.Locale defaultLocale) {
    _commerceTaxMethod.setNameMap(nameMap, defaultLocale);
}
python
def __prepare_record(self, record, enabled_fields):
    """Prepare log record with given fields."""
    message = record.getMessage()
    if hasattr(record, 'prefix'):
        message = "{}{}".format((str(record.prefix) + ' ') if record.prefix else '', message)

    obj = {
        'name': record.name,
        'asctime': self.formatTime(record, self.datefmt),
        'created': record.created,
        'msecs': record.msecs,
        'relativeCreated': record.relativeCreated,
        'levelno': record.levelno,
        'levelname': self._level_names[record.levelname],
        'thread': record.thread,
        'threadName': record.threadName,
        'process': record.process,
        'pathname': record.pathname,
        'filename': record.filename,
        'module': record.module,
        'lineno': record.lineno,
        'funcName': record.funcName,
        'message': message,
        'exception': record.exc_info[0].__name__ if record.exc_info else None,
        'stacktrace': record.exc_text,
    }

    if not isinstance(enabled_fields, list):
        enabled_fields = [str(enabled_fields)]

    ef = {}
    for item in enabled_fields:
        if not isinstance(item, (str, tuple)):
            continue
        if not isinstance(item, tuple):
            ef[item] = item
        else:
            ef[item[0]] = item[1]

    result = {}
    for key, val in obj.items():
        if key in ef:
            result[ef[key]] = val
    return result
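The enabled_fields convention deserves a tiny illustration: a plain string keeps a field under its own name, while a (source, alias) tuple renames it in the output. A hedged sketch of the intended call shape (the example record values are made up):

# Keep 'message' as-is; expose 'levelname' under the shorter key 'level'.
enabled_fields = ['message', ('levelname', 'level')]
# For a WARNING-level record with message "disk low", __prepare_record
# would return something like: {'message': 'disk low', 'level': 'WARNING'}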
python
def iget_list_column_slice(list_, start=None, stop=None, stride=None):
    """ iterator version of get_list_column """
    if isinstance(start, slice):
        slice_ = start
    else:
        slice_ = slice(start, stop, stride)
    return (row[slice_] for row in list_)
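A quick usage sketch: either positional bounds or a ready-made slice object selects the columns.

rows = [(1, 'a', True), (2, 'b', False), (3, 'c', True)]
print(list(iget_list_column_slice(rows, 0, 2)))
# [(1, 'a'), (2, 'b'), (3, 'c')]
print(list(iget_list_column_slice(rows, slice(1, None))))
# [('a', True), ('b', False), ('c', True)]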
java
public static int[] selectPyramidScale(int imageWidth, int imageHeight, int minSize) {
    int w = Math.max(imageWidth, imageHeight);
    int maxScale = w / minSize;
    int n = 1;
    int scale = 1;
    while (scale * 2 < maxScale) {
        n++;
        scale *= 2;
    }

    int ret[] = new int[n];
    scale = 1;
    for (int i = 0; i < n; i++) {
        ret[i] = scale;
        scale *= 2;
    }

    return ret;
}
java
public JBBPTextWriter Long(final long[] values, int off, int len) throws IOException {
    while (len-- > 0) {
        this.Long(values[off++]);
    }
    return this;
}
java
public EmbeddedGobblin enableMetrics() {
    this.usePlugin(new GobblinMetricsPlugin.Factory());
    this.sysConfig(ConfigurationKeys.METRICS_ENABLED_KEY, Boolean.toString(true));
    return this;
}
python
def generate_bytes(cls, payload, fin_bit, opcode, mask_payload):
    """ Format data to string (buffered_bytes) to send to server.
    """
    # the first byte contains the FIN bit, the 3 RSV bits and the
    # 4 opcode bits and for a client will *always* be 1000 0001 (or 129).
    # so we want the first byte to look like...
    #
    #  1 0 0 0 0 0 0 1   (1 is a text frame)
    # +-+-+-+-+-------+
    # |F|R|R|R| opcode|
    # |I|S|S|S|       |
    # |N|V|V|V|       |
    # | |1|2|3|       |
    # +-+-+-+-+-------+
    #
    # note that because all RSV bits are zero, we can ignore them

    # this shifts each bit into position and bitwise ORs them together,
    # using the struct module to pack them as incoming network bytes
    frame = pack('!B', ((fin_bit << 7) | opcode))

    # the second byte - and maybe the 7 after this, we'll use to tell
    # the server how long our payload is.
    #
    # +-+-------------+-------------------------------+
    # |M| Payload len |    Extended payload length    |
    # |A|     (7)     |            (16/64)            |
    # |S|             |   (if payload len==126/127)   |
    # |K|             |                               |
    # +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
    # |     Extended payload length continued, if payload len == 127  |
    # + - - - - - - - - - - - - - - - +-------------------------------+

    # the mask is always included with client -> server, so the first bit
    # of the second byte is always 1 which flags that the data is masked,
    # i.e. encoded
    if mask_payload:
        mask_bit = 1 << 7
    else:
        mask_bit = 0 << 7

    # next we have to | this bit with the payload length. note that we
    # ensure that the payload is utf-8 encoded before we take the length
    # because unicode characters can be >1 bytes in length and lead to
    # bugs if we just do ``len(payload)``.
    length = len(payload.encode('utf-8'))
    if length >= Frame.MAX_LENGTH:
        raise WebsocktProtocolError("data is too long")

    # the second byte contains the payload length and mask
    if length < Frame.LENGTH_7:
        # we can simply represent payload length with first 7 bits
        frame += pack('!B', (mask_bit | length))
    elif length < Frame.LENGTH_16:
        frame += pack('!B', (mask_bit | 126)) + pack('!H', length)
    else:
        frame += pack('!B', (mask_bit | 127)) + pack('!Q', length)

    if mask_payload:
        # we always mask frames from the client to server
        # use a string of n random buffered_bytes for the mask
        mask_key = os.urandom(4)
        mask_data = cls.generate_mask(mask_key=mask_key, data=payload)
        mask = mask_key + mask_data
        frame += mask
    else:
        frame += bytearray(payload, 'utf-8')

    return bytearray(frame)
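A minimal usage sketch, assuming generate_bytes is a classmethod on a Frame class whose constants follow RFC 6455 (LENGTH_7 = 126, LENGTH_16 = 2**16) and 0x1 is the text opcode:

frame_bytes = Frame.generate_bytes(
    payload='hello',
    fin_bit=1,         # final (and only) fragment
    opcode=0x1,        # text frame
    mask_payload=True  # required for client -> server frames
)
# first byte: FIN=1, RSV=000, opcode=0001 -> 0x81
assert frame_bytes[0] == 0x81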
java
public void processResponseEvent(HttpClientNIOResponseEvent event, HttpClientNIORequestActivityImpl activity) {
    HttpClientNIORequestActivityHandle ah = new HttpClientNIORequestActivityHandle(activity.getId());

    if (tracer.isFineEnabled())
        tracer.fine("==== FIRING ResponseEvent EVENT TO LOCAL SLEE, Event: " + event + " ====");

    try {
        resourceAdaptorContext.getSleeEndpoint().fireEvent(ah, fireableEventType, event, null, null, EVENT_FLAGS);
    } catch (Throwable e) {
        tracer.severe(e.getMessage(), e);
    }
}
python
def ut_datetime_to_mjd(self, utDatetime):
    """*ut datetime to mjd*

    If the date given has no time associated with it (e.g. ``20160426``),
    then the datetime assumed is ``20160426 00:00:00.0``.

    Precision should be respected.

    **Key Arguments:**
        - ``utDatetime`` -- UT datetime. Can accept various formats e.g. ``201604261444``, ``20160426``, ``20160426144444.5452``, ``2016-04-26 14:44:44.234``, ``20160426 14h44m44.432s``

    **Return:**
        - ``mjd`` -- the MJD

    .. todo ::

        - replace getMJDFromSqlDate in all code

    **Usage:**

        .. code-block:: python

            from astrocalc.times import conversions
            converter = conversions(
                log=log
            )
            mjd = converter.ut_datetime_to_mjd(utDatetime="20160426t1446")
            print mjd

            # OUT: 57504.6153

            mjd = converter.ut_datetime_to_mjd(utDatetime="2016-04-26 14:44:44.234")
            print mjd

            # OUT: 57504.61440
    """
    self.log.info('starting the ``ut_datetime_to_mjd`` method')

    import time
    import re

    mjd = None

    # TRIM WHITESPACE FROM AROUND STRING
    utDatetime = utDatetime.strip()

    # DATETIME REGEX
    matchObject = re.match(
        r'^(?P<year>\d{4})\D?(?P<month>(0\d|1[0-2]))\D?(?P<day>([0-2]\d|3[0-1])(\.\d+)?)(\D?(?P<hours>([0-1]\d|2[0-3]))\D?(?P<minutes>\d{2})(\D?(?P<seconds>\d{2}(\.\d*?)?))?)?s?$', utDatetime)

    # RETURN ERROR IF REGEX NOT MATCHED
    if not matchObject:
        self.log.error(
            'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())
        raise IOError(
            'UT Datetime is not in a recognised format. Input value was `%(utDatetime)s`' % locals())

    year = matchObject.group("year")
    month = matchObject.group("month")
    day = matchObject.group("day")
    hours = matchObject.group("hours")
    minutes = matchObject.group("minutes")
    seconds = matchObject.group("seconds")

    # CLEAN NUMBERS AND SET OUTPUT PRECISION
    if "." in day:
        fhours = (float(day) - int(float(day))) * 24
        hours = int(fhours)
        fminutes = (fhours - hours) * 60
        minutes = int(fminutes)
        seconds = (fminutes - minutes) * 60
        precision = len(repr(day).split(".")[-1])
    elif not hours:
        hours = "00"
        minutes = "00"
        seconds = "00"
        precision = 1
    elif not seconds:
        seconds = "00"
        # PRECISION TO NEAREST MIN i.e. 0.000694444 DAYS
        precision = 4
    else:
        if "." not in seconds:
            precision = 5
        else:
            decLen = len(seconds.split(".")[-1])
            precision = 5 + decLen

    # CONVERT VALUES TO FLOAT
    seconds = float(seconds)
    year = float(year)
    month = float(month)
    day = float(day)
    hours = float(hours)
    minutes = float(minutes)

    # DETERMINE EXTRA TIME (SMALLER THAN A SEC)
    extraTime = 0.
    if "." in repr(seconds):
        extraTime = float("." + repr(seconds).split(".")
                          [-1]) / (24. * 60. * 60.)

    # CONVERT TO UNIXTIME THEN MJD
    t = (int(year), int(month), int(day), int(hours),
         int(minutes), int(seconds), 0, 0, 0)
    unixtime = int(time.mktime(t))
    mjd = (unixtime / 86400.0 - 2400000.5 + 2440587.5) + extraTime
    mjd = "%0.*f" % (precision, mjd)

    self.log.info('completed the ``ut_datetime_to_mjd`` method')
    return mjd
java
private CmsPushButton createButton(String buttonText) {
    CmsPushButton button = new CmsPushButton();
    button.setTitle(buttonText);
    button.setText(buttonText);
    button.setSize(I_CmsButton.Size.medium);
    button.setUseMinWidth(true);
    return button;
}
python
def _filter_startswith(self, term, field_name, field_type, is_not):
    """
    Returns a startswith query on the un-stemmed term.
    Assumes term is not a list.
    """
    if field_type == 'text':
        if len(term.split()) == 1:
            term = '^ %s*' % term
            query = self.backend.parse_query(term)
        else:
            term = '^ %s' % term
            query = self._phrase_query(term.split(), field_name, field_type)
    else:
        term = '^%s*' % term
        query = self.backend.parse_query(term)

    if is_not:
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
    return query
python
def add_chassis(self, chassis, port=22611, password='xena'):
    """ Add chassis.

    XenaManager-2G -> Add Chassis.

    :param chassis: chassis IP address
    :param port: chassis port number
    :param password: chassis password
    :return: newly created chassis
    :rtype: xenamanager.xena_app.XenaChassis
    """
    if chassis not in self.chassis_list:
        try:
            XenaChassis(self, chassis, port, password)
        except Exception as error:
            self.objects.pop('{}/{}'.format(self.owner, chassis))
            raise error
    return self.chassis_list[chassis]
java
public long getCardinality() {
    if (pos > 0) {
        return totalHits;
    }
    try {
        EsResponse res = client.search(index, type, query);
        Document hits = (Document) res.get("hits");
        totalHits = hits.getInteger("total");
        return totalHits;
    } catch (Exception e) {
        throw new EsIndexException(e);
    }
}
python
def calculate_ethinca_metric_comps(metricParams, ethincaParams, mass1, mass2,
                                   spin1z=0., spin2z=0., full_ethinca=True):
    """
    Calculate the Gamma components needed to use the ethinca metric.
    At present this outputs the standard TaylorF2 metric over the end time
    and chirp times \tau_0 and \tau_3.
    A desirable upgrade might be to use the \chi coordinates [defined WHERE?]
    for metric distance instead of \tau_0 and \tau_3.

    The lower frequency cutoff is currently hard-coded to be the same as the
    bank layout options fLow and f0 (which must be the same as each other).

    Parameters
    -----------
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams : ethincaParameters instance
        Structure holding options relevant to the ethinca metric computation.
    mass1 : float
        Mass of the heavier body in the considered template.
    mass2 : float
        Mass of the lighter body in the considered template.
    spin1z : float (optional, default=0)
        Spin of the heavier body in the considered template.
    spin2z : float (optional, default=0)
        Spin of the lighter body in the considered template.
    full_ethinca : boolean (optional, default=True)
        If True calculate the ethinca components in all 3 directions (mass1,
        mass2 and time). If False calculate only the time component (which is
        stored in Gamma0).

    Returns
    --------
    fMax_theor : float
        Value of the upper frequency cutoff given by the template parameters
        and the cutoff formula requested.
    gammaVals : numpy_array
        Array holding 6 independent metric components in
        (end_time, tau_0, tau_3) coordinates to be stored in the Gamma0-5
        slots of a SnglInspiral object.
    """
    if (float(spin1z) != 0. or float(spin2z) != 0.) and full_ethinca:
        raise NotImplementedError("Ethinca cannot at present be calculated "
                                  "for nonzero component spins!")
    f0 = metricParams.f0
    if f0 != metricParams.fLow:
        raise ValueError("If calculating ethinca the bank f0 value must be "
                         "equal to f-low!")
    if ethincaParams.fLow is not None and (
            ethincaParams.fLow != metricParams.fLow):
        raise NotImplementedError("An ethinca metric f-low different from the"
                                  " bank metric f-low is not supported!")

    twicePNOrder = ethinca_order_from_string(ethincaParams.pnOrder)

    piFl = PI * f0
    totalMass, eta = pnutils.mass1_mass2_to_mtotal_eta(mass1, mass2)
    totalMass = totalMass * MTSUN_SI
    v0cube = totalMass*piFl
    v0 = v0cube**(1./3.)

    # Get theoretical cutoff frequency and work out the closest
    # frequency for which moments were calculated
    fMax_theor = pnutils.frequency_cutoff_from_name(
        ethincaParams.cutoff, mass1, mass2, spin1z, spin2z)
    fMaxes = metricParams.moments['J4'].keys()
    fMaxIdx = abs(numpy.array(fMaxes, dtype=float) - fMax_theor).argmin()
    fMax = fMaxes[fMaxIdx]

    # Set the appropriate moments
    Js = numpy.zeros([18, 3], dtype=float)
    for i in range(18):
        Js[i, 0] = metricParams.moments['J%d' % (i)][fMax]
        Js[i, 1] = metricParams.moments['log%d' % (i)][fMax]
        Js[i, 2] = metricParams.moments['loglog%d' % (i)][fMax]

    # Compute the time-dependent metric term.
    two_pi_flower_sq = TWOPI * f0 * TWOPI * f0
    gammaVals = numpy.zeros([6], dtype=float)
    gammaVals[0] = 0.5 * two_pi_flower_sq * \
        (Js[(1, 0)] - (Js[(4, 0)]*Js[(4, 0)]))

    # If mass terms not required stop here
    if not full_ethinca:
        return fMax_theor, gammaVals

    # 3pN is a mess, so split it into pieces
    a0 = 11583231236531/200286535680 - 5*PI*PI - 107*GAMMA/14
    a1 = (-15737765635/130056192 + 2255*PI*PI/512)*eta
    a2 = (76055/73728)*eta*eta
    a3 = (-127825/55296)*eta*eta*eta
    alog = numpy.log(4*v0)  # Log terms are tricky - be careful

    # Get the Psi coefficients
    Psi = [{}, {}]  # Psi = numpy.zeros([2,8,2],dtype=float)
    Psi[0][0, 0] = 3/5
    Psi[0][2, 0] = (743/756 + 11*eta/3)*v0*v0
    Psi[0][3, 0] = 0.
    Psi[0][4, 0] = (-3058673/508032 + 5429*eta/504 + 617*eta*eta/24)\
        *v0cube*v0
    Psi[0][5, 1] = (-7729*PI/126)*v0cube*v0*v0/3
    Psi[0][6, 0] = (128/15)*(-3*a0 - a1 + a2 + 3*a3 + 107*(1+3*alog)/14)\
        *v0cube*v0cube
    Psi[0][6, 1] = (6848/35)*v0cube*v0cube/3
    Psi[0][7, 0] = (-15419335/63504 - 75703*eta/756)*PI*v0cube*v0cube*v0

    Psi[1][0, 0] = 0.
    Psi[1][2, 0] = (3715/12096 - 55*eta/96)/PI/v0
    Psi[1][3, 0] = -3/2
    Psi[1][4, 0] = (15293365/4064256 - 27145*eta/16128 - 3085*eta*eta/384)\
        *v0/PI
    Psi[1][5, 1] = (193225/8064)*v0*v0/3
    Psi[1][6, 0] = (4/PI)*(2*a0 + a1/3 - 4*a2/3 - 3*a3 - 107*(1+6*alog)/42)\
        *v0cube
    Psi[1][6, 1] = (-428/PI/7)*v0cube/3
    Psi[1][7, 0] = (77096675/1161216 + 378515*eta/24192 + 74045*eta*eta/8064)\
        *v0cube*v0

    # Set the appropriate moments
    Js = numpy.zeros([18, 3], dtype=float)
    for i in range(18):
        Js[i, 0] = metricParams.moments['J%d' % (i)][fMax]
        Js[i, 1] = metricParams.moments['log%d' % (i)][fMax]
        Js[i, 2] = metricParams.moments['loglog%d' % (i)][fMax]

    # Calculate the g matrix
    PNterms = [(0, 0), (2, 0), (3, 0), (4, 0), (5, 1), (6, 0), (6, 1), (7, 0)]
    PNterms = [term for term in PNterms if term[0] <= twicePNOrder]

    # Now can compute the mass-dependent gamma values
    for m in [0, 1]:
        for k in PNterms:
            gammaVals[1+m] += 0.5 * two_pi_flower_sq * Psi[m][k] * \
                (Js[(9-k[0], k[1])]
                 - Js[(12-k[0], k[1])] * Js[(4, 0)])

    g = numpy.zeros([2, 2], dtype=float)
    for (m, n) in [(0, 0), (0, 1), (1, 1)]:
        for k in PNterms:
            for l in PNterms:
                g[m, n] += Psi[m][k] * Psi[n][l] * \
                    (Js[(17-k[0]-l[0], k[1]+l[1])]
                     - Js[(12-k[0], k[1])] * Js[(12-l[0], l[1])])
        g[m, n] = 0.5 * two_pi_flower_sq * g[m, n]
        g[n, m] = g[m, n]

    gammaVals[3] = g[0, 0]
    gammaVals[4] = g[0, 1]
    gammaVals[5] = g[1, 1]

    return fMax_theor, gammaVals
java
private Map<String, String> launchArgs(int port, String remoteVMOptions) {
    Map<String, String> argumentName2Value = new HashMap<>();
    argumentName2Value.put("main", remoteAgent + " " + port);
    argumentName2Value.put("options", remoteVMOptions);
    return argumentName2Value;
}
python
def verify_pubkey_sig(self, message, sig):
    '''
    Wraps the verify_signature method so we have additional checks.

    :rtype: bool
    :return: Success or failure of public key verification
    '''
    if self.opts['master_sign_key_name']:
        path = os.path.join(self.opts['pki_dir'],
                            self.opts['master_sign_key_name'] + '.pub')

        if os.path.isfile(path):
            res = verify_signature(path, message, binascii.a2b_base64(sig))
        else:
            log.error(
                'Verification public key %s does not exist. You need to '
                'copy it from the master to the minions pki directory',
                os.path.basename(path)
            )
            return False
        if res:
            log.debug(
                'Successfully verified signature of master public key '
                'with verification public key %s',
                self.opts['master_sign_key_name'] + '.pub'
            )
            return True
        else:
            log.debug('Failed to verify signature of public key')
            return False
    else:
        log.error(
            'Failed to verify the signature of the message because the '
            'verification key-pairs name is not defined. Please make '
            'sure that master_sign_key_name is defined.'
        )
        return False
java
public IAtomContainer proposeStructure() {
    logger.debug("RandomGenerator->proposeStructure() Start");
    do {
        try {
            trial = molecule.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Could not clone IAtomContainer!" + e.getMessage());
        }
        mutate(trial);
        if (logger.isDebugEnabled()) {
            String s = "BondCounts: ";
            for (int f = 0; f < trial.getAtomCount(); f++) {
                s += trial.getConnectedBondsCount(trial.getAtom(f)) + " ";
            }
            logger.debug(s);
            s = "BondOrderSums: ";
            for (int f = 0; f < trial.getAtomCount(); f++) {
                s += trial.getBondOrderSum(trial.getAtom(f)) + " ";
            }
            logger.debug(s);
        }
    } while (trial == null || !ConnectivityChecker.isConnected(trial));
    proposedStructure = trial;
    return proposedStructure;
}
python
def __buttonEvent(event=None, buttons=None, virtual_event=None):
    """
    Handle an event that is generated by a person interacting with a
    button. It may be a button press or a key press.
    """
    # TODO: Replace globals with tkinter variables
    global boxRoot, __replyButtonText

    # Determine window location and save to global
    m = re.match("(\d+)x(\d+)([-+]\d+)([-+]\d+)", boxRoot.geometry())
    if not m:
        raise ValueError(
            "failed to parse geometry string: {}".format(boxRoot.geometry()))
    width, height, xoffset, yoffset = [int(s) for s in m.groups()]
    st.rootWindowPosition = '{0:+g}{1:+g}'.format(xoffset, yoffset)

    # print('{0}:{1}:{2}'.format(event, buttons, virtual_event))
    if virtual_event == 'cancel':
        for button_name, button in list(buttons.items()):
            if 'cancel_choice' in button:
                __replyButtonText = button['original_text']
        __replyButtonText = None
        boxRoot.quit()
        return

    if virtual_event == 'select':
        text = event.widget.config('text')[-1]
        if not isinstance(text, ut.str):
            text = ' '.join(text)
        for button_name, button in list(buttons.items()):
            if button['clean_text'] == text:
                __replyButtonText = button['original_text']
                boxRoot.quit()
                return

    # Hotkeys
    if buttons:
        for button_name, button in list(buttons.items()):
            hotkey_pressed = event.keysym
            if event.keysym != event.char:  # A special character
                hotkey_pressed = '<{}>'.format(event.keysym)
            if button['hotkey'] == hotkey_pressed:
                __replyButtonText = button_name
                boxRoot.quit()
                return

    print("Event not understood")
java
public void refreshSignature() {
    if (postStartTimestamp == 0
            || (System.nanoTime() - postStartTimestamp) > Duration.ofMinutes(sigExpireInMinute).toNanos()) {
        // generate signature
        try {
            signature = SharedAccessSignatureTokenProvider
                    .generateSharedAccessSignature(sasKeyName, sasKey, namespaceName,
                            Duration.ofMinutes(sigExpireInMinute));
            postStartTimestamp = System.nanoTime();
            LOG.info("Signature is refreshing: " + signature);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
python
def transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None,
                output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
                max_payload=None, tags=None, role=None, model_server_workers=None, volume_kms_key=None):
    """Return a ``Transformer`` that uses a SageMaker Model based on the training job. It reuses the
    SageMaker Session and base job name used by the Estimator.

    Args:
        instance_count (int): Number of EC2 instances to use.
        instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
        strategy (str): The strategy used to decide how to batch records in a single request (default: None).
            Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
        assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
        output_path (str): S3 location for saving the transform result. If not specified, results are stored to
            a default bucket.
        output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
        accept (str): The content type accepted by the endpoint deployed during the transform job.
        env (dict): Environment variables to be set for use during the transform job (default: None).
        max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
            each individual transform container at one time.
        max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
        tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
            the training job are used for the transform job.
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
            transform jobs. If not specified, the role from the Estimator will be used.
        model_server_workers (int): Optional. The number of worker processes used by the inference server.
            If None, server will use one worker per vCPU.
        volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
            compute instance (default: None).
    """
    role = role or self.role

    if self.latest_training_job is not None:
        model = self.create_model(role=role, model_server_workers=model_server_workers)

        container_def = model.prepare_container_def(instance_type)
        model_name = model.name or name_from_image(container_def['Image'])
        vpc_config = model.vpc_config
        self.sagemaker_session.create_model(model_name, role, container_def, vpc_config)
        transform_env = model.env.copy()
        if env is not None:
            transform_env.update(env)
    else:
        logging.warning('No finished training job found associated with this estimator. Please make sure '
                        'this estimator is only used for building workflow config')
        model_name = self._current_job_name
        transform_env = env or {}

    tags = tags or self.tags
    return Transformer(model_name, instance_count, instance_type, strategy=strategy, assemble_with=assemble_with,
                       output_path=output_path, output_kms_key=output_kms_key, accept=accept,
                       max_concurrent_transforms=max_concurrent_transforms, max_payload=max_payload,
                       env=transform_env, tags=tags, base_transform_job_name=self.base_job_name,
                       volume_kms_key=volume_kms_key, sagemaker_session=self.sagemaker_session)
java
@Override
Path relativizeAgainstRoot(final WatchedDirectory pWatchedDirectory, final Path pPath) {
    // Because we are on the last root directory possible we can ignore the
    // directory key here.
    return getPath().relativize(pPath);
}
python
def _get_agent_grounding(agent):
    """Convert an agent to the corresponding PyBEL DSL object
    (to be filled with variants later)."""
    def _get_id(_agent, key):
        _id = _agent.db_refs.get(key)
        if isinstance(_id, list):
            _id = _id[0]
        return _id

    hgnc_id = _get_id(agent, 'HGNC')
    if hgnc_id:
        hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
        if not hgnc_name:
            logger.warning('Agent %s with HGNC ID %s has no HGNC name.',
                           agent, hgnc_id)
            return
        return protein('HGNC', hgnc_name)

    uniprot_id = _get_id(agent, 'UP')
    if uniprot_id:
        return protein('UP', uniprot_id)

    fplx_id = _get_id(agent, 'FPLX')
    if fplx_id:
        return protein('FPLX', fplx_id)

    pfam_id = _get_id(agent, 'PF')
    if pfam_id:
        return protein('PFAM', pfam_id)

    ip_id = _get_id(agent, 'IP')
    if ip_id:
        return protein('IP', ip_id)

    fa_id = _get_id(agent, 'FA')
    if fa_id:
        return protein('NXPFA', fa_id)

    chebi_id = _get_id(agent, 'CHEBI')
    if chebi_id:
        if chebi_id.startswith('CHEBI:'):
            chebi_id = chebi_id[len('CHEBI:'):]
        return abundance('CHEBI', chebi_id)

    pubchem_id = _get_id(agent, 'PUBCHEM')
    if pubchem_id:
        return abundance('PUBCHEM', pubchem_id)

    go_id = _get_id(agent, 'GO')
    if go_id:
        return bioprocess('GO', go_id)

    mesh_id = _get_id(agent, 'MESH')
    if mesh_id:
        return bioprocess('MESH', mesh_id)

    return
java
private List<String> searchTokens(SNode n, long cnr) {
    List<String> result = new LinkedList<String>();
    if (n instanceof SToken) {
        result.add(n.getId());
        if (componentOfToken.get(n.getId()) == null) {
            List<Long> newlist = new LinkedList<Long>();
            newlist.add(cnr);
            componentOfToken.put(n.getId(), newlist);
        } else {
            List<Long> newlist = componentOfToken.get(n.getId());
            if (!newlist.contains(cnr)) {
                newlist.add(cnr);
            }
        }
    } else {
        List<SRelation<SNode, SNode>> outgoing = n.getGraph().getOutRelations(n.getId());
        if (outgoing != null) {
            for (SRelation<? extends SNode, ? extends SNode> e : outgoing) {
                if (!(e instanceof SPointingRelation)
                        && e.getSource() instanceof SNode
                        && e.getTarget() instanceof SNode) {
                    List<String> Med = searchTokens((SNode) e.getTarget(), cnr);
                    for (String s : Med) {
                        if (!result.contains(s)) {
                            result.add(s);
                        }
                    }
                }
            }
        }
    }
    return result;
}
python
def unpause(self):
    """ Unpause the animation. """
    self._pause_level -= 1
    if not self._pause_level:
        self._offset = self._paused_time - self._clock()
java
@CheckForNull
public UserDetails getCached(String idOrFullName) throws UsernameNotFoundException {
    Boolean exists = existenceCache.getIfPresent(idOrFullName);
    if (exists != null && !exists) {
        throw new UserMayOrMayNotExistException(String.format("\"%s\" does not exist", idOrFullName));
    } else {
        return detailsCache.getIfPresent(idOrFullName);
    }
}
python
def getLabel(self, form):
    """A label can be a string, dict (lookup by name) or a callable (passed the form)."""
    return specialInterpretValue(self.label, self.name, form=form)
java
@SuppressWarnings({"checkstyle:returncount", "checkstyle:cyclomaticcomplexity"})
public static Level parseLoggingLevel(String level) {
    if (level == null) {
        return Level.INFO;
    }
    switch (level.toLowerCase()) {
    case "none": //$NON-NLS-1$
    case "false": //$NON-NLS-1$
    case "0": //$NON-NLS-1$
        return Level.OFF;
    case "severe": //$NON-NLS-1$
    case "error": //$NON-NLS-1$
    case "1": //$NON-NLS-1$
        return Level.SEVERE;
    case "warn": //$NON-NLS-1$
    case "warning": //$NON-NLS-1$
    case "2": //$NON-NLS-1$
        return Level.WARNING;
    case "info": //$NON-NLS-1$
    case "true": //$NON-NLS-1$
    case "3": //$NON-NLS-1$
        return Level.INFO;
    case "fine": //$NON-NLS-1$
    case "config": //$NON-NLS-1$
    case "4": //$NON-NLS-1$
        return Level.FINE;
    case "finer": //$NON-NLS-1$
    case "5": //$NON-NLS-1$
        return Level.FINER;
    case "finest": //$NON-NLS-1$
    case "debug": //$NON-NLS-1$
    case "6": //$NON-NLS-1$
        return Level.FINEST;
    case "all": //$NON-NLS-1$
    case "7": //$NON-NLS-1$
        return Level.ALL;
    default:
        try {
            return fromInt(Integer.parseInt(level));
        } catch (Throwable exception) {
            //
        }
        return Level.INFO;
    }
}
java
public JsonObject getJsonObject(String name) throws JsonException {
    JsonElement el = get(name);
    if (!el.isJsonObject()) {
        throw Util.typeMismatch(name, el, "JsonObject");
    }
    return el.asJsonObject();
}
python
def get_range_kwargs(self):
    """ Convert row range object to dict which can be passed to
    google.bigtable.v2.RowRange add method.
    """
    range_kwargs = {}
    if self.start_key is not None:
        start_key_key = "start_key_open"
        if self.start_inclusive:
            start_key_key = "start_key_closed"
        range_kwargs[start_key_key] = _to_bytes(self.start_key)
    if self.end_key is not None:
        end_key_key = "end_key_open"
        if self.end_inclusive:
            end_key_key = "end_key_closed"
        range_kwargs[end_key_key] = _to_bytes(self.end_key)
    return range_kwargs
python
def create_feature_class(out_path,
                         out_name,
                         geom_type,
                         wkid,
                         fields,
                         objectIdField):
    """ creates a feature class in a given gdb or folder """
    if arcpyFound == False:
        raise Exception("ArcPy is required to use this function")
    arcpy.env.overwriteOutput = True
    field_names = []
    fc = arcpy.CreateFeatureclass_management(out_path=out_path,
                                             out_name=out_name,
                                             geometry_type=lookUpGeometry(geom_type),
                                             spatial_reference=arcpy.SpatialReference(wkid))[0]
    for field in fields:
        if field['name'] != objectIdField:
            field_names.append(field['name'])
            arcpy.AddField_management(out_path + os.sep + out_name,
                                      field['name'],
                                      lookUpFieldType(field['type']))
    return fc, field_names
python
def db(self):
    """Return the correct KV store for this execution."""
    if self._db is None:
        if self.tcex.default_args.tc_playbook_db_type == 'Redis':
            from .tcex_redis import TcExRedis

            self._db = TcExRedis(
                self.tcex.default_args.tc_playbook_db_path,
                self.tcex.default_args.tc_playbook_db_port,
                self.tcex.default_args.tc_playbook_db_context,
            )
        elif self.tcex.default_args.tc_playbook_db_type == 'TCKeyValueAPI':
            from .tcex_key_value import TcExKeyValue

            self._db = TcExKeyValue(self.tcex)
        else:
            err = u'Invalid DB Type: ({})'.format(self.tcex.default_args.tc_playbook_db_type)
            raise RuntimeError(err)
    return self._db
python
def AddEventData(self, event_data):
    """Adds event data.

    Args:
      event_data (EventData): event data.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    self._RaiseIfNotWritable()

    event_data = self._PrepareAttributeContainer(event_data)

    identifier = event_data.GetIdentifier()
    lookup_key = identifier.CopyToString()
    self._event_data[lookup_key] = event_data
java
protected VoltTable runDML(String dml, boolean transformDml) {
    String modifiedDml = (transformDml ? transformDML(dml) : dml);
    printTransformedSql(dml, modifiedDml);
    return super.runDML(modifiedDml);
}
python
def timedelta_seconds(timedelta):
    """Returns the total timedelta duration in seconds."""
    return (timedelta.total_seconds() if hasattr(timedelta, "total_seconds")
            else timedelta.days * 24 * 3600 + timedelta.seconds +
            timedelta.microseconds / 1000000.)
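A quick check of the fallback arithmetic (the hasattr branch exists because datetime.timedelta.total_seconds() only arrived in Python 2.7):

from datetime import timedelta as td

delta = td(days=1, seconds=30, microseconds=500000)
# manual fallback: 1*24*3600 + 30 + 0.5
assert delta.days * 24 * 3600 + delta.seconds + delta.microseconds / 1000000. == 86430.5
assert delta.total_seconds() == 86430.5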
java
public <T, K> T don(Thing<K> core, Class<T> trait, boolean logical, Mode... modes) {
    return don(core.getCore(), trait, logical, modes);
}
java
public <T> void use(Class<T> type, Annotation qualifier, Class<? extends Annotation> injectAnnotation, Consumer<T> consumer)
        throws ProvideException, CircularDependenciesException, ProviderMissingException {
    T instance = reference(type, qualifier, injectAnnotation);
    consumer.consume(instance);
    dereference(instance, type, qualifier, injectAnnotation);
}
java
public boolean checkProperty(T prop, Object value) {
    Object attribute = getProperty(prop);
    return (attribute != null && attribute.equals(value));
}
python
def run(self):
    """
    run the plugin
    """
    if self.workflow.builder.base_from_scratch and not self.workflow.builder.parent_images:
        self.log.info("Skipping add yum repo by url: unsupported for FROM-scratch images")
        return
    if self.repourls:
        for repourl in self.repourls:
            yumrepo = YumRepo(repourl)
            self.log.info("fetching yum repo from '%s'", yumrepo.repourl)
            try:
                yumrepo.fetch()
            except Exception as e:
                msg = "Failed to fetch yum repo {repo}: {exc}".format(
                    repo=yumrepo.repourl, exc=e)
                raise RuntimeError(msg)
            else:
                self.log.info("fetched yum repo from '%s'", yumrepo.repourl)

            if self.inject_proxy:
                if yumrepo.is_valid():
                    yumrepo.set_proxy_for_all_repos(self.inject_proxy)

            self.workflow.files[yumrepo.dst_filename] = yumrepo.content
            self.log.debug("saving yum repo '%s', length %d",
                           yumrepo.dst_filename, len(yumrepo.content))
java
public String convertRenderingIntentReserved2ToString(EDataType eDataType, Object instanceValue) {
    return instanceValue == null ? null : instanceValue.toString();
}
python
def normalize_locale(locale):
    """
    Normalize locale

    Extracts language code from passed in locale string to be used later
    for dictionaries loading.

    :param locale: string, locale (en, en_US)
    :return: string, language code
    """
    import re
    match = re.match(r'^[a-z]+', locale.lower())
    if match:
        return match.group()
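A couple of illustrative calls (note the function implicitly returns None when the locale starts with a non-letter):

print(normalize_locale('en_US'))  # 'en'
print(normalize_locale('DE'))     # 'de'
print(normalize_locale('123'))    # None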
python
def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
    """writes an instruction file for an SFR gage output file to read Flow only at all times

    Parameters
    ----------
    gage_output_file : str
        the gage output filename (ASCII).

    ins_file : str
        the name of the instruction file to create. If None, the name
        is <gage_output_file>.ins. Default is None

    parse_filename : bool
        if True, get the gage_num parameter by parsing the gage output file filename
        if False, get the gage number from the file itself

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with obsnme and obsval for the sfr simulated flows.
        If inschek was not successfully run, then returns None

    ins_file : str
        file name of instructions file relating to gage output.

    obs_file : str
        file name of processed gage output for all times

    Note
    ----
    sets up observations for gage outputs only for the Flow column.

    if parse_namefile is true, only text up to first '.' is used as the gage_num

    TODO : allow other observation types and align explicitly with times - now returns all values
    """
    if ins_file is None:
        ins_file = gage_output_file + '.ins'

    # navigate the file to be sure the header makes sense
    indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
    header = [i for i in indat if i.startswith('"')]

    # yank out the gage number to identify the observation names
    if parse_filename:
        gage_num = os.path.basename(gage_output_file).split('.')[0]
    else:
        gage_num = re.sub("[^0-9]", "",
                          indat[0].lower().split("gage no.")[-1].strip().split()[0])

    # get the column names
    cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('"', '').replace('data:', '').split()

    # make sure "Flow" is included in the columns
    if 'flow' not in cols:
        raise Exception('Requested field "Flow" not in gage output columns')

    # find which column is for "Flow"
    flowidx = np.where(np.array(cols) == 'flow')[0][0]

    # write out the instruction file lines
    inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j)
                for j in range(len(indat) - len(header))]
    inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1))

    # write the instruction file
    with open(ins_file, 'w') as ofp:
        ofp.write('pif ~\n')
        [ofp.write('{0}\n'.format(line)) for line in inslines]

    df = _try_run_inschek(ins_file, gage_output_file)
    if df is not None:
        return df, ins_file, gage_output_file
    else:
        print("Inschek didn't run so nothing returned")
        return None
java
@Override
public int compareTo(final Path other) {
    if (other == null) {
        throw new IllegalArgumentException("other path must be specified");
    }

    // Just defer to alpha ordering since we're absolute
    return this.toString().compareTo(other.toString());
}
python
def get_authorize_url(self, redirect_uri='', **kw):
    '''
    return the authorization url that the user should be redirected to.
    '''
    return self._mixin.get_authorize_url(redirect_uri or self._mixin._redirect_uri, **kw)
java
public NodeCache getNodeCache(String workspaceName) throws WorkspaceNotFoundException {
    NodeCache cache = overriddenNodeCachesByWorkspaceName.get(workspaceName);
    if (cache == null) {
        cache = repositoryCache.getWorkspaceCache(workspaceName);
    }
    return cache;
}
python
def fetch_entity_cls_from_registry(entity):
    """Util Method to fetch an Entity class from an entity's name"""
    # Defensive check to ensure we only process if `to_cls` is a string
    if isinstance(entity, str):
        try:
            return repo_factory.get_entity(entity)
        except AssertionError:
            # Entity has not been registered (yet)
            # FIXME print a helpful debug message
            raise
    else:
        return entity
java
public static <T> StreamEx<T> ofTree(T root, Function<T, Stream<T>> mapper) {
    TreeSpliterator<T, T> spliterator = new TreeSpliterator.Plain<>(root, mapper);
    return new StreamEx<>(spliterator, StreamContext.SEQUENTIAL.onClose(spliterator::close));
}
java
public GetMaintenanceWindowExecutionTaskResult withTaskParameters(
        java.util.Map<String, MaintenanceWindowTaskParameterValueExpression>... taskParameters) {
    if (this.taskParameters == null) {
        setTaskParameters(new com.amazonaws.internal.SdkInternalList<java.util.Map<String, MaintenanceWindowTaskParameterValueExpression>>(
                taskParameters.length));
    }
    for (java.util.Map<String, MaintenanceWindowTaskParameterValueExpression> ele : taskParameters) {
        this.taskParameters.add(ele);
    }
    return this;
}
java
public synchronized boolean next(long seqNum) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "next", new Long(seqNum));

    // cancel the existing alarm and create a new one
    expiryAlarmHandle.cancel();
    expiryAlarmHandle = am.create(parent.getMessageProcessor().getCustomProperties().get_browse_expiry_timeout(), this);

    if (closed) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "next", new Boolean(closed));
        return true;
    }

    if (seqNum == expectedSequenceNumber) {
        try {
            JsMessage msg = browseCursor.next();
            if (msg != null) {
                // a message was found
                parent.sendBrowseData(msg, remoteMEUuid, gatheringTargetDestUuid, key.getRemoteMEUuid(),
                                      key.getBrowseId(), expectedSequenceNumber);
                expectedSequenceNumber++;
            } else {
                // no more messages in the cursor
                parent.sendBrowseEnd(remoteMEUuid, gatheringTargetDestUuid, key.getBrowseId(), SIMPConstants.BROWSE_OK);
                close();
            }
        } catch (SIException e) {
            // SIResourceException shouldn't occur so FFDC.
            FFDCFilter.processException(
                e,
                "com.ibm.ws.sib.processor.impl.AOBrowserSession.next",
                "1:182:1.30",
                this);

            Exception e2 = new SIResourceException(nls.getFormattedMessage(
                "INTERNAL_MESSAGING_ERROR_CWSIP0002",
                new Object[] { "com.ibm.ws.sib.processor.impl.AOBrowserSession", "1:190:1.30", e },
                null), e);

            SibTr.exception(tc, e2);
            SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0002",
                        new Object[] { "com.ibm.ws.sib.processor.impl.AOBrowserSession", "1:199:1.30", e });

            parent.sendBrowseEnd(remoteMEUuid, gatheringTargetDestUuid, key.getBrowseId(), SIMPConstants.BROWSE_STORE_EXCEPTION);
            close();
        }
    } else {
        // wrong sequence number
        parent.sendBrowseEnd(remoteMEUuid, gatheringTargetDestUuid, key.getBrowseId(), SIMPConstants.BROWSE_OUT_OF_ORDER);
        close();
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "next", new Boolean(closed));
    return closed;
}
python
def cause_repertoire(self, mechanism, purview):
    """Return the cause repertoire of a mechanism over a purview.

    Args:
        mechanism (tuple[int]): The mechanism for which to calculate the
            cause repertoire.
        purview (tuple[int]): The purview over which to calculate the
            cause repertoire.

    Returns:
        np.ndarray: The cause repertoire of the mechanism over the purview.

    .. note::
        The returned repertoire is a distribution over purview node states,
        not the states of the whole network.
    """
    # If the purview is empty, the distribution is empty; return the
    # multiplicative identity.
    if not purview:
        return np.array([1.0])

    # If the mechanism is empty, nothing is specified about the previous
    # state of the purview; return the purview's maximum entropy
    # distribution.
    if not mechanism:
        return max_entropy_distribution(purview, self.tpm_size)

    # Use a frozenset so the arguments to `_single_node_cause_repertoire`
    # can be hashed and cached.
    purview = frozenset(purview)

    # Preallocate the repertoire with the proper shape, so that
    # probabilities are broadcasted appropriately.
    joint = np.ones(repertoire_shape(purview, self.tpm_size))

    # The cause repertoire is the product of the cause repertoires of the
    # individual nodes.
    joint *= functools.reduce(
        np.multiply,
        [self._single_node_cause_repertoire(m, purview) for m in mechanism]
    )

    # The resulting joint distribution is over previous states, which are
    # rows in the TPM, so the distribution is a column. The columns of a
    # TPM don't necessarily sum to 1, so we normalize.
    return distribution.normalize(joint)
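A minimal numpy sketch of the broadcasting trick used above, independent of the library's internals (the shapes and values are illustrative assumptions, and the in-place division stands in for distribution.normalize): per-node repertoires with singleton axes multiply into a joint over the purview, which is then renormalized.

import numpy as np

# node 0's repertoire varies along axis 0, node 1's along axis 1, so
# broadcasting their product yields the joint; the factors need not
# sum to 1, hence the final normalization
r0 = np.array([1.0, 3.0]).reshape(2, 1)
r1 = np.array([0.6, 0.4]).reshape(1, 2)
joint = np.ones((2, 2)) * r0 * r1
joint /= joint.sum()           # stands in for distribution.normalize(joint)
print(joint)                   # entries now sum to 1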
python
def datetime_to_timestamp(dt):
    """Convert timezone-aware `datetime` to POSIX timestamp and
    return seconds since UNIX epoch.

    Note: similar to `datetime.timestamp()` in Python 3.3+.
    """
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    return (dt - epoch).total_seconds()
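A small sanity-check sketch using only the standard library (the function above relies on a `UTC` constant from its own module, replaced here by `timezone.utc`): the manual epoch-delta computation agrees with the stdlib `datetime.timestamp()` for aware datetimes.

from datetime import datetime, timezone

dt = datetime(2020, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
# both give seconds since the UNIX epoch
assert (dt - epoch).total_seconds() == dt.timestamp()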
java
public static void runExample(
    AdWordsServicesInterface adWordsServices, AdWordsSession session, @Nullable Long adGroupId)
    throws RemoteException {
  // Get the TargetingIdeaService.
  TargetingIdeaServiceInterface targetingIdeaService =
      adWordsServices.get(session, TargetingIdeaServiceInterface.class);

  // Create selector.
  TargetingIdeaSelector selector = new TargetingIdeaSelector();
  selector.setRequestType(RequestType.IDEAS);
  selector.setIdeaType(IdeaType.KEYWORD);
  selector.setRequestedAttributeTypes(new AttributeType[] {
      AttributeType.KEYWORD_TEXT,
      AttributeType.SEARCH_VOLUME,
      AttributeType.AVERAGE_CPC,
      AttributeType.COMPETITION,
      AttributeType.CATEGORY_PRODUCTS_AND_SERVICES});

  // Set selector paging (required for targeting idea service).
  Paging paging = new Paging();
  paging.setStartIndex(0);
  paging.setNumberResults(10);
  selector.setPaging(paging);

  List<SearchParameter> searchParameters = new ArrayList<>();

  // Create related to query search parameter.
  RelatedToQuerySearchParameter relatedToQuerySearchParameter =
      new RelatedToQuerySearchParameter();
  relatedToQuerySearchParameter.setQueries(new String[] {"bakery", "pastries", "birthday cake"});
  searchParameters.add(relatedToQuerySearchParameter);

  // Language setting (optional).
  // The ID can be found in the documentation:
  // https://developers.google.com/adwords/api/docs/appendix/languagecodes
  // See the documentation for limits on the number of allowed language parameters:
  // https://developers.google.com/adwords/api/docs/reference/latest/TargetingIdeaService.LanguageSearchParameter
  LanguageSearchParameter languageParameter = new LanguageSearchParameter();
  Language english = new Language();
  english.setId(1000L);
  languageParameter.setLanguages(new Language[] {english});
  searchParameters.add(languageParameter);

  // Create network search parameter (optional).
  NetworkSetting networkSetting = new NetworkSetting();
  networkSetting.setTargetGoogleSearch(true);
  networkSetting.setTargetSearchNetwork(false);
  networkSetting.setTargetContentNetwork(false);
  networkSetting.setTargetPartnerSearchNetwork(false);

  NetworkSearchParameter networkSearchParameter = new NetworkSearchParameter();
  networkSearchParameter.setNetworkSetting(networkSetting);
  searchParameters.add(networkSearchParameter);

  // Optional: Use an existing ad group to generate ideas.
  if (adGroupId != null) {
    SeedAdGroupIdSearchParameter seedAdGroupIdSearchParameter =
        new SeedAdGroupIdSearchParameter();
    seedAdGroupIdSearchParameter.setAdGroupId(adGroupId);
    searchParameters.add(seedAdGroupIdSearchParameter);
  }

  selector.setSearchParameters(
      searchParameters.toArray(new SearchParameter[searchParameters.size()]));

  // Get keyword ideas.
  TargetingIdeaPage page = targetingIdeaService.get(selector);

  // Display keyword ideas.
  // Guard against a null entry list before iterating; the original ordering
  // (iterate first, null-check after) would throw a NullPointerException
  // when no ideas were returned.
  if (page.getEntries() != null) {
    for (TargetingIdea targetingIdea : page.getEntries()) {
      Map<AttributeType, Attribute> data = Maps.toMap(targetingIdea.getData());
      StringAttribute keyword = (StringAttribute) data.get(AttributeType.KEYWORD_TEXT);
      IntegerSetAttribute categories =
          (IntegerSetAttribute) data.get(AttributeType.CATEGORY_PRODUCTS_AND_SERVICES);
      String categoriesString = "(none)";
      if (categories != null && categories.getValue() != null) {
        categoriesString = Joiner.on(", ").join(Ints.asList(categories.getValue()));
      }
      Long averageMonthlySearches =
          ((LongAttribute) data.get(AttributeType.SEARCH_VOLUME)).getValue();
      Money averageCpc = ((MoneyAttribute) data.get(AttributeType.AVERAGE_CPC)).getValue();
      Double competition = ((DoubleAttribute) data.get(AttributeType.COMPETITION)).getValue();
      System.out.printf("Keyword with text '%s', average monthly search volume %d, "
          + "average CPC %d, and competition %.2f "
          + "was found with categories: %s%n",
          keyword.getValue(), averageMonthlySearches, averageCpc.getMicroAmount(),
          competition, categoriesString);
    }
  } else {
    System.out.println("No related keywords were found.");
  }
}
python
def list_tasks(location='\\'):
    r'''
    List all tasks located in a specific location in the task scheduler.

    :param str location: A string value representing the folder from which you
        want to list tasks. Default is '\\' which is the root for the task
        scheduler (C:\Windows\System32\tasks).

    :return: Returns a list of tasks.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' task.list_tasks
    '''
    # Create the task service object
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()

        # Get the folder to list tasks from
        task_folder = task_service.GetFolder(location)
        tasks = task_folder.GetTasks(0)

        ret = []
        for task in tasks:
            ret.append(task.Name)

    return ret
python
def resample_1d(arr, n_out=None, random_state=None):
    """Resample an array, with replacement.

    Parameters
    ==========
    arr: np.ndarray
        The array is resampled along the first axis.
    n_out: int, optional
        Number of samples to return. If not specified, return ``len(arr)``
        samples.
    random_state: np.random.RandomState, optional
        Source of randomness; a fresh ``RandomState`` is used if not given.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    arr = np.atleast_1d(arr)
    n = len(arr)
    if n_out is None:
        n_out = n
    # draw n_out indices (not n), so the requested sample size is honored
    idx = random_state.randint(0, n, size=n_out)
    return arr[idx]
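Usage sketch: with a seeded RandomState the bootstrap draw is reproducible; here five indices are drawn with replacement from a three-element array.

import numpy as np

rs = np.random.RandomState(0)
sample = resample_1d(np.array([10, 20, 30]), n_out=5, random_state=rs)
print(sample)   # five values drawn with replacement from {10, 20, 30}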
python
def escalate_incident(
    self,
    incident,
    update_mask=None,
    subscriptions=None,
    tags=None,
    roles=None,
    artifacts=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Escalates an incident.

    Example:
        >>> from google.cloud import irm_v1alpha2
        >>>
        >>> client = irm_v1alpha2.IncidentServiceClient()
        >>>
        >>> # TODO: Initialize `incident`:
        >>> incident = {}
        >>>
        >>> response = client.escalate_incident(incident)

    Args:
        incident (Union[dict, ~google.cloud.irm_v1alpha2.types.Incident]):
            The incident to escalate with the new values.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.Incident`
        update_mask (Union[dict, ~google.cloud.irm_v1alpha2.types.FieldMask]):
            List of fields that should be updated.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.FieldMask`
        subscriptions (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Subscription]]):
            Subscriptions to add or update. Existing subscriptions with the
            same channel and address as a subscription in the list will be
            updated.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.Subscription`
        tags (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Tag]]):
            Tags to add. Tags identical to existing tags will be ignored.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.Tag`
        roles (list[Union[dict, ~google.cloud.irm_v1alpha2.types.IncidentRoleAssignment]]):
            Roles to add or update. Existing roles with the same type (and
            title, for TYPE_OTHER roles) will be updated.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.IncidentRoleAssignment`
        artifacts (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Artifact]]):
            Artifacts to add. All artifacts are added without checking for
            duplicates.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.irm_v1alpha2.types.Artifact`
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests. If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait for
            the request to complete. Note that if ``retry`` is specified, the
            timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.irm_v1alpha2.types.EscalateIncidentResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "escalate_incident" not in self._inner_api_calls:
        self._inner_api_calls[
            "escalate_incident"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.escalate_incident,
            default_retry=self._method_configs["EscalateIncident"].retry,
            default_timeout=self._method_configs["EscalateIncident"].timeout,
            client_info=self._client_info,
        )

    request = incidents_service_pb2.EscalateIncidentRequest(
        incident=incident,
        update_mask=update_mask,
        subscriptions=subscriptions,
        tags=tags,
        roles=roles,
        artifacts=artifacts,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("incident.name", incident.name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["escalate_incident"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
java
public static authenticationsamlpolicy_authenticationvserver_binding[] get(nitro_service service, String name) throws Exception {
    authenticationsamlpolicy_authenticationvserver_binding obj =
        new authenticationsamlpolicy_authenticationvserver_binding();
    obj.set_name(name);
    authenticationsamlpolicy_authenticationvserver_binding response[] =
        (authenticationsamlpolicy_authenticationvserver_binding[]) obj.get_resources(service);
    return response;
}
python
def as_cross(self, delimiter=''):
    '''
    Return a cross rate representation with respect to USD.

    @param delimiter: could be '' or '/' normally
    '''
    if self.order > usd_order:
        return 'USD%s%s' % (delimiter, self.code)
    else:
        return '%s%sUSD' % (self.code, delimiter)
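An illustrative sketch (the actual `order` values and the `usd_order` threshold live elsewhere in the module, so which currencies fall on which side is an assumption): a currency ordered above USD quotes as the counter currency, one below as the base.

# assuming jpy.order > usd_order and eur.order < usd_order:
# jpy.as_cross()     -> 'USDJPY'
# jpy.as_cross('/')  -> 'USD/JPY'
# eur.as_cross('/')  -> 'EUR/USD'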
python
def readbits(self, bits):
    """
    Read the specified number of bits from the stream.
    Returns 0 for bits == 0.
    """
    if bits == 0:
        return 0

    # fast byte-aligned path
    if bits % 8 == 0 and self._bits_pending == 0:
        return self._read_bytes_aligned(bits // 8)

    out = 0
    masks = self._masks

    def transfer_bits(x, y, n, t):
        """
        Transfers t bits from the top of y_n to the bottom of x,
        then returns x and the remaining bits in y.
        """
        if n == t:
            # taking all
            return (x << t) | y, 0
        mask = masks[t]               # (1 << t) - 1
        remainmask = masks[n - t]     # (1 << n - t) - 1
        taken = (y >> (n - t)) & mask
        return (x << t) | taken, y & remainmask

    while bits > 0:
        if self._bits_pending > 0:
            assert self._partial_byte is not None
            take = min(self._bits_pending, bits)
            out, self._partial_byte = transfer_bits(
                out, self._partial_byte, self._bits_pending, take)
            if take == self._bits_pending:
                # we took them all
                self._partial_byte = None
            self._bits_pending -= take
            bits -= take
            continue

        r = self.f.read(1)
        if r == b'':
            raise EOFError
        self._partial_byte = ord(r)
        self._bits_pending = 8

    return out
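A self-contained sketch of the same MSB-first extraction, stripped of the class state and precomputed mask table (the names here are mine, not the original API): bits are taken from the top of the pending byte and shifted onto the bottom of the accumulator.

import io

def read_bits_msb_first(f, bits, state):
    # state: {'partial': current byte, 'pending': unread bits left in it}
    out = 0
    while bits > 0:
        if state['pending'] == 0:
            b = f.read(1)
            if b == b'':
                raise EOFError
            state['partial'], state['pending'] = b[0], 8
        take = min(state['pending'], bits)
        shift = state['pending'] - take
        out = (out << take) | ((state['partial'] >> shift) & ((1 << take) - 1))
        state['pending'] -= take
        bits -= take
    return out

state = {'partial': 0, 'pending': 0}
f = io.BytesIO(b'\xab')          # 0xAB == 0b1010_1011
assert read_bits_msb_first(f, 4, state) == 0xA
assert read_bits_msb_first(f, 4, state) == 0xB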
java
private Tree<K, V> del(Tree<K, V> tree, K k) {
    if (tree == null)
        return null;
    int cmp = ordering.compare(k, tree.getKey(kf));
    if (cmp < 0)
        return delLeft(tree, k);
    else if (cmp > 0)
        return delRight(tree, k);
    else
        return append(tree.getLeft(), tree.getRight());
}
python
def compare_version(value):
    """
    Determines if the provided version value compares with program version.

    `value`
        Version comparison string (e.g. ==1.0, <=1.0, >1.1)

    Supported operators: <, <=, ==, >, >=
    """
    # extract parts from value
    import re
    res = re.match(r'(<|<=|==|>|>=)(\d{1,2}\.\d{1,2}(\.\d{1,2})?)$',
                   str(value).strip())
    if not res:
        return False

    operator, value, _ = res.groups()

    # break into pieces
    value = tuple(int(x) for x in str(value).split('.'))
    if len(value) < 3:
        value += (0,)

    version = __version_info__

    if operator in ('<', '<='):
        if version < value:
            return True
        if operator != '<=':
            return False
    elif operator in ('>=', '>'):
        if version > value:
            return True
        if operator != '>=':
            return False

    return value == version
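Expected behavior, as a sketch (assuming the module-level `__version_info__` is (1, 1, 0); missing patch components are padded with zero before comparing):

# compare_version('>=1.0')   -> True    (1.1.0 > 1.0.0)
# compare_version('==1.1')   -> True    ('1.1' is padded to (1, 1, 0))
# compare_version('<1.1')    -> False
# compare_version('1.0')     -> False   (an operator is required by the regex)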
python
def change_interface_id(self, interface_id):
    """
    Generic change interface ID for VLAN interfaces that are not
    Inline Interfaces (non-VLAN sub interfaces do not have an
    interface_id field).

    :param str,int interface_id: interface ID value
    """
    _, second = self.nicid.split('.')
    self.update(nicid='{}.{}'.format(str(interface_id), second))
python
def add_result_hook(self, hook: Type["QueryResultHook"]) -> Type["QueryResultHook"]:
    """
    Add a query result hook to the chain

    :param hook: hook to add
    :return: added hook (same as hook to add)
    """
    hook.next_hook = self._query_result_hook
    self._query_result_hook = hook
    return hook
java
public static SyncMapPermissionFetcher fetcher(final String pathServiceSid,
                                               final String pathMapSid,
                                               final String pathIdentity) {
    return new SyncMapPermissionFetcher(pathServiceSid, pathMapSid, pathIdentity);
}
python
def get_map_matrix(inputfile):
    """ Return the matrix representation of the genetic map.

    :arg inputfile: the path to the input file from which to retrieve the
        genetic map.
    """
    matrix = read_input_file(inputfile, sep=',', noquote=True)
    output = [['Locus', 'Group', 'Position']]
    for row in matrix:
        if row[0] and not re.match(r'c\d+\.loc[\d\.]+', row[0]):
            output.append([row[0], row[1], row[2]])
    return output
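The row filter is the interesting part: interpolated loci named like c1.loc5.0 are dropped while real marker rows are kept. A standalone illustration of that regex, with made-up rows:

import re

rows = [['m1', '1', '0.0'], ['c1.loc5.0', '1', '5.0'], ['m2', '1', '7.3']]
kept = [r for r in rows if r[0] and not re.match(r'c\d+\.loc[\d\.]+', r[0])]
print(kept)   # [['m1', '1', '0.0'], ['m2', '1', '7.3']] -- the c1.loc row is gone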
python
def full_path(self):
    '''The full path of this node.'''
    with self._mutex:
        if self._parent:
            return self._parent.full_path + [self._name]
        else:
            return [self._name]
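A minimal standalone sketch of the same parent-walk recursion, without the mutex or the rest of the node class:

class N:
    def __init__(self, name, parent=None):
        self._name, self._parent = name, parent

    @property
    def full_path(self):
        # recurse up through parents, then append this node's own name
        return (self._parent.full_path if self._parent else []) + [self._name]

root = N('root')
child = N('a', root)
leaf = N('b', child)
assert leaf.full_path == ['root', 'a', 'b']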
java
protected double smoothedProbability(Token tok, double freq, double totalWeight) {
    return (freq + pseudoCount * backgroundProb(tok)) / (totalWeight + pseudoCount);
}
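This one-liner is additive (pseudo-count) smoothing: even a token with zero observed frequency receives probability mass proportional to its background probability. A Python sketch of the same formula (the pseudo-count of 1.0 and the background value are illustrative assumptions):

def smoothed(freq, total_weight, background_prob, pseudo_count=1.0):
    return (freq + pseudo_count * background_prob) / (total_weight + pseudo_count)

print(smoothed(0, 100, 0.01))   # ~9.9e-05: unseen token, still nonzero
print(smoothed(5, 100, 0.01))   # ~0.0496: observed counts dominate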
python
def load_map_projection(filename,
                        center=None, center_right=None, radius=None,
                        method='orthographic', registration='native',
                        chirality=None, sphere_radius=None,
                        pre_affine=None, post_affine=None, meta_data=None):
    '''
    load_map_projection(filename) yields the map projection indicated by the
    given file name. Map projections define the parameters of a projection to
    the 2D cortical surface via a registration name and projection parameters.

    This function is primarily a wrapper around the MapProjection.load()
    function; for information about options, see MapProjection.load.
    '''
    return MapProjection.load(filename,
                              center=center, center_right=center_right,
                              radius=radius, method=method,
                              registration=registration, chirality=chirality,
                              sphere_radius=sphere_radius,
                              pre_affine=pre_affine, post_affine=post_affine)
python
def setup_icons(self):
    """Set all icons on buttons

    :returns: None
    :rtype: None
    :raises: None
    """
    plus_icon = get_icon('glyphicons_433_plus_bright.png', asicon=True)
    self.addnew_tb.setIcon(plus_icon)
python
def load_config_from_files(filenames=None):
    """Load D-Wave Cloud Client configuration from a list of files.

    .. note:: This method is not standardly used to set up D-Wave Cloud
        Client configuration. It is recommended you use
        :meth:`.Client.from_config` or :meth:`.config.load_config` instead.

    Configuration files comply with standard Windows INI-like format,
    parsable with Python's :mod:`configparser`. A section called
    ``defaults`` contains default values inherited by other sections.

    Each filename in the list (each configuration file loaded) progressively
    upgrades the final configuration, on a key by key basis, per each section.

    Args:
        filenames (list[str], default=None):
            D-Wave Cloud Client configuration files (paths and names).

            If ``None``, searches for a configuration file named
            ``dwave.conf`` in all system-wide configuration directories, in
            the user-local configuration directory, and in the current
            working directory, following the user/system configuration paths
            of :func:`get_configfile_paths`.

    Returns:
        :obj:`~configparser.ConfigParser`:
            :class:`dict`-like mapping of configuration sections (profiles)
            to mapping of per-profile keys holding values.

    Raises:
        :exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
            Config file specified or detected could not be opened or read.

        :exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
            Config file parse failed.

    Examples:
        This example loads configurations from two files. One contains a
        default section with key/values that are overwritten by any profile
        section that contains that key/value; for example, profile dw2000b in
        file dwave_b.conf overwrites the default URL and client type, which
        profile dw2000a inherits from the defaults section, while profile
        dw2000a overwrites the API token that profile dw2000b inherits.

        The files, which are located in the current working directory, are
        (1) dwave_a.conf::

            [defaults]
            endpoint = https://url.of.some.dwavesystem.com/sapi
            client = qpu
            token = ABC-123456789123456789123456789

            [dw2000a]
            solver = EXAMPLE_2000Q_SYSTEM
            token = DEF-987654321987654321987654321

        and (2) dwave_b.conf::

            [dw2000b]
            endpoint = https://url.of.some.other.dwavesystem.com/sapi
            client = sw
            solver = EXAMPLE_2000Q_SYSTEM

        The following example code loads configuration from both these files,
        with the defined overrides and inheritance.

        .. code:: python

            >>> import dwave.cloud as dc
            >>> import sys
            >>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"])  # doctest: +SKIP
            >>> configuration.write(sys.stdout)  # doctest: +SKIP
            [defaults]
            endpoint = https://url.of.some.dwavesystem.com/sapi
            client = qpu
            token = ABC-123456789123456789123456789

            [dw2000a]
            solver = EXAMPLE_2000Q_SYSTEM
            token = DEF-987654321987654321987654321

            [dw2000b]
            endpoint = https://url.of.some.other.dwavesystem.com/sapi
            client = sw
            solver = EXAMPLE_2000Q_SYSTEM

    """
    if filenames is None:
        filenames = get_configfile_paths()

    config = configparser.ConfigParser(default_section="defaults")
    for filename in filenames:
        try:
            with open(filename, 'r') as f:
                config.read_file(f, filename)
        except (IOError, OSError):
            raise ConfigFileReadError("Failed to read {!r}".format(filename))
        except configparser.Error:
            raise ConfigFileParseError("Failed to parse {!r}".format(filename))
    return config
java
public List<Milestone> getMilestones(Object projectIdOrPath, int page, int perPage) throws GitLabApiException {
    Response response = get(Response.Status.OK, getPageQueryParams(page, perPage),
            "projects", getProjectIdOrPath(projectIdOrPath), "milestones");
    return (response.readEntity(new GenericType<List<Milestone>>() {}));
}
java
@SuppressWarnings("unchecked")
public static <X> X deserializeFunction(RuntimeContext context, byte[] serFun) throws FlinkException {
    if (!jythonInitialized) {
        // This branch is only tested by end-to-end tests
        String path = context.getDistributedCache()
            .getFile(PythonConstants.FLINK_PYTHON_DC_ID).getAbsolutePath();

        String scriptName = PythonStreamExecutionEnvironment.PythonJobParameters
            .getScriptName(context.getExecutionConfig().getGlobalJobParameters());

        try {
            initPythonInterpreter(
                new String[]{Paths.get(path, scriptName).toString()},
                path,
                scriptName);
        } catch (Exception e) {
            // Logging may itself fail if rendering the original exception
            // requires jython to be initialized; fall back in that case.
            // Note: the throw must sit outside the inner try, otherwise it
            // would be swallowed by its own catch clause and the original
            // cause would always be dropped.
            try {
                LOG.error("Initialization of jython failed.", e);
            } catch (Exception ie) {
                LOG.error("Initialization of jython failed. Could not print original stacktrace.", ie);
                throw new FlinkRuntimeException(
                    "Initialization of jython failed. Could not print original stacktrace.");
            }
            throw new FlinkRuntimeException("Initialization of jython failed.", e);
        }
    }

    try {
        return (X) SerializationUtils.deserializeObject(serFun);
    } catch (IOException | ClassNotFoundException ex) {
        throw new FlinkException("Deserialization of user-function failed.", ex);
    }
}
python
def login(self, email, password):
    """
    login using email and password

    :param email: email address
    :param password: password
    """
    rsp = self._request()
    self.default_headers['Authorization'] = rsp.data['token']
    return rsp
python
async def receive(self, pkt):
    """Receive packet from the client."""
    self.server.logger.info('%s: Received packet %s data %s',
                            self.sid, packet.packet_names[pkt.packet_type],
                            pkt.data if not isinstance(pkt.data, bytes)
                            else '<binary>')
    if pkt.packet_type == packet.PING:
        self.last_ping = time.time()
        await self.send(packet.Packet(packet.PONG, pkt.data))
    elif pkt.packet_type == packet.MESSAGE:
        await self.server._trigger_event(
            'message', self.sid, pkt.data,
            run_async=self.server.async_handlers)
    elif pkt.packet_type == packet.UPGRADE:
        await self.send(packet.Packet(packet.NOOP))
    elif pkt.packet_type == packet.CLOSE:
        await self.close(wait=False, abort=True)
    else:
        raise exceptions.UnknownPacketError()
java
public static StorageObjectSummary createFromS3ObjectSummary(S3ObjectSummary objSummary) {
    return new StorageObjectSummary(
            objSummary.getBucketName(),
            objSummary.getKey(),
            // S3 ETag is not always MD5, but since this code path is only
            // used to skip duplicate files in the PUT command, it's not
            // critical to guarantee that it's MD5
            objSummary.getETag(),
            objSummary.getSize());
}
java
@Nullable
public static BigInteger parseBigInteger (@Nullable final String sStr,
                                          @Nonnegative final int nRadix,
                                          @Nullable final BigInteger aDefault) {
    if (sStr != null && sStr.length () > 0)
        try {
            return new BigInteger (sStr, nRadix);
        } catch (final NumberFormatException ex) {
            // Fall through
        }
    return aDefault;
}
java
public void setAnimationFromJson(String jsonString, @Nullable String cacheKey) {
    setAnimation(new JsonReader(new StringReader(jsonString)), cacheKey);
}
java
@Nonnull
public final Launcher decorateByPrefix(final String... prefix) {
    final Launcher outer = this;
    return new Launcher(outer) {
        @Override
        public boolean isUnix() {
            return outer.isUnix();
        }

        @Override
        public Proc launch(ProcStarter starter) throws IOException {
            starter.commands.addAll(0, Arrays.asList(prefix));
            boolean[] masks = starter.masks;
            if (masks != null) {
                starter.masks = prefix(masks);
            }
            return outer.launch(starter);
        }

        @Override
        public Channel launchChannel(String[] cmd, OutputStream out,
                FilePath workDir, Map<String, String> envVars)
                throws IOException, InterruptedException {
            return outer.launchChannel(prefix(cmd), out, workDir, envVars);
        }

        @Override
        public void kill(Map<String, String> modelEnvVars)
                throws IOException, InterruptedException {
            outer.kill(modelEnvVars);
        }

        private String[] prefix(@Nonnull String[] args) {
            String[] newArgs = new String[args.length + prefix.length];
            System.arraycopy(prefix, 0, newArgs, 0, prefix.length);
            System.arraycopy(args, 0, newArgs, prefix.length, args.length);
            return newArgs;
        }

        private boolean[] prefix(@Nonnull boolean[] args) {
            boolean[] newArgs = new boolean[args.length + prefix.length];
            System.arraycopy(args, 0, newArgs, prefix.length, args.length);
            return newArgs;
        }
    };
}
java
@Override
public HTTaxinvoiceSearchResult search(String CorpNum, String JobID, String[] Type,
        String[] TaxType, String[] PurposeType, String TaxRegIDYN, String TaxRegIDType,
        String TaxRegID, Integer Page, Integer PerPage, String Order, String UserID)
        throws PopbillException {
    if (JobID.length() != 18)
        throw new PopbillException(-99999999, "The job ID is not valid.");

    String uri = "/HomeTax/Taxinvoice/" + JobID;

    uri += "?Type=" + Arrays.toString(Type).replaceAll("\\[|\\]|\\s", "");
    uri += "&TaxType=" + Arrays.toString(TaxType).replaceAll("\\[|\\]|\\s", "");
    uri += "&PurposeType=" + Arrays.toString(PurposeType).replaceAll("\\[|\\]|\\s", "");

    // Compare string contents with isEmpty() rather than == / != reference
    // comparison; also check TaxRegID itself (the original tested TaxRegIDYN
    // here, an apparent copy-paste slip).
    if (TaxRegIDType != null && !TaxRegIDType.isEmpty())
        uri += "&TaxRegIDType=" + TaxRegIDType;

    if (TaxRegIDYN != null && !TaxRegIDYN.isEmpty())
        uri += "&TaxRegIDYN=" + TaxRegIDYN;

    if (TaxRegID != null && !TaxRegID.isEmpty())
        uri += "&TaxRegID=" + TaxRegID;

    uri += "&Page=" + Integer.toString(Page);
    uri += "&PerPage=" + Integer.toString(PerPage);
    uri += "&Order=" + Order;

    return httpget(uri, CorpNum, UserID, HTTaxinvoiceSearchResult.class);
}
java
public PayloadType<ConstraintType<T>> getOrCreatePayload() {
    Node node = childNode.getOrCreate("payload");
    PayloadType<ConstraintType<T>> payload =
        new PayloadTypeImpl<ConstraintType<T>>(this, "payload", childNode, node);
    return payload;
}
java
public EC2TagSet withEc2TagSetList(java.util.Collection<java.util.List<EC2TagFilter>> ec2TagSetList) {
    setEc2TagSetList(ec2TagSetList);
    return this;
}
java
public static <T> Mapping<T> make(Class<T> clazz, AnnotationSet<?, ?> annotationSet,
        boolean includeParentFields) {
    return new Mapping<T>(clazz, annotationSet, includeParentFields);
}