language: stringclasses (2 values); func_code_string: stringlengths (63 to 466k)
python
def bggreen(cls, string, auto=False):
    """Color-code entire string.

    :param str string: String to colorize.
    :param bool auto: Enable auto-color (dark/light terminal).

    :return: Class instance for colorized string.
    :rtype: Color
    """
    return cls.colorize('bggreen', string, auto=auto)
java
private boolean containsKey(K key, int keyGroupIndex, N namespace) {
    checkKeyNamespacePreconditions(key, namespace);

    Map<N, Map<K, S>> namespaceMap = getMapForKeyGroup(keyGroupIndex);

    if (namespaceMap == null) {
        return false;
    }

    Map<K, S> keyedMap = namespaceMap.get(namespace);
    return keyedMap != null && keyedMap.containsKey(key);
}
python
def screenshot(self, *args):
    """Take a screenshot, crop and save"""
    from mss import mss
    if not os.path.exists("screenshots"):
        os.makedirs("screenshots")
    box = {
        "top": self.winfo_y(),
        "left": self.winfo_x(),
        "width": self.winfo_width(),
        "height": self.winfo_height(),
    }
    screenshot = mss().grab(box)
    screenshot = Image.frombytes("RGB", screenshot.size, screenshot.rgb)
    screenshot.save("screenshots/{}.png".format(ttk.Style(self).theme_use()))
python
def _delete(self):
    """Internal implementation for deleting a reftrack.

    This will just delete the reftrack, set the children to None,
    update the status, and the rootobject.
    If the object is an alien, it will also set the parent to None,
    so it disappears from the model.

    :returns: None
    :rtype: None
    :raises: None
    """
    refobjinter = self.get_refobjinter()
    refobjinter.delete(self.get_refobj())
    self.set_refobj(None, setParent=False)
    if self.alien():
        # it should not be in the scene
        # so also remove it from the model
        # so we cannot load it again
        parent = self.get_parent()
        if parent:
            parent.remove_child(self)
            self._treeitem.parent().remove_child(self._treeitem)
    else:
        # only remove all children from the model and set their parent to None
        for c in self.get_all_children():
            c._parent = None
            self._treeitem.remove_child(c._treeitem)
        # this should not have any children anymore
        self._children = []
    self.set_status(None)
python
def get_all_items(self):
    """
    Returns all items in the combobox dictionary.
    """
    return [self._widget.itemText(k) for k in range(self._widget.count())]
python
def update(self, other, join='left', overwrite=True, filter_func=None,
           errors='ignore'):
    """
    Modify in place using non-NA values from another DataFrame.

    Aligns on indices. There is no return value.

    Parameters
    ----------
    other : DataFrame, or object coercible into a DataFrame
        Should have at least one matching index/column label
        with the original DataFrame. If a Series is passed,
        its name attribute must be set, and that will be
        used as the column name to align with the original DataFrame.
    join : {'left'}, default 'left'
        Only left join is implemented, keeping the index and columns of the
        original object.
    overwrite : bool, default True
        How to handle non-NA values for overlapping keys:

        * True: overwrite original DataFrame's values
          with values from `other`.
        * False: only update values that are NA in
          the original DataFrame.

    filter_func : callable(1d-array) -> bool 1d-array, optional
        Can choose to replace values other than NA. Return True for values
        that should be updated.
    errors : {'raise', 'ignore'}, default 'ignore'
        If 'raise', will raise a ValueError if the DataFrame and `other`
        both contain non-NA data in the same place.

        .. versionchanged :: 0.24.0
            Changed from `raise_conflict=False|True`
            to `errors='ignore'|'raise'`.

    Returns
    -------
    None : method directly changes calling object

    Raises
    ------
    ValueError
        * When `errors='raise'` and there's overlapping non-NA data.
        * When `errors` is not either `'ignore'` or `'raise'`
    NotImplementedError
        * If `join != 'left'`

    See Also
    --------
    dict.update : Similar method for dictionaries.
    DataFrame.merge : For column(s)-on-columns(s) operations.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3],
    ...                    'B': [400, 500, 600]})
    >>> new_df = pd.DataFrame({'B': [4, 5, 6],
    ...                        'C': [7, 8, 9]})
    >>> df.update(new_df)
    >>> df
       A  B
    0  1  4
    1  2  5
    2  3  6

    The DataFrame's length does not increase as a result of the update,
    only values at matching index/column labels are updated.

    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
    >>> df.update(new_df)
    >>> df
       A  B
    0  a  d
    1  b  e
    2  c  f

    For Series, its name attribute must be set.

    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
    >>> df.update(new_column)
    >>> df
       A  B
    0  a  d
    1  b  y
    2  c  e
    >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
    ...                    'B': ['x', 'y', 'z']})
    >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
    >>> df.update(new_df)
    >>> df
       A  B
    0  a  x
    1  b  d
    2  c  e

    If `other` contains NaNs the corresponding values are not updated
    in the original dataframe.

    >>> df = pd.DataFrame({'A': [1, 2, 3],
    ...                    'B': [400, 500, 600]})
    >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
    >>> df.update(new_df)
    >>> df
       A      B
    0  1    4.0
    1  2  500.0
    2  3    6.0
    """
    import pandas.core.computation.expressions as expressions
    # TODO: Support other joins
    if join != 'left':  # pragma: no cover
        raise NotImplementedError("Only left join is supported")
    if errors not in ['ignore', 'raise']:
        raise ValueError("The parameter errors must be either "
                         "'ignore' or 'raise'")

    if not isinstance(other, DataFrame):
        other = DataFrame(other)

    other = other.reindex_like(self)

    for col in self.columns:
        this = self[col]._values
        that = other[col]._values
        if filter_func is not None:
            with np.errstate(all='ignore'):
                mask = ~filter_func(this) | isna(that)
        else:
            if errors == 'raise':
                mask_this = notna(that)
                mask_that = notna(this)
                if any(mask_this & mask_that):
                    raise ValueError("Data overlaps.")

            if overwrite:
                mask = isna(that)
            else:
                mask = notna(this)

        # don't overwrite columns unnecessarily
        if mask.all():
            continue

        self[col] = expressions.where(mask, this, that)
python
def cast_to_a1_notation(method):
    """
    Decorator function casts wrapped arguments to A1 notation
    in range method calls.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            if len(args):
                int(args[0])

                # Convert to A1 notation
                range_start = rowcol_to_a1(*args[:2])
                range_end = rowcol_to_a1(*args[-2:])
                range_name = ':'.join((range_start, range_end))

                args = (range_name,) + args[4:]
        except ValueError:
            pass

        return method(self, *args, **kwargs)

    return wrapper
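A minimal sketch of how a decorator like the one above is exercised, assuming it is in scope in the same module; the Sheet class, its range_value method, and this single-letter-column rowcol_to_a1 stub are hypothetical stand-ins, not the library's real code:

from functools import wraps

def rowcol_to_a1(row, col):
    # Hypothetical stub: 1-based (row, col) -> A1 notation, A-Z columns only.
    return '%s%d' % (chr(ord('A') + col - 1), row)

class Sheet:
    @cast_to_a1_notation
    def range_value(self, name):
        # Numeric args have already been collapsed into an 'A1:B2' string here.
        return name

print(Sheet().range_value(1, 1, 2, 2))  # -> 'A1:B2'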
java
public NameParser getNameParser() throws WIMException {
    if (iNameParser == null) {
        TimedDirContext ctx = iContextManager.getDirContext();
        try {
            try {
                iNameParser = ctx.getNameParser("");
            } catch (NamingException e) {
                if (!ContextManager.isConnectionException(e)) {
                    throw e;
                }
                ctx = iContextManager.reCreateDirContext(ctx, e.toString());
                iNameParser = ctx.getNameParser("");
            }
        } catch (NamingException e) {
            String msg = Tr.formatMessage(tc, WIMMessageKey.NAMING_EXCEPTION,
                    WIMMessageHelper.generateMsgParms(e.toString(true)), e);
            throw new WIMSystemException(WIMMessageKey.NAMING_EXCEPTION, msg, e);
        } finally {
            iContextManager.releaseDirContext(ctx);
        }
    }
    return iNameParser;
}
java
@Override
public XBELDocument convert(Document source) {
    if (source == null) return null;

    XBELDocument xd = new XBELDocument();

    List<StatementGroup> stmtGroups = source.getStatementGroups();
    List<XBELStatementGroup> xstmtGroups = xd.getStatementGroup();
    StatementGroupConverter sgConverter = new StatementGroupConverter();
    for (final StatementGroup sg : stmtGroups) {
        // Defer to StatementGroupConverter
        xstmtGroups.add(sgConverter.convert(sg));
    }

    List<AnnotationDefinition> definitions = source.getDefinitions();
    if (hasItems(definitions)) {
        XBELAnnotationDefinitionGroup xadGroup = new XBELAnnotationDefinitionGroup();
        List<XBELInternalAnnotationDefinition> internals =
                xadGroup.getInternalAnnotationDefinition();
        List<XBELExternalAnnotationDefinition> externals =
                xadGroup.getExternalAnnotationDefinition();

        InternalAnnotationDefinitionConverter iConverter =
                new InternalAnnotationDefinitionConverter();
        ExternalAnnotationDefinitionConverter eConverter =
                new ExternalAnnotationDefinitionConverter();

        for (final AnnotationDefinition ad : definitions) {
            XBELInternalAnnotationDefinition iad = iConverter.convert(ad);
            if (iad != null) {
                internals.add(iad);
                continue;
            }

            XBELExternalAnnotationDefinition ead = eConverter.convert(ad);
            if (ead != null) {
                externals.add(ead);
            }
        }

        xd.setAnnotationDefinitionGroup(xadGroup);
    }

    Header header = source.getHeader();
    HeaderConverter hConverter = new HeaderConverter();
    // Defer to HeaderConverter
    xd.setHeader(hConverter.convert(header));

    NamespaceGroup nsGroup = source.getNamespaceGroup();
    if (nsGroup != null) {
        NamespaceGroupConverter ngConverter = new NamespaceGroupConverter();
        // Defer to NamespaceGroupConverter
        xd.setNamespaceGroup(ngConverter.convert(nsGroup));
    }

    return xd;
}
java
private SslContext getSslContext(URI uri) {
    if (!"https".equalsIgnoreCase(uri.getScheme())) {
        return null;
    }
    try {
        return SslContextBuilder.forClient().build();
    } catch (SSLException e) {
        throw new SdkClientException("Could not create SSL context", e);
    }
}
java
private void fillEDBObjectWithEngineeringObjectInformation(EDBObject object, OpenEngSBModel model)
        throws IllegalAccessException {
    if (!new AdvancedModelWrapper(model).isEngineeringObject()) {
        return;
    }
    for (Field field : model.getClass().getDeclaredFields()) {
        OpenEngSBForeignKey annotation = field.getAnnotation(OpenEngSBForeignKey.class);
        if (annotation == null) {
            continue;
        }
        String value = (String) FieldUtils.readField(field, model, true);
        if (value == null) {
            continue;
        }
        value = String.format("%s/%s", ContextHolder.get().getCurrentContextId(), value);
        String key = getEOReferenceStringFromAnnotation(annotation);
        object.put(key, new EDBObjectEntry(key, value, String.class));
    }
}
python
def get_sign_key(exported_session_key, magic_constant):
    """
    3.4.5.2 SIGNKEY

    :param exported_session_key: A 128-bit session key used to derive
        signing and sealing keys
    :param magic_constant: A constant value set in the MS-NLMP documentation
        (constants.SignSealConstants)
    :return sign_key: Key used to sign messages
    """
    sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
    return sign_key
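A tiny runnable illustration of the derivation above: the signing key is simply MD5(session key || magic constant). The 16 zero bytes stand in for a real exported session key, and the constant shown is an assumption about the MS-NLMP client-to-server signing constant:

import hashlib

exported_session_key = b'\x00' * 16  # placeholder 128-bit session key
magic_constant = b'session key to client-to-server signing key magic constant\x00'

# 16-byte MD5 digest used as the signing key
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
print(sign_key.hex())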
java
private Type pageableMixin(RepositoryRestConfiguration restConfiguration) {
    return new AlternateTypeBuilder()
            .fullyQualifiedClassName(String.format("%s.generated.%s",
                    Pageable.class.getPackage().getName(),
                    Pageable.class.getSimpleName()))
            .withProperties(Stream.of(
                    property(Integer.class, restConfiguration.getPageParamName()),
                    property(Integer.class, restConfiguration.getLimitParamName()),
                    property(String.class, restConfiguration.getSortParamName())
            ).collect(toList()))
            .build();
}
java
private String getPostData(final ICommandLine cl) throws Exception {
    // String encoded = "";
    StringBuilder encoded = new StringBuilder();
    String data = cl.getOptionValue("post");
    if (data == null) {
        return null;
    }
    String[] values = data.split("&");
    for (String value : values) {
        String[] splitted = value.split("=");
        String key = splitted[0];
        String val = "";
        if (splitted.length > 1) {
            val = splitted[1];
        }
        if (encoded.length() != 0) {
            encoded.append('&');
        }
        encoded.append(key).append('=').append(URLEncoder.encode(val, "UTF-8"));
        // encoded += key + "=" + URLEncoder.encode(val, "UTF-8") + "&";
    }
    // if (encoded.endsWith("&")) {
    //     StringUtils.removeEnd(encoded, "&");
    // }
    return encoded.toString();
}
python
def prior_rev(C, alpha=-1.0):
    r"""Prior counts for sampling of reversible transition matrices.

    Prior is defined as

        b_ij = alpha if i <= j
        b_ij = 0     else

    The reversible prior adds -1 to the upper triangular part of
    the given count matrix. This prior respects the fact that
    for a reversible transition matrix the degrees of freedom
    correspond essentially to the upper, respectively the lower
    triangular part of the matrix.

    Parameters
    ----------
    C : (M, M) ndarray or scipy.sparse matrix
        Count matrix
    alpha : float (optional)
        Value of prior counts

    Returns
    -------
    B : (M, M) ndarray
        Matrix of prior counts
    """
    ind = np.triu_indices(C.shape[0])
    B = np.zeros(C.shape)
    B[ind] = alpha
    return B
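For instance, with the default alpha = -1.0, a 3x3 count matrix picks up -1 on its diagonal and upper triangle, which is exactly what the function above computes (a quick standalone check):

import numpy as np

C = np.array([[5, 2, 0],
              [1, 3, 1],
              [0, 2, 4]])
B = np.zeros(C.shape)
B[np.triu_indices(C.shape[0])] = -1.0  # upper triangle, diagonal included
print(B)
# [[-1. -1. -1.]
#  [ 0. -1. -1.]
#  [ 0.  0. -1.]]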
python
def show_option(self, option, _global=False):
    """Return a list of options for the window.

    Parameters
    ----------
    option : str
        option name
    _global : bool, optional
        use global option scope, same as ``-g``

    Returns
    -------
    str, int, or bool

    Raises
    ------
    :exc:`exc.OptionError`, :exc:`exc.UnknownOption`,
    :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`

    Notes
    -----
    Uses ``_global`` for keyword name instead of ``global`` to avoid
    colliding with reserved keyword.

    Test and return True/False for on/off string.
    """
    tmux_args = tuple()

    if _global:
        tmux_args += ('-g',)

    tmux_args += (option,)

    cmd = self.cmd('show-options', *tmux_args)

    if isinstance(cmd.stderr, list) and len(cmd.stderr):
        handle_option_error(cmd.stderr[0])

    if not len(cmd.stdout):
        return None

    option = [item.split(' ') for item in cmd.stdout][0]

    if option[1].isdigit():
        option = (option[0], int(option[1]))

    return option[1]
java
public String getValuesAsString() {
    StringBuilder builder = new StringBuilder();
    for (E row : rowKeys.keySet())
        for (E col : colKeys.keySet()) {
            builder.append(getValueAsString(row, col, true));
            builder.append('\n');
        }
    return builder.toString();
}
java
@Override
public void open(FileInputSplit split) throws IOException {
    super.open(split);
    dataFileReader = initReader(split);
    dataFileReader.sync(split.getStart());
    lastSync = dataFileReader.previousSync();
}
java
private void popAndResetToOldLevel() {
    this.levelStack.pop();
    if (!this.levelStack.isEmpty()) {
        JsonLevel newTop = levelStack.peek();
        if (newTop != null) {
            newTop.removeLastTokenFromJsonPointer();
        }
    }
}
java
private boolean nestedContains(ArchivePath path) {
    // Iterate through nested archives
    for (Entry<ArchivePath, ArchiveAsset> nestedArchiveEntry : nestedArchives.entrySet()) {
        ArchivePath archivePath = nestedArchiveEntry.getKey();
        ArchiveAsset archiveAsset = nestedArchiveEntry.getValue();

        // Check to see if the requested path starts with the nested archive path
        if (startsWith(path, archivePath)) {
            Archive<?> nestedArchive = archiveAsset.getArchive();

            // Get the asset path from within the nested archive
            ArchivePath nestedAssetPath = getNestedPath(path, archivePath);

            // Recurse the call to the nested archive
            return nestedArchive.contains(nestedAssetPath);
        }
    }
    return false;
}
python
def get_candidate_delegates(self, candidate):
    """
    Get all pledged delegates for a candidate in this election.
    """
    candidate_election = CandidateElection.objects.get(
        candidate=candidate, election=self
    )
    return candidate_election.delegates.all()
python
def search(self, filepath=None, basedir=None, kind=None):
    """
    Search for a settings file.

    Keyword Arguments:
        filepath (string): Path to a config file, either absolute or
            relative. If absolute, set its directory as basedir (omitting
            the given basedir argument). If relative, join it to basedir.
        basedir (string): Directory path where to search for.
        kind (string): Backend engine kind name (value of attribute
            ``_kind_name``) to help discovering with empty or relative
            filepath. Also if an explicit absolute filepath is given, this
            will enforce the backend engine (such as yaml kind will be
            forced for a ``foo.json`` file).

    Returns:
        tuple: Absolute filepath and backend engine class.
    """
    # None values would cause trouble with path joining
    if filepath is None:
        filepath = ''
    if basedir is None:
        basedir = '.'

    if not basedir and not filepath:
        msg = "Either basedir or filepath is required for discovering"
        raise SettingsDiscoveryError(msg)

    if kind and kind not in self.engines:
        msg = "Given settings format is unknown: {}"
        raise SettingsDiscoveryError(msg.format(kind))

    # Implicit filename to find from backend
    if not filepath:
        filename, engine = self.guess_filename(basedir, kind)
        filepath = os.path.join(basedir, filename)
    # Explicit filename doesn't have to search for default backend file and
    # blindly forces given backend if any
    else:
        if os.path.isabs(filepath):
            basedir, filename = os.path.split(filepath)
        else:
            filepath = os.path.join(basedir, filepath)

        if not os.path.exists(filepath):
            msg = "Given settings file does not exist: {}"
            raise SettingsDiscoveryError(msg.format(filepath))

        engine = self.get_engine(filepath, kind)

    return filepath, engine
java
public ResultSet getColumnPrivileges(final String catalog, final String schema,
        final String table, final String columnNamePattern) throws SQLException {
    log.info("getting empty result set, column privileges");
    return getEmptyResultSet();
}
python
def clean_inasafe_fields(layer):
    """Clean inasafe_fields based on keywords.

    1. Must use standard field names.
    2. Sum up list of fields' value and put in the standard field name.
    3. Remove un-used fields.

    :param layer: The layer
    :type layer: QgsVectorLayer
    """
    fields = []
    # Exposure
    if layer.keywords['layer_purpose'] == layer_purpose_exposure['key']:
        fields = get_fields(
            layer.keywords['layer_purpose'], layer.keywords['exposure'])
    # Hazard
    elif layer.keywords['layer_purpose'] == layer_purpose_hazard['key']:
        fields = get_fields(
            layer.keywords['layer_purpose'], layer.keywords['hazard'])
    # Aggregation
    elif layer.keywords['layer_purpose'] == layer_purpose_aggregation['key']:
        fields = get_fields(
            layer.keywords['layer_purpose'])

    # Add displaced_field definition to expected_fields
    # for minimum needs calculator.
    # If there is no displaced_field keyword, then pass
    try:
        if layer.keywords['inasafe_fields'][displaced_field['key']]:
            fields.append(displaced_field)
    except KeyError:
        pass

    expected_fields = {field['key']: field['field_name'] for field in fields}

    # Convert the field name and sum up if needed
    new_keywords = {}
    for key, val in list(layer.keywords.get('inasafe_fields').items()):
        if key in expected_fields:
            if isinstance(val, str):
                val = [val]
            sum_fields(layer, key, val)
            new_keywords[key] = expected_fields[key]

    # Hooray, InaSAFE keywords match our concepts!
    layer.keywords['inasafe_fields'].update(new_keywords)

    to_remove = []
    # Remove unnecessary fields (the ones that are not in the inasafe_fields)
    for field in layer.fields().toList():
        if field.name() not in list(layer.keywords['inasafe_fields'].values()):
            to_remove.append(field.name())
    remove_fields(layer, to_remove)
    LOGGER.debug(
        'Fields which have been removed from %s : %s'
        % (layer.keywords['layer_purpose'], ' '.join(to_remove)))
python
def make_input_from_multiple_strings(sentence_id: SentenceId,
                                     strings: List[str]) -> TranslatorInput:
    """
    Returns a TranslatorInput object from multiple strings, where the first
    element corresponds to the surface tokens and the remaining elements to
    additional factors. All strings must parse into token sequences of the
    same length.

    :param sentence_id: Sentence id.
    :param strings: A list of strings representing a factored input sequence.
    :return: A TranslatorInput.
    """
    if not bool(strings):
        return TranslatorInput(sentence_id=sentence_id, tokens=[], factors=None)

    tokens = list(data_io.get_tokens(strings[0]))
    factors = [list(data_io.get_tokens(factor)) for factor in strings[1:]]
    if not all(len(factor) == len(tokens) for factor in factors):
        logger.error("Length of string sequences do not match: '%s'", strings)
        return _bad_input(sentence_id, reason=str(strings))
    return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors)
java
public ResolveRoomResult withRoomSkillParameters(RoomSkillParameter... roomSkillParameters) {
    if (this.roomSkillParameters == null) {
        setRoomSkillParameters(new java.util.ArrayList<RoomSkillParameter>(roomSkillParameters.length));
    }
    for (RoomSkillParameter ele : roomSkillParameters) {
        this.roomSkillParameters.add(ele);
    }
    return this;
}
java
@Override
public Object getValue(ELContext context, Object base, Object property)
        throws NullPointerException, PropertyNotFoundException, ELException {
    Iterator<?> pos;
    try {
        pos = seek(context, base, property);
    } catch (PropertyNotFoundException e) {
        pos = null;
    }
    return pos == null ? null : pos.next();
}
java
public static Promise<Void> createDirectories(Executor executor, Path dir, FileAttribute... attrs) {
    return ofBlockingRunnable(executor, () -> {
        try {
            Files.createDirectories(dir, attrs);
        } catch (IOException e) {
            throw new UncheckedException(e);
        }
    });
}
java
public void setLocale(Locale locale) {
    List<SimpleDateFormat> formats = new ArrayList<SimpleDateFormat>();
    for (SimpleDateFormat format : m_formats) {
        formats.add(new SimpleDateFormat(format.toPattern(), locale));
    }
    m_formats = formats.toArray(new SimpleDateFormat[formats.size()]);
}
java
public void shutdown() {
    gossipServiceRunning.set(false);
    gossipThreadExecutor.shutdown();
    if (passiveGossipThread != null) {
        passiveGossipThread.shutdown();
    }
    if (activeGossipThread != null) {
        activeGossipThread.shutdown();
    }
    try {
        boolean result = gossipThreadExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        if (!result) {
            LOGGER.error("executor shutdown timed out");
        }
    } catch (InterruptedException e) {
        LOGGER.error(e);
    }
}
python
def reset(self):
    """
    Reset target collection (rebuild index).
    """
    self.connection.rebuild_index(
        self.index, coll_name=self.target_coll_name)
java
@SuppressWarnings({"unchecked"}) @Override public void writeTo(Viewable viewable, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream out) throws IOException, WebApplicationException { String resolvedPath = viewable.getTemplateName(); Object model = viewable.getModel(); LOG.debug("Evaluating freemarker template (" + resolvedPath + ") with model of type " + (model == null ? "null" : model.getClass().getSimpleName())); // Build the model context that will be passed to the page final Map<String, Object> vars; if (model instanceof Map) { vars = new HashMap<>((Map<String, Object>) model); } else { vars = new HashMap<>(); vars.put("it", model); } RequestContext requestContext = new RequestContext(); HttpServletRequest httpServletRequest = requestInvoker != null ? requestInvoker.get() : null; requestContext.setHttpServletRequest(httpServletRequest ); vars.put("RequestContext", requestContext); vars.put("Request", httpServletRequest); if (httpServletRequest != null && viewable.getResolvingClass() != null) { httpServletRequest.setAttribute(ViewableResourceTemplateLoader.KEY_NETFLIX_ADMIN_REQUEST_VIEWABLE, viewable); } Principal ctx = null; if (httpServletRequest != null) { ctx = httpServletRequest.getUserPrincipal(); if (ctx == null && httpServletRequest.getSession(false) != null) { final String username = (String) httpServletRequest.getSession().getAttribute("SSO_UserName"); if (username != null) { ctx = new Principal() { @Override public String getName() { return username; } }; } } } vars.put("Principal", ctx); // The following are here for backward compatibility and should be deprecated as soon as possible Map<String, Object> global = Maps.newHashMap(); if (manager != null) { GlobalModelContext globalModel = manager.getGlobalModel(); global.put("sysenv", globalModel.getEnvironment()); // TODO: DEPRECATE vars.put("Explorer", manager.getExplorer(AdminExplorerManager.ADMIN_EXPLORER_NAME)); } vars.put("global", global); // TODO: DEPRECATE vars.put("pathToRoot", requestContext.getPathToRoot()); // TODO: DEPRECATE final StringWriter stringWriter = new StringWriter(); try { if (requestContext.getIsAjaxRequest()) { fmConfig.getTemplate(resolvedPath).process(vars, stringWriter); } else { vars.put("nestedpage", resolvedPath); fmConfig.getTemplate("/layout/" + ADMIN_CONSOLE_LAYOUT + "/main.ftl").process(vars, stringWriter); } final OutputStreamWriter writer = new OutputStreamWriter(out); writer.write(stringWriter.getBuffer().toString()); writer.flush(); } catch (Throwable t) { LOG.error("Error processing freemarker template @ " + resolvedPath + ": " + t.getMessage(), t); throw new WebApplicationException(t, Response.Status.INTERNAL_SERVER_ERROR); } }
python
def pkeys(self, parent, field):
    "returns a list of pkey tuples by combining parent[field] with our attrs"
    template = [(parent[k] if k is not None else None) for k in self.pkey]
    inull = template.index(None)

    def mk(x):
        "helper for constructing pkey tuples in a list comp"
        template[inull] = x
        return tuple(template)

    val = parent[field]
    if self.getter is not None:
        return map(mk, self.getter(val))
    elif isinstance(val, VDList):
        return map(mk, val.generate())
    else:
        raise NotImplementedError(type(val))
python
def errorObject(object, cat, format, *args):
    """
    Log a fatal error message in the given category.
    This will also raise a L{SystemExit}.
    """
    doLog(ERROR, object, cat, format, args)

    # we do the import here because having it globally causes weird import
    # errors if our gstreactor also imports .log, which brings in errors
    # and pb stuff
    if args:
        raise SystemExit(format % args)
    else:
        raise SystemExit(format)
python
def prepare_attached(self, action, a_name, **kwargs):
    """
    Prepares an attached volume for a container configuration.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param a_name: The full name or id of the container sharing the volume.
    :type a_name: unicode | str
    """
    client = action.client
    config_id = action.config_id
    policy = self._policy
    if action.container_map.use_attached_parent_name:
        v_alias = '{0.config_name}.{0.instance_name}'.format(config_id)
    else:
        v_alias = config_id.instance_name
    user = policy.volume_users[config_id.map_name][v_alias]
    permissions = policy.volume_permissions[config_id.map_name][v_alias]

    if not (self.prepare_local and hasattr(client, 'run_cmd')):
        return self._prepare_container(client, action, a_name, v_alias)

    if action.client_config.features['volumes']:
        volume_detail = client.inspect_volume(a_name)
        local_path = volume_detail['Mountpoint']
    else:
        instance_detail = client.inspect_container(a_name)
        volumes = get_instance_volumes(instance_detail, False)
        path = resolve_value(policy.default_volume_paths[config_id.map_name][v_alias])
        local_path = volumes.get(path)
        if not local_path:
            raise ValueError("Could not locate local path of volume alias '{0}' / "
                             "path '{1}' in container {2}.".format(
                                 action.config_id.instance_name, path, a_name))
    return [
        client.run_cmd(cmd)
        for cmd in get_preparation_cmd(user, permissions, local_path)
    ]
java
private Function<HttpRequestContext, String> createQuerySubstitution(final String param) {
    return new Function<HttpRequestContext, String>() {
        @Override
        public String apply(HttpRequestContext request) {
            MultivaluedMap<String, String> params = request.getQueryParameters();

            if (!params.containsKey(param)) {
                throw new IllegalStateException(
                        "Parameter required for authentication is missing: " + param);
            }

            List<String> values = params.get(param);
            if (values.size() != 1) {
                throw new IllegalStateException(
                        "Exactly one parameter expected for authentication: " + param);
            }

            return values.get(0);
        }
    };
}
python
def __load_unique_identities(self, uidentities, matcher, match_new,
                             reset, verbose):
    """Load unique identities"""
    self.new_uids.clear()

    n = 0

    if reset:
        self.__reset_unique_identities()

    self.log("Loading unique identities...")

    for uidentity in uidentities:
        self.log("\n=====", verbose)
        self.log("+ Processing %s" % uidentity.uuid, verbose)

        try:
            stored_uuid = self.__load_unique_identity(uidentity, verbose)
        except LoadError as e:
            self.error("%s Skipping." % str(e))
            self.log("=====", verbose)
            continue

        stored_uuid = self.__load_identities(uidentity.identities,
                                             stored_uuid, verbose)

        try:
            self.__load_profile(uidentity.profile, stored_uuid, verbose)
        except Exception as e:
            self.error("%s. Loading %s profile. Skipping profile." %
                       (str(e), stored_uuid))

        self.__load_enrollments(uidentity.enrollments, stored_uuid, verbose)

        if matcher and (not match_new or stored_uuid in self.new_uids):
            stored_uuid = self._merge_on_matching(stored_uuid, matcher,
                                                  verbose)

        self.log("+ %s (old %s) loaded" % (stored_uuid, uidentity.uuid),
                 verbose)
        self.log("=====", verbose)
        n += 1

    self.log("%d/%d unique identities loaded" % (n, len(uidentities)))
python
def _dict_contents(self, use_dict=None, as_class=dict):
    """Return the contents of an object as a dict."""
    if _debug:
        Object._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)

    # make/extend the dictionary of content
    if use_dict is None:
        use_dict = as_class()

    klasses = list(self.__class__.__mro__)
    klasses.reverse()

    # build a list of property identifiers "bottom up"
    property_names = []
    properties_seen = set()
    for c in klasses:
        for prop in getattr(c, 'properties', []):
            if prop.identifier not in properties_seen:
                property_names.append(prop.identifier)
                properties_seen.add(prop.identifier)

    # extract the values
    for property_name in property_names:
        # get the value
        property_value = self._properties.get(property_name).ReadProperty(self)
        if property_value is None:
            continue

        # if the value has a way to convert it to a dict, use it
        if hasattr(property_value, "dict_contents"):
            property_value = property_value.dict_contents(as_class=as_class)

        # save the value
        use_dict.__setitem__(property_name, property_value)

    # return what we built/updated
    return use_dict
java
public void setCustomAnimation(BaseAnimationInterface animation) {
    mCustomAnimation = animation;
    if (mViewPagerTransformer != null) {
        mViewPagerTransformer.setCustomAnimationInterface(mCustomAnimation);
    }
}
java
public boolean isAnnotatable() {
    return ObjectUtils.defaultIfNull(
            (Boolean) configurationValues.get(ConfigurationOption.ANNOTATE), Boolean.TRUE)
            .booleanValue();
}
python
def make_prediction_output_tensors(args, features, input_ops, model_fn_ops,
                                   keep_target):
    """Makes the final prediction output layer."""
    target_name = feature_transforms.get_target_name(features)
    key_names = get_key_names(features)

    outputs = {}
    outputs.update({key_name: tf.squeeze(input_ops.features[key_name])
                    for key_name in key_names})

    if is_classification_model(args.model):

        # build maps from ints to the original categorical strings.
        class_names = read_vocab(args, target_name)
        table = tf.contrib.lookup.index_to_string_table_from_tensor(
            mapping=class_names,
            default_value='UNKNOWN')

        # Get the label of the input target.
        if keep_target:
            input_target_label = table.lookup(input_ops.features[target_name])
            outputs[PG_TARGET] = tf.squeeze(input_target_label)

        # TODO(brandondutra): get the score of the target label too.
        probabilities = model_fn_ops.predictions['probabilities']

        # if top_n == 0, this means use all the classes. We will use class
        # names as probabilities labels.
        if args.top_n == 0:
            predicted_index = tf.argmax(probabilities, axis=1)
            predicted = table.lookup(predicted_index)

            outputs.update({PG_CLASSIFICATION_FIRST_LABEL: predicted})
            probabilities_list = tf.unstack(probabilities, axis=1)
            for class_name, p in zip(class_names, probabilities_list):
                outputs[class_name] = p
        else:
            top_n = args.top_n

            # get top k labels and their scores.
            (top_k_values, top_k_indices) = tf.nn.top_k(probabilities, k=top_n)
            top_k_labels = table.lookup(tf.to_int64(top_k_indices))

            # Write the top_k values using 2*top_n columns.
            num_digits = int(math.ceil(math.log(top_n, 10)))
            if num_digits == 0:
                num_digits = 1

            for i in range(0, top_n):
                # Pad i based on the size of k. So if k = 100, i = 23 -> i = '023'.
                # This makes sorting the columns easy.
                padded_i = str(i + 1).zfill(num_digits)

                if i == 0:
                    label_alias = PG_CLASSIFICATION_FIRST_LABEL
                else:
                    label_alias = PG_CLASSIFICATION_LABEL_TEMPLATE % padded_i

                label_tensor_name = (tf.squeeze(
                    tf.slice(top_k_labels, [0, i], [tf.shape(top_k_labels)[0], 1])))

                if i == 0:
                    score_alias = PG_CLASSIFICATION_FIRST_SCORE
                else:
                    score_alias = PG_CLASSIFICATION_SCORE_TEMPLATE % padded_i

                score_tensor_name = (tf.squeeze(
                    tf.slice(top_k_values, [0, i], [tf.shape(top_k_values)[0], 1])))

                outputs.update({label_alias: label_tensor_name,
                                score_alias: score_tensor_name})
    else:
        if keep_target:
            outputs[PG_TARGET] = tf.squeeze(input_ops.features[target_name])

        scores = model_fn_ops.predictions['scores']
        outputs[PG_REGRESSION_PREDICTED_TARGET] = tf.squeeze(scores)

    return outputs
java
public static <T> InputReader getInstance(T input, int size, String cs) throws IOException {
    return getInstance(input, size, Charset.forName(cs), NO_FEATURES);
}
java
private void addPostParams(final Request request) {
    if (friendlyName != null) {
        request.addPostParam("FriendlyName", friendlyName);
    }
    if (chatServiceSid != null) {
        request.addPostParam("ChatServiceSid", chatServiceSid);
    }
    if (channelType != null) {
        request.addPostParam("ChannelType", channelType.toString());
    }
    if (contactIdentity != null) {
        request.addPostParam("ContactIdentity", contactIdentity);
    }
    if (enabled != null) {
        request.addPostParam("Enabled", enabled.toString());
    }
    if (integrationType != null) {
        request.addPostParam("IntegrationType", integrationType.toString());
    }
    if (integrationFlowSid != null) {
        request.addPostParam("Integration.FlowSid", integrationFlowSid);
    }
    if (integrationUrl != null) {
        request.addPostParam("Integration.Url", integrationUrl.toString());
    }
    if (integrationWorkspaceSid != null) {
        request.addPostParam("Integration.WorkspaceSid", integrationWorkspaceSid);
    }
    if (integrationWorkflowSid != null) {
        request.addPostParam("Integration.WorkflowSid", integrationWorkflowSid);
    }
    if (integrationChannel != null) {
        request.addPostParam("Integration.Channel", integrationChannel);
    }
    if (integrationTimeout != null) {
        request.addPostParam("Integration.Timeout", integrationTimeout.toString());
    }
    if (integrationPriority != null) {
        request.addPostParam("Integration.Priority", integrationPriority.toString());
    }
    if (integrationCreationOnMessage != null) {
        request.addPostParam("Integration.CreationOnMessage", integrationCreationOnMessage.toString());
    }
    if (longLived != null) {
        request.addPostParam("LongLived", longLived.toString());
    }
}
python
def alias_catalog(self, catalog_id, alias_id):
    """Adds an ``Id`` to a ``Catalog`` for the purpose of creating
    compatibility.

    The primary ``Id`` of the ``Catalog`` is determined by the provider.
    The new ``Id`` performs as an alias to the primary ``Id``. If the
    alias is a pointer to another catalog, it is reassigned to the given
    catalog ``Id``.

    arg:    catalog_id (osid.id.Id): the ``Id`` of a ``Catalog``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``catalog_id`` not found
    raise:  NullArgument - ``catalog_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinLookupSession.alias_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.alias_catalog(catalog_id=catalog_id,
                                                   alias_id=alias_id)
    self._alias_id(primary_id=catalog_id, equivalent_id=alias_id)
python
def stationary_distribution_sensitivity(T, j):
    r"""Sensitivity matrix of a stationary distribution element.

    Parameters
    ----------
    T : (M, M) ndarray
        Transition matrix (stochastic matrix).
    j : int
        Index of stationary distribution element for which sensitivity
        matrix is computed.

    Returns
    -------
    S : (M, M) ndarray
        Sensitivity matrix for the specified element of the stationary
        distribution.
    """
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    if _issparse(T):
        _showSparseConversionWarning()
        # recurse on the dense representation and return its result
        return stationary_distribution_sensitivity(T.todense(), j)
    else:
        return dense.sensitivity.stationary_distribution_sensitivity(T, j)
java
@GetMapping(path = CasProtocolConstants.ENDPOINT_VALIDATE)
protected ModelAndView handle(final HttpServletRequest request,
        final HttpServletResponse response) throws Exception {
    return super.handleRequestInternal(request, response);
}
python
def download(objects):
    """Retrieve remote file object"""

    def exists(filename):
        if os.path.exists(TMPDIR + '/' + filename):
            return True
        else:
            msg = 'File object %s failed to download to %s. Exit' % (filename, TMPDIR)
            logger.warning(msg)
            stdout_message('%s: %s' % (inspect.stack()[0][3], msg))
            return False

    try:
        for file_path in objects:
            filename = file_path.split('/')[-1]
            r = urllib.request.urlretrieve(file_path, TMPDIR + '/' + filename)
            if not exists(filename):
                return False
    except urllib.error.HTTPError as e:
        logger.exception(
            '%s: Failed to retrieve file object: %s. Exception: %s, data: %s' %
            (inspect.stack()[0][3], file_path, str(e), e.read()))
        raise e
    return True
python
def verifyComponents(self, samplerate):
    """Checks the current components for invalidating conditions

    :param samplerate: generation samplerate (Hz), passed on to component
        verification
    :type samplerate: int
    :returns: str -- error message, if any, 0 otherwise
    """
    # flatten list of components
    components = [comp for track in self._segments for comp in track]
    for comp in components:
        msg = comp.verify(samplerate=samplerate)
        if msg:
            return msg
    return 0
java
public Object getObject(int idx) {
    if (idx < 0) {
        throw new IllegalArgumentException("Negative size: " + idx);
    }
    if (idx >= fields.size()) {
        return null;
    }
    return fields.get(idx);
}
java
private ValidationMessage<Origin> reportError(ValidationResult result, Feature feature,
        String messageId, Object... params) {
    ValidationMessage<Origin> message =
            EntryValidations.createMessage(feature.getOrigin(), Severity.ERROR, messageId, params);

    if (SequenceEntryUtils.isQualifierAvailable(Qualifier.LOCUS_TAG_QUALIFIER_NAME, feature)) {
        Qualifier locusTag =
                SequenceEntryUtils.getQualifier(Qualifier.LOCUS_TAG_QUALIFIER_NAME, feature);
        message.appendCuratorMessage("locus tag = " + locusTag.getValue());
    }

    if (SequenceEntryUtils.isQualifierAvailable(Qualifier.GENE_QUALIFIER_NAME, feature)) {
        Qualifier geneName =
                SequenceEntryUtils.getQualifier(Qualifier.GENE_QUALIFIER_NAME, feature);
        message.appendCuratorMessage("gene = " + geneName.getValue());
    }

    result.append(message);
    return message;
}
java
protected void parse(Node node) {
    queryable = isQueryable(node);
    styleInfo.clear();

    NodeList childNodes = node.getChildNodes();
    for (int i = 0; i < childNodes.getLength(); i++) {
        Node child = childNodes.item(i);
        String nodeName = child.getNodeName();
        if ("Name".equalsIgnoreCase(nodeName)) {
            name = getValueRecursive(child);
        } else if ("Title".equalsIgnoreCase(nodeName)) {
            title = getValueRecursive(child);
        } else if ("Abstract".equalsIgnoreCase(nodeName)) {
            abstractt = getValueRecursive(child);
        } else if ("KeywordList".equalsIgnoreCase(nodeName)) {
            addKeyWords(child);
        } else if ("SRS".equalsIgnoreCase(nodeName)) {
            crs.add(getValueRecursive(child));
        } else if ("BoundingBox".equalsIgnoreCase(nodeName)) {
            addBoundingBox(child);
        } else if ("LatLonBoundingBox".equalsIgnoreCase(nodeName)) {
            addLatLonBoundingBox(child);
        } else if ("MetadataURL".equalsIgnoreCase(nodeName)) {
            metadataUrls.add(new WmsLayerMetadataUrlInfo111(child));
        } else if ("Style".equalsIgnoreCase(nodeName)) {
            styleInfo.add(new WmsLayerStyleInfo111(child));
        } else if ("ScaleHint".equalsIgnoreCase(nodeName)) {
            if (hasAttribute(child, "min")) {
                minScaleDenominator = getAttributeAsDouble(child, "min");
            }
            if (hasAttribute(child, "max")) {
                maxScaleDenominator = getAttributeAsDouble(child, "max");
            }
        }
    }
    parsed = true;
}
python
def get_info(self):
    """Return a list with all information available about this process"""
    return [self.display_name.encode(),
            self.enabled and b'enabled' or b'disabled',
            STATE_NAMES[self.state].encode() + b':',
            self.last_printed_line.strip()]
python
def measure(*qubits: raw_types.Qid,
            key: Optional[str] = None,
            invert_mask: Tuple[bool, ...] = ()
            ) -> gate_operation.GateOperation:
    """Returns a single MeasurementGate applied to all the given qubits.

    The qubits are measured in the computational basis.

    Args:
        *qubits: The qubits that the measurement gate should measure.
        key: The string key of the measurement. If this is None, it defaults
            to a comma-separated list of the target qubits' str values.
        invert_mask: A list of Truthy or Falsey values indicating whether
            the corresponding qubits should be flipped. None indicates no
            inverting should be done.

    Returns:
        An operation targeting the given qubits with a measurement.

    Raises:
        ValueError if the qubits are not instances of Qid.
    """
    for qubit in qubits:
        if isinstance(qubit, np.ndarray):
            raise ValueError(
                'measure() was called with a numpy ndarray. Perhaps you meant '
                'to call measure_state_vector on a numpy array?')
        elif not isinstance(qubit, raw_types.Qid):
            raise ValueError(
                'measure() was called with a type different than Qid.')

    if key is None:
        key = _default_measurement_key(qubits)
    return MeasurementGate(len(qubits), key, invert_mask).on(*qubits)
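Assuming this is the cirq-style measure helper, typical usage looks like the following; the cirq import and LineQubit construction are assumptions about the surrounding library:

import cirq

q0, q1 = cirq.LineQubit.range(2)
op = cirq.measure(q0, q1, key='result')  # one gate measuring both qubits
print(op)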
java
public static BufferedWriter newWriter(File file, String charset) throws IOException {
    return newWriter(file, charset, false);
}
python
def request_generic(self, act, coro, perform, complete):
    """
    Performs an overlapped request (via `perform` callable) and saves
    the token and the (`overlapped`, `perform`, `complete`) trio.
    """
    overlapped = pywintypes.OVERLAPPED()
    overlapped.object = act
    self.add_token(act, coro, (overlapped, perform, complete))
    try:
        rc, nbytes = perform(act, overlapped)
        if rc == 0:
            # ah geez, it didn't go into the iocp, we have a result!
            win32file.PostQueuedCompletionStatus(
                self.iocp, nbytes, 0, overlapped)
    except pywintypes.error as exc:
        raise SocketError(exc)
python
def get_reference_line_numeration_marker_patterns(prefix=u''):
    """Return a list of compiled regex patterns used to search for the marker.

    Marker of a reference line in a full-text document.

    :param prefix: (string) the possible prefix to a reference line
    :return: (list) of compiled regex patterns.
    """
    title = u""
    if type(prefix) in (str, unicode):
        title = prefix

    g_name = u'(?P<mark>'
    g_close = u')'
    space = r'\s*'
    patterns = [
        # [1]
        space + title + g_name + r'\[\s*(?P<marknum>\d+)\s*\]' + g_close,
        # [<letters and numbers]
        space + title + g_name +
        r'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close,  # noqa
        # {1}
        space + title + g_name + r'\{\s*(?P<marknum>\d+)\s*\}' + g_close,
        # <1>
        space + title + g_name + r'\<\s*(?P<marknum>\d+)\s*\>' + g_close,
        # (1)
        space + title + g_name + r'\(\s*(?P<marknum>\d+)\s*\)' + g_close,
        space + title + g_name + r'(?P<marknum>\d+)\s*\.(?!\d)' + g_close,
        space + title + g_name + r'(?P<marknum>\d+)\s+' + g_close,
        space + title + g_name + r'(?P<marknum>\d+)\s*\]' + g_close,  # 1]
        space + title + g_name + r'(?P<marknum>\d+)\s*\}' + g_close,  # 1}
        space + title + g_name + r'(?P<marknum>\d+)\s*\)' + g_close,  # 1)
        space + title + g_name + r'(?P<marknum>\d+)\s*\>' + g_close,
        # [1.1]
        space + title + g_name + r'\[\s*\d+\.\d+\s*\]' + g_close,
        # [ ]
        space + title + g_name + r'\[\s*\]' + g_close,
        # *
        space + title + g_name + r'\*' + g_close,
    ]
    return [re.compile(p, re.I | re.UNICODE) for p in patterns]
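To see one of these patterns in action, here is the first (bracketed-number) pattern rebuilt standalone with an empty prefix and matched against a sample reference line:

import re

pattern = re.compile(r'\s*(?P<mark>\[\s*(?P<marknum>\d+)\s*\])', re.I | re.UNICODE)
m = pattern.match('  [12] A. Author, Some Paper Title')
print(m.group('mark'), m.group('marknum'))  # -> [12] 12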
python
def get_string_at_rva(self, rva):
    """Get an ASCII string located at the given address."""
    s = self.get_section_by_rva(rva)
    if not s:
        return self.get_string_from_data(
            0, self.__data__[rva:rva + MAX_STRING_LENGTH])
    return self.get_string_from_data(0, s.get_data(rva, length=MAX_STRING_LENGTH))
python
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
    """Shift this dataset by an offset along one or more dimensions.

    Only data variables are moved; coordinates stay in place. This is
    consistent with the behavior of ``shift`` in pandas.

    Parameters
    ----------
    shifts : Mapping with the form of {dim: offset}
        Integer offset to shift along each of the given dimensions.
        Positive offsets shift to the right; negative offsets shift to the
        left.
    fill_value: scalar, optional
        Value to use for newly missing values
    **shifts_kwargs:
        The keyword arguments form of ``shifts``.
        One of shifts or shifts_kwargs must be provided.

    Returns
    -------
    shifted : Dataset
        Dataset with the same coordinates and attributes but shifted data
        variables.

    See also
    --------
    roll

    Examples
    --------
    >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})
    >>> ds.shift(x=2)
    <xarray.Dataset>
    Dimensions:  (x: 5)
    Coordinates:
      * x        (x) int64 0 1 2 3 4
    Data variables:
        foo      (x) object nan nan 'a' 'b' 'c'
    """
    shifts = either_dict_or_kwargs(shifts, shifts_kwargs, 'shift')
    invalid = [k for k in shifts if k not in self.dims]
    if invalid:
        raise ValueError("dimensions %r do not exist" % invalid)

    variables = OrderedDict()
    for name, var in self.variables.items():
        if name in self.data_vars:
            var_shifts = {k: v for k, v in shifts.items()
                          if k in var.dims}
            variables[name] = var.shift(
                fill_value=fill_value, shifts=var_shifts)
        else:
            variables[name] = var

    return self._replace_vars_and_dims(variables)
python
def get_subject(self):
    """
    Returns:
        subject : Reactive Extension Subject
    """
    if not self._run_flag.running:
        raise Exception('RuuviTagReactive stopped')

    subject = Subject()
    self._subjects.append(subject)
    return subject
java
@Override
public ValidationReport validateBond(IBond subject) {
    ValidationReport report = new ValidationReport();
    // only consider two atom bonds
    if (subject.getAtomCount() == 2) {
        double distance = subject.getBegin().getPoint3d().distance(subject.getEnd().getPoint3d());
        if (distance > 3.0) { // should really depend on the elements
            ValidationTest badBondLengthError = new ValidationTest(subject,
                    "Bond length cannot exceed 3 Angstroms.",
                    "A bond length typically is between 0.5 and 3.0 Angstroms.");
            report.addError(badBondLengthError);
        }
    }
    return report;
}
java
public long getDepth() throws SIMPControllableNotFoundException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "getDepth");

    long count = 0;
    SIMPIterator iterator = getStreams();
    while (iterator.hasNext()) {
        InternalOutputStreamControl ios = (InternalOutputStreamControl) iterator.next();
        count += ios.getNumberOfActiveMessages();
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "getDepth", Long.valueOf(count));

    return count;
}
python
def _read(self, fp, fpname):
    """Parse a sectioned setup file.

    The sections in setup file contains a title line at the top,
    indicated by a name in square brackets (`[]'), plus key/value
    options lines, indicated by `name: value' format lines.
    Continuations are represented by an embedded newline then
    leading whitespace.  Blank lines, lines beginning with a '#',
    and just about everything else are ignored.
    """
    cursect = None                        # None, or a dictionary
    optname = None
    lineno = 0
    e = None                              # None, or an exception
    while True:
        line = fp.readline()
        if not line:
            break
        lineno = lineno + 1
        # comment or blank line?
        if line.strip() == '' or line[0] in '#;':
            continue
        if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
            # no leading whitespace
            continue
        # continuation line?
        if line[0].isspace() and cursect is not None and optname:
            value = line.strip()
            if value:
                cursect[optname].append(value)
        # a section header or option header?
        else:
            # is it a section header?
            mo = self.SECTCRE.match(line)
            if mo:
                sectname = mo.group('header')
                if sectname in self._sections:
                    cursect = self._sections[sectname]
                elif sectname == DEFAULTSECT:
                    cursect = self._defaults
                else:
                    cursect = self._dict()
                    cursect['__name__'] = sectname
                    self._sections[sectname] = cursect
                # So sections can't start with a continuation line
                optname = None
            # no section header in the file?
            elif cursect is None:
                raise MissingSectionHeaderError(fpname, lineno, line)
            # an option line?
            else:
                mo = self._optcre.match(line)
                if mo:
                    optname, vi, optval = mo.group('option', 'vi', 'value')
                    optname = self.optionxform(optname.rstrip())
                    # This check is fine because the OPTCRE cannot
                    # match if it would set optval to None
                    if optval is not None:
                        if vi in ('=', ':') and ';' in optval:
                            # ';' is a comment delimiter only if it follows
                            # a spacing character
                            pos = optval.find(';')
                            if pos != -1 and optval[pos - 1].isspace():
                                optval = optval[:pos]
                        optval = optval.strip()
                        # allow empty values
                        if optval == '""':
                            optval = ''
                        cursect[optname] = [optval]
                    else:
                        # valueless option handling
                        cursect[optname] = optval
                else:
                    # a non-fatal parsing error occurred. set up the
                    # exception but keep going. the exception will be
                    # raised at the end of the file and will contain a
                    # list of all bogus lines
                    if not e:
                        e = ParsingError(fpname)
                    e.append(lineno, repr(line))
    # if any parsing errors occurred, raise an exception
    if e:
        raise e

    # join the multi-line values collected while reading
    all_sections = [self._defaults]
    all_sections.extend(self._sections.values())
    for options in all_sections:
        for name, val in options.items():
            if isinstance(val, list):
                options[name] = '\n'.join(val)
java
static public byte[] asByteArray(long inValue) {
    long value = inValue;
    // check for 0
    if (0 == value) {
        return ZERO_BYTEARRAY;
    }
    // make space for the largest long number
    byte[] bytes = new byte[SIZE_MAXLONG];
    // check for negative ints
    boolean bNegative = false;
    if (0 > value) {
        // force it positive for parsing
        bNegative = true;
        value = -value;
    }
    // now loop back through each digit in the long
    int index = SIZE_MAXLONG - 1;
    for (; 0 <= index && 0 != value; index--) {
        bytes[index] = HEX_BYTES[(int) (value % 10)];
        value /= 10;
    }
    // length is how ever many digits there were + a possible negative sign
    int len = (SIZE_MAXLONG - 1 - index);
    if (bNegative) {
        len++;
    }
    // now copy out the "real bytes" for returning
    byte[] realBytes = new byte[len];
    for (int i = len - 1, x = SIZE_MAXLONG - 1; 0 <= i; i--, x--) {
        realBytes[i] = bytes[x];
    }
    // add negative sign if we need to
    if (bNegative) {
        realBytes[0] = DASH;
    }
    return realBytes;
}
java
@Override
public void clearCache(CPDAvailabilityEstimate cpdAvailabilityEstimate) {
    entityCache.removeResult(CPDAvailabilityEstimateModelImpl.ENTITY_CACHE_ENABLED,
        CPDAvailabilityEstimateImpl.class, cpdAvailabilityEstimate.getPrimaryKey());

    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);

    clearUniqueFindersCache((CPDAvailabilityEstimateModelImpl) cpdAvailabilityEstimate, true);
}
java
@ManagedAttribute()
public List<String> getEffectiveProperties() {
    final List<String> properties = new LinkedList<String>();
    for (final String key : myProperties.stringPropertyNames()) {
        properties.add(key + "=" + myProperties.get(key));
    }
    return properties;
}
java
public void setResult(Result result) throws IllegalArgumentException {
    Check.notNull(result);
    if (result instanceof SAXResult) {
        setTarget((SAXResult) result);
    } else {
        TransformerHandler th = saxHelper.newIdentityTransformerHandler();
        th.setResult(result);
        setTarget(new SAXResult(th));
    }
}
java
@Override
public void write(DataOutput out) throws IOException {
    out.writeInt(values.size());
    for (ValueWritable vw : values) {
        vw.write(out);
    }
}
java
@Override
public void process(TestDeployment testDeployment, Archive<?> protocolArchive) {
    final TestClass testClass = this.testClass.get();
    final Archive<?> applicationArchive = testDeployment.getApplicationArchive();

    if (WarpCommons.isWarpTest(testClass.getJavaClass())) {
        if (!Validate.isArchiveOfType(WebArchive.class, protocolArchive)) {
            throw new IllegalArgumentException("Protocol archives of type " + protocolArchive.getClass()
                + " not supported by Warp. Please use the Servlet 3.0 protocol.");
        }

        addWarpPackageToDeployment(protocolArchive.as(WebArchive.class));
        addWarpExtensionsDeployment(protocolArchive.as(WebArchive.class));
        removeTestClassFromDeployment(applicationArchive, testClass);
    }
}
java
@Override
public EClass getIfcElementAssembly() {
    if (ifcElementAssemblyEClass == null) {
        ifcElementAssemblyEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
                .getEClassifiers().get(222);
    }
    return ifcElementAssemblyEClass;
}
python
def read_whole_packet(self):
    """
    Reads single packet and returns bytes payload of the packet.

    Can only be called when transport's read pointer is at the beginning
    of the packet.
    """
    self._read_packet()
    return readall(self, self._size - _header.size)
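The framing convention implied here is a fixed-size header carrying the total packet size; the exact struct format is an assumption, but the matching write side would look roughly like this:

import struct

_header = struct.Struct('!i')  # assumed: 4-byte big-endian length prefix

def frame_packet(payload: bytes) -> bytes:
    # Prefix the payload with the total packet size (header + payload),
    # mirroring the read side's `self._size - _header.size` arithmetic.
    return _header.pack(_header.size + len(payload)) + payload

print(frame_packet(b'hello'))  # -> b'\x00\x00\x00\thello'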
python
def _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no):
    """Searches before and after in the python source to find comments
    which denote cheetah line numbers.

    If a lower bound is not found, 0 is substituted.
    If an upper bound is not found, len(cheetah lines) is returned.
    The result is a lower-inclusive upper-exclusive range: [..., ...)
    """
    # Find lower bound
    for line_no in range(py_line_no, 0, -1):
        lower_bound = _get_line_no_from_comments(py_by_line_no[line_no])
        if lower_bound != 0:
            break
    else:
        lower_bound = 0

    # Find upper bound
    for line_no in range(py_line_no, len(py_by_line_no)):
        upper_bound = _get_line_no_from_comments(py_by_line_no[line_no])
        if upper_bound != 0:
            # Since we'll eventually be building a range(), let's make this
            # the non-inclusive upper-bound
            upper_bound += 1
            break
    else:
        upper_bound = len(cheetah_by_line_no)

    return lower_bound, upper_bound
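A tiny self-contained driver for the search above (assuming _find_bounds is in scope; the comment format parsed by this stub is invented for illustration):

import re

def _get_line_no_from_comments(line):
    # Hypothetical stub: pull a cheetah line number out of a marker comment.
    m = re.search(r'line (\d+)', line)
    return int(m.group(1)) if m else 0

py_by_line_no = [
    '',                          # pad index 0; line numbers are 1-indexed
    '# generated from line 1',
    'x = 1',
    'y = 2',
    '# generated from line 3',
]
print(_find_bounds(3, py_by_line_no, cheetah_by_line_no=['a', 'b', 'c']))
# -> (1, 4)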
java
private void checkIndices(int row, int col) {
    if (row < 0 || col < 0 || row >= rows || col >= cols) {
        throw new IndexOutOfBoundsException();
    }
}
python
def questions(self):
    """Get all of the user's questions.

    :return: the user's questions, as a generator.
    :rtype: Question.Iterable
    """
    from .question import Question
    if self.url is None or self.question_num == 0:
        return
    for page_index in range(1, (self.question_num - 1) // 20 + 2):
        html = self._session.get(
            self.url + 'asks?page=' + str(page_index)).text
        soup = BeautifulSoup(html)
        question_links = soup.find_all('a', class_='question_link')
        question_datas = soup.find_all(
            'div', class_='zm-profile-section-main')
        for link, data in zip(question_links, question_datas):
            url = Zhihu_URL + link['href']
            title = link.text.strip()
            answer_num = int(
                re_get_number.match(data.div.contents[4]).group(1))
            follower_num = int(
                re_get_number.match(data.div.contents[6]).group(1))
            q = Question(url, title, follower_num, answer_num,
                         session=self._session)
            yield q
python
def detect_intent_texts(project_id, session_id, texts, language_code):
    """Returns the result of detect intent with texts as inputs.

    Using the same `session_id` between requests allows continuation
    of the conversation."""
    import dialogflow_v2 as dialogflow
    session_client = dialogflow.SessionsClient()

    session = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))

    for text in texts:
        text_input = dialogflow.types.TextInput(
            text=text, language_code=language_code)

        query_input = dialogflow.types.QueryInput(text=text_input)

        response = session_client.detect_intent(
            session=session, query_input=query_input)

        print('=' * 20)
        print('Query text: {}'.format(response.query_result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(
            response.query_result.intent.display_name,
            response.query_result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(
            response.query_result.fulfillment_text))
python
def user_role_add(user_id=None, user=None, tenant_id=None,
                  tenant=None, role_id=None, role=None, profile=None,
                  project_id=None, project_name=None, **connection_args):
    '''
    Add role for user in tenant (keystone user-role-add)

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.user_role_add \
            user_id=298ce377245c4ec9b70e1c639c89e654 \
            tenant_id=7167a092ece84bae8cead4bf9d15bb3b \
            role_id=ce377245c4ec9b70e1c639c89e8cead4
        salt '*' keystone.user_role_add user=admin tenant=admin role=admin
    '''
    kstone = auth(profile, **connection_args)

    if project_id and not tenant_id:
        tenant_id = project_id
    elif project_name and not tenant:
        tenant = project_name

    if user:
        user_id = user_get(name=user, profile=profile,
                           **connection_args)[user].get('id')
    else:
        user = next(six.iterkeys(user_get(user_id, profile=profile,
                                          **connection_args)))['name']
    if not user_id:
        return {'Error': 'Unable to resolve user id'}

    if tenant:
        tenant_id = tenant_get(name=tenant, profile=profile,
                               **connection_args)[tenant].get('id')
    else:
        tenant = next(six.iterkeys(tenant_get(tenant_id, profile=profile,
                                              **connection_args)))['name']
    if not tenant_id:
        return {'Error': 'Unable to resolve tenant/project id'}

    if role:
        role_id = role_get(name=role, profile=profile,
                           **connection_args)[role]['id']
    else:
        role = next(six.iterkeys(role_get(role_id, profile=profile,
                                          **connection_args)))['name']
    if not role_id:
        return {'Error': 'Unable to resolve role id'}

    if _OS_IDENTITY_API_VERSION > 2:
        kstone.roles.grant(role_id, user=user_id, project=tenant_id)
    else:
        kstone.roles.add_user_role(user_id, role_id, tenant_id)
    ret_msg = '"{0}" role added for user "{1}" for "{2}" tenant/project'
    return ret_msg.format(role, user, tenant)
java
@Override
public Task<UserApiKey> createApiKey(@NonNull final String name) {
    return dispatcher.dispatchTask(new Callable<UserApiKey>() {
        @Override
        public UserApiKey call() {
            return createApiKeyInternal(name);
        }
    });
}
java
@Override
public List<PlanNode> getAlternativePlans(CostEstimator estimator) {
    // check if we have a cached version
    if (this.cachedPlans != null) {
        return this.cachedPlans;
    }

    // calculate alternative sub-plans for predecessor
    List<? extends PlanNode> subPlans = getPredecessorNode().getAlternativePlans(estimator);
    List<PlanNode> outputPlans = new ArrayList<PlanNode>();

    final int dop = getDegreeOfParallelism();
    final int subPerInstance = getSubtasksPerInstance();
    final int inDop = getPredecessorNode().getDegreeOfParallelism();
    final int inSubPerInstance = getPredecessorNode().getSubtasksPerInstance();
    final int numInstances = dop / subPerInstance + (dop % subPerInstance == 0 ? 0 : 1);
    final int inNumInstances = inDop / inSubPerInstance + (inDop % inSubPerInstance == 0 ? 0 : 1);

    final boolean globalDopChange = numInstances != inNumInstances;
    final boolean localDopChange = numInstances == inNumInstances & subPerInstance != inSubPerInstance;

    InterestingProperties ips = this.input.getInterestingProperties();
    for (PlanNode p : subPlans) {
        for (RequestedGlobalProperties gp : ips.getGlobalProperties()) {
            for (RequestedLocalProperties lp : ips.getLocalProperties()) {
                Channel c = new Channel(p);
                gp.parameterizeChannel(c, globalDopChange, localDopChange);
                if (lp.isMetBy(c.getLocalPropertiesAfterShippingOnly())) {
                    c.setLocalStrategy(LocalStrategy.NONE);
                } else {
                    lp.parameterizeChannel(c);
                }

                // no need to check whether the created properties meet what we need in case
                // of ordering or global ordering, because the only interesting properties we have
                // are what we require
                outputPlans.add(new SinkPlanNode(this,
                        "DataSink(" + this.getPactContract().getName() + ")", c));
            }
        }
    }

    // cost and prune the plans
    for (PlanNode node : outputPlans) {
        estimator.costOperator(node);
    }
    prunePlanAlternatives(outputPlans);

    this.cachedPlans = outputPlans;
    return outputPlans;
}
java
@Pure public static Map<String, List<Object>> getCommandLineOptions() { if (commandLineOptions != null) { return Collections.unmodifiableSortedMap(commandLineOptions); } return Collections.emptyMap(); }
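A Python analogue of the read-only-view-or-empty pattern above, using types.MappingProxyType; the module-level option store is a made-up stand-in for the static field:

from types import MappingProxyType

_command_line_options = {"verbose": [True], "output": ["a.txt"]}

def get_command_line_options():
    """Return a read-only view of the parsed options, or an empty map."""
    if _command_line_options is not None:
        return MappingProxyType(_command_line_options)
    return MappingProxyType({})

opts = get_command_line_options()
# opts["verbose"] = [False]  # would raise TypeError: the view is read-only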
java
public void setDomainStatusList(java.util.Collection<ElasticsearchDomainStatus> domainStatusList) { if (domainStatusList == null) { this.domainStatusList = null; return; } this.domainStatusList = new java.util.ArrayList<ElasticsearchDomainStatus>(domainStatusList); }
python
def configure( cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any ) -> None: """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ super(AsyncHTTPClient, cls).configure(impl, **kwargs)
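A brief usage sketch of the keyword-argument path the docstring mentions — max_clients is the documented example; passing None for impl keeps the default SimpleAsyncHTTPClient:

from tornado.httpclient import AsyncHTTPClient

# Keep the default implementation but allow more parallel fetches per IOLoop.
AsyncHTTPClient.configure(None, max_clients=50)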
java
@Override
public BeanDeploymentArchive loadBeanDeploymentArchive(Class<?> beanClass) {
    // Return the BDA if the class is contained in one of the BDAs
    BeanDeploymentArchive bda = getBeanDeploymentArchiveFromClass(beanClass);
    if (bda == null) {
        // Otherwise create a BDA on the fly and add it to the deployment graph
        try {
            bda = createBDAOntheFly(beanClass);
        } catch (CDIException e) {
            // FFDC and carry on
            bda = null;
        }
    }
    return bda;
}
python
def update(self, **kwargs):
    u"""Update existing simple child nodes or create new ones.

    Each dict key is used as a tag name and each value as the node text.
    """
    for key, value in kwargs.items():
        helper = helpers.CAST_DICT.get(type(value), str)
        tag = self._get_aliases().get(key, key)

        elements = list(self._xml.iterchildren(tag=tag))
        if elements:
            for element in elements:
                element.text = helper(value)
        else:
            # Create the new element under its resolved tag name, not the
            # alias key, and invalidate the cached aliases.
            element = etree.Element(tag)
            element.text = helper(value)
            self._xml.append(element)
            self._aliases = None
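The update-in-place-or-append pattern is easy to exercise with lxml directly; a self-contained sketch independent of the wrapper class above (helper casting and aliasing omitted):

from lxml import etree

def upsert_children(root, **kwargs):
    # Update existing children by tag, or append new simple nodes.
    for tag, value in kwargs.items():
        elements = list(root.iterchildren(tag=tag))
        if elements:
            for element in elements:
                element.text = str(value)
        else:
            element = etree.Element(tag)
            element.text = str(value)
            root.append(element)

root = etree.fromstring("<invoice><amount>1</amount></invoice>")
upsert_children(root, amount=2, currency="EUR")
print(etree.tostring(root))
# b'<invoice><amount>2</amount><currency>EUR</currency></invoice>'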
java
public static Path serialize(Object source, FileSystem fs, Path targetPath) {
    Assert.notNull(targetPath, "'targetPath' must not be null");
    Assert.notNull(fs, "'fs' must not be null");
    Assert.notNull(source, "'source' must not be null");

    Path resultPath = targetPath.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    OutputStream targetOutputStream = null;
    try {
        targetOutputStream = fs.create(targetPath);
        SerializationUtils.serialize(source, targetOutputStream);
    }
    catch (Exception e) {
        throw new IllegalStateException("Failed to serialize " + source + " to " + resultPath, e);
    }
    finally {
        // Close the stream so the file is flushed and the handle released
        // even on the error path
        if (targetOutputStream != null) {
            try {
                targetOutputStream.close();
            }
            catch (IOException e) {
                // ignore failures on close
            }
        }
    }
    return resultPath;
}
java
public static Cipher getCipher(final @NotNull Key key, int mode) { try { Cipher cipher = Cipher.getInstance(key.getAlgorithm()); cipher.init(mode, key); return cipher; } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException e) { throw new RuntimeException(e); } }
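A Python counterpart would typically lean on a high-level primitive rather than raw cipher modes; a sketch using the cryptography package's Fernet — an assumption on my part, since the Java helper above works with whatever algorithm the injected Key names:

from cryptography.fernet import Fernet

key = Fernet.generate_key()   # symmetric key, analogous to the injected Key
cipher = Fernet(key)

token = cipher.encrypt(b"secret payload")
assert cipher.decrypt(token) == b"secret payload"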
java
private boolean calcBool() {
    int cmp;
    switch (left.getType()) { // same as right.getType()
    case BOOL:
        switch (op) {
        case AND: return left.evalBool() && right.evalBool();
        case OR:  return left.evalBool() || right.evalBool();
        case EQ:  return left.evalBool() == right.evalBool();
        case NE:  return left.evalBool() != right.evalBool();
        default: throw new RuntimeException("illegal bool op: " + op);
        }
    case INT:
        // Integer.compare avoids the overflow that plain subtraction risks
        cmp = Integer.compare(left.evalInt(), right.evalInt());
        break;
    case STR:
        cmp = left.evalString().compareTo(right.evalString());
        break;
    default:
        throw new RuntimeException("unknown type: " + left.getType());
    }
    switch (op) { // same for string and int
    case LT: return cmp < 0;
    case GT: return cmp > 0;
    case LE: return cmp <= 0;
    case GE: return cmp >= 0;
    case EQ: return cmp == 0;
    case NE: return cmp != 0;
    default: throw new RuntimeException("illegal non-bool op: " + op);
    }
}
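The compute-cmp-once trick above ports directly to Python; a sketch using the operator module, with illustrative op names:

import operator

_CMP_OPS = {
    'LT': operator.lt, 'GT': operator.gt, 'LE': operator.le,
    'GE': operator.ge, 'EQ': operator.eq, 'NE': operator.ne,
}

def calc_bool(op, left, right):
    # Works for any two values of the same orderable type (ints, strings, ...)
    try:
        return _CMP_OPS[op](left, right)
    except KeyError:
        raise ValueError(f'illegal non-bool op: {op}')

assert calc_bool('LE', 'apple', 'banana')
assert calc_bool('NE', 3, 4)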
java
public void subtract(final DoubleHistogram otherHistogram) { int arrayLength = otherHistogram.integerValuesHistogram.countsArrayLength; AbstractHistogram otherIntegerHistogram = otherHistogram.integerValuesHistogram; for (int i = 0; i < arrayLength; i++) { long otherCount = otherIntegerHistogram.getCountAtIndex(i); if (otherCount > 0) { double otherValue = otherIntegerHistogram.valueFromIndex(i) * otherHistogram.integerToDoubleValueConversionRatio; if (getCountAtValue(otherValue) < otherCount) { throw new IllegalArgumentException("otherHistogram count (" + otherCount + ") at value " + otherValue + " is larger than this one's (" + getCountAtValue(otherValue) + ")"); } recordValueWithCount(otherValue, -otherCount); } } }
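The invariant the loop enforces — never subtract more occurrences of a value than this histogram has recorded — in a minimal dict-based sketch. This is not HdrHistogram's bucketed layout, and unlike the Java above it validates everything up front so a failure leaves the histogram untouched:

from collections import Counter

def subtract_histogram(this: Counter, other: Counter) -> None:
    # Validate first so a failed subtraction never leaves 'this' half-updated.
    for value, other_count in other.items():
        if this[value] < other_count:
            raise ValueError(
                f"other count ({other_count}) at value {value} "
                f"is larger than this one's ({this[value]})")
    for value, other_count in other.items():
        this[value] -= other_count

h = Counter({10: 5, 20: 2})
subtract_histogram(h, Counter({10: 3}))
assert h[10] == 2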
python
def _authGetDBusCookie(self, cookie_context, cookie_id):
    """
    Reads the requested cookie_id from the cookie_context file
    """

    # XXX Ensure we obtain the correct directory for the
    #     authenticating user and that that user actually
    #     owns the keyrings directory

    if self.cookie_dir is None:
        cookie_dir = os.path.expanduser('~/.dbus-keyrings')
    else:
        cookie_dir = self.cookie_dir

    dstat = os.stat(cookie_dir)

    if dstat.st_mode & 0o066:  # any group/other read or write bit set
        raise Exception('User keyrings directory is accessible by other users. Aborting authentication')

    import pwd
    if dstat.st_uid != pwd.getpwuid(os.geteuid()).pw_uid:
        raise Exception('Keyrings directory is not owned by the current user. Aborting authentication!')

    with open(os.path.join(cookie_dir, cookie_context), 'r') as f:
        for line in f:
            try:
                k_id, k_time, k_cookie_hex = line.split()
                if k_id == cookie_id:
                    return k_cookie_hex
            except ValueError:
                # skip malformed lines that don't split into three fields
                pass
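The directory check in isolation, with the octal mask spelled out — a sketch; the pwd lookup above reduces to comparing against os.geteuid():

import os

def check_keyring_dir(cookie_dir):
    """Refuse to use a keyrings directory other users can read or write."""
    dstat = os.stat(cookie_dir)
    if dstat.st_mode & 0o066:  # group/other rw bits must all be clear
        raise RuntimeError('Keyrings directory is accessible to other users')
    if dstat.st_uid != os.geteuid():
        raise RuntimeError('Keyrings directory is not owned by the current user')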
java
@Override public Description matchMemberReference(MemberReferenceTree tree, VisitorState state) { if (!matchWithinClass) { return Description.NO_MATCH; } Symbol.MethodSymbol referencedMethod = ASTHelpers.getSymbol(tree); Symbol.MethodSymbol funcInterfaceSymbol = NullabilityUtil.getFunctionalInterfaceMethod(tree, state.getTypes()); handler.onMatchMethodReference(this, tree, state, referencedMethod); return checkOverriding(funcInterfaceSymbol, referencedMethod, tree, state); }
java
private String replaceDelimiter(String value, char delimiter, String delimiterReplacement) { return CmsStringUtil.substitute(value, String.valueOf(delimiter), delimiterReplacement); }
python
def punToEnglish_number(number):
    """The punToEnglish_number function takes a string written in
       Punjabi digits, like ੧੨੩ (which is 123), converts it, and
       returns the result as an int, here 123.
    """
    output = 0
    # Positional notation: for each digit, shift the running total one
    # decimal place left, then add the digit's index in the DIGITS list
    # from alphabet.py.
    for num in number:
        output = 10 * output + DIGITS.index(num)
    return output
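A self-contained version with the digit table inlined (the real DIGITS list lives in alphabet.py):

GURMUKHI_DIGITS = '੦੧੨੩੪੫੬੭੮੯'  # 0-9 in Gurmukhi script

def pun_to_english_number(number):
    output = 0
    for num in number:
        output = 10 * output + GURMUKHI_DIGITS.index(num)
    return output

assert pun_to_english_number('੧੨੩') == 123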
python
def _readline(self, timeout=1): """ Read line from serial port. :param timeout: timeout, default is 1 :return: stripped line or None """ line = self.port.readline(timeout=timeout) return strip_escape(line.strip()) if line is not None else line
java
protected void destroyConnectionFactories(boolean destroyImmediately) { final boolean trace = TraceComponent.isAnyTracingEnabled(); if (trace && tc.isEntryEnabled()) Tr.entry(this, tc, "destroyConnectionFactories", destroyImmediately, destroyWasDeferred); // Notify the application recycle coordinator of an incompatible config update that requires restarting the application if (!appsToRecycle.isEmpty()) { if (trace && tc.isDebugEnabled()) Tr.debug(this, tc, "recycle applications", appsToRecycle); ApplicationRecycleCoordinator appRecycleCoord = null; appRecycleCoord = (ApplicationRecycleCoordinator) priv.locateService(componentContext,"appRecycleService"); Set<String> members = new HashSet<String>(appsToRecycle); appsToRecycle.removeAll(members); appRecycleCoord.recycleApplications(members); } lock.writeLock().lock(); try { if (isInitialized.get() || destroyImmediately && destroyWasDeferred) try { // Mark all traditional WAS data source wrappers as disabled isInitialized.set(false); if (destroyImmediately) { // Destroy the data source (it will only exist if it was looked up) conMgrSvc.destroyConnectionFactories(); if (isDerbyEmbedded) shutdownDerbyEmbedded(); conMgrSvc.deleteObserver(this); jdbcDriverSvc.deleteObserver(this); } destroyWasDeferred = !destroyImmediately; } catch (RuntimeException x) { throw x; } catch (Exception x) { throw new RuntimeException(x); } } finally { lock.writeLock().unlock(); } if (trace && tc.isEntryEnabled()) Tr.exit(this, tc, "destroyConnectionFactories"); }
python
def _make_serializer(meas, schema, rm_none, extra_tags, placeholder):  # noqa: C901
    """Factory of line protocol serializers"""
    _validate_schema(schema, placeholder)
    tags = []
    fields = []
    ts = None
    for k, t in schema.items():
        if t is MEASUREMENT:
            meas = f"{{i.{k}}}"
        elif t is TIMEINT:
            ts = f"{{i.{k}}}"
        elif t is TIMESTR:
            if pd:
                ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
            else:
                ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
        elif t is TIMEDT:
            if pd:
                ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
            else:
                ts = f"{{dt_to_int(i.{k})}}"
        elif t is TAG:
            tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
        elif t is TAGENUM:
            tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
        elif t in (FLOAT, BOOL):
            fields.append(f"{k}={{i.{k}}}")
        elif t is INT:
            fields.append(f"{k}={{i.{k}}}i")
        elif t is STR:
            fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
        elif t is ENUM:
            fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
        else:
            raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
    extra_tags = extra_tags or {}
    for k, v in extra_tags.items():
        tags.append(f"{k}={v}")
    if placeholder:
        fields.insert(0, "_=true")

    sep = ',' if tags else ''
    ts = f' {ts}' if ts else ''
    fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
    if rm_none:
        # Has substantial runtime impact. Best avoided if performance is critical.
        # First field can't be removed.
        pat = r',\w+="?None"?i?'
        f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
    else:
        f = eval('lambda i: f"{}".encode()'.format(fmt))
    f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
    f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
                   extra_tags=extra_tags, placeholder=placeholder)
    return f
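What the eval-built lambda effectively looks like for a small schema — a hand-expanded sketch with illustrative names, omitting the tag/field escaping the real factory generates:

# Roughly what the factory builds for a schema like
# {'time': TIMEINT, 'host': TAG, 'value': FLOAT} on measurement 'cpu':
def serialize(i):
    return f"cpu,host={i.host} value={i.value} {i.time}".encode()

class Point:
    def __init__(self, host, value, time):
        self.host, self.value, self.time = host, value, time

print(serialize(Point('web1', 0.5, 1465839830100400200)))
# b'cpu,host=web1 value=0.5 1465839830100400200'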
java
public VoltTable[] run(SystemProcedureExecutionContext ctx) { VoltTable[] result = null; try { result = createAndExecuteSysProcPlan(SysProcFragmentId.PF_quiesce_sites, SysProcFragmentId.PF_quiesce_processed_sites); } catch (Exception ex) { ex.printStackTrace(); } return result; }
java
public boolean save() {
    boolean blnRet = false;
    File objFile = null;
    FileWriter objWriter = null;

    try {
        if (this.mhmapSections.size() == 0)
            return false;
        objFile = new CSFile(this.mstrFile);
        if (objFile.exists())
            objFile.delete();
        objWriter = new FileWriter(objFile);
        Iterator<String> itrSec = this.mhmapSections.keySet().iterator();
        while (itrSec.hasNext()) {
            String strName = itrSec.next();
            INISection objSec = this.mhmapSections.get(strName);
            objWriter.write(objSec.toString());
            objWriter.write("\r\n");
        }
        blnRet = true;
    } catch (IOException IOExIgnore) {
        // ignore and report failure via the return value
    } finally {
        if (objWriter != null) {
            closeWriter(objWriter);
        }
    }
    return blnRet;
}
java
public void setCompileDirectory(String directory) { if (directory != null && directory.trim().length() > 0) { File file = new File(directory); if (file.exists() || file.mkdirs()) { this.compileDirectory = file; } } }
python
def _expr2sat(ex, litmap): # pragma: no cover """Convert an expression to a DIMACS SAT string.""" if isinstance(ex, Literal): return str(litmap[ex]) elif isinstance(ex, NotOp): return "-(" + _expr2sat(ex.x, litmap) + ")" elif isinstance(ex, OrOp): return "+(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" elif isinstance(ex, AndOp): return "*(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" elif isinstance(ex, XorOp): return ("xor(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")") elif isinstance(ex, EqualOp): return "=(" + " ".join(_expr2sat(x, litmap) for x in ex.xs) + ")" else: fstr = ("expected ex to be a Literal or Not/Or/And/Xor/Equal op, " "got {0.__name__}") raise ValueError(fstr.format(type(ex)))
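A worked example helps: for a·(b+¬c) with litmap {a: 1, b: 2, c: 3}, the recursion yields *(1 +(2 -(3))). A stripped-down sketch with hypothetical node classes standing in for the real expression AST:

class Lit:
    def __init__(self, name): self.name = name

class Not:
    def __init__(self, x): self.x = x

class Or:
    def __init__(self, *xs): self.xs = xs

class And:
    def __init__(self, *xs): self.xs = xs

def expr2sat(ex, litmap):
    if isinstance(ex, Lit):
        return str(litmap[ex.name])
    if isinstance(ex, Not):
        return "-(" + expr2sat(ex.x, litmap) + ")"
    if isinstance(ex, Or):
        return "+(" + " ".join(expr2sat(x, litmap) for x in ex.xs) + ")"
    if isinstance(ex, And):
        return "*(" + " ".join(expr2sat(x, litmap) for x in ex.xs) + ")"
    raise ValueError(f"unexpected node: {type(ex).__name__}")

ex = And(Lit('a'), Or(Lit('b'), Not(Lit('c'))))
print(expr2sat(ex, {'a': 1, 'b': 2, 'c': 3}))  # *(1 +(2 -(3)))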
java
public Set<CmsResource> getPublishListFiles() throws CmsException { String context = "[" + RandomStringUtils.randomAlphabetic(8) + "] "; List<CmsResource> offlineResults = computeCollectorResults(OFFLINE); if (LOG.isDebugEnabled()) { LOG.debug(context + "Offline collector results for " + m_info + ": " + resourcesToString(offlineResults)); } List<CmsResource> onlineResults = computeCollectorResults(ONLINE); if (LOG.isDebugEnabled()) { LOG.debug(context + "Online collector results for " + m_info + ": " + resourcesToString(onlineResults)); } Set<CmsResource> result = Sets.newHashSet(); for (CmsResource offlineRes : offlineResults) { if (!(offlineRes.getState().isUnchanged())) { result.add(offlineRes); } } Set<CmsResource> onlineAndNotOffline = Sets.newHashSet(onlineResults); onlineAndNotOffline.removeAll(offlineResults); for (CmsResource res : onlineAndNotOffline) { try { // Because the resources have state 'unchanged' in the Online project, we need to read them again in the Offline project res = getCmsObject(OFFLINE).readResource(res.getStructureId(), CmsResourceFilter.ALL); result.add(res); } catch (CmsException e) { LOG.error(e.getLocalizedMessage(), e); } } result.addAll(onlineAndNotOffline); if (LOG.isDebugEnabled()) { LOG.debug(context + "Publish list contributions for " + m_info + ": " + resourcesToString(result)); } return result; }
python
def get_F1_EM(dataset, predict_data):
    """Calculate the F1 and EM scores of the predicted results.
    Use only with the SQuAD1.1 dataset.

    Parameters
    ----------
    dataset : list
        The dataset records.
    predict_data : dict
        All final predictions, keyed by question id.

    Returns
    -------
    scores : dict
        F1 and EM scores.
    """
    f1 = exact_match = total = 0
    for record in dataset:
        total += 1
        if record[1] not in predict_data:
            message = 'Unanswered question ' + record[1] + \
                      ' will receive score 0.'
            print(message)
            continue
        ground_truths = record[4]
        prediction = predict_data[record[1]]
        exact_match += metric_max_over_ground_truths(
            exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction,
                                            ground_truths)
    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total
    scores = {'exact_match': exact_match, 'f1': f1}
    return scores
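A toy invocation clarifying the record layout the loop assumes — record[1] is the question id and record[4] the list of reference answers; the exact-match arithmetic is reproduced by hand here, and all values are made up:

dataset = [
    (0, 'q1', None, None, ['Denver Broncos']),
    (1, 'q2', None, None, ['gold', 'golden']),
]
predict_data = {'q1': 'Denver Broncos', 'q2': 'silver'}

exact_match = 0
for record in dataset:
    prediction = predict_data.get(record[1], '')
    if any(prediction == truth for truth in record[4]):
        exact_match += 1
print(100.0 * exact_match / len(dataset))  # 50.0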
python
def get_most_frequent_value(values: list):
    """
    Return the most frequent value in the list. If there is no unique one,
    return the maximum of the most frequent values.
    :param values:
    :return:
    """
    if len(values) == 0:
        return None

    most_common = Counter(values).most_common()

    result, max_count = most_common[0]
    for value, count in most_common:
        if count < max_count:
            # Past the tied-for-first block; result holds the max of the ties.
            return result
        result = max(result, value)

    return result
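A quick check of the contract, including the tie case the docstring calls out (this relies on the max-of-ties handling above):

assert get_most_frequent_value([1, 2, 2, 3]) == 2
assert get_most_frequent_value([5, 3, 5, 3]) == 5   # tie -> maximum wins
assert get_most_frequent_value([]) is None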