Dataset columns: language (string, 2 values); func_code_string (string, length 63 to 466k).
python
def prompt_cfg(self, msg, sec, name, ispass=False):
    """Prompt for a config value, optionally saving it to the user-level cfg. Only runs if we are in an interactive mode.

    @param msg:    Message to display to user.
    @param sec:    Section of config to add to.
    @param name:   Config item name.
    @param ispass: If True, hide the input from the terminal. Default: False.

    @type msg:    string
    @type sec:    string
    @type name:   string
    @type ispass: boolean

    @return: the value entered by the user
    @rtype:  string
    """
    shutit_global.shutit_global_object.yield_to_draw()
    cfgstr = '[%s]/%s' % (sec, name)
    config_parser = self.config_parser
    usercfg = os.path.join(self.host['shutit_path'], 'config')
    self.log('\nPROMPTING FOR CONFIG: %s' % (cfgstr,), transient=True, level=logging.INFO)
    self.log('\n' + msg + '\n', transient=True, level=logging.INFO)
    if not shutit_global.shutit_global_object.determine_interactive():
        self.fail('ShutIt is not in a terminal so cannot prompt for values.', throw_exception=False)  # pragma: no cover
    if config_parser.has_option(sec, name):
        whereset = config_parser.whereset(sec, name)
        if usercfg == whereset:
            self.fail(cfgstr + ' has already been set in the user config, edit ' + usercfg + ' directly to change it', throw_exception=False)  # pragma: no cover
        for subcp, filename, _ in reversed(config_parser.layers):
            # Is the config file loaded after the user config file?
            if filename == whereset:
                self.fail(cfgstr + ' is being set in ' + filename + ', unable to override on a user config level', throw_exception=False)  # pragma: no cover
            elif filename == usercfg:
                break
    else:
        # The item is not currently set so we're fine to do so
        pass
    if ispass:
        val = getpass.getpass('>> ')
    else:
        val = shutit_util.util_raw_input(prompt='>> ')
    is_excluded = (
        config_parser.has_option('save_exclude', sec) and
        name in config_parser.get('save_exclude', sec).split()
    )
    # TODO: ideally we would remember the prompted config item for this invocation of shutit
    if not is_excluded:
        usercp = [
            subcp for subcp, filename, _ in config_parser.layers
            if filename == usercfg
        ][0]
        if shutit_util.util_raw_input(prompt=shutit_util.colorise('32', 'Do you want to save this to your user settings? y/n: '), default='y') == 'y':
            sec_toset, name_toset, val_toset = sec, name, val
        else:
            # Never save it
            if config_parser.has_option('save_exclude', sec):
                excluded = config_parser.get('save_exclude', sec).split()
            else:
                excluded = []
            excluded.append(name)
            excluded = ' '.join(excluded)
            sec_toset, name_toset, val_toset = 'save_exclude', sec, excluded
        if not usercp.has_section(sec_toset):
            usercp.add_section(sec_toset)
        usercp.set(sec_toset, name_toset, val_toset)
        usercp.write(open(usercfg, 'w'))
        config_parser.reload()
    return val
python
def alias_bin(self, bin_id, alias_id):
    """Adds an ``Id`` to a ``Bin`` for the purpose of creating compatibility.

    The primary ``Id`` of the ``Bin`` is determined by the provider. The
    new ``Id`` performs as an alias to the primary ``Id``. If the alias
    is a pointer to another bin, it is reassigned to the given bin ``Id``.

    arg:    bin_id (osid.id.Id): the ``Id`` of a ``Bin``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is already assigned
    raise:  NotFound - ``bin_id`` not found
    raise:  NullArgument - ``bin_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinLookupSession.alias_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.alias_catalog(catalog_id=bin_id, alias_id=alias_id)
    self._alias_id(primary_id=bin_id, equivalent_id=alias_id)
python
def search_videohub(cls, query, filters=None, status=None, sort=None, size=None, page=None):
    """searches the videohub given a query and applies given filters and other bits

    :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md
    :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md

    :param query: query terms to search by
    :type query: str
    :example query: "brooklyn hipsters"  # although, this is a little redundant...

    :param filters: video field value restrictions
    :type filters: dict
    :default filters: None
    :example filters: {"channel": "onion"} or {"series": "Today NOW"}

    :param status: limit the results to videos that are published, scheduled, draft
    :type status: str
    :default status: None
    :example status: "published" or "draft" or "scheduled"

    :param sort: video field related sorting
    :type sort: dict
    :default sort: None
    :example sort: {"title": "desc"} or {"description": "asc"}

    :param size: the page size (number of results)
    :type size: int
    :default size: None
    :example size: {"size": 20}

    :param page: the page number of the results
    :type page: int
    :default page: None
    :example page: {"page": 2}  # note, you should use `size` in conjunction with `page`

    :return: a dictionary of results and meta information
    :rtype: dict
    """
    # construct url
    url = getattr(settings, "VIDEOHUB_API_SEARCH_URL", cls.DEFAULT_VIDEOHUB_API_SEARCH_URL)

    # construct auth headers
    headers = {
        "Content-Type": "application/json",
        "Authorization": settings.VIDEOHUB_API_TOKEN,
    }

    # construct payload
    payload = {
        "query": query,
    }
    if filters:
        assert isinstance(filters, dict)
        payload["filters"] = filters
    if status:
        assert isinstance(status, six.string_types)
        payload.setdefault("filters", {})
        payload["filters"]["status"] = status
    if sort:
        assert isinstance(sort, dict)
        payload["sort"] = sort
    if size:
        assert isinstance(size, (six.string_types, int))
        payload["size"] = size
    if page:
        assert isinstance(page, (six.string_types, int))
        payload["page"] = page

    # send request
    res = requests.post(url, data=json.dumps(payload), headers=headers)

    # raise if not 200
    if res.status_code != 200:
        res.raise_for_status()

    # parse and return response
    return json.loads(res.content)
java
@Override
public void onFinish(ISuite suite) {
    logger.entering(suite);
    if (ListenerManager.isCurrentMethodSkipped(this)) {
        logger.exiting(ListenerManager.THREAD_EXCLUSION_MSG);
        return;
    }
    LocalGridManager.shutDownHub();
    logger.exiting();
}
java
private void assignValue(SimpleNode node, RelationQueryNode queryNode) {
    if (node.getId() == JJTSTRINGLITERAL) {
        queryNode.setStringValue(unescapeQuotes(node.getValue()));
    } else if (node.getId() == JJTDECIMALLITERAL) {
        queryNode.setDoubleValue(Double.parseDouble(node.getValue()));
    } else if (node.getId() == JJTDOUBLELITERAL) {
        queryNode.setDoubleValue(Double.parseDouble(node.getValue()));
    } else if (node.getId() == JJTINTEGERLITERAL) {
        // if this is an expression that contains position() do not change
        // the type.
        if (queryNode.getValueType() == QueryConstants.TYPE_POSITION) {
            queryNode.setPositionValue(Integer.parseInt(node.getValue()));
        } else {
            queryNode.setLongValue(Long.parseLong(node.getValue()));
        }
    } else {
        exceptions.add(new InvalidQueryException("Unsupported literal type:" + node.toString()));
    }
}
python
def export_live_eggs(self, env=False):
    """Adds all of the eggs in the current environment to PYTHONPATH."""
    path_eggs = [p for p in sys.path if p.endswith('.egg')]

    command = self.get_finalized_command("egg_info")
    egg_base = path.abspath(command.egg_base)

    unique_path_eggs = set(path_eggs + [egg_base])

    os.environ['PYTHONPATH'] = ':'.join(unique_path_eggs)
python
def hil_state_encode(self, time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc):
    '''
    DEPRECATED PACKET! Suffers from missing airspeed fields and
    singularities due to Euler angles. Please use HIL_STATE_QUATERNION
    instead. Sent from simulation to autopilot. This packet is useful
    for high throughput applications such as hardware in the loop
    simulations.

    time_usec  : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    roll       : Roll angle (rad) (float)
    pitch      : Pitch angle (rad) (float)
    yaw        : Yaw angle (rad) (float)
    rollspeed  : Body frame roll / phi angular speed (rad/s) (float)
    pitchspeed : Body frame pitch / theta angular speed (rad/s) (float)
    yawspeed   : Body frame yaw / psi angular speed (rad/s) (float)
    lat        : Latitude, expressed as * 1E7 (int32_t)
    lon        : Longitude, expressed as * 1E7 (int32_t)
    alt        : Altitude in meters, expressed as * 1000 (millimeters) (int32_t)
    vx         : Ground X Speed (Latitude), expressed as m/s * 100 (int16_t)
    vy         : Ground Y Speed (Longitude), expressed as m/s * 100 (int16_t)
    vz         : Ground Z Speed (Altitude), expressed as m/s * 100 (int16_t)
    xacc       : X acceleration (mg) (int16_t)
    yacc       : Y acceleration (mg) (int16_t)
    zacc       : Z acceleration (mg) (int16_t)
    '''
    return MAVLink_hil_state_message(time_usec, roll, pitch, yaw, rollspeed, pitchspeed, yawspeed, lat, lon, alt, vx, vy, vz, xacc, yacc, zacc)
python
def get_lux_count(lux_byte):
    """
    Method to convert data from the TSL2550D lux sensor into more easily
    usable ADC count values.
    """
    LUX_VALID_MASK = 0b10000000
    LUX_CHORD_MASK = 0b01110000
    LUX_STEP_MASK = 0b00001111
    valid = lux_byte & LUX_VALID_MASK
    if valid != 0:
        step_num = (lux_byte & LUX_STEP_MASK)
        # Shift to normalize value
        chord_num = (lux_byte & LUX_CHORD_MASK) >> 4
        step_val = 2**chord_num
        chord_val = int(16.5 * (step_val - 1))
        count = chord_val + step_val * step_num
        return count
    else:
        raise SensorError("Invalid lux sensor data.")
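A quick worked example of the chord/step decoding above, using a hypothetical input byte (valid bit set, chord 5, step 5):

lux_byte = 0b11010101
chord_num = (lux_byte & 0b01110000) >> 4   # 5
step_num = lux_byte & 0b00001111           # 5
step_val = 2 ** chord_num                  # 32
chord_val = int(16.5 * (step_val - 1))     # int(16.5 * 31) == 511
assert chord_val + step_val * step_num == 671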
java
private void checkError() throws IOException {
    final Throwable t = channelError.get();
    if (t != null) {
        if (t instanceof IOException) {
            throw (IOException) t;
        } else {
            throw new IOException("There has been an error in the channel.", t);
        }
    }
}
java
public List<RemoveMethodType<SessionBeanType<T>>> getAllRemoveMethod() {
    List<RemoveMethodType<SessionBeanType<T>>> list = new ArrayList<RemoveMethodType<SessionBeanType<T>>>();
    List<Node> nodeList = childNode.get("remove-method");
    for (Node node : nodeList) {
        RemoveMethodType<SessionBeanType<T>> type = new RemoveMethodTypeImpl<SessionBeanType<T>>(this, "remove-method", childNode, node);
        list.add(type);
    }
    return list;
}
java
public synchronized void load(LCMSDataSubset subset, Object user) throws FileParsingException {
    if (user == null) {
        throw new IllegalArgumentException("User can't be null");
    }

    // load data, if it's not there yet
    if (!isLoaded(subset)) {
        scans.loadData(subset, null);
    }

    // add the subset
    Set<LCMSDataSubset> userSubsets = cache.getIfPresent(user);
    if (userSubsets == null) {
        addNewUser(user);
        userSubsets = new HashSet<>(2);
        userSubsets.add(subset);
        cache.put(user, userSubsets);
    } else {
        userSubsets.add(subset);
    }
}
java
@FFDCIgnore({ InterruptedException.class })
public synchronized void waitForLevel() {
    // Don't bother waiting if the vm has been shutdown in the meanwhile...
    while (levelReached.get() == false && !shutdownHook.vmShutdown()) {
        try {
            // waiting for a start level event should not take long
            wait(1000);
        } catch (InterruptedException e) {
            /** No-op **/
        }
    }
}
java
@Override
public void removeConsumerPointMatchTarget(DispatchableKey consumerPointData) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "removeConsumerPointMatchTarget", consumerPointData);

    // Remove the consumer point from the matchspace
    // This used to use the ptoPRealization but this is no longer the case.
    messageProcessor
        .getMessageProcessorMatching()
        .removeConsumerPointMatchTarget(consumerPointData);

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "removeConsumerPointMatchTarget");
}
python
def layer_register(log_shape=False, use_scope=True):
    """
    Args:
        log_shape (bool): log input/output shape of this layer
        use_scope (bool or None):
            Whether to call this layer with an extra first argument as variable scope.
            When set to None, it can be called either with or without
            the scope name argument, depending on whether the first argument
            is string or not.

    Returns:
        A decorator used to register a layer.

    Example:

    .. code-block:: python

        @layer_register(use_scope=True)
        def add10(x):
            return x + tf.get_variable('W', shape=[10])

        # use it:
        output = add10('layer_name', input)
        # the function will be called under variable scope "layer_name".
    """

    def wrapper(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            assert args[0] is not None, args
            if use_scope:
                name, inputs = args[0], args[1]
                args = args[1:]  # actual positional args used to call func
                assert isinstance(name, six.string_types), "First argument for \"{}\" should be a string. ".format(
                    func.__name__) + "Did you forget to specify the name of the layer?"
            else:
                assert not log_shape
                if isinstance(args[0], six.string_types):
                    if use_scope is False:
                        logger.warn(
                            "Please call layer {} without the first scope name argument, "
                            "or register the layer with use_scope=None to allow "
                            "two calling methods.".format(func.__name__))
                    name, inputs = args[0], args[1]
                    args = args[1:]  # actual positional args used to call func
                else:
                    inputs = args[0]
                    name = None
            if not (isinstance(inputs, (tf.Tensor, tf.Variable)) or
                    (isinstance(inputs, (list, tuple)) and
                     isinstance(inputs[0], (tf.Tensor, tf.Variable)))):
                raise ValueError("Invalid inputs to layer: " + str(inputs))

            # use kwargs from current argument scope
            actual_args = copy.copy(get_arg_scope()[func.__name__])
            # explicit kwargs overwrite argscope
            actual_args.update(kwargs)
            # if six.PY3:
            #     # explicit positional args also override argscope. only work in PY3
            #     posargmap = inspect.signature(func).bind_partial(*args).arguments
            #     for k in six.iterkeys(posargmap):
            #         if k in actual_args:
            #             del actual_args[k]

            if name is not None:  # use scope
                with tfv1.variable_scope(name) as scope:
                    # this name is only used to suppress logging, doesn't hurt to do some heuristics
                    scope_name = re.sub('tower[0-9]+/', '', scope.name)
                    do_log_shape = log_shape and scope_name not in _LAYER_LOGGED
                    if do_log_shape:
                        logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))

                    # run the actual function
                    outputs = func(*args, **actual_args)

                    if do_log_shape:
                        # log shape info and add activation
                        logger.info("{} output: {}".format(
                            scope.name, get_shape_str(outputs)))
                        _LAYER_LOGGED.add(scope_name)
            else:
                # run the actual function
                outputs = func(*args, **actual_args)
            return outputs

        wrapped_func.symbolic_function = func  # attribute to access the underlying function object
        wrapped_func.use_scope = use_scope
        _register(func.__name__, wrapped_func)
        return wrapped_func

    return wrapper
python
def str_if_nested_or_str(s):
    """Turn input into a native string if possible."""
    if isinstance(s, ALL_STRING_TYPES):
        return str(s)
    if isinstance(s, (list, tuple)):
        return type(s)(map(str_if_nested_or_str, s))
    if isinstance(s, (dict, )):
        return stringify_dict_contents(s)
    return s
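A minimal sketch of the recursion, assuming the module-level ALL_STRING_TYPES covers str: nested containers keep their type while their members are stringified.

nested = ('a', ['b', 'c'])
result = str_if_nested_or_str(nested)
assert result == ('a', ['b', 'c']) and isinstance(result, tuple)
assert all(isinstance(x, str) for x in result[1])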
python
def sysinfo2float(version_info=sys.version_info):
    """Convert a sys.version_info-compatible list into a 'canonic'
    floating-point number that can then be used to look up a magic
    number. Note that this can only be used for released versions of
    C Python, not interim development versions, since we can't
    represent that as a floating-point number.

    For handling Pypy, pyston, jython, etc. and interim versions of
    C Python, use sysinfo2magic.
    """
    vers_str = '.'.join([str(v) for v in version_info[0:3]])
    if version_info[3] != 'final':
        vers_str += '.' + ''.join([str(i) for i in version_info[3:]])

    if IS_PYPY:
        vers_str += 'pypy'
    else:
        try:
            import platform
            platform = platform.python_implementation()
            if platform in ('Jython', 'Pyston'):
                vers_str += platform
                pass
        except ImportError:
            # Python may be too old, e.g. < 2.6 or implementation may
            # just not have platform
            pass
        except AttributeError:
            pass
    return py_str2float(vers_str)
java
public void initForNextResponse(IResponse resp) {
    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE))  // 306998.15
        logger.entering(CLASS_NAME, "initForNextResponse", "resp = " + resp + " [" + this + "]");

    if (resp == null) {
        _rawOut.init(null);
        _bufferedWriter.clean();
        if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE))  // 306998.15
            logger.exiting(CLASS_NAME, "initForNextResponse");
        return;
    }

    // reset the state before initializing it again and before calling setCommonHeaders.
    resetState();

    resp.setStatusCode(200);
    resp.setReason(REASON_OK);
    _statusCode = 200;
    // _responseContext.set(new SRTServletResponseContext());
    this._response = resp;
    setCommonHeaders();

    try {
        _rawOut.init(_response.getOutputStream());
    } catch (IOException e) {
        logger.logp(Level.SEVERE, CLASS_NAME, "initForNextResponse", "error.initializing.output.stream", e); /* @283348.1 */
    }

    // LIBERTY _bufferedOut.reset();
    _bufferedWriter.reset();
    // LIBERTY _responseBuffer = null;

    // PK53885 start
    _bufferSize = DEFAULT_BUFFER_SIZE;
    this._bufferedOut = createOutputStream(DEFAULT_BUFFER_SIZE);
    // LIBERTY
    if (this._bufferedOut instanceof WCOutputStream) {
        (((WCOutputStream) this._bufferedOut).getOutput()).setObserver(this);
    }
    _encoding = null;
    _gotOutputStream = false;
    _gotWriter = false;
    this._pwriter = null;
    // Pk53885 end

    if (com.ibm.ejs.ras.TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE))  // 306998.15
        logger.exiting(CLASS_NAME, "initForNextResponse");
}
python
def namedb_get_record_states_at(cur, history_id, block_number):
    """
    Get the state(s) that the given history record was in at a given block height.

    Normally, this is one state (i.e. if a name was registered at block 8,
    then it is in a NAME_REGISTRATION state in block 10).

    However, if the record changed at this block, then this method returns
    all states the record passed through.

    Returns an array of record states
    """
    query = 'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id == ? ORDER BY block_id DESC,vtxindex DESC'
    args = (history_id, block_number)

    history_rows = namedb_query_execute(cur, query, args)
    ret = []
    for row in history_rows:
        history_data = simplejson.loads(row['history_data'])
        ret.append(history_data)

    if len(ret) > 0:
        # record changed in this block
        return ret

    # if the record did not change in this block, then find the last version of the record
    query = 'SELECT block_id,history_data FROM history WHERE history_id = ? AND block_id < ? ORDER BY block_id DESC,vtxindex DESC LIMIT 1'
    args = (history_id, block_number)

    history_rows = namedb_query_execute(cur, query, args)
    for row in history_rows:
        history_data = simplejson.loads(row['history_data'])
        ret.append(history_data)

    return ret
python
def repel_text(texts, renderer=None, ax=None, expand=(1.2, 1.2),
               only_use_max_min=False, move=False):
    """
    Repel texts from each other while expanding their bounding boxes by
    expand (x, y), e.g. (1.2, 1.2) would multiply width and height by 1.2.

    Requires a renderer to get the actual sizes of the text, and to that
    end either one needs to be directly provided, or the axes have to be
    specified, and the renderer is then retrieved from the axes object.
    """
    if ax is None:
        ax = plt.gca()
    if renderer is None:
        r = get_renderer(ax.get_figure())
    else:
        r = renderer
    bboxes = get_bboxes(texts, r, expand, ax=ax)
    xmins = [bbox.xmin for bbox in bboxes]
    xmaxs = [bbox.xmax for bbox in bboxes]
    ymaxs = [bbox.ymax for bbox in bboxes]
    ymins = [bbox.ymin for bbox in bboxes]

    overlaps_x = np.zeros((len(bboxes), len(bboxes)))
    overlaps_y = np.zeros_like(overlaps_x)
    overlap_directions_x = np.zeros_like(overlaps_x)
    overlap_directions_y = np.zeros_like(overlaps_y)
    for i, bbox1 in enumerate(bboxes):
        overlaps = get_points_inside_bbox(xmins*2 + xmaxs*2, (ymins + ymaxs)*2,
                                          bbox1) % len(bboxes)
        overlaps = np.unique(overlaps)
        for j in overlaps:
            bbox2 = bboxes[j]
            x, y = bbox1.intersection(bbox1, bbox2).size
            overlaps_x[i, j] = x
            overlaps_y[i, j] = y
            direction = np.sign(bbox1.extents - bbox2.extents)[:2]
            overlap_directions_x[i, j] = direction[0]
            overlap_directions_y[i, j] = direction[1]

    move_x = overlaps_x * overlap_directions_x
    move_y = overlaps_y * overlap_directions_y

    delta_x = move_x.sum(axis=1)
    delta_y = move_y.sum(axis=1)

    q = np.sum(overlaps_x), np.sum(overlaps_y)
    if move:
        move_texts(texts, delta_x, delta_y, bboxes, ax=ax)
    return delta_x, delta_y, q
java
private boolean checkForVariables(List<Map<String, String>> values) {
    if (values == null || values.isEmpty()) {
        return false;
    } else {
        for (Map<String, String> list : values) {
            if (list.containsKey("type") && list.get("type")
                    .equals(MtasParserMapping.PARSER_TYPE_VARIABLE)) {
                return true;
            }
        }
    }
    return false;
}
python
def enable_nn_ha(self, active_name, standby_host_id, nameservice, jns,
                 standby_name_dir_list=None, qj_name=None, standby_name=None,
                 active_fc_name=None, standby_fc_name=None, zk_service_name=None,
                 force_init_znode=True, clear_existing_standby_name_dirs=True,
                 clear_existing_jn_edits_dir=True):
    """
    Enable High Availability (HA) with Auto-Failover for an HDFS NameNode.

    @param active_name: Name of Active NameNode.
    @param standby_host_id: ID of host where Standby NameNode will be created.
    @param nameservice: Nameservice to be used while enabling HA.
           Optional if Active NameNode already has this config set.
    @param jns: List of Journal Nodes to be created during the command.
           Each element of the list must be a dict containing the following
           keys (an example structure is sketched after this function):
             - B{jnHostId}: ID of the host where the new JournalNode will be created.
             - B{jnName}: Name of the JournalNode role (optional)
             - B{jnEditsDir}: Edits dir of the JournalNode. Can be omitted if the
               config is already set at RCG level.
    @param standby_name_dir_list: List of directories for the new Standby NameNode.
           If not provided then it will use same dirs as Active NameNode.
    @param qj_name: Name of the journal located on each JournalNodes' filesystem.
           This can be optionally provided if the config hasn't been already set
           for the Active NameNode. If this isn't provided and Active NameNode
           doesn't also have the config, then nameservice is used by default.
    @param standby_name: Name of the Standby NameNode role to be created (Optional).
    @param active_fc_name: Name of the Active Failover Controller role to be
           created (Optional).
    @param standby_fc_name: Name of the Standby Failover Controller role to be
           created (Optional).
    @param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
           If HDFS service already depends on a ZooKeeper service then that
           ZooKeeper service will be used for auto-failover and in that case
           this parameter can either be omitted or should be the same ZooKeeper
           service.
    @param force_init_znode: Indicates if the ZNode should be force initialized
           if it is already present. Useful while re-enabling High Availability.
           (Default: TRUE)
    @param clear_existing_standby_name_dirs: Indicates if the existing name
           directories for Standby NameNode should be cleared during the workflow.
           Useful while re-enabling High Availability. (Default: TRUE)
    @param clear_existing_jn_edits_dir: Indicates if the existing edits
           directories for the JournalNodes for the specified nameservice should
           be cleared during the workflow. Useful while re-enabling High
           Availability. (Default: TRUE)
    @return: Reference to the submitted command.
    @since: API v6
    """
    args = dict(
        activeNnName=active_name,
        standbyNnName=standby_name,
        standbyNnHostId=standby_host_id,
        standbyNameDirList=standby_name_dir_list,
        nameservice=nameservice,
        qjName=qj_name,
        activeFcName=active_fc_name,
        standbyFcName=standby_fc_name,
        zkServiceName=zk_service_name,
        forceInitZNode=force_init_znode,
        clearExistingStandbyNameDirs=clear_existing_standby_name_dirs,
        clearExistingJnEditsDir=clear_existing_jn_edits_dir,
        jns=jns
    )
    return self._cmd('hdfsEnableNnHa', data=args, api_version=6)
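A sketch of the jns payload matching the documented keys (host IDs and paths are hypothetical):

jns = [
    {"jnHostId": "host-1", "jnName": "JN-1", "jnEditsDir": "/data/jn"},
    {"jnHostId": "host-2", "jnEditsDir": "/data/jn"},  # jnName is optional
]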
python
def create_app(applet_id, applet_name, src_dir, publish=False, set_default=False,
               billTo=None, try_versions=None, try_update=True, confirm=True,
               regional_options=None):
    """
    Creates a new app object from the specified applet.

    .. deprecated:: 0.204.0
       Use :func:`create_app_multi_region()` instead.

    """
    # In this case we don't know the region of the applet, so we use the
    # legacy API {"applet": applet_id} without specifying a region
    # specifically.
    return _create_app(dict(applet=applet_id), applet_name, src_dir,
                       publish=publish, set_default=set_default, billTo=billTo,
                       try_versions=try_versions, try_update=try_update,
                       confirm=confirm)
java
public void setDocumentation(String doc) {
    if (Strings.isEmpty(doc)) {
        getSarlAnnotationType().eAdapters().removeIf(new Predicate<Adapter>() {
            public boolean test(Adapter adapter) {
                return adapter.isAdapterForType(DocumentationAdapter.class);
            }
        });
    } else {
        DocumentationAdapter adapter = (DocumentationAdapter) EcoreUtil.getExistingAdapter(
                getSarlAnnotationType(), DocumentationAdapter.class);
        if (adapter == null) {
            adapter = new DocumentationAdapter();
            getSarlAnnotationType().eAdapters().add(adapter);
        }
        adapter.setDocumentation(doc);
    }
}
python
def getSystemVariable(self, remote, name):
    """Get single system variable from CCU / Homegear"""
    if self._server is not None:
        return self._server.getSystemVariable(remote, name)
python
def sslv2_derive_keys(self, key_material):
    """
    There is actually only one key, the CLIENT-READ-KEY or -WRITE-KEY.

    Note that skip_first is opposite from the one with SSLv3 derivation.

    Also, if needed, the IV should be set elsewhere.
    """
    skip_first = True
    if ((self.connection_end == "client" and self.row == "read") or
            (self.connection_end == "server" and self.row == "write")):
        skip_first = False

    cipher_alg = self.ciphersuite.cipher_alg

    start = 0
    if skip_first:
        start += cipher_alg.key_len
    end = start + cipher_alg.key_len

    cipher_secret = key_material[start:end]
    self.cipher = cipher_alg(cipher_secret)
    self.debug_repr("cipher_secret", cipher_secret)
python
def view_current_app_behavior(self) -> str:
    '''View application behavior in the current window.'''
    output, _ = self._execute('-s', self.device_sn, 'shell',
                              'dumpsys', 'window', 'windows')
    return re.findall(r'mCurrentFocus=.+(com[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)', output)[0]
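The regex in isolation, run against a hypothetical dumpsys excerpt:

import re
sample = 'mCurrentFocus=Window{1234 u0 com.android.settings/.Settings}'
match = re.findall(r'mCurrentFocus=.+(com[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)', sample)[0]
assert match == 'com.android.settings/.Settings'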
python
def write_libxc_docs_json(xcfuncs, jpath):
    """Write json file with libxc metadata to path jpath."""
    from copy import deepcopy
    xcfuncs = deepcopy(xcfuncs)

    # Remove XC_FAMILY from Family and XC_ from Kind to make strings more human-readable.
    for d in xcfuncs.values():
        d["Family"] = d["Family"].replace("XC_FAMILY_", "", 1)
        d["Kind"] = d["Kind"].replace("XC_", "", 1)

    # Build lightweight version with a subset of keys.
    for num, d in xcfuncs.items():
        xcfuncs[num] = {k: d[k] for k in ("Family", "Kind", "References")}
        # Descriptions are optional
        for opt in ("Description 1", "Description 2"):
            desc = d.get(opt)
            if desc is not None:
                xcfuncs[num][opt] = desc

    with open(jpath, "wt") as fh:
        json.dump(xcfuncs, fh)

    return xcfuncs
python
def serialize_with_sampled_logs(self, logs_limit=-1):
    """serialize a result with up to `logs_limit` logs.

    If `logs_limit` is -1, this function will return a result with all
    its logs.
    """
    return {
        'id': self.id,
        'pathName': self.path_name,
        'name': self.name,
        'isUnregistered': self.is_unregistered,
        'logs': [log.serialize for log in self.sampled_logs(logs_limit)],
        'args': self.args.serialize if self.args is not None else [],
        'commands': [cmd.serialize for cmd in self.commands],
        'snapshots': [cmd.serialize for cmd in self.snapshots],
        'logModifiedAt': self.log_modified_at.isoformat()
    }
python
def _warning(self, msg, node_id, ex, *args, **kwargs):
    """
    Handles the error messages.

    .. note:: If `self.raises` is True, the dispatcher interrupts the
       dispatch when an error occurs; otherwise it logs a warning.
    """
    raises = self.raises(ex) if callable(self.raises) else self.raises

    if raises and isinstance(ex, DispatcherError):
        ex.update(self)
        raise ex

    self._errors[node_id] = msg % ((node_id, ex) + args)
    node_id = '/'.join(self.full_name + (node_id,))

    if raises:
        raise DispatcherError(msg, node_id, ex, *args, sol=self, **kwargs)
    else:
        kwargs['exc_info'] = kwargs.get('exc_info', 1)
        log.error(msg, node_id, ex, *args, **kwargs)
java
public static Geometry convert(JGeometry geometry) {
    switch (geometry.getType()) {
        case GTYPE_COLLECTION:
            return convertCollection(geometry);
        case GTYPE_CURVE:
            return convertCurve(geometry);
        case GTYPE_MULTICURVE:
            return convertMultiCurve(geometry);
        case GTYPE_MULTIPOINT:
            return convertMultiPoint(geometry);
        case GTYPE_MULTIPOLYGON:
            return convertMultiPolygon(geometry);
        case GTYPE_POINT:
            return convertPoint(geometry);
        case GTYPE_POLYGON:
            return convertPolygon(geometry);
        default:
            throw new IllegalArgumentException(geometry.toString());
    }
}
python
def Recurrent(step_model):
    """Apply a stepwise model over a sequence, maintaining state. For RNNs"""
    ops = step_model.ops

    def recurrent_fwd(seqs, drop=0.0):
        lengths = [len(X) for X in seqs]
        X, size_at_t, unpad = ops.square_sequences(seqs)
        Y = ops.allocate((X.shape[0], X.shape[1], step_model.nO))
        cell_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        hidden_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        out_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        backprops = [None] * max(lengths)
        state = step_model.weights.get_initial_state(len(seqs))
        for t in range(max(lengths)):
            state = list(state)
            size = size_at_t[t]
            Xt = X[t, :size]
            state[0] = state[0][:size]
            state[1] = state[1][:size]
            if cell_drop is not None:
                state[0] *= cell_drop
            if hidden_drop is not None:
                state[1] *= hidden_drop
            inputs = (state, Xt)
            (state, Y[t, :size]), backprops[t] = step_model.begin_update(inputs)
            if out_drop is not None:
                Y[t, :size] *= out_drop
        outputs = unpad(Y)

        def recurrent_bwd(d_outputs, sgd=None):
            dY, size_at_t, unpad = step_model.ops.square_sequences(d_outputs)
            d_state = [
                step_model.ops.allocate((dY.shape[1], step_model.nO)),
                step_model.ops.allocate((dY.shape[1], step_model.nO)),
            ]
            updates = {}

            def gather_updates(weights, gradient, key=None):
                updates[key] = (weights, gradient)

            dX = step_model.ops.allocate(
                (dY.shape[0], dY.shape[1], step_model.weights.nI)
            )
            for t in range(max(lengths) - 1, -1, -1):
                if out_drop is not None:
                    dY[t] *= out_drop
                d_state_t, dXt = backprops[t]((d_state, dY[t]), sgd=gather_updates)
                d_state[0][: d_state_t[0].shape[0]] = d_state_t[0]
                d_state[1][: d_state_t[1].shape[0]] = d_state_t[1]
                dX[t, : dXt.shape[0]] = dXt
                if cell_drop is not None:
                    d_state[0] *= cell_drop
                if hidden_drop is not None:
                    d_state[1] *= hidden_drop
            d_cell, d_hidden = d_state
            step_model.weights.d_initial_cells += d_cell.sum(axis=0)
            step_model.weights.d_initial_hiddens += d_hidden.sum(axis=0)
            if sgd is not None:
                for key, (weights, gradient) in updates.items():
                    sgd(weights, gradient, key=key)
            return unpad(dX)

        return outputs, recurrent_bwd

    model = wrap(recurrent_fwd, step_model)
    model.nO = step_model.nO
    return model
java
public static String driversLicense() {
    StringBuffer dl = new StringBuffer(JDefaultAddress.stateAbbr());
    dl.append("-");
    dl.append(JDefaultNumber.randomNumberString(8));
    return dl.toString();
}
java
public static void createHtmlSequencePlotFile(String title, Schema schema, List<List<Writable>> sequence,
                File output) throws Exception {
    String s = createHtmlSequencePlots(title, schema, sequence);
    FileUtils.writeStringToFile(output, s, StandardCharsets.UTF_8);
}
python
def CMY_to_RGB(cobj, target_rgb, *args, **kwargs):
    """
    Converts CMY to RGB via simple subtraction.

    NOTE: Returned values are in the range of 0.0-1.0 (the code subtracts
    each 0.0-1.0 CMY channel from 1.0).
    """
    rgb_r = 1.0 - cobj.cmy_c
    rgb_g = 1.0 - cobj.cmy_m
    rgb_b = 1.0 - cobj.cmy_y

    return target_rgb(rgb_r, rgb_g, rgb_b)
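A minimal demonstration with stand-in objects (the real cobj and target_rgb come from colormath's color classes; the namedtuple and lambda here are hypothetical substitutes):

from collections import namedtuple
CMY = namedtuple('CMY', 'cmy_c cmy_m cmy_y')
r, g, b = CMY_to_RGB(CMY(0.2, 0.4, 0.6), lambda *rgb: rgb)
assert (r, g, b) == (0.8, 0.6, 0.4)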
java
@NonNull
public static Icon base64(byte[] bytes) {
    return new Icon(sanitize(DatatypeConverter.printBase64Binary(bytes)), true);
}
java
public boolean isTemporary() {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "isTemporary");

    boolean isTemporary = _foreignBus.isTemporary();

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "isTemporary", new Boolean(isTemporary));

    return isTemporary;
}
python
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id, proxy):
    """Gets the ``OsidSession`` associated with the gradebook column lookup service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradebookColumnLookupSession) - ``a
            _gradebook_column_lookup_session``
    raise:  NotFound - ``gradebook_id`` not found
    raise:  NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_gradebook_column_lookup()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_lookup()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if not self.supports_gradebook_column_lookup():
        raise errors.Unimplemented()
    ##
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    ##
    # pylint: disable=no-member
    return sessions.GradebookColumnLookupSession(gradebook_id, proxy, self._runtime)
java
public List<EntityExtractor> listCustomPrebuiltEntities(UUID appId, String versionId) {
    return listCustomPrebuiltEntitiesWithServiceResponseAsync(appId, versionId).toBlocking().single().body();
}
java
public static String trim(String xml) {
    String content = removeDeclaration(xml);
    return trimElements(content);
}
python
def where(self, params):
    """Set a dict of parameters to be passed to the API when invoking this request.

    :param params: (dict) query parameters.
    :return: this :class:`.RequestArray` instance for convenience.
    """
    self.params = dict(self.params, **params)  # params overrides self.params
    return self
java
@Override
public void cookieEquals(String cookieName, String expectedCookieValue) {
    assertEquals("Cookie Value Mismatch", expectedCookieValue,
            checkCookieEquals(cookieName, expectedCookieValue, 0, 0));
}
java
public static final String printTimestamp(Date value) {
    return (value == null ? null : TIMESTAMP_FORMAT.get().format(value));
}
java
public void convertClass(TypeElement te, DocPath outputdir)
        throws DocFileIOException, SimpleDocletException {
    if (te == null) {
        return;
    }
    FileObject fo = utils.getFileObject(te);
    if (fo == null)
        return;

    try {
        Reader r = fo.openReader(true);
        int lineno = 1;
        String line;
        relativePath = DocPaths.SOURCE_OUTPUT
                .resolve(DocPath.forPackage(utils, te))
                .invert();
        Content body = getHeader();
        Content pre = new HtmlTree(HtmlTag.PRE);
        try (LineNumberReader reader = new LineNumberReader(r)) {
            while ((line = reader.readLine()) != null) {
                addLineNo(pre, lineno);
                addLine(pre, line, lineno);
                lineno++;
            }
        }
        addBlankLines(pre);
        Content div = HtmlTree.DIV(HtmlStyle.sourceContainer, pre);
        body.addContent((configuration.allowTag(HtmlTag.MAIN)) ? HtmlTree.MAIN(div) : div);
        writeToFile(body, outputdir.resolve(DocPath.forClass(utils, te)));
    } catch (IOException e) {
        String message = configuration.resources.getText("doclet.exception.read.file", fo.getName());
        throw new SimpleDocletException(message, e);
    }
}
python
def QA_util_format_date2str(cursor_date):
    """
    Normalize the input date and return it as a "%Y-%m-%d" string.

    Supported input formats include:
    1. str: "%Y%m%d", "%Y%m%d%H%M%S", "%Y%m%d %H:%M:%S",
       "%Y-%m-%d", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H%M%S"
    2. datetime.datetime
    3. pd.Timestamp
    4. int -> zero-padded on the right before conversion,
       e.g. '20190302093' --> "2019-03-02"

    :param cursor_date: str/datetime.datetime/int, a date or time
    :return: str, the formatted date string
    """
    if isinstance(cursor_date, datetime.datetime):
        cursor_date = str(cursor_date)[:10]
    elif isinstance(cursor_date, str):
        try:
            cursor_date = str(pd.Timestamp(cursor_date))[:10]
        except:
            raise ValueError('Invalid date format; "%Y-%m-%d" is recommended.')
    elif isinstance(cursor_date, int):
        cursor_date = str(pd.Timestamp("{:<014d}".format(cursor_date)))[:10]
    else:
        raise ValueError('Invalid date format; "%Y-%m-%d" is recommended.')
    return cursor_date
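Assumed demo inputs covering two of the documented formats (the int path relies on pandas parsing the right-padded 14-digit string, exactly as the function itself does):

print(QA_util_format_date2str(20190302))               # 2019-03-02
print(QA_util_format_date2str('2019-03-02 09:30:00'))  # 2019-03-02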
python
def Size(self):
    """
    Get the total size in bytes of the object.

    Returns:
        int: size.
    """
    # Items should be an array of type CoinState, not of ints!
    corrected_items = list(map(lambda i: CoinState(i), self.Items))
    return super(UnspentCoinState, self).Size() + GetVarSize(corrected_items)
java
protected Future<?> getFuture() {
    long stamp = lock.tryOptimisticRead();
    Future<?> future = this.future;
    if (!lock.validate(stamp)) {
        // Not valid so wait for read lock
        stamp = lock.readLock();
        try {
            future = this.future;
        } finally {
            lock.unlockRead(stamp);
        }
    }
    return future;
}
python
def Setup(self, input, URL, encoding, options):
    """Setup an XML reader with new options """
    if input is None:
        input__o = None
    else:
        input__o = input._o
    ret = libxml2mod.xmlTextReaderSetup(self._o, input__o, URL, encoding, options)
    return ret
java
public void loadExampleStatementsFromXMLString(String xmlString)
        throws JSONException, IllegalArgumentException, JsonParseException, JsonMappingException, IOException {
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Receiving json string: " + xmlString);
    }
    JSONObject jsonobject = XML.toJSONObject(xmlString);
    String[] root = JSONObject.getNames(jsonobject);
    if (null == root) {
        throw new IllegalArgumentException("no root object in xml found");
    }
    if (root.length > 1) {
        throw new IllegalArgumentException("more than one root objects found");
    }
    JSONObject statement = jsonobject.getJSONObject(root[0]);
    loadExampleStatementsFromJsonString(statement.toString());
}
java
public List<TColumn> columnsOfType(GeoPackageDataType type) {
    List<TColumn> columnsOfType = new ArrayList<>();
    for (TColumn column : columns) {
        if (column.getDataType() == type) {
            columnsOfType.add(column);
        }
    }
    return columnsOfType;
}
python
def encryption_key(self, alg, **kwargs):
    """
    Return an encryption key as per
    http://openid.net/specs/openid-connect-core-1_0.html#Encryption

    :param alg: encryption algorithm
    :param kwargs:
    :return: encryption key as byte string
    """
    if not self.key:
        self.deserialize()

    try:
        tsize = ALG2KEYLEN[alg]
    except KeyError:
        raise UnsupportedAlgorithm(alg)

    if tsize <= 32:
        # SHA256
        _enc_key = sha256_digest(self.key)[:tsize]
    elif tsize <= 48:
        # SHA384
        _enc_key = sha384_digest(self.key)[:tsize]
    elif tsize <= 64:
        # SHA512
        _enc_key = sha512_digest(self.key)[:tsize]
    else:
        raise JWKException("No support for symmetric keys > 512 bits")

    logger.debug('Symmetric encryption key: {}'.format(
        as_unicode(b64e(_enc_key))))

    return _enc_key
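The hash-then-truncate rule in isolation, as a standalone sketch (hashlib stands in for the module's sha*_digest helpers; the alg-to-size value is an assumption about what ALG2KEYLEN might contain):

import hashlib
secret = b'shared secret'
tsize = 16  # e.g. what ALG2KEYLEN might map a 128-bit algorithm to
enc_key = hashlib.sha256(secret).digest()[:tsize]
assert len(enc_key) == tsize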
java
private String appendMoreLogsLink(final String fileName, String url) throws IOException {
    FileBackedStringBuffer buffer = new FileBackedStringBuffer();
    int index = retrieveIndexValueFromFileName(fileName);
    index++;
    File logFileName = retrieveFileFromLogsFolder(Integer.toString(index));
    if (logFileName == null) {
        return "";
    }
    // TODO put this html code in a template
    buffer.append("<form name ='myform' action=").append(url).append(" method= 'post'>");
    buffer.append("<input type='hidden'").append(" name ='fileName'").append(" value ='")
          .append(logFileName.getName()).append("'>");
    buffer.append("<a href= 'javascript: submitform();' > More Logs </a>");
    buffer.append("</form>");
    return buffer.toString();
}
python
def getObjectByPid(self, pid):
    """
    Args:
        pid : str

    Returns:
        str : URIRef of the entry identified by ``pid``.
    """
    self._check_initialized()
    opid = rdflib.term.Literal(pid)
    res = [o for o in self.subjects(predicate=DCTERMS.identifier, object=opid)]
    return res[0]
python
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._uuid is not None:
        return False

    if self._type_ is not None:
        return False

    if self._second_line is not None:
        return False

    if self._expiry_date is not None:
        return False

    if self._status is not None:
        return False

    if self._label_user is not None:
        return False

    return True
java
public LoginConfigType<WebAppType<T>> getOrCreateLoginConfig() {
    List<Node> nodeList = childNode.get("login-config");
    if (nodeList != null && nodeList.size() > 0) {
        return new LoginConfigTypeImpl<WebAppType<T>>(this, "login-config", childNode, nodeList.get(0));
    }
    return createLoginConfig();
}
python
def redirect_to_url(req, url, redirection_type=None, norobot=False):
    """
    Redirect current page to url.

    @param req: request as received from apache
    @param url: url to redirect to
    @param redirection_type: what kind of redirection is required:
        e.g.: apache.HTTP_MULTIPLE_CHOICES   = 300
              apache.HTTP_MOVED_PERMANENTLY  = 301
              apache.HTTP_MOVED_TEMPORARILY  = 302
              apache.HTTP_SEE_OTHER          = 303
              apache.HTTP_NOT_MODIFIED       = 304
              apache.HTTP_USE_PROXY          = 305
              apache.HTTP_TEMPORARY_REDIRECT = 307
        The default is apache.HTTP_MOVED_TEMPORARILY
    @param norobot: whether to instruct crawlers and robots such as
        GoogleBot not to index past this point.
    @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
    """
    url = url.strip()
    if redirection_type is None:
        redirection_type = apache.HTTP_MOVED_TEMPORARILY

    from flask import redirect
    r = redirect(url, code=redirection_type)
    raise apache.SERVER_RETURN(r)

    # FIXME enable code below
    del req.headers_out["Cache-Control"]
    req.headers_out["Cache-Control"] = "no-cache, private, no-store, " \
        "must-revalidate, post-check=0, pre-check=0, max-age=0"
    req.headers_out["Pragma"] = "no-cache"

    if norobot:
        req.headers_out["X-Robots-Tag"] = "noarchive, nosnippet, noindex, nocache"

    user_agent = req.headers_in.get('User-Agent', '')
    if 'Microsoft Office Existence Discovery' in user_agent or 'ms-office' in user_agent:
        # HACK: this is to workaround Microsoft Office trying to be smart
        # when users click on URLs in Office documents that require
        # authentication. Office will check the validity of the URL
        # but will pass the browser the redirected URL rather than
        # the original one. This is incompatible with e.g. Shibboleth
        # based SSO since the referer would be lost.
        # See: http://support.microsoft.com/kb/899927
        req.status = 200
        req.content_type = 'text/html'
        if req.method != 'HEAD':
            req.write("""
<html>
<head>
<title>Intermediate page for URLs clicked on MS Office Documents</title>
<meta http-equiv="REFRESH" content="5;url=%(url)s"></meta>
</head>
<body>
<p>You are going to be redirected to the desired content within 5 seconds. If the redirection does not happen automatically please click on <a href="%(url)s">%(url_ok)s</a>.</p>
</body>
</html>""" % {
                'url': escape(req.unparsed_uri, True),
                'url_ok': escape(req.unparsed_uri)
            })
        raise apache.SERVER_RETURN(apache.DONE)

    req.headers_out["Location"] = url

    if req.response_sent_p:
        raise IOError("Cannot redirect after headers have already been sent.")

    req.status = redirection_type
    req.write('<p>Please go to <a href="%s">here</a></p>\n' % url)

    raise apache.SERVER_RETURN(apache.DONE)
java
public void warn(Object message) {
    if (IS12) {
        getLogger().log(FQCN, Level.WARN, message, null);
    } else {
        getLogger().log(FQCN, Level.WARN, message, null);
    }
}
java
@com.fasterxml.jackson.annotation.JsonProperty("RequestID")
public void setRequestID(String requestID) {
    this.requestID = requestID;
}
java
public static void write(byte[] bytes, File file) throws IOException {
    BufferedOutputStream outputStream = new BufferedOutputStream(new FileOutputStream(file));
    outputStream.write(bytes);
    outputStream.close();
}
python
def _align_backtrack(fastq_file, pair_file, ref_file, out_file, names, rg_info, data):
    """Perform a BWA alignment using 'aln' backtrack algorithm.
    """
    bwa = config_utils.get_program("bwa", data["config"])
    config = data["config"]
    sai1_file = "%s_1.sai" % os.path.splitext(out_file)[0]
    sai2_file = "%s_2.sai" % os.path.splitext(out_file)[0] if pair_file else ""
    if not utils.file_exists(sai1_file):
        with file_transaction(data, sai1_file) as tx_sai1_file:
            _run_bwa_align(fastq_file, ref_file, tx_sai1_file, config)
    if sai2_file and not utils.file_exists(sai2_file):
        with file_transaction(data, sai2_file) as tx_sai2_file:
            _run_bwa_align(pair_file, ref_file, tx_sai2_file, config)
    with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
        align_type = "sampe" if sai2_file else "samse"
        cmd = ("unset JAVA_HOME && {bwa} {align_type} -r '{rg_info}' {ref_file} {sai1_file} {sai2_file} "
               "{fastq_file} {pair_file} | ")
        cmd = cmd.format(**locals()) + tobam_cl
        do.run(cmd, "bwa %s" % align_type, data)
    return out_file
python
def send_env_text(self, text, episode_id):
    ''' text channel to communicate with the agent '''
    reactor.callFromThread(self._send_env_text, text, episode_id)
java
public final DataHasher addData(File file, int bufferSize) {
    Util.notNull(file, "File");
    FileInputStream inStream = null;
    try {
        inStream = new FileInputStream(file);
        return addData(inStream, bufferSize);
    } catch (FileNotFoundException e) {
        throw new IllegalArgumentException("File not found, when calculating data hash", e);
    } finally {
        Util.closeQuietly(inStream);
    }
}
python
def _wait_for_tasks(tasks, service_instance):
    '''
    Wait for tasks created via the VSAN API
    '''
    # Use %-style interpolation: stdlib logging does not expand {0} placeholders.
    log.trace('Waiting for vsan tasks: %s',
              ', '.join([six.text_type(t) for t in tasks]))
    try:
        vsanapiutils.WaitForTasks(tasks, service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    log.trace('Tasks %s finished successfully',
              ', '.join([six.text_type(t) for t in tasks]))
python
def add_z(self, name, prior, q, index=True):
    """ Adds latent variable

    Parameters
    ----------
    name : str
        Name of the latent variable

    prior : Prior object
        Which prior distribution? E.g. Normal(0,1)

    q : Distribution object
        Which distribution to use for variational approximation

    index : boolean
        Whether to index the variable in the z_indices dictionary

    Returns
    ----------
    None (changes priors in LatentVariables object)
    """
    self.z_list.append(LatentVariable(name, len(self.z_list), prior, q))
    if index is True:
        self.z_indices[name] = {'start': len(self.z_list)-1, 'end': len(self.z_list)-1}
java
public void writeExternal(ObjectOutput out) throws IOException {
    out.writeInt(orderNumber);

    byte[] data = value.getIdentity().getBytes(Constants.DEFAULT_ENCODING);
    out.writeInt(data.length);
    if (data.length > 0) {
        out.write(data);
    }

    data = value.getPermission().getBytes(Constants.DEFAULT_ENCODING);
    out.writeInt(data.length);
    if (data.length > 0) {
        out.write(data);
    }
}
java
public static <T> StreamEx<List<T>> cartesianProduct(Collection<? extends Collection<T>> source) {
    if (source.isEmpty())
        return StreamEx.of(new ConstSpliterator.OfRef<>(Collections.emptyList(), 1, true));
    return of(new CrossSpliterator.ToList<>(source));
}
python
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
    """Remove a set of options from Git config."""
    for cfg_key_suffix in cfg_opts:
        cfg_key = f'cherry-picker.{cfg_key_suffix.replace("_", "-")}'
        cmd = "git", "config", "--local", "--unset-all", cfg_key
        subprocess.check_call(cmd, stderr=subprocess.STDOUT)
python
def pop(self):
    """Retrieve the next element in line, this will remove it from the queue"""
    e = self.data[self.start]
    self.start += 1
    if self.start > 5 and self.start > len(self.data)//2:
        self.data = self.data[self.start:]
        self.start = 0
    return e
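A small demo of the amortized cleanup above: once more than half the backing list has been consumed (and at least six items), the consumed prefix is dropped. The tiny stand-in class is hypothetical; pop normally lives on the queue class.

class Q:
    pop = pop  # reuse the method above on a minimal stand-in

q = Q()
q.data, q.start = list(range(10)), 0
for _ in range(6):
    q.pop()
assert q.start == 0 and q.data == [6, 7, 8, 9]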
java
private SplitBrainJoinMessage sendSplitBrainJoinMessage(Address target, SplitBrainJoinMessage request) {
    if (logger.isFineEnabled()) {
        logger.fine("Sending SplitBrainJoinMessage to " + target);
    }

    Connection conn = node.getEndpointManager(MEMBER).getOrConnect(target, true);
    long timeout = SPLIT_BRAIN_CONN_TIMEOUT_MILLIS;
    while (conn == null) {
        timeout -= SPLIT_BRAIN_SLEEP_TIME_MILLIS;
        if (timeout < 0) {
            logger.fine("Returning null timeout<0, " + timeout);
            return null;
        }
        try {
            //noinspection BusyWait
            Thread.sleep(SPLIT_BRAIN_SLEEP_TIME_MILLIS);
        } catch (InterruptedException e) {
            currentThread().interrupt();
            return null;
        }
        conn = node.getEndpointManager(MEMBER).getConnection(target);
    }

    NodeEngine nodeEngine = node.nodeEngine;
    Future future = nodeEngine.getOperationService().createInvocationBuilder(ClusterServiceImpl.SERVICE_NAME,
            new SplitBrainMergeValidationOp(request), target)
            .setTryCount(1).invoke();
    try {
        return (SplitBrainJoinMessage) future.get(SPLIT_BRAIN_JOIN_CHECK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        logger.fine("Timeout during join check!", e);
    } catch (Exception e) {
        logger.warning("Error during join check!", e);
    }
    return null;
}
java
public void addMembership(Membership membership) throws RedmineException {
    final Project project = membership.getProject();
    if (project == null)
        throw new IllegalArgumentException("Project must be set");
    if (membership.getUser() == null)
        throw new IllegalArgumentException("User must be set");
    transport.addChildEntry(Project.class, getProjectKey(project), membership);
}
python
def get_cim_ns(namespaces):
    """
    Tries to obtain the CIM version from the given map of namespaces and
    returns the appropriate *nsURI* and *packageMap*.
    """
    try:
        ns = namespaces['cim']
        if ns.endswith('#'):
            ns = ns[:-1]
    except KeyError:
        ns = ''
        logger.error('No CIM namespace defined in input file.')

    CIM16nsURI = 'http://iec.ch/TC57/2013/CIM-schema-cim16'

    nsuri = ns

    import CIM14, CIM15
    if ns == CIM14.nsURI:
        ns = 'CIM14'
    elif ns == CIM15.nsURI:
        ns = 'CIM15'
    elif ns == CIM16nsURI:
        ns = 'CIM15'
    else:
        ns = 'CIM15'
        logger.warn('Could not detect CIM version. Using %s.' % ns)

    cim = __import__(ns, globals(), locals(), ['nsURI', 'packageMap'])

    return nsuri, cim.packageMap
python
def col_frequencies(col, weights=None, gap_chars='-.'):
    """Frequencies of each residue type (totaling 1.0) in a single column."""
    counts = col_counts(col, weights, gap_chars)
    # Reduce to frequencies
    scale = 1.0 / sum(counts.values())
    return dict((aa, cnt * scale) for aa, cnt in counts.iteritems())
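The scaling step in isolation (col_counts is assumed to return a residue-to-count mapping; the .iteritems() above marks this as Python 2 code, so the sketch uses .items()):

counts = {'A': 3, 'G': 1}
scale = 1.0 / sum(counts.values())
assert {aa: cnt * scale for aa, cnt in counts.items()} == {'A': 0.75, 'G': 0.25}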
java
public com.squareup.okhttp.Call getCorporationsCorporationIdAlliancehistoryAsync(Integer corporationId,
        String datasource, String ifNoneMatch,
        final ApiCallback<List<CorporationAlliancesHistoryResponse>> callback) throws ApiException {
    com.squareup.okhttp.Call call = getCorporationsCorporationIdAlliancehistoryValidateBeforeCall(corporationId,
            datasource, ifNoneMatch, callback);
    Type localVarReturnType = new TypeToken<List<CorporationAlliancesHistoryResponse>>() {
    }.getType();
    apiClient.executeAsync(call, localVarReturnType, callback);
    return call;
}
python
def band_path(self, band_id, for_gdal=False, absolute=False):
    """Return paths of given band's jp2 files for all granules."""
    band_id = str(band_id).zfill(2)
    if not isinstance(band_id, str) or band_id not in BAND_IDS:
        raise ValueError("band ID not valid: %s" % band_id)
    if self.dataset.is_zip and for_gdal:
        zip_prefix = "/vsizip/"
        if absolute:
            granule_basepath = zip_prefix + os.path.dirname(os.path.join(
                self.dataset.path,
                self.dataset.product_metadata_path
            ))
        else:
            granule_basepath = zip_prefix + os.path.dirname(
                self.dataset.product_metadata_path
            )
    else:
        if absolute:
            granule_basepath = os.path.dirname(os.path.join(
                self.dataset.path,
                self.dataset.product_metadata_path
            ))
        else:
            granule_basepath = os.path.dirname(
                self.dataset.product_metadata_path
            )
    product_org = self.dataset._product_metadata.iter(
        "Product_Organisation").next()
    granule_item = [
        g
        for g in chain(*[gl for gl in product_org.iter("Granule_List")])
        if self.granule_identifier == g.attrib["granuleIdentifier"]
    ]
    if len(granule_item) != 1:
        raise S2ReaderMetadataError(
            "Granule ID cannot be found in product metadata."
        )
    rel_path = [
        f.text for f in granule_item[0].iter() if f.text[-2:] == band_id
    ]
    if len(rel_path) != 1:
        # Apparently some SAFE files don't contain all bands. In such a
        # case, raise a warning and return None.
        warnings.warn(
            "%s: image path to band %s could not be extracted" % (
                self.dataset.path, band_id
            )
        )
        return
    img_path = os.path.join(granule_basepath, rel_path[0]) + ".jp2"
    # Above solution still fails on the "safe" test dataset. Therefore,
    # the path gets checked if it contains the IMG_DATA folder and if not,
    # try to guess the path from the old schema. Not happy with this but
    # couldn't find a better way yet.
    if "IMG_DATA" in img_path:
        return img_path
    else:
        if self.dataset.is_zip:
            zip_prefix = "/vsizip/"
            granule_basepath = zip_prefix + os.path.join(
                self.dataset.path, self.granule_path)
        else:
            granule_basepath = self.granule_path
        return os.path.join(
            os.path.join(granule_basepath, "IMG_DATA"),
            "".join([
                "_".join((self.granule_identifier).split("_")[:-1]),
                "_B",
                band_id,
                ".jp2"
            ])
        )
java
public int[] getElementIndices() {
    if (indices != null)
        return indices;

    Integer[] objIndices = indexToValue.keySet().toArray(new Integer[0]);
    indices = new int[objIndices.length];
    for (int i = 0; i < objIndices.length; ++i)
        indices[i] = objIndices[i].intValue();

    // sort the indices
    Arrays.sort(indices);
    return indices;
}
python
def yank_fields_from_attrs(attrs, _as=None, sort=True):
    """
    Extract all the fields in given attributes (dict)
    and return them ordered
    """
    fields_with_names = []
    for attname, value in list(attrs.items()):
        field = get_field_as(value, _as)
        if not field:
            continue
        fields_with_names.append((attname, field))

    if sort:
        fields_with_names = sorted(fields_with_names, key=lambda f: f[1])
    return OrderedDict(fields_with_names)
java
private static boolean isDefinitePrimitive(SoyType type) {
    return type.getKind() == SoyType.Kind.BOOL
        || isNumericPrimitive(type)
        || type.getKind().isKnownStringOrSanitizedContent();
}
java
public Set<String> getUnconditionalClasses() {
    Set<String> filtered = new HashSet<>(this.unconditionalClasses);
    filtered.removeAll(this.exclusions);
    return Collections.unmodifiableSet(filtered);
}
java
public boolean isRevisitDigest() {
    String dupeType = get(CAPTURE_DUPLICATE_ANNOTATION);
    return (dupeType != null && dupeType.equals(CAPTURE_DUPLICATE_DIGEST));
}
python
def gaussian(cls, mu=0, sigma=1):
    '''
    :mu: mean
    :sigma: standard deviation
    :return: Point subclass

    Returns a point whose coordinates are picked from a Gaussian
    distribution with mean 'mu' and standard deviation 'sigma'.
    See random.gauss for further explanation of those parameters.
    '''
    return cls(random.gauss(mu, sigma),
               random.gauss(mu, sigma),
               random.gauss(mu, sigma))
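A minimal usage sketch with a stand-in class (the real Point subclass comes from the host library; the namedtuple here is hypothetical):

import random
from collections import namedtuple
Point = namedtuple('Point', 'x y z')
Point.gaussian = classmethod(gaussian)
random.seed(42)
p = Point.gaussian(mu=10, sigma=0.5)  # each coordinate drawn from N(10, 0.5)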
python
def _write(self, command, future):
    """Write a command to the socket

    :param Command command: the Command data structure

    """
    def on_written():
        self._on_written(command, future)

    try:
        self._stream.write(command.command, callback=on_written)
    except iostream.StreamClosedError as error:
        future.set_exception(exceptions.ConnectionError(error))
    except Exception as error:
        LOGGER.exception('unhandled write failure - %r', error)
        future.set_exception(exceptions.ConnectionError(error))
python
def _remove_accents(filename):
    """
    Function that will try to remove accents from a unicode string to be
    used in a filename. Input filename should be either an ascii or
    unicode string.
    """
    # noinspection PyBroadException
    try:
        filename = filename.replace(" ", "_")
        if isinstance(filename, type(six.u(''))):
            unicode_filename = filename
        else:
            unicode_filename = six.u(filename)
        cleaned_filename = unicodedata.normalize('NFKD', unicode_filename).encode('ASCII', 'ignore').decode('ASCII')

        cleaned_filename = re.sub(r'[^\w\s-]', '', cleaned_filename.strip().lower())
        cleaned_filename = re.sub(r'[-\s]+', '-', cleaned_filename)

        return cleaned_filename
    except:
        traceback.print_exc()
        return filename
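A worked example with a hypothetical accented filename (spaces become underscores first, then the accent and the dot are stripped):

print(_remove_accents(u'Caf\u00e9 du Monde.txt'))  # -> 'cafe_du_mondetxt'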
java
public Account getAccount(String sessionId) throws MovieDbException {
    TmdbParameters parameters = new TmdbParameters();
    parameters.add(Param.SESSION_ID, sessionId);

    URL url = new ApiUrl(apiKey, MethodBase.ACCOUNT).buildUrl(parameters);
    String webpage = httpTools.getRequest(url);

    try {
        return MAPPER.readValue(webpage, Account.class);
    } catch (IOException ex) {
        throw new MovieDbException(ApiExceptionType.MAPPING_FAILED, "Failed to get Account", url, ex);
    }
}
java
public CmsResourceFilter addRequireTimerange() {
    CmsResourceFilter extendedFilter = (CmsResourceFilter)clone();

    extendedFilter.m_filterTimerange = true;
    extendedFilter.updateCacheId();

    return extendedFilter;
}
python
def handle_pubrec(self):
    """Handle incoming PUBREC packet."""
    self.logger.info("PUBREC received")

    ret, mid = self.in_packet.read_uint16()
    if ret != NC.ERR_SUCCESS:
        return ret

    evt = event.EventPubrec(mid)
    self.push_event(evt)
    return NC.ERR_SUCCESS
java
protected void writeToArchive(File[] sources, ArchiveOutputStream archive) throws IOException {
    for (File source : sources) {
        if (!source.exists()) {
            throw new FileNotFoundException(source.getPath());
        } else if (!source.canRead()) {
            throw new FileNotFoundException(source.getPath() + " (Permission denied)");
        }
        writeToArchive(source.getParentFile(), new File[]{ source }, archive);
    }
}
java
public void setForwardRoutingPath(List<SIDestinationAddress> value) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "setForwardRoutingPath");
    setFRP(value);
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "setForwardRoutingPath");
}
python
def _s3_resource(dallinger_region=False):
    """A boto3 S3 resource using the AWS keys in the config."""
    config = get_config()
    if not config.ready:
        config.load()
    region = "us-east-1" if dallinger_region else config.get("aws_region")
    return boto3.resource(
        "s3",
        region_name=region,
        aws_access_key_id=config.get("aws_access_key_id"),
        aws_secret_access_key=config.get("aws_secret_access_key"),
    )
java
@SuppressWarnings("unchecked")
@Override
public void setup() throws CoreException {
    final boolean canProcessAnnotation = ComponentEnhancer.canProcessAnnotation((Class<? extends Component<?>>) this.getClass());
    if (canProcessAnnotation) {
        // Search Singleton and Multiton annotation on field
        ComponentEnhancer.injectComponent(this);

        // Attach custom method configured with custom Lifecycle annotation
        this.lifecycleMethod = ComponentEnhancer.defineLifecycleMethod(this);

        // Search OnWave annotation to manage auto wave handler setup
        ComponentEnhancer.manageOnWaveAnnotation(this);
    }

    callAnnotatedMethod(BeforeInit.class);

    manageOptionalData();

    if (canProcessAnnotation) {
        ComponentEnhancer.injectInnerComponent(this);
    }

    // Initialize all inner components
    initInternalInnerComponents();

    // Prepare the current component
    ready();

    callAnnotatedMethod(AfterInit.class);
}
java
public Observable<BlobContainerInner> getAsync(String resourceGroupName, String accountName, String containerName) {
    return getWithServiceResponseAsync(resourceGroupName, accountName, containerName)
            .map(new Func1<ServiceResponse<BlobContainerInner>, BlobContainerInner>() {
        @Override
        public BlobContainerInner call(ServiceResponse<BlobContainerInner> response) {
            return response.body();
        }
    });
}
python
def parse_kwargs(kwargs):
    """
    Convert a list of kwargs into a dictionary. Duplicates of the same
    keyword get added to a list within the dictionary.

    >>> parse_kwargs(['--var1=1', '--var2=2', '--var1=3'])
    {'var1': ['1', '3'], 'var2': '2'}
    """
    d = defaultdict(list)
    for k, v in ((k.lstrip('-'), v) for k, v in (a.split('=') for a in kwargs)):
        d[k].append(v)

    ret = {}
    for k, v in d.items():
        # replace single item lists with just the item.
        if len(v) == 1 and type(v) is list:
            ret[k] = v[0]
        else:
            ret[k] = v
    return ret
java
public static void main(String[] args) {
    if (args.length < 1) {
        System.out.println("Usage: java twitter4j.examples.list.GetUserListMemberships [list member screen name]");
        System.exit(-1);
    }
    try {
        Twitter twitter = new TwitterFactory().getInstance();
        long cursor = -1;
        PagableResponseList<UserList> lists;
        do {
            lists = twitter.getUserListMemberships(args[0], cursor);
            for (UserList list : lists) {
                System.out.println("id:" + list.getId() + ", name:" + list.getName() + ", description:"
                        + list.getDescription() + ", slug:" + list.getSlug() + "");
            }
        } while ((cursor = lists.getNextCursor()) != 0);
        System.exit(0);
    } catch (TwitterException te) {
        te.printStackTrace();
        System.out.println("Failed to list the lists: " + te.getMessage());
        System.exit(-1);
    }
}
java
@Override
public <T> List<T> dynamicQuery(DynamicQuery dynamicQuery) {
    return cpDefinitionPersistence.findWithDynamicQuery(dynamicQuery);
}
java
public void addDataElement(String elemName, String content, Attributes attrs) {
    writeDataElement(elemName, attrs, content);
}
python
def read_var_uint32(self):
    """Reads a varint from the stream, interprets this varint
    as an unsigned, 32-bit integer, and returns the integer.
    """
    i = self.read_var_uint64()
    if i > wire_format.UINT32_MAX:
        raise errors.DecodeError('Value out of range for uint32: %d' % i)
    return i
java
public static Type[] getImplicitLowerBounds(final WildcardType wildcardType) {
    Validate.notNull(wildcardType, "wildcardType is null");
    final Type[] bounds = wildcardType.getLowerBounds();

    return bounds.length == 0 ? new Type[] { null } : bounds;
}
java
@Override
public GallicWeight commonDivisor(GallicWeight a, GallicWeight b) {
    double newWeight = this.weightSemiring.plus(a.getWeight(), b.getWeight());
    if (isZero(a)) {
        if (isZero(b)) {
            return zero;
        }
        if (b.getLabels().isEmpty()) {
            return GallicWeight.create(GallicWeight.EMPTY, newWeight);
        }
        // just the first char of b
        return GallicWeight.createSingleLabel(b.getLabels().get(0), newWeight);
    } else if (isZero(b)) {
        if (a.getLabels().isEmpty()) {
            return GallicWeight.create(GallicWeight.EMPTY, newWeight);
        }
        // just the first char of a
        return GallicWeight.createSingleLabel(a.getLabels().get(0), newWeight);
    } else {
        // neither are zero, emit one char if they share it, otherwise empty
        if (a.getLabels().isEmpty() || b.getLabels().isEmpty()) {
            return GallicWeight.create(GallicWeight.EMPTY, newWeight);
        }
        if (a.getLabels().get(0) == b.getLabels().get(0)) {
            return GallicWeight.createSingleLabel(a.getLabels().get(0), newWeight);
        }
        return GallicWeight.create(GallicWeight.EMPTY, newWeight);
    }
}
python
def portalAdmin(self):
    """gets a reference to a portal administration class"""
    from ..manageportal import PortalAdministration
    return PortalAdministration(admin_url="https://%s/portaladmin" % self.portalHostname,
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port,
                                initalize=False)
python
def strtol(value, strict=True):
    """As close an equivalent as possible of the strtol(3) function
    (with base=0), which is used by postgres to parse parameter values.

    >>> strtol(0) == (0, '')
    True
    >>> strtol(1) == (1, '')
    True
    >>> strtol(9) == (9, '')
    True
    >>> strtol(' +0x400MB') == (1024, 'MB')
    True
    >>> strtol(' -070d') == (-56, 'd')
    True
    >>> strtol(' d ') == (None, 'd')
    True
    >>> strtol('9s', False) == (9, 's')
    True
    >>> strtol(' s ', False) == (1, 's')
    True
    """
    value = str(value).strip()
    ln = len(value)
    i = 0

    # skip sign:
    if i < ln and value[i] in ('-', '+'):
        i += 1

    # we always expect to get digit in the beginning
    if i < ln and value[i].isdigit():
        if value[i] == '0':
            i += 1
            if i < ln and value[i] in ('x', 'X'):  # '0' followed by 'x': HEX
                base = 16
                i += 1
            else:  # just starts with '0': OCT
                base = 8
        else:  # any other digit: DEC
            base = 10

        ret = None
        while i <= ln:
            try:  # try to find maximally long number
                i += 1  # by giving to `int` longer and longer strings
                ret = int(value[:i], base)
            except ValueError:  # until we will not get an exception or end of the string
                i -= 1
                break
        if ret is not None:  # yay! there is a number in the beginning of the string
            return ret, value[i:].strip()  # return the number and the "rest"

    return (None if strict else 1), value.strip()
java
@RequirePOST
public HttpResponse doNewLogRecorder(@QueryParameter String name) {
    Jenkins.checkGoodName(name);
    logRecorders.put(name, new LogRecorder(name));
    // redirect to the config screen
    return new HttpRedirect(name + "/configure");
}
python
def rename_property(cr, model, old_name, new_name):
    """Rename property old_name owned by model to new_name. This should
    happen in a pre-migration script."""
    cr.execute(
        "update ir_model_fields f set name=%s "
        "from ir_model m "
        "where m.id=f.model_id and m.model=%s and f.name=%s "
        "returning f.id", (new_name, model, old_name))
    field_ids = tuple(i for i, in cr.fetchall())
    cr.execute(
        "update ir_model_data set name=%s where model='ir.model.fields' and "
        "res_id in %s", ('%s,%s' % (model, new_name), field_ids))
    cr.execute(
        "update ir_property set name=%s where fields_id in %s",
        (new_name, field_ids))