language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Marshals a {@link DeleteInventoryRequest} onto the given protocol marshaller.
 *
 * @param deleteInventoryRequest the request to marshall; must not be {@code null}
 * @param protocolMarshaller     the marshaller each bound field is written to
 * @throws SdkClientException if the request is {@code null} or marshalling fails
 */
public void marshall(DeleteInventoryRequest deleteInventoryRequest, ProtocolMarshaller protocolMarshaller) {
    if (deleteInventoryRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // Write each request field through its protocol binding descriptor.
        protocolMarshaller.marshall(deleteInventoryRequest.getTypeName(), TYPENAME_BINDING);
        protocolMarshaller.marshall(deleteInventoryRequest.getSchemaDeleteOption(), SCHEMADELETEOPTION_BINDING);
        protocolMarshaller.marshall(deleteInventoryRequest.getDryRun(), DRYRUN_BINDING);
        protocolMarshaller.marshall(deleteInventoryRequest.getClientToken(), CLIENTTOKEN_BINDING);
    } catch (Exception ex) {
        throw new SdkClientException("Unable to marshall request to JSON: " + ex.getMessage(), ex);
    }
}
java
/**
 * Adds the common response headers (currently only X-Powered-By).
 * The header is suppressed entirely when DISABLE_X_POWERED_BY is set;
 * otherwise the configured value is used, falling back to the default.
 */
private void setCommonHeaders() {
    if (WCCustomProperties.DISABLE_X_POWERED_BY) {
        return; // header disabled by configuration
    }
    // Configured value wins; otherwise ask for the default header value.
    final String poweredBy = (WCCustomProperties.X_POWERED_BY == null)
            ? getXPoweredbyHeader()
            : WCCustomProperties.X_POWERED_BY;
    if (poweredBy != null) {
        setHeader(WebContainerConstants.X_POWERED_BY_KEY, poweredBy);
    }
}
python
def _handle_triple_refresh(self, auto_refresh): ''' method to refresh self.rdf.triples if auto_refresh or defaults set to True ''' # if auto_refresh set, and True, refresh if auto_refresh: self.parse_object_like_triples() # else, if auto_refresh is not set (None), check repository instance default elif auto_refresh == None: if self.repo.default_auto_refresh: self.parse_object_like_triples()
python
def normalize(self, inplace: bool = False, percent: bool = False) -> "HistogramBase":
    """Scale the histogram so that its total weight is 1 (or 100 %).

    Parameters
    ----------
    inplace:
        When True, modify this histogram and return it;
        otherwise (default) return a scaled copy.
    percent:
        When True, normalize to 100 instead of 1. Default: False

    Returns
    -------
    HistogramBase : either modified copy or self

    See also
    --------
    densities
    HistogramND.partial_normalize
    """
    if not inplace:
        return self / self.total * (100 if percent else 1)
    # In-place: dividing by total * 0.01 is the same as scaling to percent.
    self /= self.total * (.01 if percent else 1)
    return self
java
/**
 * Converts the internal record representation to its external (marshallable) form.
 *
 * @param recordOwner the owning RecordOwner (passed as Object)
 * @return DBConstants.NORMAL_RETURN on success, otherwise the task error code
 *         or the superclass result
 */
public int convertInternalToExternal(Object recordOwner) {
    if (this.getConvertToNative() == null) {
        // No converter configured: resolve the marshaller class name from the
        // message header and report it as a task error.
        BaseMessageHeader trxMessageHeader = this.getMessage().getMessageHeader();
        String strMessageClass = (String)trxMessageHeader.get(TrxMessageHeader.MESSAGE_MARSHALLER_CLASS);
        String strPackage = (String)trxMessageHeader.get(TrxMessageHeader.BASE_PACKAGE);
        strMessageClass = ClassServiceUtility.getFullClassName(strPackage, strMessageClass);
        return ((RecordOwner)recordOwner).getTask().setLastError("Converter does not exist: " + strMessageClass);
    }
    Object root = this.getConvertToNative().convertInternalToMarshallableObject((RecordOwner)recordOwner);
    if (root != null) {
        this.setRawData(root);
        return DBConstants.NORMAL_RETURN; // Success
    }
    // NOTE(review): falls back to the superclass conversion when the converter
    // produced no root object — presumably the error/legacy path; confirm.
    return super.convertInternalToExternal(recordOwner);
}
java
/**
 * Attaches panel/keyboard switching behaviour to the given views.
 *
 * @param panelLayout            the panel whose visibility is toggled (required)
 * @param switchPanelKeyboardBtn optional button that toggles panel/keyboard
 * @param focusView              optional focused input view
 * @param switchClickListener    optional callback invoked after a toggle
 */
public static void attach(final View panelLayout,
                          /* Nullable */ final View switchPanelKeyboardBtn,
                          /* Nullable */ final View focusView,
                          /* Nullable */ final SwitchClickListener switchClickListener) {
    final Activity activity = (Activity) panelLayout.getContext();

    if (switchPanelKeyboardBtn != null) {
        switchPanelKeyboardBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                final boolean switchToPanel = switchPanelAndKeyboard(panelLayout, focusView);
                if (switchClickListener != null) {
                    switchClickListener.onClickSwitch(v, switchToPanel);
                }
            }
        });
    }

    if (isHandleByPlaceholder(activity)) {
        // FIX: focusView is documented as nullable but was dereferenced
        // unconditionally here, causing an NPE when no focus view is supplied.
        if (focusView != null) {
            focusView.setOnTouchListener(new View.OnTouchListener() {
                @Override
                public boolean onTouch(View v, MotionEvent event) {
                    if (event.getAction() == MotionEvent.ACTION_UP) {
                        /*
                         * Show the fake empty keyboard-same-height panel to fix the conflict when
                         * keyboard going to show.
                         * @see KPSwitchConflictUtil#showKeyboard(View, View)
                         */
                        panelLayout.setVisibility(View.INVISIBLE);
                    }
                    return false;
                }
            });
        }
    }
}
python
def delete_messages(self, ids):
    """Delete the given messages for the current user.

    :param ids: list of message ids to delete
    :return: the raw response of the ``MsgAction`` request
    """
    id_list = self._return_comma_list(ids)
    action = {'op': 'delete', 'id': id_list}
    return self.request('MsgAction', {'action': action})
java
/**
 * Supports the subscript operator for a float array with a {@link Range} index,
 * returning the boxed elements of the selected slice.
 *
 * @param array the source array
 * @param range the range of indices to select
 * @return the selected elements as a {@code List<Float>}
 */
@SuppressWarnings("unchecked")
public static List<Float> getAt(float[] array, Range range) {
    // Delegate to the shared primitive-array range accessor.
    final List<Float> slice = primitiveArrayGet(array, range);
    return slice;
}
python
def create_parser(self): """ Create and return the ``OptionParser`` which will be used to parse the arguments to the worker. """ return OptionParser(prog=self.prog_name, usage=self.usage(), version='%%prog %s' % self.get_version(), option_list=self.option_list)
python
def set_tile(self, codepoint: int, tile: np.ndarray) -> None:
    """Upload a tile into this array.

    The tile can be in 32-bit color (height, width, rgba), or grey-scale
    (height, width).  The tile should have a dtype of ``np.uint8``.
    Grey-scale input is converted to an all-white RGBA tile whose alpha
    channel is the input values.

    This data may need to be sent to graphics card memory, this is a slow
    operation.
    """
    # Normalise to a C-contiguous uint8 buffer so it can be handed to C.
    tile = np.ascontiguousarray(tile, dtype=np.uint8)
    if tile.shape == self.tile_shape:
        # Grey-scale input: build an RGBA tile (white, alpha = input)
        # and recurse through the RGBA path below.
        full_tile = np.empty(self.tile_shape + (4,), dtype=np.uint8)
        full_tile[:, :, :3] = 255
        full_tile[:, :, 3] = tile
        return self.set_tile(codepoint, full_tile)
    required = self.tile_shape + (4,)
    if tile.shape != required:
        raise ValueError(
            "Tile shape must be %r or %r, got %r." % (required, self.tile_shape, tile.shape)
        )
    # Hand the pixel buffer to the C tileset implementation.
    lib.TCOD_tileset_set_tile_(
        self._tileset_p,
        codepoint,
        ffi.cast("struct TCOD_ColorRGBA*", tile.ctypes.data),
    )
python
def _checkMemberName(name): """ See if a member name indicates that it should be private. Private variables in Python (starting with a double underscore but not ending in a double underscore) and bed lumps (variables that are not really private but are by common convention treated as protected because they begin with a single underscore) get Doxygen tags labeling them appropriately. """ assert isinstance(name, str) restrictionLevel = None if not name.endswith('__'): if name.startswith('__'): restrictionLevel = 'private' elif name.startswith('_'): restrictionLevel = 'protected' return restrictionLevel
python
def _scheme_propagation(self, scheme, definitions):
    """Resolve scheme inheritance.

    Inheritance is declared in a scheme object with ``'inherit': '$definition'``.
    Parent objects are resolved recursively for nested inheritance.

    Usage::

        SCHEME = {
            'thing1': {'inherit': '$thing2'},
            '_': {'thing2': {'this_is': 'thing2 is a definition'}},
        }
        scheme = SCHEME.get('thing1')
        if 'inherit' in scheme:
            scheme = self._scheme_propagation(scheme, SCHEME.get('_'))

    :param scheme: A dict, should be a scheme defining validation.
    :param definitions: A dict, should be defined in the scheme using '_'.
    :rtype: A :dict: will return a updated copy of the scheme.
    """
    if not isinstance(scheme, dict):
        raise TypeError('scheme must be a dict to propagate.')

    inherit_from = scheme.get('inherit')
    if isinstance(inherit_from, six.string_types):
        if not inherit_from.startswith('$'):
            raise AttributeError('When inheriting from an object it must start with a $.')
        if inherit_from.count('$') > 1:
            raise AttributeError('When inheriting an object it can only have one $.')
        if not isinstance(definitions, dict):
            raise AttributeError("Must define definitions in the root of the SCHEME. "
                                 "It is done so with '_': { objs }.")
        name = inherit_from[1:]
        # FIX: copying the definitions dict before .get() was pointless work;
        # the lookup does not mutate anything.
        definition = definitions.get(name)
        if not definition:
            raise LookupError(
                'Was unable to find {0} in definitions. The follow are available: {1}.'.format(name, definitions)
            )
    else:
        # FIX: this message was split by a stray line break in the source file.
        raise AttributeError('inherit must be defined in your scheme and be a string value. format: $variable.')

    # Start from the scheme keys that the definition does not provide.
    updated_scheme = {key: value for key, value in six.iteritems(scheme) if key not in definition}
    nested_scheme = None
    for key, value in six.iteritems(definition):
        if key in scheme:
            # Keys present in the child scheme override inherited values.
            updated_scheme[key] = scheme[key]
        else:
            updated_scheme[key] = value
        if key == 'inherit':
            # The definition itself inherits: resolve recursively.
            nested_scheme = self._scheme_propagation(definition, definitions)

    # The resolved scheme should no longer advertise inheritance.
    if 'inherit' in updated_scheme:
        del updated_scheme['inherit']

    if nested_scheme is not None:
        updated_scheme.update(nested_scheme)

    return updated_scheme
python
def get_all_conda_bins():
    """Retrieve all possible conda bin directories, including environments."""
    bcbio_bin = get_bcbio_bin()
    conda_dir = os.path.dirname(bcbio_bin)
    envs_marker = os.path.join("anaconda", "envs")
    if envs_marker in conda_dir:
        # Running inside an environment: walk back up to the root install.
        root = conda_dir[:conda_dir.rfind(envs_marker)]
        conda_dir = os.path.join(root, "anaconda")
    env_bins = glob.glob(os.path.join(conda_dir, "envs", "*", "bin"))
    return [bcbio_bin] + list(env_bins)
python
def bambus(args):
    """
    %prog bambus bambus.bed bambus.mates total.fasta

    Insert unplaced scaffolds based on mates.
    """
    from jcvi.utils.iter import pairwise
    from jcvi.formats.bed import BedLine
    from jcvi.formats.posmap import MatesFile

    p = OptionParser(bambus.__doc__)
    p.add_option("--prefix", default="scaffold",
                 help="Prefix of the unplaced scaffolds [default: %default]")
    p.add_option("--minlinks", default=3, type="int",
                 help="Minimum number of links to place [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    bedfile, matesfile, fastafile = args
    pf = matesfile.rsplit(".", 1)[0]
    logfile = pf + ".log"
    log = open(logfile, "w")

    mf = MatesFile(matesfile)
    # The largest insert size across libraries bounds valid placements.
    maxdist = max(x.max for x in mf.libraries.values())
    logging.debug("Max separation: {0}".format(maxdist))

    prefix = opts.prefix
    minlinks = opts.minlinks

    is_unplaced = lambda x: x.startswith(prefix)
    bed = Bed(bedfile, sorted=False)
    beds = []
    unplaced = defaultdict(list)

    # Collect mate pairs where exactly one end sits on an unplaced scaffold;
    # normalise so `a` is the placed end and `b` the unplaced one.
    for a, b in pairwise(bed):
        aname, bname = a.accn, b.accn
        aseqid, bseqid = a.seqid, b.seqid
        if aname not in mf:
            continue
        pa, la = mf[aname]
        if pa != bname:
            continue
        ia = is_unplaced(aseqid)
        ib = is_unplaced(bseqid)
        if ia == ib:
            continue
        if ia:
            a, b = b, a
        unplaced[b.seqid].append((a, b))
        beds.extend([a, b])

    sizes = Sizes(fastafile)

    candidatebed = Bed()
    cbeds = []
    # For each unplaced scaffold, find most likely placement and orientation
    for scf, beds in sorted(unplaced.items()):
        print(file=log)
        ranges = []
        for a, b in beds:
            aname, astrand = a.accn, a.strand
            bname, bstrand = b.accn, b.strand
            aseqid, bseqid = a.seqid, b.seqid
            pa, lib = mf[aname]
            print(a, file=log)
            print(b, file=log)
            # Mates are expected on opposite strands; same strand means the
            # unplaced scaffold must be flipped.
            flip_b = (astrand == bstrand)
            fbstrand = '-' if flip_b else '+'
            if flip_b:
                b.reverse_complement(sizes)
            lmin, lmax = lib.min, lib.max
            L = sizes.get_size(scf)
            assert astrand in ('+', '-')
            # Project the library insert range onto the placed sequence to get
            # the interval where the scaffold could start.
            if astrand == '+':
                offset = a.start - b.end
                sstart, sstop = offset + lmin, offset + lmax
            else:
                offset = a.end - b.start + L
                sstart, sstop = offset - lmax, offset - lmin
            # Prevent out of range error
            size = sizes.get_size(aseqid)
            sstart = max(0, sstart)
            sstop = max(0, sstop)
            sstart = min(size - 1, sstart)
            sstop = min(size - 1, sstop)
            start_range = (aseqid, sstart, sstop, scf, 1, fbstrand)
            print("*" + "\t".join(str(x) for x in start_range), file=log)
            ranges.append(start_range)

        mranges = [x[:3] for x in ranges]
        # Determine placement by finding the interval with the most support
        rd = ranges_depth(mranges, sizes.mapping, verbose=False)
        alldepths = []
        for depth in rd:
            alldepths.extend(depth)
        print(alldepths, file=log)

        maxdepth = max(alldepths, key=lambda x: x[-1])[-1]
        if maxdepth < minlinks:
            print("Insufficient links ({0} < {1})".format(maxdepth, minlinks), file=log)
            continue

        candidates = [x for x in alldepths if x[-1] == maxdepth]
        nseqids = len(set(x[0] for x in candidates))
        if nseqids != 1:
            msg = "Multiple conflicting candidates found"
            print(msg, file=log)
            continue

        seqid, mmin, mmax, depth = candidates[0]
        mmin, mmax = range_minmax([x[1:3] for x in candidates])

        if mmin >= mmax:
            msg = "Invalid (min, max) range"
            print("Invalid (min, max) range", file=log)
            continue

        if (mmax - mmin) > maxdist:
            msg = "(min, max) distance greater than library maxdist"
            print(msg, file=log)
            continue

        # Determine orientation by voting
        nplus, nminus = 0, 0
        arange = (seqid, mmin, mmax)
        for sid, start, end, sf, sc, fbstrand in ranges:
            brange = (sid, start, end)
            if range_overlap(arange, brange):
                if fbstrand == '+':
                    nplus += 1
                else:
                    nminus += 1
        fbstrand = '+' if nplus >= nminus else '-'

        candidate = (seqid, mmin, mmax, scf, depth, fbstrand)
        bedline = BedLine("\t".join((str(x) for x in candidate)))
        cbeds.append(bedline)
        print("Plus: {0}, Minus: {1}".format(nplus, nminus), file=log)
        print(candidate, file=log)

    candidatebed.extend(cbeds)
    logging.debug("A total of {0} scaffolds can be placed.".format(len(candidatebed)))
    log.close()

    candidatebedfile = pf + ".candidate.bed"
    candidatebed.print_to_file(candidatebedfile, sorted=True)
python
def put(self, user_name: str) -> User:
    """
    Updates the User Resource with the name.

    Only the user themselves or an admin may perform the update; any
    other caller is rejected with a 403 via ``abort``.
    """
    current = current_user()
    if current.name == user_name or current.is_admin:
        user = self._get_or_abort(user_name)
        self.update(user)
        # NOTE(review): committing before session.add(user) looks inverted —
        # usually the object is added to the session before commit; confirm
        # the intended persistence order.
        session.commit()
        session.add(user)
        return user
    else:
        abort(403)
python
def _clashes(self, startTime, duration):
    """
    Finds already-scheduled measurements that overlap the proposed window.

    :param startTime: the start time.
    :param duration: the duration.
    :return: the list of clashing measurements — empty (falsy) when the
             proposed measurement does not overlap anything, i.e. a truthy
             result means the measurement is NOT allowed.
    """
    return [m for m in self.activeMeasurements if m.overlapsWith(startTime, duration)]
python
def from_table(cls, table, length, prefix=0, flatten=False): """ Extract from the given table a tree for word length, taking only prefixes of prefix length (if greater than 0) into account to compute successors. :param table: the table to extract the tree from; :param length: the length of words generated by the extracted tree; greater or equal to 1; :param prefix: if greater than 0, the length of the prefixes used for computing successors; :param flatten: whether to flatten the table or not; :return: the tree corresponding to words of length from table. """ # Build the expanded tree with necessary suffix and length tree = defaultdict(dict) # The tree pending = {(">", 0)} # The nodes to expand while pending: suffix, size = pending.pop() if size < length: choices = table.weighted_choices(suffix, exclude={"<"}, flatten=flatten) # The word length is not reached yet, expand for successor, weight in choices.items(): expanded = suffix + successor if prefix > 0: expanded = expanded[-prefix:] new_node = (expanded, size + 1) tree[(suffix, size)][new_node] = weight pending.add(new_node) else: choices = table.weighted_choices(suffix, flatten=flatten) # The word length is reached, only add < if present if "<" in choices: tree[(suffix, size)][("<", size + 1)] = 1 else: tree[(suffix, size)] = dict() return cls(cls.trim_tree(tree))
python
def list_blobs(kwargs=None, storage_conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0

    List blobs associated with the container

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_blobs my-azure container=mycontainer

    container:
        The name of the storage container
    prefix:
        Optional. Filters the results to return only blobs whose names
        begin with the specified prefix.
    marker:
        Optional. A string value that identifies the portion of the list
        to be returned with the next list operation. The operation returns
        a marker value within the response body if the list returned was
        not complete. The marker value may then be used in a subsequent
        call to request the next set of list items. The marker value is
        opaque to the client.
    maxresults:
        Optional. Specifies the maximum number of blobs to return,
        including all BlobPrefix elements. If the request does not specify
        maxresults or specifies a value greater than 5,000, the server will
        return up to 5,000 items. Setting maxresults to a value less than
        or equal to zero results in error response code 400 (Bad Request).
    include:
        Optional. Specifies one or more datasets to include in the
        response. To specify more than one of these options on the URI,
        you must separate each option with a comma. Valid values are:

        snapshots:
            Specifies that snapshots should be included in the
            enumeration. Snapshots are listed from oldest to newest in
            the response.
        metadata:
            Specifies that blob metadata be returned in the response.
        uncommittedblobs:
            Specifies that blobs for which blocks have been uploaded,
            but which have not been committed using Put Block List
            (REST API), be included in the response.
        copy:
            Version 2012-02-12 and newer. Specifies that metadata
            related to any current or previous Copy Blob operation
            should be included in the response.
    delimiter:
        Optional. When the request includes this parameter, the operation
        returns a BlobPrefix element in the response body that acts as a
        placeholder for all blobs whose names begin with the same
        substring up to the appearance of the delimiter character. The
        delimiter may be a single character or a string.
    '''
    # Must be invoked as a salt-cloud function (-f/--function).
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_blobs function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    if 'container' not in kwargs:
        raise SaltCloudSystemExit('An storage container name must be specified as "container"')

    # Lazily build a storage connection from the kwargs when none is supplied.
    if not storage_conn:
        storage_conn = get_storage_conn(conn_kwargs=kwargs)

    return salt.utils.msazure.list_blobs(storage_conn=storage_conn, **kwargs)
java
/**
 * Collects each sequence as a CSV string and writes it to a local file,
 * one file per sequence ({@code 0.csv}, {@code 1.csv}, ...), in an order
 * shuffled deterministically by the given seed.
 *
 * @param baseDir   target directory (created if missing)
 * @param sequences the sequences to export
 * @param seed      seed for the deterministic shuffle
 * @throws Exception on collection or file-writing failure
 */
public static void exportCSVSequenceLocal(File baseDir, JavaRDD<List<List<Writable>>> sequences, long seed)
        throws Exception {
    baseDir.mkdirs();
    if (!baseDir.isDirectory())
        throw new IllegalArgumentException("File is not a directory: " + baseDir.toString());
    String baseDirStr = baseDir.toString();

    List<String> fileContents = sequences.map(new SequenceToStringFunction(",")).collect();
    // collect() may hand back an immutable list; copy so it can be shuffled.
    if (!(fileContents instanceof ArrayList))
        fileContents = new ArrayList<>(fileContents);
    Collections.shuffle(fileContents, new Random(seed));

    int index = 0;
    for (String sequence : fileContents) {
        File outFile = new File(FilenameUtils.concat(baseDirStr, index + ".csv"));
        FileUtils.writeStringToFile(outFile, sequence);
        index++;
    }
}
java
/**
 * Use this API to add sslocspresponder resources in bulk.
 *
 * @param client    the nitro service client used to issue the request
 * @param resources the responders to add; may be null or empty (no-op)
 * @return the bulk-request responses, or null when there was nothing to add
 * @throws Exception on request failure
 */
public static base_responses add(nitro_service client, sslocspresponder resources[]) throws Exception {
    base_responses result = null;
    if (resources != null && resources.length > 0) {
        // Copy only the settable fields of each resource into a fresh
        // request object before issuing the bulk add.
        sslocspresponder addresources[] = new sslocspresponder[resources.length];
        for (int i=0;i<resources.length;i++){
            addresources[i] = new sslocspresponder();
            addresources[i].name = resources[i].name;
            addresources[i].url = resources[i].url;
            addresources[i].cache = resources[i].cache;
            addresources[i].cachetimeout = resources[i].cachetimeout;
            addresources[i].batchingdepth = resources[i].batchingdepth;
            addresources[i].batchingdelay = resources[i].batchingdelay;
            addresources[i].resptimeout = resources[i].resptimeout;
            addresources[i].respondercert = resources[i].respondercert;
            addresources[i].trustresponder = resources[i].trustresponder;
            addresources[i].producedattimeskew = resources[i].producedattimeskew;
            addresources[i].signingcert = resources[i].signingcert;
            addresources[i].usenonce = resources[i].usenonce;
            addresources[i].insertclientcert = resources[i].insertclientcert;
        }
        result = add_bulk_request(client, addresources);
    }
    return result;
}
java
/**
 * Signs a channel configuration on behalf of the given user.
 *
 * @param channelConfiguration the configuration to sign
 * @param signer               the user producing the signature
 * @return the configuration signature bytes
 * @throws InvalidArgumentException if the client state or arguments are invalid
 */
public byte[] getChannelConfigurationSignature(ChannelConfiguration channelConfiguration, User signer) throws InvalidArgumentException {
    clientCheck();
    // Signing is delegated to a freshly created system channel.
    final Channel systemChannel = Channel.newSystemChannel(this);
    return systemChannel.getChannelConfigurationSignature(channelConfiguration, signer);
}
java
/**
 * Dispatches the demo's button clicks to the corresponding ARouter API calls.
 */
@Override
public void onClick(View v) {
    switch (v.getId()) {
        case R.id.openLog:
            ARouter.openLog();
            break;
        case R.id.openDebug:
            ARouter.openDebug();
            break;
        case R.id.init:
            // Debug mode is not strictly required, but if the user has enabled
            // InstantRun and forgot to turn on debug mode the demo cannot be
            // used. With InstantRun, debug mode must be enabled before init,
            // but it must be turned off before release: enabling debug mode in
            // production is a security risk (InstantRun is a development-only
            // feature). BuildConfig.DEBUG can be used to tell the environments
            // apart.
            ARouter.openDebug();
            ARouter.init(getApplication());
            break;
        case R.id.normalNavigation:
            ARouter.getInstance()
                    .build("/test/activity2")
                    .navigation();
            break;
        case R.id.kotlinNavigation:
            ARouter.getInstance()
                    .build("/kotlin/test")
                    .withString("name", "老王")
                    .withInt("age", 23)
                    .navigation();
            break;
        case R.id.normalNavigationWithParams:
            // ARouter.getInstance()
            //         .build("/test/activity2")
            //         .withString("key1", "value1")
            //         .navigation();
            // Navigate via a URI instead of a path string.
            Uri testUriMix = Uri.parse("arouter://m.aliyun.com/test/activity2");
            ARouter.getInstance().build(testUriMix)
                    .withString("key1", "value1")
                    .navigation();
            break;
        case R.id.oldVersionAnim:
            ARouter.getInstance()
                    .build("/test/activity2")
                    .withTransition(R.anim.slide_in_bottom, R.anim.slide_out_bottom)
                    .navigation(this);
            break;
        case R.id.newVersionAnim:
            if (Build.VERSION.SDK_INT >= 16) {
                ActivityOptionsCompat compat = ActivityOptionsCompat.
                        makeScaleUpAnimation(v, v.getWidth() / 2, v.getHeight() / 2, 0, 0);
                ARouter.getInstance()
                        .build("/test/activity2")
                        .withOptionsCompat(compat)
                        .navigation();
            } else {
                Toast.makeText(this, "API < 16,不支持新版本动画", Toast.LENGTH_SHORT).show();
            }
            break;
        case R.id.interceptor:
            ARouter.getInstance()
                    .build("/test/activity4")
                    .navigation(this, new NavCallback() {
                        @Override
                        public void onArrival(Postcard postcard) {
                        }

                        @Override
                        public void onInterrupt(Postcard postcard) {
                            Log.d("ARouter", "被拦截了");
                        }
                    });
            break;
        case R.id.navByUrl:
            ARouter.getInstance()
                    .build("/test/webview")
                    .withString("url", "file:///android_asset/schame-test.html")
                    .navigation();
            break;
        case R.id.autoInject:
            // Demonstrates automatic field injection of primitives and
            // serializable/parcelable/custom objects on the target page.
            TestSerializable testSerializable = new TestSerializable("Titanic", 555);
            TestParcelable testParcelable = new TestParcelable("jack", 666);
            TestObj testObj = new TestObj("Rose", 777);
            List<TestObj> objList = new ArrayList<>();
            objList.add(testObj);
            Map<String, List<TestObj>> map = new HashMap<>();
            map.put("testMap", objList);
            ARouter.getInstance().build("/test/activity1")
                    .withString("name", "老王")
                    .withInt("age", 18)
                    .withBoolean("boy", true)
                    .withLong("high", 180)
                    .withString("url", "https://a.b.c")
                    .withSerializable("ser", testSerializable)
                    .withParcelable("pac", testParcelable)
                    .withObject("obj", testObj)
                    .withObject("objList", objList)
                    .withObject("map", map)
                    .navigation();
            break;
        case R.id.navByName:
            ((HelloService) ARouter.getInstance().build("/yourservicegroupname/hello").navigation()).sayHello("mike");
            break;
        case R.id.navByType:
            ARouter.getInstance().navigation(HelloService.class).sayHello("mike");
            break;
        case R.id.navToMoudle1:
            ARouter.getInstance().build("/module/1").navigation();
            break;
        case R.id.navToMoudle2:
            // This page explicitly declares its own group name.
            ARouter.getInstance().build("/module/2", "m2").navigation();
            break;
        case R.id.destroy:
            ARouter.getInstance().destroy();
            break;
        case R.id.failNav:
            ARouter.getInstance().build("/xxx/xxx").navigation(this, new NavCallback() {
                @Override
                public void onFound(Postcard postcard) {
                    Log.d("ARouter", "找到了");
                }

                @Override
                public void onLost(Postcard postcard) {
                    Log.d("ARouter", "找不到了");
                }

                @Override
                public void onArrival(Postcard postcard) {
                    Log.d("ARouter", "跳转完了");
                }

                @Override
                public void onInterrupt(Postcard postcard) {
                    Log.d("ARouter", "被拦截了");
                }
            });
            break;
        case R.id.callSingle:
            ARouter.getInstance().navigation(SingleService.class).sayHello("Mike");
            break;
        case R.id.failNav2:
            ARouter.getInstance().build("/xxx/xxx").navigation();
            break;
        case R.id.failNav3:
            ARouter.getInstance().navigation(MainActivity.class);
            break;
        case R.id.normalNavigation2:
            ARouter.getInstance()
                    .build("/test/activity2")
                    .navigation(this, 666);
            break;
        case R.id.getFragment:
            Fragment fragment = (Fragment) ARouter.getInstance().build("/test/fragment").navigation();
            Toast.makeText(this, "找到Fragment:" + fragment.toString(), Toast.LENGTH_SHORT).show();
            break;
        default:
            break;
    }
}
python
def format_check(settings):
    """
    Check the format of a osmnet_config object.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing

    Raises
    ------
    AssertionError
        If a key is unknown or a value has the wrong type.
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']

    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuration keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            # BUG FIX: the old check iterated the characters of each tag
            # (all(... for element in value)), so string tags always passed
            # and non-string tags raised TypeError instead of a clean assert.
            for element in settings[key]:
                assert isinstance(element, str), \
                    'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
python
def get(self, key, *, encoding=_NOTSET):
    """Return the value stored at ``key``.

    :param key: the key to look up
    :param encoding: optional response-encoding override
    """
    command = b'GET'
    return self.execute(command, key, encoding=encoding)
python
def is_opposite(self, ns1, id1, ns2, id2):
    """Return True if two entities are in an "is_opposite" relationship.

    Parameters
    ----------
    ns1 : str
        Namespace code for the first entity.
    id1 : str
        URI for the first entity.
    ns2 : str
        Namespace code for the second entity.
    id2 : str
        URI for the second entity.

    Returns
    -------
    bool
        True if the first entity has an "is_opposite" relationship with
        the second.
    """
    subject = rdflib.term.URIRef(self.get_uri(ns1, id1))
    target = rdflib.term.URIRef(self.get_uri(ns2, id2))
    relation = rdflib.term.URIRef(self.relations_prefix + 'is_opposite')
    # Membership test over the objects related to `subject` via the relation.
    return target in self.graph.objects(subject, relation)
java
/**
 * Decides whether the given network interface should be used for harvesting.
 * An interface qualifies when it is up and not a loopback interface.
 *
 * @param networkInterface the interface to evaluate
 * @return true if the interface is up and not loopback
 * @throws HarvestException if the interface state cannot be queried
 */
private boolean useNetworkInterface(NetworkInterface networkInterface) throws HarvestException {
    try {
        return !networkInterface.isLoopback() && networkInterface.isUp();
    } catch (SocketException e) {
        // FIX: the old message mentioned only the loopback check, although
        // isUp() can throw here as well — made the message accurate.
        throw new HarvestException("Could not evaluate network interface state.", e);
    }
}
java
/**
 * Initializes the provisioning manager from configuration.
 *
 * When the TeleStax proxy is enabled its connection settings are used;
 * otherwise the "voip-innovations" subset of the phone-number provisioning
 * configuration is used. Also prepares the XStream (de)serializer with the
 * aliases and converters for the VoIP Innovations XML API.
 */
@Override
public void init(Configuration phoneNumberProvisioningConfiguration, Configuration telestaxProxyConfiguration, ContainerConfiguration containerConfiguration) {
    this.containerConfiguration = containerConfiguration;
    telestaxProxyEnabled = telestaxProxyConfiguration.getBoolean("enabled", false);
    if (telestaxProxyEnabled) {
        // Route provisioning calls through the TeleStax proxy.
        uri = telestaxProxyConfiguration.getString("uri");
        username = telestaxProxyConfiguration.getString("login");
        password = telestaxProxyConfiguration.getString("password");
        endpoint = telestaxProxyConfiguration.getString("endpoint");
        activeConfiguration = telestaxProxyConfiguration;
    } else {
        // Talk to VoIP Innovations directly.
        Configuration viConf = phoneNumberProvisioningConfiguration.subset("voip-innovations");
        uri = viConf.getString("uri");
        username = viConf.getString("login");
        password = viConf.getString("password");
        endpoint = viConf.getString("endpoint");
        activeConfiguration = viConf;
    }
    this.header = header(username, password);
    // Configure XML (un)marshalling for the VoIP Innovations responses.
    xstream = new XStream();
    xstream.alias("response", VoipInnovationsResponse.class);
    xstream.alias("header", VoipInnovationsHeader.class);
    xstream.alias("body", VoipInnovationsBody.class);
    xstream.alias("lata", LATAConverter.class);
    xstream.alias("npa", NPAConverter.class);
    xstream.alias("nxx", NXXConverter.class);
    xstream.alias("rate_center", RateCenterConverter.class);
    xstream.alias("state", StateConverter.class);
    xstream.alias("tn", TNConverter.class);
    xstream.registerConverter(new VoipInnovationsResponseConverter());
    xstream.registerConverter(new VoipInnovationsHeaderConverter());
    xstream.registerConverter(new VoipInnovationsBodyConverter());
    xstream.registerConverter(new GetDIDListResponseConverter());
    xstream.registerConverter(new LATAConverter());
    xstream.registerConverter(new NPAConverter());
    xstream.registerConverter(new NXXConverter());
    xstream.registerConverter(new RateCenterConverter());
    xstream.registerConverter(new StateConverter());
    xstream.registerConverter(new TNConverter());
}
python
def _post_install_func_name(package):
    """Derive the hook name for a package (dots and dashes become underscores)."""
    return ''.join(['post_install_', package.replace('.', '_').replace('-', '_')])


def _run_post_install_hooks(funcs, funcs_run):
    """Run each matching post_install_* hook not yet executed and record it."""
    for f in env.installed_packages[env.host]:
        func = funcs.get(_post_install_func_name(f))
        if func and func not in funcs_run:
            func()
            funcs_run.append(func)


def post_install_package():
    """
    Run any functions post install a matching package.
    Hook functions are in the form post_install_[package name] and are
    defined in a deploy.py file

    Will be executed post install_packages and upload_etc
    """
    funcs_run = []
    # Project-level hooks first. (FIX: the duplicated lookup loop is now a
    # single helper, and the project-level pass gains the same "not already
    # run" guard the other passes had, so a hook cannot fire twice when two
    # package names normalise to the same function name.)
    module_name = '.'.join([env.project_package_name, 'deploy'])
    try:
        imported = import_module(module_name)
        _run_post_install_hooks(vars(imported), funcs_run)
    except ImportError:
        pass
    # Then per-app hooks; woven itself is handled separately, last.
    for app in env.INSTALLED_APPS:
        if app == 'woven':
            continue
        module_name = '.'.join([app, 'deploy'])
        try:
            imported = import_module(module_name)
            _run_post_install_hooks(vars(imported), funcs_run)
        except ImportError:
            pass
    # Run woven last.
    import woven.deploy
    _run_post_install_hooks(vars(woven.deploy), funcs_run)
java
/**
 * Filters the list, returning the items matching the given filter
 * (via the {@code Collection} overload of {@code grep}).
 *
 * @param self   the list to filter
 * @param filter the filter to apply (case value, closure, regex, ...)
 * @return the matching items as a list
 */
public static <T> List<T> grep(List<T> self, Object filter) {
    // Delegate to the Collection overload; the result is a List because the
    // input is a List.
    final Collection<T> source = (Collection<T>) self;
    return (List<T>) grep(source, filter);
}
java
/**
 * Clears the cached control ID for this context and, recursively, for every
 * nested ControlBeanContext child.
 */
private void resetControlID() {
    _controlID = null;
    for (Object member : this) {
        if (member instanceof ControlBeanContext) {
            ((ControlBeanContext) member).resetControlID();
        }
    }
}
python
def extract_name(self, data):
    """Extract the man page name from a web page.

    Takes the contents of the first ``<h1>`` element, strips any nested
    markup, and decodes the ``&gt;``/``&lt;`` entities.
    """
    heading = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
    # Strip nested tags, then decode the two entities the pages use.
    heading = re.sub(r'<([^>]+)>', r'', heading)
    heading = heading.replace('&gt;', '>')
    heading = heading.replace('&lt;', '<')
    return heading
java
/**
 * Executes the named method on the given Groovy script resource.
 *
 * @param groovyScript the script resource; a null resource returns null
 * @param methodName   the method to invoke; blank returns null
 * @param args         arguments passed to the method
 * @param clazz        expected result type
 * @param failOnError  when true, rethrow failures instead of logging them
 * @return the script result, or null on no-op / suppressed failure
 */
@SneakyThrows
public static <T> T executeGroovyScript(final Resource groovyScript, final String methodName, final Object[] args, final Class<T> clazz, final boolean failOnError) {
    if (groovyScript == null || StringUtils.isBlank(methodName)) {
        return null;
    }
    try {
        // Run the script inside a privileged action.
        return AccessController.doPrivileged((PrivilegedAction<T>) () -> getGroovyResult(groovyScript, methodName, args, clazz, failOnError));
    } catch (final Exception e) {
        var cause = (Throwable) null;
        // Unwrap the real failure when it was wrapped by the privileged call.
        if (e instanceof PrivilegedActionException) {
            cause = PrivilegedActionException.class.cast(e).getException();
        } else {
            cause = e;
        }
        if (failOnError) {
            // @SneakyThrows lets this (possibly checked) throwable propagate.
            throw cause;
        }
        LOGGER.error(cause.getMessage(), cause);
    }
    return null;
}
python
def from_image(cls, image):
    """
    Create a PrintableImage from a PIL Image.

    The image is resized to the 512-pixel paper width if wider, converted
    to 1-bit mode, padded to a multiple of 24 rows, and encoded as ESC/POS
    double-density bit-image stripes.

    :param image: a PIL Image
    :return: a PrintableImage wrapping the encoded data and print height
    """
    (w, h) = image.size

    # Thermal paper is 512 pixels wide
    if w > 512:
        ratio = 512. / w
        h = int(h * ratio)
        image = image.resize((512, h), Image.ANTIALIAS)
        # BUG FIX: `w` kept its pre-resize value, which broke the
        # reshape(h, w) below for any image wider than 512 px.
        w = 512

    if image.mode != '1':
        image = image.convert('1')

    pixels = np.array(list(image.getdata())).reshape(h, w)

    # Add white pixels so that image fits into bytes (height multiple of 24)
    extra_rows = int(math.ceil(h / 24)) * 24 - h
    extra_pixels = np.ones((extra_rows, w), dtype=bool)
    pixels = np.vstack((pixels, extra_pixels))
    h += extra_rows

    # BUG FIX: integer division — under Python 3, `h / 24` is a float and
    # breaks both reshape() and np.split().
    nb_stripes = h // 24
    # Reorder into 24-row stripes, column-major within each stripe, 8 bits/byte.
    pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)

    nh = int(w / 256)
    nl = w % 256
    data = []

    # 1 = black in ESC/POS bit images, so invert before packing.
    pixels = np.invert(np.packbits(pixels))
    stripes = np.split(pixels, nb_stripes)

    for stripe in stripes:
        data.extend([
            ESC,
            42,  # *
            33,  # double density mode
            nl,
            nh])
        data.extend(stripe)
        data.extend([
            27,  # ESC
            74,  # J
            48])  # account for double density mode

    # Double density doubles the effective printed height.
    height = h * 2
    return cls(data, height)
java
/**
 * Returns the first measurement whose id carries tag {@code k} with the
 * value {@code v}.
 *
 * @param ms the measurements to search
 * @param k  the tag key to match
 * @param v  the required tag value
 * @return the first matching measurement
 */
public static Measurement first(final Iterable<Measurement> ms, final String k, final String v) {
    return first(ms, measurement -> v.equals(getTagValue(measurement.id(), k)));
}
java
@Override public void setUsers(final String[] theUsers) { mUsers = theUsers; // Cleanup new line and ws for (int i = 0; i < theUsers.length; i++) { mUsers[i] = mUsers[i].trim(); } }
python
def extract_builder_result(builder_result, toolchain_cls=Toolchain):
    """
    Extract the builder result to produce a ``Toolchain`` and ``Spec``
    instance.

    Returns ``(None, None)`` when the result cannot be unpacked into a
    valid (toolchain, spec) pair.
    """
    try:
        toolchain, spec = builder_result
    except Exception:
        return None, None
    valid = isinstance(toolchain, toolchain_cls) and isinstance(spec, Spec)
    return (toolchain, spec) if valid else (None, None)
python
def assignment_action(self, text, loc, assign):
    """Code executed after recognising an assignment statement.

    Looks up the assignment target in the symbol table, checks type
    compatibility, and emits the move instruction.

    Raises
    ------
    SemanticException
        If the lvalue is undefined or the types are incompatible.
    """
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("ASSIGN:", assign)
        if DEBUG == 2:
            self.symtab.display()
        if DEBUG > 2:
            return
    var_index = self.symtab.lookup_symbol(
        assign.var,
        [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
    # FIX: identity comparison with None (was "== None").
    if var_index is None:
        raise SemanticException("Undefined lvalue '%s' in assignment" % assign.var)
    if not self.symtab.same_types(var_index, assign.exp[0]):
        raise SemanticException("Incompatible types in assignment")
    self.codegen.move(assign.exp[0], var_index)
python
def init_db():
    """ Drops and re-creates the SQL schema

    Destructive: every existing table (and its data) is removed before
    the schema is rebuilt and the session committed.
    """
    db.drop_all()
    db.configure_mappers()  # ensure all mappers are configured before create_all
    db.create_all()
    db.session.commit()
python
def destroy(self, eip_or_aid, disassociate=False):
    """Release an EIP.

    If the EIP was allocated for a VPC instance, an AllocationId(aid)
    must be provided instead of a PublicIp. Setting disassociate to
    True will attempt to disassociate the IP before releasing it
    (required for associated nondefault VPC instances).
    """
    # A dotted-quad value means a Classic PublicIp; anything else is a
    # VPC AllocationId.
    is_classic = "." in eip_or_aid
    if is_classic:
        # NOTE: EIPs are automatically disassociated for Classic instances.
        result = self.call("ReleaseAddress", response_data_key="return",
                           PublicIp=eip_or_aid)
    else:
        if disassociate:
            self.disassociate(eip_or_aid)
        result = self.call("ReleaseAddress", response_data_key="return",
                           AllocationId=eip_or_aid)
    return result == "true"
java
/**
 * Lazily builds and returns the flattened list holder for the "above"
 * entries, sorted by the configured sort property.
 *
 * @return the cached flat list holder, created on first access
 */
public SimpleListHolder<T> getFlatAbove() {
    if (flatAbove != null) {
        return flatAbove;
    }
    flatAbove = newSimpleListHolder(above, getSortProperty());
    return flatAbove;
}
python
def plot_account(self, row, per_capita=False, sector=None,
                 file_name=False, file_dpi=600,
                 population=None, **kwargs):
    """ Plots D_pba, D_cba, D_imp and D_exp for the specified row (account)

    Plot either the total country accounts or for a specific sector,
    depending on the 'sector' parameter.

    Per default the accounts are plotted as bar charts.
    However, any valid keyword for the pandas.DataFrame.plot
    method can be passed.

    Notes
    -----
        This looks prettier with the seaborn module
        (import seaborn before calling this method)

    Parameters
    ----------
    row : string, tuple or int
        A valid index for the row in the extension which
        should be plotted (one(!) row - no list allowed)
    per_capita : boolean, optional
        Plot the per capita accounts instead of the absolute
        values default is False
    sector: string, optional
        Plot the results for a specific sector of the IO table. If
        None is given (default), the total regional accounts are plotted.
    population : pandas.DataFrame or np.array, optional
        Vector with population per region. This must be given if
        values should be plotted per_capita for a specific sector since
        these values are calculated on the fly.
    file_name : path string, optional
        If given, saves the plot to the given filename
    file_dpi : int, optional
        Dpi for saving the figure, default 600

    **kwargs : key word arguments, optional
        This will be passed directly to the pd.DataFrame.plot method

    Returns
    -------
    Axis as given by pandas.DataFrame.plot, None in case of errors
    """
    # necessary if row is given for Multiindex without brackets
    if type(per_capita) is not bool:
        logging.error('per_capita parameter must be boolean')
        return None

    if type(row) is int:
        row = self.D_cba.ix[row].name

    # strip tuple/list punctuation so the row label reads cleanly in titles
    name_row = (str(row).
                replace('(', '').
                replace(')', '').
                replace("'", "").
                replace('[', '').
                replace(']', ''))
    if sector:
        graph_name = name_row + ' for sector ' + sector
    else:
        graph_name = name_row + ' total account'
    if per_capita:
        graph_name = graph_name + ' - per capita'

    graph_name = self.name + ' - ' + graph_name

    if self.unit is not None:
        try:
            # for multiindex the entry is given with header,
            # for single index just the entry
            y_label_name = (name_row +
                            ' (' +
                            str(self.unit.ix[row, 'unit'].tolist()[0]) + ')')
        except:
            y_label_name = (name_row + ' (' +
                            str(self.unit.ix[row, 'unit']) + ')')
    else:
        y_label_name = name_row

    if 'kind' not in kwargs:
        kwargs['kind'] = 'bar'

    if 'colormap' not in kwargs:
        kwargs['colormap'] = 'Spectral'

    # map display names to the attribute holding each account variant
    accounts = collections.OrderedDict()

    if sector:
        accounts['Footprint'] = 'D_cba'
        accounts['Territorial'] = 'D_pba'
        accounts['Imports'] = 'D_imp'
        accounts['Exports'] = 'D_exp'
    else:
        if per_capita:
            accounts['Footprint'] = 'D_cba_cap'
            accounts['Territorial'] = 'D_pba_cap'
            accounts['Imports'] = 'D_imp_cap'
            accounts['Exports'] = 'D_exp_cap'
        else:
            accounts['Footprint'] = 'D_cba_reg'
            accounts['Territorial'] = 'D_pba_reg'
            accounts['Imports'] = 'D_imp_reg'
            accounts['Exports'] = 'D_exp_reg'

    data_row = pd.DataFrame(columns=[key for key in accounts])
    for key in accounts:
        if sector:
            try:
                _data = pd.DataFrame(
                    getattr(self, accounts[key]).xs(
                        key=sector, axis=1, level='sector').ix[row].T)
            except (AssertionError, KeyError):
                # fall back to positional level when 'sector' is unnamed
                _data = pd.DataFrame(
                    getattr(self, accounts[key]).xs(
                        key=sector, axis=1, level=1).ix[row].T)

            if per_capita:
                if population is not None:
                    if type(population) is pd.DataFrame:
                        # check for right order:
                        if (population.columns.tolist() !=
                                self.D_cba_reg.columns.tolist()):
                            logging.warning(
                                'Population regions are inconsistent '
                                'with IO regions')
                        population = population.values
                    population = population.reshape((-1, 1))
                    _data = _data / population
                else:
                    logging.error('Population must be given for sector '
                                  'results per capita')
                    return
        else:
            _data = pd.DataFrame(getattr(self, accounts[key]).ix[row].T)
        _data.columns = [key]
        data_row[key] = _data[key]

    if 'title' not in kwargs:
        kwargs['title'] = graph_name

    ax = data_row.plot(**kwargs)
    plt.xlabel('Regions')
    plt.ylabel(y_label_name)
    plt.legend(loc='best')
    try:
        # tight_layout can fail for some backends/figure sizes; best effort
        plt.tight_layout()
    except:
        pass

    if file_name:
        plt.savefig(file_name, dpi=file_dpi)
    return ax
java
/**
 * Configures the name of the property that holds the API key id on the
 * underlying client builder.
 *
 * @param apiKeyIdPropertyName property name for the API key id
 */
public void setApiKeyIdPropertyName(String apiKeyIdPropertyName) {
    this.clientBuilder.setApiKey(
            ApiKeys.builder()
                   .setIdPropertyName(apiKeyIdPropertyName)
                   .build());
}
python
def reset_error_status(self):
    """Report whether any step raised an error, then clear the flags.

    :return: True if a feature- or scenario-level error was recorded
        while executing steps; both flags are reset to False afterwards.
    """
    had_error = self.feature_error or self.scenario_error
    self.feature_error = False
    self.scenario_error = False
    return had_error
python
def __callback(self, rgb, d):
    '''
    Callback that converts an incoming pair of ROS color/depth images
    and stores the combined RGBD sample under the instance lock.

    @param rgb: ROS color Image to translate
    @param d: ROS depth image to translate

    @type rgb: ImageROS
    @type d: ImageROS
    '''
    sample = Images2Rgbd(rgb, d)
    with self.lock:
        self.data = sample
java
/**
 * Lazily skips the leading elements of {@code iterator} that satisfy
 * {@code predicate}; once an element fails the predicate, it and every
 * subsequent element are passed through.
 *
 * @param iterator  the source iterator
 * @param predicate condition identifying the leading elements to drop
 * @return an iterator without the matching leading elements
 */
public static <E> Iterator<E> dropWhile(Iterator<E> iterator, Predicate<E> predicate) {
    DropWhile<E> dropper = new DropWhile<E>(predicate);
    return new FilteringIterator<E>(iterator, dropper);
}
python
def _get_gene2pubmed(self, limit):
    """
    Loops through the gene2pubmed file and adds a simple
    triple to say that a given publication is_about a gene.
    Publications are added as NamedIndividuals.

    These are filtered on the taxon.

    :param limit: maximum number of rows to process (None = no limit)
    :return:

    """
    src_key = 'gene2pubmed'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Processing Gene records")
    line_counter = 0
    myfile = '/'.join((self.rawdir, self.files[src_key]['file']))
    LOG.info("FILE: %s", myfile)
    assoc_counter = 0
    col = self.files[src_key]['columns']
    with gzip.open(myfile, 'rb') as tsv:
        # first line is the header; verify it matches the expected columns
        row = tsv.readline().decode().strip().split('\t')
        row[0] = row[0][1:]  # strip comment
        if col != row:
            LOG.info(
                '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n',
                src_key, col, row)
        for line in tsv:
            line_counter += 1
            # skip comments
            row = line.decode().strip().split('\t')
            if row[0][0] == '#':
                continue
            # (tax_num, gene_num, pubmed_num) = line.split('\t')
            # ## set id_filter=None in init if you don't want to have a filter
            # if self.id_filter is not None:
            #     if ((self.id_filter == 'taxids' and \
            #          (int(tax_num) not in self.tax_ids))
            #             or (self.id_filter == 'geneids' and \
            #                 (int(gene_num) not in self.gene_ids))):
            #         continue
            # #### end filter
            gene_num = row[col.index('GeneID')].strip()
            if self.test_mode and int(gene_num) not in self.gene_ids:
                continue
            tax_num = row[col.index('tax_id')].strip()
            # outside test mode, keep only rows for the configured taxa
            if not self.test_mode and tax_num not in self.tax_ids:
                continue
            pubmed_num = row[col.index('PubMed_ID')].strip()
            # skip rows where either side of the association is missing
            if gene_num == '-' or pubmed_num == '-':
                continue

            gene_id = ':'.join(('NCBIGene', gene_num))
            pubmed_id = ':'.join(('PMID', pubmed_num))

            # genes were typed earlier; reuse that class/individual decision
            if self.class_or_indiv.get(gene_id) == 'C':
                model.addClassToGraph(gene_id, None)
            else:
                model.addIndividualToGraph(gene_id, None)

            # add the publication as a NamedIndividual
            # add type publication
            model.addIndividualToGraph(pubmed_id, None, None)
            reference = Reference(
                graph, pubmed_id, self.globaltt['journal article'])
            reference.addRefToGraph()
            graph.addTriple(
                pubmed_id, self.globaltt['is_about'], gene_id)
            assoc_counter += 1
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    LOG.info(
        "Processed %d pub-gene associations", assoc_counter)
    return
python
def call_plac(f):
    "Decorator to create a simple CLI from `func` using `plac`"
    # Inspect the caller's module name; the frame access must stay directly
    # inside this function so f_back points at the decorating module.
    caller_globals = inspect.currentframe().f_back.f_globals
    if caller_globals['__name__'] == '__main__':
        import plac
        result = plac.call(f)
        if callable(result):
            result()
    else:
        return f
java
/**
 * Associates the given buffer pool with this result partition.
 *
 * <p>The pool must guarantee at least one buffer per subpartition, and
 * registration may happen only once.
 *
 * @param bufferPool the pool to register, never {@code null}
 */
public void registerBufferPool(BufferPool bufferPool) {
    int requiredBuffers = getNumberOfSubpartitions();
    checkArgument(bufferPool.getNumberOfRequiredMemorySegments() >= requiredBuffers,
            "Bug in result partition setup logic: Buffer pool has not enough guaranteed buffers for this result partition.");

    checkState(this.bufferPool == null, "Bug in result partition setup logic: Already registered buffer pool.");

    this.bufferPool = checkNotNull(bufferPool);
}
java
/**
 * Extracts the single abstract method (SAM) from a functional interface.
 *
 * @param baseClass the candidate functional interface
 * @return the single abstract method declared by {@code baseClass}
 * @throws InvalidTypesException if {@code baseClass} is not an interface,
 *         declares no abstract method, or declares more than one
 */
public static Method getSingleAbstractMethod(Class<?> baseClass) {
    if (!baseClass.isInterface()) {
        // fixed: the original message was missing the space before "is not"
        throw new InvalidTypesException("Given class: " + baseClass + " is not a FunctionalInterface.");
    }

    Method sam = null;
    for (Method method : baseClass.getMethods()) {
        if (Modifier.isAbstract(method.getModifiers())) {
            if (sam == null) {
                sam = method;
            } else {
                throw new InvalidTypesException("Given class: " + baseClass
                    + " is not a FunctionalInterface. It has more than one abstract method.");
            }
        }
    }

    if (sam == null) {
        throw new InvalidTypesException(
            "Given class: " + baseClass + " is not a FunctionalInterface. It does not have any abstract methods.");
    }

    return sam;
}
java
/**
 * Returns an auth header cache element, avoiding blocking whenever the
 * cached value is still usable.
 *
 * <p>Good values are returned directly; stale values are returned while a
 * background refresh is started; expired or failed values force a
 * synchronous refresh bounded by the given timeout.
 *
 * @param timeout maximum time to wait for a synchronous refresh
 * @param timeUnit unit of {@code timeout}
 * @return the current (possibly just refreshed) cache element
 */
@VisibleForTesting
HeaderCacheElement getHeaderUnsafe(long timeout, TimeUnit timeUnit) {
    // Optimize for the common case: do a volatile read to peek for a Good cache value
    HeaderCacheElement headerCacheUnsync = this.headerCache;

    // TODO(igorbernstein2): figure out how to make this work with appengine request scoped threads
    switch (headerCacheUnsync.getCacheState()) {
        case Good:
            return headerCacheUnsync;
        case Stale:
            // still usable: serve the stale token and refresh in the background
            asyncRefresh();
            return headerCacheUnsync;
        case Expired:
        case Exception:
            // defer the future resolution (asyncRefresh will spin up a thread that will try to acquire the lock)
            return syncRefresh(timeout, timeUnit);
        default:
            // defensive: unknown enum state maps to an UNAUTHENTICATED element
            String message = "Could not process state: " + headerCacheUnsync.getCacheState();
            LOG.warn(message);
            return new HeaderCacheElement(
                Status.UNAUTHENTICATED
                    .withCause(new IllegalStateException(message)));
    }
}
python
def complete_classname(classname):
    """
    Attempts to complete a partial classname like '.J48' and returns the full
    classname if a single match was found, otherwise an exception is raised.

    :param classname: the partial classname to expand
    :type classname: str
    :return: the full classname
    :rtype: str
    """
    result = javabridge.get_collection_wrapper(
        javabridge.static_call(
            "Lweka/Run;", "findSchemeMatch",
            "(Ljava/lang/String;Z)Ljava/util/List;",
            classname, True))
    if len(result) == 0:
        raise Exception("No classname matches found for: " + classname)
    if len(result) == 1:
        return str(result[0])
    matches = [str(result[i]) for i in range(len(result))]
    raise Exception("Found multiple matches for '" + classname + "':\n" + '\n'.join(matches))
java
/**
 * Builds the circle lookup table for the Hough transform: for each
 * candidate radius, stores the integer x/y offsets of points sampled on
 * that circle.
 *
 * @return the number of offset pairs stored for the last radius processed.
 *         NOTE(review): {@code i} is reset for every radius, so only the
 *         final radius' count is returned -- confirm this is intended.
 */
private int buildLookUpTable() {
    int i = 0;
    int incDen = Math.round(8F * radiusMinPixel); // increment denominator
    lut = new int[2][incDen][depth];
    for( int radius = radiusMinPixel; radius <= radiusMaxPixel; radius = radius + radiusIncPixel ) {
        i = 0;
        for( int incNun = 0; incNun < incDen; incNun++ ) {
            double angle = (2 * Math.PI * (double) incNun) / (double) incDen;
            int indexR = (radius - radiusMinPixel) / radiusIncPixel;
            int rcos = (int) Math.round((double) radius * Math.cos(angle));
            int rsin = (int) Math.round((double) radius * Math.sin(angle));
            // NOTE(review): non-short-circuit '|' and '&' are used; '&'
            // binds tighter, so this reads as
            // (i == 0) | ((rcos != lut[0][i][indexR]) & (rsin != lut[1][i][indexR])).
            // Verify this de-duplication condition matches the intent.
            if ((i == 0) | (rcos != lut[0][i][indexR]) & (rsin != lut[1][i][indexR])) {
                lut[0][i][indexR] = rcos;
                lut[1][i][indexR] = rsin;
                i++;
            }
        }
    }
    return i;
}
java
/**
 * Builds one triple body atom per maximal basic concept of the given
 * tree-witness generators, anchored at the term {@code r0}.
 *
 * @param gens the tree-witness generators
 * @param r0   the anchor term used as subject (or object for inverse roles)
 * @return the list of generated atoms
 */
private ImmutableList<Function> getAtomsForGenerators(Collection<TreeWitnessGenerator> gens, Term r0) {
    return TreeWitnessGenerator.getMaximalBasicConcepts(gens, reasoner).stream()
            .map(con -> {
                log.debug(" BASIC CONCEPT: {}", con);
                if (con instanceof OClass) {
                    return atomFactory.getMutableTripleBodyAtom(r0, ((OClass) con).getIRI());
                }
                if (con instanceof ObjectSomeValuesFrom) {
                    ObjectPropertyExpression ope = ((ObjectSomeValuesFrom) con).getProperty();
                    if (ope.isInverse()) {
                        // inverse role: r0 takes the object position
                        return atomFactory.getMutableTripleBodyAtom(getFreshVariable(), ope.getIRI(), r0);
                    }
                    return atomFactory.getMutableTripleBodyAtom(r0, ope.getIRI(), getFreshVariable());
                }
                DataPropertyExpression dpe = ((DataSomeValuesFrom) con).getProperty();
                return atomFactory.getMutableTripleBodyAtom(r0, dpe.getIRI(), getFreshVariable());
            })
            .collect(ImmutableCollectors.toList());
}
python
def expand(self, msgpos):
    """Expand the message tree entry at the given position."""
    tree_entry = self._tree[msgpos]
    tree_entry.expand(tree_entry.root)
python
def _create_metadata_from_state(self, state, ts): """ state must be disired or reported stype dict object replces primitive type with {"timestamp": ts} in dict """ if state is None: return None def _f(elem, ts): if isinstance(elem, dict): return {_: _f(elem[_], ts) for _ in elem.keys()} if isinstance(elem, list): return [_f(_, ts) for _ in elem] return {"timestamp": ts} return _f(state, ts)
java
/**
 * Reports the JDBC-style nullability of the i-th column.
 *
 * @param i column index within the table
 * @return {@code "YES"} when the column is nullable and not part of the
 *         primary key, {@code "NO"} otherwise
 */
String getColIsNullable(int i) {
    ColumnSchema column = table.getColumn(i);
    if (column.isNullable() && !column.isPrimaryKey()) {
        return "YES";
    }
    return "NO";
}
java
/**
 * Merges a rectangular range of cells starting at the given position,
 * delegating to the table builder.
 *
 * @param pos the address of the top-left cell, e.g. "A1"
 * @param rowMerge number of rows to span
 * @param columnMerge number of columns to span
 * @throws FastOdsException if the merge parameters are invalid
 * @throws IOException if writing through the appender fails
 * @deprecated NOTE(review): no replacement is named here -- confirm the
 *             intended successor API in this library before removal
 */
@Deprecated
public void setCellMerge(final String pos, final int rowMerge, final int columnMerge) throws FastOdsException, IOException {
    this.builder.setCellMerge(this, this.appender, pos, rowMerge, columnMerge);
}
java
/**
 * Returns the value of the selected option in this radio group.
 *
 * @return the {@code value} attribute of the checked radio element
 * @throws WidgetException if no element in the group is checked
 */
@Override
public String getValue() throws WidgetException {
    List<WebElement> elements = findElements();
    for (WebElement we : elements) {
        // Fetch the attribute once per element; the original issued up to
        // two remote WebDriver calls for the same attribute.
        String checked = we.getAttribute("checked");
        if (checked != null && checked.equalsIgnoreCase("true")) {
            return we.getAttribute("value");
        }
    }
    throw new WidgetException("Error while finding selected option on radio group", getLocator());
}
java
/**
 * Returns the class name without its package prefix.
 *
 * <p>For nested classes this keeps the binary form, e.g.
 * {@code Outer$Inner} (unlike {@link Class#getSimpleName()}).
 *
 * @param c the class to inspect
 * @return the package-less name of {@code c}
 */
public static String getClassName(Class<?> c) {
    String fullName = c.getName();
    int lastDot = fullName.lastIndexOf('.');
    return fullName.substring(lastDot + 1);
}
java
/**
 * Issues an FTP MLST command for the given file and parses the single
 * fact line of the multiline reply into an {@link MlsxEntry}.
 *
 * @param fileName the file to query
 * @return the parsed MLST entry
 * @throws IOException on transport errors
 * @throws ServerException if the server rejects the command or the
 *         reply cannot be parsed
 */
public MlsxEntry mlst(String fileName)
    throws IOException, ServerException {
    try {
        Reply reply = controlChannel.execute(new Command("MLST", fileName));
        String replyMessage = reply.getMessage();
        StringTokenizer replyLines = new StringTokenizer(
            replyMessage, System.getProperty("line.separator"));
        // first line is the reply preamble; the MLST fact line follows
        if (replyLines.hasMoreElements()) {
            replyLines.nextElement();
        } else {
            throw new FTPException(FTPException.UNSPECIFIED,
                "Expected multiline reply");
        }
        if (replyLines.hasMoreElements()) {
            String line = (String) replyLines.nextElement();
            return new MlsxEntry(line);
        } else {
            throw new FTPException(FTPException.UNSPECIFIED,
                "Expected multiline reply");
        }
    } catch (FTPReplyParseException rpe) {
        throw ServerException.embedFTPReplyParseException(rpe);
    } catch (UnexpectedReplyCodeException urce) {
        throw ServerException.embedUnexpectedReplyCodeException(
            urce, "Server refused MLST command");
    } catch (FTPException e) {
        // wrap any other protocol failure, preserving it as root cause
        ServerException ce = new ServerException(
            ClientException.UNSPECIFIED,
            "Could not create MlsxEntry");
        ce.setRootCause(e);
        throw ce;
    }
}
python
def next_frame_basic_recurrent():
  """Basic 2-frame recurrent model with stochastic tower."""
  # start from the stochastic-discrete baseline and shrink/retune it
  hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
  hparams.filter_double_steps = 2
  hparams.hidden_size = 64
  hparams.video_num_input_frames = 4
  hparams.video_num_target_frames = 4
  hparams.concat_internal_states = False
  # new hparams specific to the recurrent (LSTM) tower
  hparams.add_hparam("num_lstm_layers", 2)
  hparams.add_hparam("num_lstm_filters", 256)
  return hparams
java
/**
 * Opens the activated hyperlink URL in the system's default browser.
 *
 * @param e   the originating hyperlink event (not used here)
 * @param url the URL to open
 * @throws ApplicationException if the URL cannot be opened
 */
protected void handleUrlActivated(HyperlinkEvent e, URL url) {
    try {
        Desktop desktop = Desktop.getDesktop();
        desktop.browse(url.toURI());
    } catch (Exception ex) {
        throw new ApplicationException("Error handling URL " + url, ex);
    }
}
python
def infer(self, sequence, reset=True, sequenceNumber=None, burnIn=2,
          enableFeedback=True, apicalTiebreak=True,
          apicalModulationBasalThreshold=True, inertia=True):
    """
    Infer on a single given sequence.

    Sequence format:

    sequence = [
      set([16, 22, 32]),  # Position 0
      set([13, 15, 33])   # Position 1
    ]

    Parameters:
    ----------------------------
    @param   sequence (list)
             Sequence to infer, in the canonical format specified above

    @param   reset (bool)
             If set to True (which is the default value), the network will
             be reset after inference.

    @param   sequenceNumber (int)
             Number of the sequence (must match the number given during
             learning).

    @param   burnIn (int)
             Number of patterns to wait within a sequence before computing
             accuracy figures

    @param   enableFeedback (bool)
             If False, L2 is disabled and the L4 TM's apical dependence is
             turned off before inferring.

    @param   apicalTiebreak / apicalModulationBasalThreshold / inertia (bool)
             Toggles forwarded to the L4 TM / L2 pooler before inferring.
    """
    if enableFeedback is False:
        self._disableL2()
        self.network.regions["L4Column_0"].getSelf()._tm.disableApicalDependence = True
    else:
        self._enableL2()

    self._setLearningMode(l4Learning=False, l2Learning=False)

    if sequenceNumber is not None:
        if sequenceNumber not in self.objectL2Representations:
            raise ValueError("The provided sequence was not given during learning")

    # push the apical/inertia toggles down into the underlying algorithms
    self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalModulationBasalThreshold(apicalModulationBasalThreshold)
    self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalTiebreak(apicalTiebreak)
    self.network.regions["L2Column_0"].getSelf()._pooler.setUseInertia(inertia)

    L2Responses = []
    L4Responses = []
    L4Predicted = []
    activityTrace = numpy.zeros(len(sequence))
    totalActiveCells = 0
    totalPredictedActiveCells = 0
    for i, s in enumerate(sequence):
        self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
        self.network.run(1)
        activityTrace[i] = len(self.getL4Representations()[0])
        L4Responses.append(self.getL4Representations()[0])
        L4Predicted.append(self.getL4PredictedCells()[0])
        L2Responses.append(self.getL2Representations()[0])
        # accuracy figures only accumulate after the burn-in positions
        if i >= burnIn:
            totalActiveCells += len(self.getL4Representations()[0])
            totalPredictedActiveCells += len(self.getL4PredictedActiveCells()[0])

    if reset:
        # send reset signal
        self.sendReset()

    # NOTE(review): averages divide by the full sequence length even though
    # only positions >= burnIn contribute -- confirm this is intended.
    avgActiveCells = float(totalActiveCells) / len(sequence)
    avgPredictedActiveCells = float(totalPredictedActiveCells) / len(sequence)
    responses = {
        "L2Responses": L2Responses,
        "L4Responses": L4Responses,
        "L4Predicted": L4Predicted
    }
    return avgActiveCells, avgPredictedActiveCells, activityTrace, responses
java
/**
 * Resolves the configured error page location for the given exception,
 * unwrapping an {@link EJBException} to its cause first.
 *
 * @param exception the thrown exception
 * @return the error page location, or {@code null} if none is configured
 */
private String findErrorPage(Throwable exception) {
    Throwable effective = exception;
    if (effective instanceof EJBException && effective.getCause() != null) {
        effective = effective.getCause();
    }
    return WebXml.INSTANCE.findErrorPageLocation(effective);
}
java
/**
 * Checks whether logging is enabled for the error level supplied by the
 * provider, on a logger derived from the given class.
 *
 * @param aLoggingClass class whose logger is consulted; may not be null
 * @param aErrorLevelProvider supplies the error level to check; may not be null
 * @return {@code true} if that level is enabled on the class' logger
 */
public static boolean isEnabled (@Nonnull final Class <?> aLoggingClass, @Nonnull final IHasErrorLevel aErrorLevelProvider)
{
  return isEnabled (LoggerFactory.getLogger (aLoggingClass), aErrorLevelProvider.getErrorLevel ());
}
java
public void addObjectResult(ObjectResult objResult) { m_objResultList.add(objResult); // Elevate batch-level status to warning if needed. Status batchResult = getStatus(); if (batchResult == Status.OK && objResult.getStatus() != ObjectResult.Status.OK) { setStatus(Status.WARNING); } if (objResult.isUpdated()) { setHasUpdates(true); } }
python
def is_descendant_of_bank(self, id_, bank_id):
    """Tests if an ``Id`` is a descendant of a bank.

    arg:    id (osid.id.Id): an ``Id``
    arg:    bank_id (osid.id.Id): the ``Id`` of a bank
    return: (boolean) - ``true`` if the ``id`` is a descendant of the
            ``bank_id,``  ``false`` otherwise
    raise:  NotFound - ``bank_id`` not found
    raise:  NullArgument - ``bank_id`` or ``id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` is not found return ``false``.

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_descendant_of_bin
    session = self._catalog_session
    if session is not None:
        return session.is_descendant_of_catalog(id_=id_, catalog_id=bank_id)
    return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bank_id)
python
def is_symbol_wildcard(term: Any) -> bool:
    """Return True iff the given term is a subclass of :class:`.Symbol`."""
    if not isinstance(term, type):
        return False
    return issubclass(term, Symbol)
java
/**
 * Disables the specified job asynchronously, invoking the callback when
 * the service call completes.
 *
 * @param jobId the id of the job to disable
 * @param disableTasks what to do with active tasks associated with the job
 * @param serviceCallback the callback notified of success or failure
 * @return the {@link ServiceFuture} tracking the request
 */
public ServiceFuture<Void> disableAsync(String jobId, DisableJobOption disableTasks, final ServiceCallback<Void> serviceCallback) {
    return ServiceFuture.fromHeaderResponse(disableWithServiceResponseAsync(jobId, disableTasks), serviceCallback);
}
python
def name(self):
    """The data type name."""
    if self._name is not None:
        return self._name
    # Derive a human-readable name by splitting the CamelCase class name
    # into space-separated words, e.g. "MyDataType" -> "My Data Type".
    return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
python
def get_veto_segs(workflow, ifo, category, start_time, end_time, out_dir,
                  veto_gen_job, tags=None, execute_now=False):
    """
    Obtain veto segments for the selected ifo and veto category and add the job
    to generate this to the workflow.

    Parameters
    -----------
    workflow: pycbc.workflow.core.Workflow
        An instance of the Workflow class that manages the workflow.
    ifo : string
        The string describing the ifo to generate vetoes for.
    category : int
        The veto category to generate vetoes for.
    start_time : gps time (either int/LIGOTimeGPS)
        The time at which to begin searching for segments.
    end_time : gps time (either int/LIGOTimeGPS)
        The time at which to stop searching for segments.
    out_dir : path
        The directory in which output will be stored.
    vetoGenJob : Job
        The veto generation Job class that will be used to create the Node.
    tag : string, optional (default=None)
        Use this to specify a tag. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
        This is also used to tag the Files returned by the class to uniqueify
        the Files and uniqueify the actual filename.
        FIXME: Filenames may not be unique with current codes!
    execute_now : boolean, optional
        If true, jobs are executed immediately. If false, they are added to
        the workflow to be run later.

    Returns
    --------
    veto_def_file : pycbc.workflow.core.SegFile
        The workflow File object corresponding to this DQ veto file.
    """
    if tags is None:
        tags = []
    seg_valid_seg = segments.segment([start_time,end_time])
    # FIXME: This job needs an internet connection and X509_USER_PROXY
    #        For internet connection, it may need a headnode (ie universe local)
    #        For X509_USER_PROXY, I don't know what pegasus is doing
    node = Node(veto_gen_job)
    node.add_opt('--veto-categories', str(category))
    node.add_opt('--ifo-list', ifo)
    node.add_opt('--gps-start-time', str(start_time))
    node.add_opt('--gps-end-time', str(end_time))
    # output name encodes ifo, category, optional tags and GPS span
    if tags:
        veto_xml_file_name = "%s-VETOTIME_CAT%d_%s-%d-%d.xml" \
            %(ifo, category, '_'.join(tags), start_time,
              end_time-start_time)
    else:
        veto_xml_file_name = "%s-VETOTIME_CAT%d-%d-%d.xml" \
            %(ifo, category, start_time, end_time-start_time)
    veto_xml_file_path = os.path.abspath(os.path.join(out_dir,
                                                      veto_xml_file_name))
    curr_url = urlparse.urlunparse(['file', 'localhost',
                                    veto_xml_file_path, None, None, None])
    if tags:
        curr_tags = tags + ['VETO_CAT%d' %(category)]
    else:
        curr_tags = ['VETO_CAT%d' %(category)]

    if file_needs_generating(veto_xml_file_path, workflow.cp, tags=tags):
        if execute_now:
            # run the node right away and wrap its output file
            workflow.execute_node(node, verbatim_exe = True)
            veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,
                                                     tags=curr_tags,
                                                     valid_segment=seg_valid_seg)
        else:
            # defer: register the expected output and queue the node
            veto_xml_file = SegFile(ifo, 'SEGMENTS', seg_valid_seg,
                                    file_url=curr_url, tags=curr_tags)
            node._add_output(veto_xml_file)
            workflow.add_node(node)
    else:
        # file already up to date: mark the node as executed and detach
        # its outputs before loading the existing XML
        node.executed = True
        for fil in node._outputs:
            fil.node = None
        veto_xml_file = SegFile.from_segment_xml(veto_xml_file_path,
                                                 tags=curr_tags,
                                                 valid_segment=seg_valid_seg)
    return veto_xml_file
java
/**
 * Copies all characters from the reader to the output stream, encoding
 * them as UTF-8.
 *
 * <p>The reader is always closed. The output stream is left open for the
 * caller, but the encoder is flushed so no buffered bytes are lost (the
 * original implementation never flushed the {@link OutputStreamWriter},
 * silently dropping the tail of the data).
 *
 * @param outs destination stream; not closed by this method
 * @param reader source of characters; closed by this method
 * @throws IOException if reading or writing fails
 */
public static void copyReader(OutputStream outs, Reader reader) throws IOException {
    try {
        OutputStreamWriter writer = new OutputStreamWriter(outs, StandardCharsets.UTF_8);
        char[] buffer = new char[1024];
        int read = reader.read(buffer);
        while (read > 0) {
            writer.write(buffer, 0, read);
            read = reader.read(buffer);
        }
        // Flush the encoder's internal buffer without closing outs.
        writer.flush();
    } finally {
        reader.close();
    }
}
java
/**
 * Computes the torsion (dihedral) relation for the four points a-b-c-d.
 *
 * <p>The two plane normals n1 (from a,b,c) and n2 (from b,c,d) are
 * oriented consistently using scalar triple products, normalized, and
 * their dot product is returned.
 *
 * <p>NOTE(review): despite the name, this returns the dot product of the
 * unit normals (i.e. the cosine of the dihedral angle), not an angle in
 * radians -- confirm callers expect this.
 */
public double getTorsionAngle(Point3d a, Point3d b, Point3d c, Point3d d) {
    Vector3d ab = new Vector3d(a.x - b.x, a.y - b.y, a.z - b.z);
    Vector3d cb = new Vector3d(c.x - b.x, c.y - b.y, c.z - b.z);
    Vector3d dc = new Vector3d(d.x - c.x, d.y - c.y, d.z - c.z);
    Vector3d bc = new Vector3d(b.x - c.x, b.y - c.y, b.z - c.z);
    Vector3d n1 = new Vector3d();
    Vector3d n2 = new Vector3d();
    n1.cross(ab, cb);
    // flip the first normal when the triple product indicates the wrong side
    if (getSpatproduct(ab, cb, n1) > 0) {
        n1.cross(cb, ab);
    }
    n1.normalize();
    n2.cross(dc, bc);
    // orient the second normal the opposite way
    if (getSpatproduct(dc, bc, n2) < 0) {
        n2.cross(bc, dc);
    }
    n2.normalize();
    return n1.dot(n2);
}
python
def base_query(cls, db_session=None):
    """
    Return the base query for this service's model.

    :param db_session: optional session; falls back to the default session
        when not provided
    :return: query over ``cls.model``
    """
    session = get_db_session(db_session)
    return session.query(cls.model)
java
/**
 * Creates a new {@link Query} for the given database, SQL string and
 * positional arguments.
 *
 * @param db   the database handle to run against
 * @param sql  the SQL text, possibly containing placeholders
 * @param args positional arguments bound to the placeholders
 * @return a new Query instance
 */
@SafeVarargs
public static Query query(DB db, String sql, Object... args) {
    return new Query(db, sql, args);
}
python
def _encode_mapping(mapping, f):
    """Encodes the mapping items in lexical order (spec)"""
    f.write(_TYPE_DICT)
    # sorting the keys alone gives the same lexical order as sorting items
    for key in sorted(mapping):
        _encode_buffer(key, f)
        bencode(mapping[key], f)
    f.write(_TYPE_END)
python
def decode_bu64(b):
    """Decode bytes from the URL safe flavor of Base64 used by JWTs.

    - Reverse of encode_bu64().
    (The previous docstring incorrectly said "Encode".)

    Args:
      b: bytes
        URL safe Base64 encoded bytes to decode.

    Returns:
      bytes: Decoded bytes.

    Raises:
      ValueError: If the input length is invalid for Base64
        (length % 4 == 1).
    """
    s = b
    # Map the URL safe alphabet back to the standard Base64 alphabet.
    s = s.replace(b'-', b'+')
    s = s.replace(b'_', b'/')
    # Restore the '=' padding that Base64url strips.
    p = len(s) % 4
    if p == 0:
        pass
    elif p == 2:
        s += b'=='
    elif p == 3:
        s += b'='
    else:
        raise ValueError('Illegal Base64url string')
    return base64.standard_b64decode(s)
java
/**
 * Terminates every outbound connection whose endpoint belongs to the
 * given chain: all conversations are fast-closed, the physical
 * connections purged, the associated virtual connection factory
 * destroyed, and the endpoint removed from the endpoint-to-group map.
 *
 * <p>Individual close/purge failures are logged and skipped so that one
 * bad connection cannot prevent the remaining ones from being cleaned up.
 *
 * @param chainName name of the chain whose connections are terminated
 * @throws Exception if iterating/cleaning the endpoint map fails
 */
public void terminateConnectionsAssociatedWithChain(String chainName) throws Exception {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        Tr.entry(this, tc, "terminateConnectionsAssociatedWithChain", chainName);

    synchronized (endPointToGroupMap) {
        try {
            // iterate over a clone since cleanup mutates the live map
            final HashMap<EndPointDescriptor, ConnectionDataGroup> endPointToGroupMapClone = (HashMap<EndPointDescriptor, ConnectionDataGroup>) endPointToGroupMap.clone();
            Iterator<EndPointDescriptor> it = endPointToGroupMapClone.keySet().iterator();
            while (it.hasNext()) {
                EndPointDescriptor ed = it.next();
                if ((((HostPortEndPointDescriptor) ed).chainName).equals(chainName)) {
                    ConnectionDataGroup cdGroup = endPointToGroupMapClone.get(ed);
                    final List connectionData = cdGroup.getConnections();
                    for (final Iterator connectionDataIterator = connectionData.iterator(); connectionDataIterator.hasNext();) {
                        final ConnectionData thisConnectionData = (ConnectionData) connectionDataIterator.next();
                        final OutboundConnection oc = thisConnectionData.getConnection();

                        //close all the conservations associated with the OC
                        Conversation[] conv = oc.getConversations();
                        for (Conversation c : conv) {
                            try {
                                c.fastClose();
                            } catch (Exception e) {//Don't let the exception mess up closing other remaining conservations
                                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                    SibTr.debug(tc, "Error while fast closing the conversation", e);
                            }
                        }
                        try {
                            // terminate the physical connection
                            purgeFromInvalidateImpl(oc, false);
                        } catch (Exception e) {//Don't let the exception mess up closing other remaining connection
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(tc, "Error while purging the physical connection", e);
                        }
                    }
                    try {
                        // destroy the OutboundVirtualConnection associated with the chainName
                        cdGroup.getNetworkConnectionFactory().getOutboundVirtualConFactory().destroy();
                    } catch (Exception e) {//Don't let the exception mess up closing other remaining connection
                        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                            SibTr.debug(tc, "Error while destroying the outbound virtual connection", e);
                    }
                    // in fastClose() the entry is removed but still we do a remove
                    endPointToGroupMap.remove(ed);
                }
            }
        } catch (Exception e) {
            throw e;
        } finally {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                Tr.exit(this, tc, "terminateConnectionsAssociatedWithChain");
        }
    }
}
java
/**
 * Restores this value to its previously saved ("old") state, dispatching
 * on the attribute's Tango data type. Types not listed here are left
 * untouched.
 */
void rollback() {
    switch (data_type) {
        case Tango_DEV_BOOLEAN :
            bool_val = old_bool_val;
            break;
        case Tango_DEV_SHORT :
            short_val = old_short_val;
            break;
        case Tango_DEV_LONG :
            long_val = old_long_val;
            break;
        case Tango_DEV_LONG64 :
            long64_val = old_long64_val;
            break;
        case Tango_DEV_DOUBLE :
            double_val = old_double_val;
            break;
        case Tango_DEV_STRING :
            str_val = old_str_val;
            break;
    }
}
java
/**
 * Decodes an SMB2 CREATE response body from the wire format.
 *
 * <p>Reads the fixed 89-byte structure (oplock level, flags, create
 * action, timestamps, sizes, attributes and the 16-byte file id), then
 * walks the optional chained create-context blocks.
 *
 * @param buffer raw packet bytes
 * @param bufferIndex offset of the response body within {@code buffer}
 * @return number of bytes consumed
 * @throws SMBProtocolDecodingException if the structure size is not 89
 */
@Override
protected int readBytesWireFormat ( byte[] buffer, int bufferIndex ) throws SMBProtocolDecodingException {
    int start = bufferIndex;

    int structureSize = SMBUtil.readInt2(buffer, bufferIndex);
    if ( structureSize != 89 ) {
        throw new SMBProtocolDecodingException("Structure size is not 89");
    }

    this.oplockLevel = buffer[ bufferIndex + 2 ];
    this.openFlags = buffer[ bufferIndex + 3 ];
    bufferIndex += 4;

    this.createAction = SMBUtil.readInt4(buffer, bufferIndex);
    bufferIndex += 4;

    // four 8-byte FILETIME fields follow
    this.creationTime = SMBUtil.readTime(buffer, bufferIndex);
    bufferIndex += 8;
    this.lastAccessTime = SMBUtil.readTime(buffer, bufferIndex);
    bufferIndex += 8;
    this.lastWriteTime = SMBUtil.readTime(buffer, bufferIndex);
    bufferIndex += 8;
    this.changeTime = SMBUtil.readTime(buffer, bufferIndex);
    bufferIndex += 8;

    this.allocationSize = SMBUtil.readInt8(buffer, bufferIndex);
    bufferIndex += 8;
    this.endOfFile = SMBUtil.readInt8(buffer, bufferIndex);
    bufferIndex += 8;

    this.fileAttributes = SMBUtil.readInt4(buffer, bufferIndex);
    bufferIndex += 4;
    bufferIndex += 4; // Reserved2

    System.arraycopy(buffer, bufferIndex, this.fileId, 0, 16);
    bufferIndex += 16;

    // optional chained create contexts, located relative to the header
    int createContextOffset = SMBUtil.readInt4(buffer, bufferIndex);
    bufferIndex += 4;
    int createContextLength = SMBUtil.readInt4(buffer, bufferIndex);
    bufferIndex += 4;

    if ( createContextOffset > 0 && createContextLength > 0 ) {
        List<CreateContextResponse> contexts = new LinkedList<>();
        int createContextStart = getHeaderStart() + createContextOffset;
        int next = 0;
        do {
            int cci = createContextStart;
            // 'next' is the offset to the following chained context (0 = last)
            next = SMBUtil.readInt4(buffer, cci);
            cci += 4;

            int nameOffset = SMBUtil.readInt2(buffer, cci);
            int nameLength = SMBUtil.readInt2(buffer, cci + 2);
            cci += 4;

            int dataOffset = SMBUtil.readInt2(buffer, cci + 2);
            cci += 4;
            int dataLength = SMBUtil.readInt4(buffer, cci);
            cci += 4;

            byte[] nameBytes = new byte[nameLength];
            System.arraycopy(buffer, createContextStart + nameOffset, nameBytes, 0, nameBytes.length);
            cci = Math.max(cci, createContextStart + nameOffset + nameLength);

            // instantiate a context for known names; unknown names are skipped
            CreateContextResponse cc = createContext(nameBytes);
            if ( cc != null ) {
                cc.decode(buffer, createContextStart + dataOffset, dataLength);
                contexts.add(cc);
            }

            cci = Math.max(cci, createContextStart + dataOffset + dataLength);

            if ( next > 0 ) {
                createContextStart += next;
            }
            bufferIndex = Math.max(bufferIndex, cci);
        }
        while ( next > 0 );
        this.createContexts = contexts.toArray(new CreateContextResponse[0]);
    }

    if ( log.isDebugEnabled() ) {
        log.debug("Opened " + this.fileName + ": " + Hexdump.toHexString(this.fileId));
    }

    return bufferIndex - start;
}
java
/**
 * Populates the Sonar analysis properties used for a review run:
 * restricts analysis to the changed files, disables SCM and
 * issue-assignment plugins, and sets output locations and verbosity.
 *
 * @param props mutable property map to fill in
 */
@VisibleForTesting
void setAdditionalProperties(Map<String, String> props) {
    // analyse only the files under review
    props.put(SonarProperties.INCLUDE_FILES, StringUtils.join(files, ", "));
    // SCM and issue-assignment integrations are irrelevant for a review run
    props.put(SonarProperties.SCM_ENABLED, "false");
    props.put(SonarProperties.SCM_STAT_ENABLED, "false");
    props.put(SonarProperties.ISSUEASSIGN_PLUGIN, "false");
    // report/output locations and verbosity
    props.put(SonarProperties.EXPORT_PATH, OUTPUT_FILE);
    props.put(SonarProperties.VERBOSE, configuration.getProperty(GeneralOption.SONAR_VERBOSE));
    props.put(SonarProperties.WORKDIR, OUTPUT_DIR);
    // analyse the current directory as the project base
    props.put(SonarProperties.PROJECT_BASEDIR, ".");
    props.put(SonarProperties.SOURCES, ".");
}
java
/**
 * Combines two operands with a logical AND. Returns {@code null} when
 * either side is missing.
 *
 * @param leftExpression  the left operand, may be {@code null}
 * @param rightExpression the right operand, may be {@code null}
 * @return an AND operand over both expressions, or {@code null}
 */
@Override
protected Operand createAndExpression(final Operand leftExpression, final Operand rightExpression) {
    if (leftExpression == null) {
        return null;
    }
    if (rightExpression == null) {
        return null;
    }
    final Set<Operand> operands = new HashSet<>();
    operands.add(leftExpression);
    operands.add(rightExpression);
    return new Operand(Operator.AND, operands);
}
python
def insert_file(self, f, namespace, timestamp):
    """Store a file's metadata plus its raw contents in the doc dict,
    keyed by the file's ``_id``.
    """
    metadata = f.get_metadata()
    metadata["content"] = f.read()
    entry = Entry(doc=metadata, ns=namespace, ts=timestamp)
    self.doc_dict[f._id] = entry
python
def get_graphviz_dirtree(self, engine="automatic", **kwargs):
    """Build a directory graph (DOT language) of the files and directories
    under the node workdir.

    Returns:
        graphviz.Digraph
        <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
    """
    # "automatic" is an alias for the fdp layout engine.
    chosen_engine = "fdp" if engine == "automatic" else engine
    return Dirviz(self.workdir).get_cluster_graph(engine=chosen_engine, **kwargs)
python
def wrapper(self, updateParams=None):
    """Return a decorator for Flask view functions that attaches the
    configured security headers to every response it produces.
    """
    def decorator(view):
        # Headers are resolved once at decoration time, not per request.
        resolved_headers = self._getHeaders(updateParams)

        @wraps(view)
        def with_headers(*args, **kwargs):
            response = make_response(view(*args, **kwargs))
            self._setRespHeader(response, resolved_headers)
            response.has_secure_headers = True
            return response

        return with_headers

    return decorator
python
def _stable_names(self): ''' This private method extracts the element names from stable_el. Note that stable_names is a misnomer as stable_el also contains unstable element names with a number 999 for the *stable* mass numbers. (?!??) ''' stable_names=[] for i in range(len(self.stable_el)): stable_names.append(self.stable_el[i][0]) self.stable_names=stable_names
java
/**
 * Creates a filter that applies the given label transformer to matched cells.
 *
 * @param label the label to apply; must not be null
 * @return a simple filter wrapping the label-transformer row filter
 */
public Filter label(@Nonnull String label) {
    Preconditions.checkNotNull(label);
    return new SimpleFilter(
        RowFilter.newBuilder()
            .setApplyLabelTransformer(label)
            .build());
}
java
@Override public void onActivityDestroyed(Activity activity) { HMSAgentLog.d("onDestroyed:" + StrUtils.objDesc(activity)); removeActivity(activity); // activity onDestroyed 事件回调 | Activity Ondestroyed Event Callback List<IActivityDestroyedCallback> tmdCallbacks = new ArrayList<IActivityDestroyedCallback>(destroyedCallbacks); for (IActivityDestroyedCallback callback : tmdCallbacks) { callback.onActivityDestroyed(activity, getLastActivityInner()); } }
python
def get_installed_distributions(local_only=True,
                                skip=stdlib_pkgs,
                                include_editables=True,
                                editables_only=False,
                                user_only=False):
    # type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs

    If ``include_editables`` is False, don't report editables.

    If ``editables_only`` is True , only report editables.

    If ``user_only`` is True , only report installations in the user
    site directory.
    """
    def always(_d):
        # Predicate used whenever a particular filter is disabled.
        return True

    local_test = dist_is_local if local_only else always

    if include_editables:
        editable_test = always
    else:
        def editable_test(d):
            return not dist_is_editable(d)

    editables_only_test = dist_is_editable if editables_only else always
    user_test = dist_in_usersite if user_only else always

    # because of pkg_resources vendoring, mypy cannot find stub in typeshed
    return [d for d in pkg_resources.working_set  # type: ignore
            if local_test(d)
            and d.key not in skip
            and editable_test(d)
            and editables_only_test(d)
            and user_test(d)]
python
def tree_attribute(identifier):
    """
    Predicate that returns True for custom attributes added to AttrTrees
    that are not methods, properties or internal attributes.

    These custom attributes start with a capitalized character when
    applicable (not applicable to underscore or certain unicode
    characters).
    """
    first = identifier[0]
    # Characters with no uppercase form (e.g. many unicode symbols) cannot
    # follow the capitalization convention; accept them unless they mark an
    # internal attribute with a leading underscore.  The original compared a
    # bool with `is False`, which PEP 8 discourages; `not` is equivalent here.
    if not first.upper().isupper() and first != '_':
        return True
    return first.isupper()
python
def in_place(
        self,
        mode='r',
        buffering=-1,
        encoding=None,
        errors=None,
        newline=None,
        backup_extension=None,
):
    """
    A context in which a file may be re-written in-place with
    new content.

    Yields a tuple of :samp:`({readable}, {writable})` file
    objects, where `writable` replaces `readable`.

    If an exception occurs, the old file is restored, removing the
    written data.

    Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
    read-only-modes are allowed. A :exc:`ValueError` is raised
    on invalid modes.

    For example, to add line numbers to a file::

        p = Path(filename)
        assert p.isfile()
        with p.in_place() as (reader, writer):
            for number, line in enumerate(reader, 1):
                writer.write('{0:3}: '.format(number))
                writer.write(line)

    Thereafter, the file at `filename` will have line numbers in it.
    """
    # NOTE(review): this is a generator that yields once — presumably wrapped
    # with @contextlib.contextmanager at the (unseen) decoration site; confirm.
    import io

    if set(mode).intersection('wa+'):
        raise ValueError('Only read-only file modes can be used')

    # move existing file to backup, create new file with same permissions
    # borrowed extensively from the fileinput module
    backup_fn = self + (backup_extension or os.extsep + 'bak')
    try:
        # remove any stale backup from a previous run
        os.unlink(backup_fn)
    except os.error:
        pass
    os.rename(self, backup_fn)
    # read from the backup; the original path becomes the write target
    readable = io.open(
        backup_fn,
        mode,
        buffering=buffering,
        encoding=encoding,
        errors=errors,
        newline=newline,
    )
    try:
        # preserve the original file's permission bits on the replacement
        perm = os.fstat(readable.fileno()).st_mode
    except OSError:
        # no fstat available (or it failed): fall back to a plain open,
        # accepting default permissions on the new file
        writable = open(
            self,
            'w' + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
    else:
        os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
        if hasattr(os, 'O_BINARY'):
            # Windows: avoid newline translation at the OS level
            os_mode |= os.O_BINARY
        fd = os.open(self, os_mode, perm)
        writable = io.open(
            fd,
            "w" + mode.replace('r', ''),
            buffering=buffering,
            encoding=encoding,
            errors=errors,
            newline=newline,
        )
        try:
            if hasattr(os, 'chmod'):
                # re-apply permissions; os.open's mode is filtered by umask
                os.chmod(self, perm)
        except OSError:
            pass
    try:
        yield readable, writable
    except Exception:
        # move backup back: discard the partially-written file and restore
        # the original content before re-raising
        readable.close()
        writable.close()
        try:
            os.unlink(self)
        except os.error:
            pass
        os.rename(backup_fn, self)
        raise
    else:
        readable.close()
        writable.close()
    finally:
        # success or failure, the backup file must not linger
        try:
            os.unlink(backup_fn)
        except os.error:
            pass
java
/**
 * Builds the HTTP POST request used to send an index command to the server.
 *
 * @param serverDetails target server connection details
 * @param command       the index command supplying URI parameters and post data
 * @return a configured POST request carrying the command's post data
 * @throws URISyntaxException if the command URI cannot be constructed
 */
private HttpUriRequest createPostMethod(final ServerDetails serverDetails, final IndexCommand command)
        throws URISyntaxException {
    LOGGER.trace("createPostMethod() called...");
    final HttpPost post = new HttpPost(createIndexCommandURI(serverDetails, command));
    post.setEntity(new PostDataHttpEntity(command.getPostData()));
    return post;
}
java
/**
 * Marshalls the given {@code SMSMessage} into the protocol representation by
 * emitting each member through its generated field binding.
 *
 * @param sMSMessage         the value object to marshall; must not be null
 * @param protocolMarshaller the marshaller receiving each field/binding pair
 * @throws SdkClientException if the argument is null or any field fails to marshall
 */
public void marshall(SMSMessage sMSMessage, ProtocolMarshaller protocolMarshaller) {
    if (sMSMessage == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        // One call per field, pairing the getter with its generated binding.
        protocolMarshaller.marshall(sMSMessage.getBody(), BODY_BINDING);
        protocolMarshaller.marshall(sMSMessage.getKeyword(), KEYWORD_BINDING);
        protocolMarshaller.marshall(sMSMessage.getMessageType(), MESSAGETYPE_BINDING);
        protocolMarshaller.marshall(sMSMessage.getOriginationNumber(), ORIGINATIONNUMBER_BINDING);
        protocolMarshaller.marshall(sMSMessage.getSenderId(), SENDERID_BINDING);
        protocolMarshaller.marshall(sMSMessage.getSubstitutions(), SUBSTITUTIONS_BINDING);
    } catch (Exception e) {
        // Wrap any failure (including runtime errors) in the single
        // client-side exception type callers expect, preserving the cause.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
/**
 * Zips three {@code LongIterator}s element-wise into an object stream.
 *
 * <p>The resulting stream is as long as the shortest input: iteration stops
 * as soon as any iterator is exhausted. Elements are consumed lazily, one
 * triple per {@code next()} call.</p>
 *
 * @param a first source of long values
 * @param b second source of long values
 * @param c third source of long values
 * @param zipFunction combiner applied to each triple of values
 * @return a lazy stream of combined results
 */
public static <R> Stream<R> zip(final LongIterator a, final LongIterator b, final LongIterator c,
        final LongTriFunction<R> zipFunction) {
    return new IteratorStream<>(new ObjIteratorEx<R>() {
        @Override
        public boolean hasNext() {
            // A triple exists only while all three inputs have values remaining.
            return a.hasNext() && b.hasNext() && c.hasNext();
        }

        @Override
        public R next() {
            return zipFunction.apply(a.nextLong(), b.nextLong(), c.nextLong());
        }
    });
}
python
def sendEmail(self, emails, mass_type='SingleEmailMessage'):
    """
    Send one or more emails from Salesforce.

    Parameters:
        emails - a dictionary or list of dictionaries, each representing
                 a single email as described by https://www.salesforce.com
                 /us/developer/docs/api/Content/sforce_api_calls_sendemail
                 .htm
        massType - 'SingleEmailMessage' or 'MassEmailMessage'.
                   MassEmailMessage is used for mailmerge of up to 250
                   recipients in a single pass.

    Note:
        Newly created Salesforce Sandboxes default to System email only.
        In this situation, sendEmail() will fail with
        NO_MASS_MAIL_PERMISSION.

    Returns a list of dicts, one per email, each with a boolean 'success'
    key and an 'errors' list (empty on success).
    """
    preparedEmails = _prepareSObjects(emails)
    if isinstance(preparedEmails, dict):
        # If root element is a dict, then this is a single object not an
        # array; strip the fieldsToNull key the prep step added.
        del preparedEmails['fieldsToNull']
    else:
        # else this is an array, and each element should be prepped.
        for listitems in preparedEmails:
            del listitems['fieldsToNull']
    res = BaseClient.sendEmail(self, preparedEmails, mass_type)
    # Normalize single-result responses to a list (TupleType/ListType are
    # the Python-2-era aliases for tuple/list from the `types` module).
    if type(res) not in (TupleType, ListType):
        res = [res]
    data = list()
    for resu in res:
        d = dict()
        data.append(d)
        d['success'] = success = _bool(resu[_tPartnerNS.success])
        if not success:
            # NOTE(review): the trailing comma in `_tPartnerNS.errors,`
            # looks like the SOAP library's tuple-index idiom for selecting
            # all matching child elements — confirm against the client lib.
            d['errors'] = [_extractError(e)
                           for e in resu[_tPartnerNS.errors,]]
        else:
            d['errors'] = list()
    return data
java
/**
 * Gets the appPlatform value for this conversion.
 *
 * @return the stored app platform; may be {@code null} if never set
 */
public com.google.api.ads.adwords.axis.v201809.cm.AppConversionAppPlatform getAppPlatform() {
    return appPlatform;
}
java
/**
 * Gets the builtInCompanionDeliveryOption value.
 *
 * @return the stored companion delivery option; may be {@code null} if never set
 */
public com.google.api.ads.admanager.axis.v201805.CompanionDeliveryOption getBuiltInCompanionDeliveryOption() {
    return builtInCompanionDeliveryOption;
}
python
def RV_1(self):
    """Instantaneous RV of star 1 with respect to system center-of-mass"""
    orbits = self.orbpop_long
    # Mass-ratio weighting; grouping matches the original expression exactly.
    mass_fraction = orbits.M2 / (orbits.M1 + orbits.M2)
    return orbits.RV * mass_fraction
java
/**
 * Removes the association between {@code source} and {@code referenceToUnlink}
 * for the given collection descriptor.
 *
 * @param source            the owning object of the collection
 * @param cds               descriptor of the collection relation
 * @param referenceToUnlink the referenced object to detach
 */
public void unlink(Object source, CollectionDescriptor cds, Object referenceToUnlink)
{
    if(!cds.isMtoNRelation())
    {
        // 1:n relation — clear the foreign key on the referenced object.
        ClassDescriptor refDescriptor = m_broker.getClassDescriptor(referenceToUnlink.getClass());
        m_broker.unlinkFK(referenceToUnlink, refDescriptor, cds);
        return;
    }
    // m:n relation — remove the indirection-table entry.
    m_broker.deleteMtoNImplementor(new MtoNImplementor(cds, source, referenceToUnlink));
}
java
public final boolean isAccepted( String mimeType ) { if (mimeType != null && hasAcceptedMimeTypes()) { return getAcceptedMimeTypes().contains(mimeType.trim()); } return true; // accept all mime types }