java
@NotNull
public static <T> Single<T> from(@NotNull final ApolloStoreOperation<T> operation) {
    checkNotNull(operation, "operation == null");
    return Single.create(new Single.OnSubscribe<T>() {
        @Override
        public void call(final SingleSubscriber<? super T> subscriber) {
            operation.enqueue(new ApolloStoreOperation.Callback<T>() {
                @Override
                public void onSuccess(T result) {
                    subscriber.onSuccess(result);
                }

                @Override
                public void onFailure(Throwable t) {
                    subscriber.onError(t);
                }
            });
        }
    });
}
java
public void marshall(OutputArtifact outputArtifact, ProtocolMarshaller protocolMarshaller) {
    if (outputArtifact == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(outputArtifact.getName(), NAME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public boolean merge(final PluginXmlAccess other) {
    boolean _xblockexpression = false;
    {
        String _path = this.getPath();
        String _path_1 = other.getPath();
        boolean _notEquals = (!Objects.equal(_path, _path_1));
        if (_notEquals) {
            String _path_2 = this.getPath();
            String _plus = ("Merging plugin.xml files with different paths: " + _path_2);
            String _plus_1 = (_plus + ", ");
            String _path_3 = other.getPath();
            String _plus_2 = (_plus_1 + _path_3);
            PluginXmlAccess.LOG.warn(_plus_2);
        }
        _xblockexpression = this.entries.addAll(other.entries);
    }
    return _xblockexpression;
}
java
@Override
@SuppressWarnings("unchecked")
public <T> Future<T> submit(Callable<T> task) {
    Map<String, String> execProps = getExecutionProperties(task);
    ThreadContextDescriptor contextDescriptor;
    if (task instanceof ContextualAction) {
        ContextualAction<Callable<T>> a = (ContextualAction<Callable<T>>) task;
        contextDescriptor = a.getContextDescriptor();
        task = a.getAction();
    } else {
        WSContextService contextSvc = getContextService();
        contextDescriptor = contextSvc.captureThreadContext(execProps);
    }
    TaskLifeCycleCallback callback = new TaskLifeCycleCallback(this, contextDescriptor);
    return callback.policyExecutor.submit(task, callback);
}
java
private void populateValues(GriddedTile griddedTile, TImage image,
        Double[][] leftLastColumns, Double[][] topLeftRows, Double[][] topRows,
        int minX, int maxX, int minY, int maxY, Double[][] values) {
    // Iterate from the bottom-right corner back to the top-left; a null value
    // clears the local reference, which also terminates the outer loop.
    for (int yLocation = maxY; values != null && yLocation >= minY; yLocation--) {
        for (int xLocation = maxX; xLocation >= minX; xLocation--) {
            Double value = getValueOverBorders(griddedTile, image, leftLastColumns,
                    topLeftRows, topRows, xLocation, yLocation);
            if (value == null) {
                values = null;
                break;
            } else {
                values[yLocation - minY][xLocation - minX] = value;
            }
        }
    }
}
java
@Override
public FileBaseStatistics getStatistics(BaseStatistics cachedStats) throws IOException {
    final FileBaseStatistics stats = super.getStatistics(cachedStats);
    return stats == null ? null :
        new FileBaseStatistics(stats.getLastModificationTime(), stats.getTotalInputSize(), this.recordLength);
}
java
public ComputeNodeGetRemoteLoginSettingsResult getComputeNodeRemoteLoginSettings(String poolId, String nodeId)
        throws BatchErrorException, IOException {
    return getComputeNodeRemoteLoginSettings(poolId, nodeId, null);
}
python
def get_commit_if_possible(filename):
    """Try to retrieve VCS information for a given file.

    Currently only supports git using the gitpython package.

    Parameters
    ----------
    filename : str

    Returns
    -------
    path: str
        The base path of the repository
    commit: str
        The commit hash
    is_dirty: bool
        True if there are uncommitted changes in the repository
    """
    # git (`opt` is a module-level helper tracking optional dependencies)
    if opt.has_gitpython:
        from git import Repo, InvalidGitRepositoryError
        try:
            directory = os.path.dirname(filename)
            repo = Repo(directory, search_parent_directories=True)
            try:
                path = repo.remote().url
            except ValueError:
                path = 'git:/' + repo.working_dir
            is_dirty = repo.is_dirty()
            commit = repo.head.commit.hexsha
            return path, commit, is_dirty
        except (InvalidGitRepositoryError, ValueError):
            pass
    return None, None, None
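A minimal usage sketch of the helper above — assuming it is importable along with its module context (the opt dependency flag) and that gitpython is installed; the filename is illustrative:
python
# Hypothetical demo: inspect the VCS state of this very script.
path, commit, is_dirty = get_commit_if_possible(__file__)
if commit is None:
    print("not inside a git repository (or gitpython missing)")
else:
    print("repo:", path)
    print("commit:", commit)
    print("dirty:", is_dirty)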
python
def get_outputs(self, merge_multi_context=True):
    """Gets outputs from a previous forward computation.

    Parameters
    ----------
    merge_multi_context : bool
        Default is ``True``. In the case when data-parallelism is used, the outputs
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like from a single
        executor.

    Returns
    -------
    list of NDArray or list of list of NDArray
        If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
        it is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the
        output elements are numpy arrays.
    """
    assert self.binded and self.params_initialized
    return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context)
python
def find_label(label, label_color, label_description):
    """Find label."""
    edit = None
    for name, values in label_list.items():
        color, description = values
        if isinstance(name, tuple):
            old_name = name[0]
            new_name = name[1]
        else:
            old_name = name
            new_name = name
        if label.lower() == old_name.lower():
            edit = LabelEdit(old_name, new_name, color, description)
            break
    return edit
python
def isScheduleValid(self, schedule, node_srvs, force) -> (bool, str):
    """
    Validates schedule of planned node upgrades

    :param schedule: dictionary of node ids and upgrade times
    :param node_srvs: dictionary of node ids and services
    :return: a 2-tuple of whether the schedule is valid and the reason
    """
    # The flag "force=True" ignores basic checks; only the datetime
    # format is checked
    times = []
    non_demoted_nodes = set([k for k, v in node_srvs.items() if v])
    if not force and set(schedule.keys()) != non_demoted_nodes:
        return False, 'Schedule should contain id of all nodes'
    now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
    for dateStr in schedule.values():
        try:
            when = dateutil.parser.parse(dateStr)
            if when <= now and not force:
                return False, '{} is less than current time'.format(when)
            times.append(when)
        except ValueError:
            return False, '{} cannot be parsed to a time'.format(dateStr)
    if force:
        return True, ''
    times = sorted(times)
    for i in range(len(times) - 1):
        diff = (times[i + 1] - times[i]).total_seconds()
        if diff < self.config.MinSepBetweenNodeUpgrades:
            return False, 'time span between upgrades is {} ' \
                          'seconds which is less than specified ' \
                          'in the config'.format(diff)
    return True, ''
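The spacing rule the validator enforces, reduced to a standalone sketch (the 300-second minimum is an assumed stand-in for config.MinSepBetweenNodeUpgrades):
python
from datetime import datetime, timedelta

MIN_SEP = 300  # hypothetical stand-in for config.MinSepBetweenNodeUpgrades

def spacing_ok(times, min_sep=MIN_SEP):
    # Sort the upgrade times and reject any adjacent pair closer than min_sep.
    times = sorted(times)
    return all((b - a).total_seconds() >= min_sep for a, b in zip(times, times[1:]))

now = datetime.utcnow()
print(spacing_ok([now, now + timedelta(minutes=10)]))  # True
print(spacing_ok([now, now + timedelta(seconds=30)]))  # False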
java
private void commit(boolean force) {
    if (_db == null) {
        return;
    }
    if (_transaction && !force) {
        return;
    }
    try {
        _db.commit();
    } catch (Exception e) {
        throw new IllegalStateException("Could not commit transaction", e);
    }
}
python
def get(self, name, defval=None):
    '''
    Retrieve a value from the closest scope frame.
    '''
    for frame in reversed(self.frames):
        valu = frame.get(name, s_common.novalu)
        if valu != s_common.novalu:
            return valu

    task = self.ctors.get(name)
    if task is not None:
        func, args, kwargs = task
        item = func(*args, **kwargs)
        self.frames[-1][name] = item
        return item

    return defval
python
def metastable_sets(self):
    """ Crisp clustering using PCCA. This is only recommended for visualization
    purposes. You *cannot* compute any actual quantity of the coarse-grained
    kinetics without employing the fuzzy memberships!

    Returns
    -------
    A list of length equal to metastable states. Each element is an array with
    microstate indexes contained in it
    """
    res = []
    assignment = self.metastable_assignment
    for i in range(self.m):
        res.append(np.where(assignment == i)[0])
    return res
python
def get_redirect_args(self, request, callback):
    "Get request parameters for redirect url."
    callback = request.build_absolute_uri(callback)
    args = {
        'client_id': self.provider.consumer_key,
        'redirect_uri': callback,
        'response_type': 'code',
    }
    state = self.get_application_state(request, callback)
    if state is not None:
        args['state'] = state
        request.session[self.session_key] = state
    return args
python
def customchain(**kwargsChain):
    """ This decorator allows you to access ``ctx.bitshares`` which is
    an instance of BitShares. But in contrast to @chain, this is a
    decorator that expects parameters that are directed right to
    ``BitShares()``.

    .. code-block:: python

        @main.command()
        @click.option("--worker", default=None)
        @click.pass_context
        @customchain(foo="bar")
        @unlock
        def list(ctx, worker):
            print(ctx.obj)

    """
    def wrap(f):
        @click.pass_context
        @verbose
        def new_func(ctx, *args, **kwargs):
            newoptions = ctx.obj
            newoptions.update(kwargsChain)
            ctx.bitshares = BitShares(**newoptions)
            ctx.blockchain = ctx.bitshares
            set_shared_bitshares_instance(ctx.bitshares)
            return ctx.invoke(f, *args, **kwargs)
        return update_wrapper(new_func, f)
    return wrap
java
@Override
void writeStreamBlob(OutputStream os, byte[] buffer, int rowOffset,
                     byte[] blobBuffer, PageServiceImpl tableService)
    throws IOException {
    int offset = rowOffset + offset();

    int blobLen = BitsUtil.readInt16(buffer, offset + 2);
    int blobOffset = BitsUtil.readInt16(buffer, offset);

    if (blobLen == 0) {
        BitsUtil.writeInt(os, 0);
        return;
    }

    if (isLargeBlob(blobLen)) {
        int blobId = BitsUtil.readInt(blobBuffer, blobOffset);
        ArrayList<PageBlob> blobList = new ArrayList<>();

        while (blobId > 0) {
            PageBlob page = tableService.getBlobPage(blobId);
            blobList.add(page);
            blobId = page.getNextId();
        }

        for (int i = 0; i < blobList.size(); i++) {
            PageBlob page = blobList.get(i);
            int length = page.getLength();

            if ((length & 0xc000_0000) != 0) {
                throw new IllegalStateException(L.l("Unexpected blob length {0} for {1}", length, page));
            }

            if (i + 1 < blobList.size()) {
                length |= BLOB_CONT_MASK;
            }

            BitsUtil.writeInt(os, length);
            page.writeToStream(os);
        }
    } else {
        BitsUtil.writeInt(os, blobLen);
        os.write(blobBuffer, blobOffset, blobLen);
    }
}
java
public static DataStoreEvent removalEvent(DBIDs removals) {
    return new DataStoreEvent(DBIDUtil.EMPTYDBIDS, removals, DBIDUtil.EMPTYDBIDS);
}
python
def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10,
        numInnerItermax=200, stopThr=1e-9, verbose=False, log=False):
    """
    Solve the general regularized OT problem with the generalized conditional gradient

    The function solves the following optimization problem:

    .. math::
        \gamma = arg\min_\gamma <\gamma,M>_F + reg1\cdot\Omega(\gamma) + reg2\cdot f(\gamma)

        s.t. \gamma 1 = a

             \gamma^T 1 = b

             \gamma\geq 0

    where :

    - M is the (ns,nt) metric cost matrix
    - :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
    - :math:`f` is the regularization term (and df is its gradient)
    - a and b are source and target weights (sum to 1)

    The algorithm used for solving the problem is the generalized conditional gradient as discussed in [5,7]_

    Parameters
    ----------
    a : np.ndarray (ns,)
        samples weights in the source domain
    b : np.ndarray (nt,)
        samples weights in the target domain
    M : np.ndarray (ns,nt)
        loss matrix
    reg1 : float
        Entropic Regularization term >0
    reg2 : float
        Second Regularization term >0
    G0 : np.ndarray (ns,nt), optional
        initial guess (default is indep joint density)
    numItermax : int, optional
        Max number of iterations
    numInnerItermax : int, optional
        Max number of iterations of Sinkhorn
    stopThr : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True

    Returns
    -------
    gamma : (ns x nt) ndarray
        Optimal transportation matrix for the given parameters
    log : dict
        log dictionary return only if log==True in parameters

    References
    ----------
    .. [5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, "Optimal Transport
        for Domain Adaptation," in IEEE Transactions on Pattern Analysis and
        Machine Intelligence, vol.PP, no.99, pp.1-1
    .. [7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
        conditional gradient: analysis of convergence and applications.
        arXiv preprint arXiv:1510.06567.

    See Also
    --------
    ot.optim.cg : conditional gradient
    """
    loop = 1

    if log:
        log = {'loss': []}

    if G0 is None:
        G = np.outer(a, b)
    else:
        G = G0

    def cost(G):
        return np.sum(M * G) + reg1 * np.sum(G * np.log(G)) + reg2 * f(G)

    f_val = cost(G)
    if log:
        log['loss'].append(f_val)

    it = 0

    if verbose:
        print('{:5s}|{:12s}|{:8s}'.format('It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
        print('{:5d}|{:8e}|{:8e}'.format(it, f_val, 0))

    while loop:

        it += 1
        old_fval = f_val

        # problem linearization
        Mi = M + reg2 * df(G)

        # solve linear program with Sinkhorn
        # Gc = sinkhorn_stabilized(a, b, Mi, reg1, numItermax=numInnerItermax)
        Gc = sinkhorn(a, b, Mi, reg1, numItermax=numInnerItermax)

        deltaG = Gc - G

        # line search
        dcost = Mi + reg1 * (1 + np.log(G))  # ??
        alpha, fc, f_val = line_search_armijo(cost, G, deltaG, dcost, f_val)

        G = G + alpha * deltaG

        # test convergence
        if it >= numItermax:
            loop = 0

        delta_fval = (f_val - old_fval) / abs(f_val)
        if abs(delta_fval) < stopThr:
            loop = 0

        if log:
            log['loss'].append(f_val)

        if verbose:
            if it % 20 == 0:
                print('{:5s}|{:12s}|{:8s}'.format('It.', 'Loss', 'Delta loss') + '\n' + '-' * 32)
            print('{:5d}|{:8e}|{:8e}'.format(it, f_val, delta_fval))

    if log:
        return G, log
    else:
        return G
python
def plot_pointings(self, pointings=None):
    """Plot pointings on canvas"""
    if pointings is None:
        pointings = self.pointings
    i = 0
    for pointing in pointings:
        items = []
        i = i + 1
        label = {}
        label['text'] = pointing['label']['text']
        for ccd in pointing["camera"].getGeometry():
            (x1, y1) = self.p2c((ccd[0], ccd[1]))
            (x2, y2) = self.p2c((ccd[2], ccd[3]))
            item = self.create_rectangle(x1, y1, x2, y2, stipple='gray25',
                                         fill=pointing.get('color', ''))
            items.append(item)
        if w.show_labels.get() == 1:
            label['id'] = self.label(pointing["camera"].ra,
                                     pointing["camera"].dec,
                                     label['text'])
        pointing["items"] = items
        pointing["label"] = label
python
def _in(field, value, document):
    """
    Returns True if document[field] is in the iterable value. If the
    supplied value is not an iterable, then a MalformedQueryException is raised
    """
    try:
        values = iter(value)
    except TypeError:
        raise MalformedQueryException("'$in' must accept an iterable")

    return document.get(field, None) in values
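A quick illustration of the matching behavior, assuming _in from above is in scope (the documents and queries are made up):
python
doc = {'status': 'open', 'tags': 'bug'}
print(_in('status', ['open', 'closed'], doc))  # True: 'open' is in the list
print(_in('tags', ['feature'], doc))           # False: no match
print(_in('missing', ['x'], doc))              # False: absent field yields None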
java
public static void registerFunction(Statement st, Function function, String packagePrepend,
                                    boolean dropAlias) throws SQLException {
    String functionClass = function.getClass().getName();
    String functionAlias = getAlias(function);

    if (function instanceof ScalarFunction) {
        ScalarFunction scalarFunction = (ScalarFunction) function;
        String functionName = scalarFunction.getJavaStaticMethod();
        if (dropAlias) {
            try {
                st.execute("DROP ALIAS IF EXISTS " + functionAlias);
            } catch (SQLException ex) {
                // Ignore, some tables constraints may depend on this function
                LOGGER.debug(ex.getLocalizedMessage(), ex);
            }
        }
        String deterministic = "";
        if (getBooleanProperty(function, ScalarFunction.PROP_DETERMINISTIC, false)) {
            deterministic = " DETERMINISTIC";
        }
        String nobuffer = "";
        if (getBooleanProperty(function, ScalarFunction.PROP_NOBUFFER, false)) {
            nobuffer = " NOBUFFER";
        }
        // Create alias, H2 does not support prepare statement on create alias
        // "FORCE ALIAS means that the class not existing will not prevent the database from being opened."
        st.execute("CREATE FORCE ALIAS IF NOT EXISTS " + functionAlias + deterministic + nobuffer
                + " FOR \"" + packagePrepend + functionClass + "." + functionName + "\"");
        // Set comment
        String functionRemarks = getStringProperty(function, Function.PROP_REMARKS);
        if (!functionRemarks.isEmpty()) {
            PreparedStatement ps = st.getConnection()
                    .prepareStatement("COMMENT ON ALIAS " + functionAlias + " IS ?");
            ps.setString(1, functionRemarks);
            ps.execute();
        }
    } else if (function instanceof Aggregate) {
        if (dropAlias) {
            st.execute("DROP AGGREGATE IF EXISTS " + functionAlias);
        }
        st.execute("CREATE FORCE AGGREGATE IF NOT EXISTS " + functionAlias
                + " FOR \"" + packagePrepend + functionClass + "\"");
    } else {
        throw new SQLException("Unsupported function " + functionClass);
    }
}
python
def _gatk_extract_reads_cl(data, region, prep_params, tmp_dir):
    """Use GATK to extract reads from full BAM file.
    """
    args = ["PrintReads",
            "-L", region_to_gatk(region),
            "-R", dd.get_ref_file(data),
            "-I", data["work_bam"]]
    # GATK3 back compatibility, need to specify analysis type
    if "gatk4" in dd.get_tools_off(data):
        args = ["--analysis_type"] + args
    runner = broad.runner_from_config(data["config"])
    return runner.cl_gatk(args, tmp_dir)
java
public int compare(CaptureSearchResult o1, CaptureSearchResult o2) {
    String k1 = objectToKey(o1);
    String k2 = objectToKey(o2);
    if (backwards) {
        return k2.compareTo(k1);
    }
    return k1.compareTo(k2);
}
python
def listdir_matches(match):
    """Returns a list of filenames contained in the named directory.
       Only filenames which start with `match` will be returned.
       Directories will have a trailing slash.
    """
    import os
    last_slash = match.rfind('/')
    if last_slash == -1:
        dirname = '.'
        match_prefix = match
        result_prefix = ''
    else:
        match_prefix = match[last_slash + 1:]
        if last_slash == 0:
            dirname = '/'
            result_prefix = '/'
        else:
            dirname = match[0:last_slash]
            result_prefix = dirname + '/'

    def add_suffix_if_dir(filename):
        try:
            if (os.stat(filename)[0] & 0x4000) != 0:
                return filename + '/'
        except FileNotFoundError:
            # This can happen when a symlink points to a non-existent file.
            pass
        return filename

    matches = [add_suffix_if_dir(result_prefix + filename)
               for filename in os.listdir(dirname)
               if filename.startswith(match_prefix)]
    return matches
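An illustrative call, assuming listdir_matches above is in scope; the '/us' prefix is hypothetical and on a typical Unix system would match /usr/ among others:
python
# Tab-completion style lookup: list everything under / starting with 'us'.
for name in listdir_matches('/us'):
    print(name)   # e.g. '/usr/' -- directories carry a trailing slash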
java
public int getAlignedResIndex(Group g, Chain c) {

    boolean contained = false;
    for (Chain member : getChains()) {
        if (c.getId().equals(member.getId())) {
            contained = true;
            break;
        }
    }
    if (!contained)
        throw new IllegalArgumentException("Given chain with asym_id " + c.getId()
                + " is not a member of this entity: " + getChainIds().toString());

    if (!chains2pdbResNums2ResSerials.containsKey(c.getId())) {
        // we do lazy initialisation of the map
        initResSerialsMap(c);
    }
    // if no seqres groups are available at all the map will be null
    Map<ResidueNumber, Integer> map = chains2pdbResNums2ResSerials.get(c.getId());
    int serial;
    if (map != null) {

        ResidueNumber resNum = g.getResidueNumber();
        // the resNum will be null for groups that are SEQRES only and not in ATOM,
        // still it can happen that a group is in ATOM in one chain but not in other of the same entity.
        // This is what we try to find out here (analogously to what we do in initResSerialsMap() ):
        if (resNum == null && c.getSeqResGroups() != null && !c.getSeqResGroups().isEmpty()) {
            int index = c.getSeqResGroups().indexOf(g);
            resNum = findResNumInOtherChains(index, c);
        }

        if (resNum == null) {
            // still null, we really can't map
            serial = -1;
        } else {
            Integer alignedSerial = map.get(resNum);
            if (alignedSerial == null) {
                // the map doesn't contain this group, something's wrong: return -1
                serial = -1;
            } else {
                serial = alignedSerial;
            }
        }
    } else {
        // no seqres groups available, we resort to using the pdb residue numbers as given
        serial = g.getResidueNumber().getSeqNum();
    }
    return serial;
}
java
private void handleEnd(GuacamoleInstruction instruction) {

    // Verify all required arguments are present
    List<String> args = instruction.getArgs();
    if (args.size() < 1)
        return;

    // Terminate stream
    closeInterceptedStream(args.get(0));
}
java
public String readLine() throws IOException {
    String line = readLine(false);
    if (line != null) {
        bytesRead = bytesRead + line.length();
    }
    return line;
}
python
def _decode(self):
    """
    Convert the characters of string s to standard value (WFN value).
    Inspect each character in value of component. Copy quoted characters,
    with their escaping, into the result. Look for unquoted non
    alphanumerics and if not "*" or "?", add escaping.

    :exception: ValueError - invalid character in value of component
    """
    result = []
    idx = 0
    s = self._encoded_value
    embedded = False

    errmsg = []
    errmsg.append("Invalid character '")

    while (idx < len(s)):
        c = s[idx]  # get the idx'th character of s
        errmsg.append(c)
        errmsg.append("'")
        errmsg_str = "".join(errmsg)

        if (CPEComponentSimple._is_alphanum(c)):
            # Alphanumeric characters pass untouched
            result.append(c)
            idx += 1
            embedded = True
            continue

        if c == "\\":
            # Anything quoted in the bound string stays quoted
            # in the unbound string.
            result.append(s[idx: idx + 2])
            idx += 2
            embedded = True
            continue

        if (c == CPEComponent2_3_FS.WILDCARD_MULTI):
            # An unquoted asterisk must appear at the beginning or
            # end of the string.
            if (idx == 0) or (idx == (len(s) - 1)):
                result.append(c)
                idx += 1
                embedded = True
                continue
            else:
                raise ValueError(errmsg_str)

        if (c == CPEComponent2_3_FS.WILDCARD_ONE):
            # An unquoted question mark must appear at the beginning or
            # end of the string, or in a leading or trailing sequence:
            # - ? legal at beginning or end
            # - embedded is false, so must be preceded by ?
            # - embedded is true, so must be followed by ?
            if (((idx == 0) or (idx == (len(s) - 1))) or
                    ((not embedded) and (s[idx - 1] == CPEComponent2_3_FS.WILDCARD_ONE)) or
                    (embedded and (s[idx + 1] == CPEComponent2_3_FS.WILDCARD_ONE))):
                result.append(c)
                idx += 1
                embedded = False
                continue
            else:
                raise ValueError(errmsg_str)

        # all other characters must be quoted
        result.append("\\")
        result.append(c)
        idx += 1
        embedded = True

    self._standard_value = "".join(result)
java
protected static void checkCache(final Request request, final Instant modified, final EntityTag etag) {
    final ResponseBuilder builder = request.evaluatePreconditions(from(modified), etag);
    if (nonNull(builder)) {
        throw new WebApplicationException(builder.build());
    }
}
java
static byte[] incrementBlocks(byte[] counter, long blockDelta) {
    if (blockDelta == 0)
        return counter;
    if (counter == null || counter.length != 16)
        throw new IllegalArgumentException();
    // Can optimize this later.  KISS for now.
    if (blockDelta > MAX_GCM_BLOCKS)
        throw new IllegalStateException();
    // Allocate 8 bytes for a long
    ByteBuffer bb = ByteBuffer.allocate(8);
    // Copy the right-most 32 bits from the counter
    for (int i = 12; i <= 15; i++)
        bb.put(i - 8, counter[i]);
    long val = bb.getLong() + blockDelta;    // increment by delta
    if (val > MAX_GCM_BLOCKS)
        throw new IllegalStateException();   // overflow 2^32-2
    bb.rewind();
    // Get the incremented value (result) as an 8-byte array
    byte[] result = bb.putLong(val).array();
    // Copy the rightmost 32 bits from the resultant array to the input counter
    for (int i = 12; i <= 15; i++)
        counter[i] = result[i - 8];
    return counter;
}
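For intuition, a small Python sketch of the same counter arithmetic: treat the last four bytes of the 16-byte GCM counter as a big-endian integer and add the block delta (the bounds checks from the Java version are omitted):
python
def increment_blocks(counter: bytes, block_delta: int) -> bytes:
    # Interpret the rightmost 32 bits as a big-endian block counter.
    val = int.from_bytes(counter[12:], 'big') + block_delta
    return counter[:12] + val.to_bytes(4, 'big')

ctr = bytes(12) + (1).to_bytes(4, 'big')
print(increment_blocks(ctr, 5).hex())  # ends in 00000006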
java
protected synchronized List<TaskInProgress> findSpeculativeTaskCandidates(
        Collection<TaskInProgress> list) {
    ArrayList<TaskInProgress> candidates = new ArrayList<TaskInProgress>();

    long now = JobTracker.getClock().getTime();
    Iterator<TaskInProgress> iter = list.iterator();
    while (iter.hasNext()) {
        TaskInProgress tip = iter.next();
        if (tip.canBeSpeculated(now)) {
            candidates.add(tip);
        }
    }
    if (candidates.size() > 0) {
        Comparator<TaskInProgress> LateComparator = new EstimatedTimeLeftComparator(now);
        Collections.sort(candidates, LateComparator);
    }
    return candidates;
}
python
def _format_pr(pr_):
    '''
    Helper function to format API return information into a more
    manageable and useful dictionary for pull request information.

    pr_
        The pull request to format.
    '''
    ret = {'id': pr_.get('id'),
           'pr_number': pr_.get('number'),
           'state': pr_.get('state'),
           'title': pr_.get('title'),
           'user': pr_.get('user').get('login'),
           'html_url': pr_.get('html_url'),
           'base_branch': pr_.get('base').get('ref')}

    return ret
python
def delete(self, record):
    """Delete a record.

    :param record: Record instance.
    """
    index, doc_type = self.record_to_index(record)

    return self.client.delete(
        id=str(record.id),
        index=index,
        doc_type=doc_type,
    )
python
def is_output_supports_color():
    """
    Returns True if the running system's terminal supports color,
    and False otherwise.
    """
    plat = sys.platform
    supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)

    is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    if not supported_platform or not is_a_tty:
        return False
    return True
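A usage sketch, assuming the function above is in scope; the ANSI escape codes are standard, the message is illustrative:
python
if is_output_supports_color():
    print("\033[32mOK\033[0m")   # green "OK" via ANSI escape codes
else:
    print("OK")                  # plain fallback for pipes and dumb terminals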
java
public static URL createHttpUrl(LibertyServer server, String contextRoot, String path) throws Exception {
    return new URL(createHttpUrlString(server, contextRoot, path));
}
python
def timeline(self, user_id=None, screen_name=None, max_id=None,
             since_id=None, max_pages=None):
    """
    Returns a collection of the most recent tweets posted by the user
    indicated by the user_id or screen_name parameter. Provide a user_id
    or screen_name.
    """
    if user_id and screen_name:
        raise ValueError('only user_id or screen_name may be passed')

    # Strip if screen_name is prefixed with '@'
    if screen_name:
        screen_name = screen_name.lstrip('@')
    id = screen_name or str(user_id)
    id_type = "screen_name" if screen_name else "user_id"
    log.info("starting user timeline for user %s", id)

    if screen_name or user_id:
        url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
    else:
        url = "https://api.twitter.com/1.1/statuses/home_timeline.json"

    params = {"count": 200, id_type: id, "include_ext_alt_text": "true"}

    retrieved_pages = 0
    reached_end = False

    while True:
        if since_id:
            # Make the since_id inclusive, so we can avoid retrieving
            # an empty page of results in some cases
            params['since_id'] = str(int(since_id) - 1)
        if max_id:
            params['max_id'] = max_id

        try:
            resp = self.get(url, params=params, allow_404=True)
            retrieved_pages += 1
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                log.warn("no timeline available for %s", id)
                break
            elif e.response.status_code == 401:
                log.warn("protected account %s", id)
                break
            raise e

        statuses = resp.json()

        if len(statuses) == 0:
            log.info("no new tweets matching %s", params)
            break

        for status in statuses:
            # We've certainly reached the end of new results
            if since_id is not None and status['id_str'] == str(since_id):
                reached_end = True
                break
            # If you request an invalid user_id, you may still get
            # results so need to check.
            if not user_id or id == status.get("user", {}).get("id_str"):
                yield status

        if reached_end:
            log.info("no new tweets matching %s", params)
            break
        if max_pages is not None and retrieved_pages == max_pages:
            log.info("reached max page limit for %s", params)
            break

        max_id = str(int(status["id_str"]) - 1)
python
def write_report(summary_dict, seqid, genus, key):
    """
    Parse the PointFinder outputs, and write the summary report for the current analysis type
    :param summary_dict: nested dictionary containing data such as header strings, and paths to reports
    :param seqid: name of the strain
    :param genus: MASH-calculated genus of current isolate
    :param key: current result type. Options are 'prediction', and 'results'
    """
    # Set the header string if the summary report doesn't already exist
    if not os.path.isfile(summary_dict[genus][key]['summary']):
        header_string = summary_dict[genus][key]['header']
    else:
        header_string = str()
    summary_string = str()
    try:
        # Read in the predictions
        with open(summary_dict[genus][key]['output'], 'r') as outputs:
            # Skip the header
            next(outputs)
            for line in outputs:
                # Skip empty lines
                if line != '\n':
                    # When processing the results outputs, add the seqid to the summary string
                    if key == 'results':
                        summary_string += '{seq},{genus},'.format(seq=seqid, genus=genus)
                    # Clean up the string before adding it to the summary string - replace commas
                    # with semi-colons, and replace tabs with commas
                    summary_string += line.replace(',', ';').replace('\t', ',')
        # Ensure that there were results to report
        if summary_string:
            if not summary_string.endswith('\n'):
                summary_string += '\n'
        else:
            if key == 'results':
                summary_string += '{seq},{genus}\n'.format(seq=seqid, genus=genus)
            else:
                summary_string += '{seq}\n'.format(seq=seqid)
        # Write the summaries to the summary file
        with open(summary_dict[genus][key]['summary'], 'a+') as summary:
            # Write the header if necessary
            if header_string:
                summary.write(header_string)
            summary.write(summary_string)
    # If no FASTA file could be created by reference mapping, add the strain information
    except FileNotFoundError:
        # Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
        # empty column created by a trailing comma)
        header_len = len(summary_dict[genus][key]['header'].split(',')) - 2
        # When processing the results outputs, add the seqid to the summary string
        if key == 'results':
            summary_string += '{seq},{genus}\n'.format(seq=seqid, genus=genus)
        # For the prediction summary, populate the summary string with the appropriate number of comma-separated
        # '0' entries
        elif key == 'prediction':
            summary_string += '{seq}{empty}\n'.format(seq=seqid, empty=',0' * header_len)
        # Write the summaries to the summary file
        with open(summary_dict[genus][key]['summary'], 'a+') as summary:
            # Write the header if necessary
            if header_string:
                summary.write(header_string)
            summary.write(summary_string)
python
def _parse_acl_config(self, acl_config):
    """Parse configured ACLs and rules

    ACLs are returned as a dict of rule sets:
        {<eos_acl1_name>: set([<eos_acl1_rules>]),
         <eos_acl2_name>: set([<eos_acl2_rules>]),
         ...,
        }
    """
    parsed_acls = dict()
    for acl in acl_config['aclList']:
        parsed_acls[acl['name']] = set()
        for rule in acl['sequence']:
            parsed_acls[acl['name']].add(rule['text'])
    return parsed_acls
java
final public Selector PrimaryNotPlusMinus(boolean negated) throws ParseException {
    Selector ans;
    Token tok;
    switch ((jj_ntk == -1) ? jj_ntk() : jj_ntk) {
    case TRUE:
        jj_consume_token(TRUE);
        ans = new LiteralImpl(Boolean.TRUE);
        break;
    case FALSE:
        jj_consume_token(FALSE);
        ans = new LiteralImpl(Boolean.FALSE);
        break;
    case 11:
        jj_consume_token(11);
        ans = BooleanExpr();
        jj_consume_token(12);
        break;
    case INTEGER_LITERAL:
        tok = jj_consume_token(INTEGER_LITERAL);
        ans = ParseUtil.parseIntegerLiteral(((negated) ? "-" : "") + tok.image);
        negated = false;
        break;
    case FLOATING_POINT_LITERAL:
        tok = jj_consume_token(FLOATING_POINT_LITERAL);
        ans = ParseUtil.parseFloatingLiteral(((negated) ? "-" : "") + tok.image);
        negated = false;
        break;
    case STRING_LITERAL:
        tok = jj_consume_token(STRING_LITERAL);
        ans = ParseUtil.parseStringLiteral(tok.image);
        break;
    case IDENTIFIER:
    case QUOTED_IDENTIFIER:
        ans = FieldRef();
        break;
    default:
        jj_la1[10] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
    }
    if (negated) {
        if (true) return new OperatorImpl(Operator.NEG, ans);
    } else {
        if (true) return ans;
    }
    throw new Error("Missing return statement in function");
}
python
def inverted(self):
    """Return the inverse of the transform."""
    # This is a bit of hackery so that we can put a single "inverse"
    # function here. If we just made "self._inverse_type" point to the class
    # in question, it wouldn't be defined yet. This way, it's done at
    # runtime and we avoid the definition problem. Hackish, but better
    # than repeating code everywhere or making a relatively complex
    # metaclass.
    inverse_type = globals()[self._inverse_type]
    return inverse_type(self._center_longitude, self._center_latitude,
                        self._resolution)
java
protected synchronized void incrFileFixReadBytesRemoteRack(long incr) {
    if (incr < 0) {
        throw new IllegalArgumentException("Cannot increment by negative value " + incr);
    }
    RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID)
            .numFileFixReadBytesRemoteRack.inc(incr);
    numfileFixBytesReadRemoteRack += incr;
}
java
@Override
public Array getArray(int index) {
    check(index);
    final Object obj = fleeceValueToObject(index);
    return obj instanceof Array ? (Array) obj : null;
}
java
public float[] t3(float[] z, int k, int M) {
    float[] result = new float[M];

    for (int i = 1; i <= M - 1; i++) {
        int head = (i - 1) * k / (M - 1) + 1;
        int tail = i * k / (M - 1);
        float[] subZ = subVector(z, head - 1, tail - 1);

        result[i - 1] = (new Transformations()).rNonsep(subZ, k / (M - 1));
    }

    int head = k + 1;
    int tail = z.length;
    int l = z.length - k;
    float[] subZ = subVector(z, head - 1, tail - 1);

    result[M - 1] = (new Transformations()).rNonsep(subZ, l);

    return result;
}
java
public List<Section> getAreas() {
    List<Section> areasCopy = new ArrayList<Section>(10);
    areasCopy.addAll(areas);
    return areasCopy;
}
python
def tocimxmlstr(value, indent=None):
    """
    Return the CIM-XML representation of the CIM object or CIM data type,
    as a :term:`unicode string`.

    *New in pywbem 0.9.*

    The returned CIM-XML representation is consistent with :term:`DSP0201`.

    Parameters:

      value (:term:`CIM object` or :term:`CIM data type` or :term:`Element`):
        The CIM object or CIM data type to be converted to CIM-XML, or an
        :term:`Element` object that already is the CIM-XML representation.

      indent (:term:`string` or :term:`integer`):
        `None` indicates that a single-line version of the XML should be
        returned, without any whitespace between the XML elements.

        Other values indicate that a prettified, multi-line version of the XML
        should be returned. A string value specifies the indentation string to
        be used for each level of nested XML elements. An integer value
        specifies an indentation string of so many blanks.

    Returns:

        The CIM-XML representation of the value, as a :term:`unicode string`.
    """
    if isinstance(value, Element):
        xml_elem = value
    else:
        xml_elem = tocimxml(value)

    if indent is None:
        xml_str = xml_elem.toxml()
    else:
        if isinstance(indent, six.string_types):
            pass  # use indent, as specified
        elif isinstance(indent, six.integer_types):
            indent = ' ' * indent
        else:
            raise TypeError(
                _format("Type of indent must be string or integer, but is: {0}",
                        type(indent)))
        xml_str = xml_elem.toprettyxml(indent=indent)
    # xml_str is a unicode string if required based upon its content.
    return _ensure_unicode(xml_str)
java
public static IdGenerator getIdGeneratorByType(GenerationType generationType) {
    if (generationType == null)
        return null;
    switch (generationType) {
    case IDENTITY:
        return IdentityIdGenerator.INSTANCE;
    case AUTO:
        return AutoIdGenerator.INSTANCE;
    case UUID25:
        return UUID25Generator.INSTANCE;
    case UUID32:
        return UUID32Generator.INSTANCE;
    case UUID36:
        return UUID36Generator.INSTANCE;
    case TIMESTAMP:
        return TimeStampIdGenerator.INSTANCE;
    case SNOWFLAKE:
        return SnowflakeGenerator.INSTANCE;
    default:
        return null;
    }
}
python
def subscribe(self, stream):
    """ Subscribe to a stream.

        :param stream: stream to subscribe to
        :type stream: str
        :raises: :class:`~datasift.exceptions.StreamSubscriberNotStarted`,
            :class:`~datasift.exceptions.DeleteRequired`,
            :class:`~datasift.exceptions.StreamNotConnected`

        Used as a decorator, eg.::

            @client.subscribe(stream)
            def subscribe_to_hash(msg):
                print(msg)
    """
    if not self._stream_process_started:
        raise StreamSubscriberNotStarted()

    def real_decorator(func):
        if not self._on_delete:
            raise DeleteRequired("""An on_delete function is required. You must process delete messages and remove them from your system (if stored) in order to remain compliant with the ToS""")
        if hasattr(self.factory, 'datasift') and 'send_message' in self.factory.datasift:  # pragma: no cover
            self.subscriptions[stream] = func
            self.factory.datasift['send_message'](json.dumps(
                {"action": "subscribe", "hash": stream}).encode("utf8"))
        else:  # pragma: no cover
            raise StreamNotConnected('The client is not connected to DataSift, unable to subscribe to stream')

    return real_decorator
python
def observe(self, callback, err_callback, duration=60):
    """Observe resource and call callback when updated."""
    def observe_callback(value):
        """
        Called when end point is updated.

        Returns a Command.
        """
        self.raw = value
        callback(self)

    return Command('get', self.path, process_result=observe_callback,
                   err_callback=err_callback, observe=True,
                   observe_duration=duration)
python
def from_raw_message(cls, rawmessage):
    """Create message from raw byte stream."""
    userdata = Userdata.from_raw_message(rawmessage[11:25])
    return ExtendedReceive(rawmessage[2:5],
                           rawmessage[5:8],
                           {'cmd1': rawmessage[9],
                            'cmd2': rawmessage[10]},
                           userdata,
                           flags=rawmessage[8])
java
@Nonnull
public static <T, R> LObjBoolFunction<T, R> objBoolFunctionFrom(Consumer<LObjBoolFunctionBuilder<T, R>> buildingFunction) {
    LObjBoolFunctionBuilder builder = new LObjBoolFunctionBuilder();
    buildingFunction.accept(builder);
    return builder.build();
}
java
public void setPushLevel(Level newLevel) throws SecurityException {
    if (newLevel == null) {
        throw new NullPointerException();
    }
    LogManager manager = LogManager.getLogManager();
    checkPermission();
    pushLevel = newLevel;
}
python
def use_forwarded_port(graph):
    """
    Inject the `X-Forwarded-Port` (if any) into the current URL adapter.

    The URL adapter is used by `url_for` to build URLs.
    """
    # There must be a better way!
    context = _request_ctx_stack.top
    if _request_ctx_stack is None:
        return None

    # determine the configured overrides
    forwarded_host = graph.config.port_forwarding.get("host")
    forwarded_port = request.headers.get("X-Forwarded-Port")
    if not forwarded_port and not forwarded_host:
        return None

    # determine the current server name
    if ":" in context.url_adapter.server_name:
        server_host, server_port = context.url_adapter.server_name.split(":", 1)
    else:
        server_host = context.url_adapter.server_name
        server_port = 443 if context.url_adapter.url_scheme == "https" else 80

    # choose a new server name
    if forwarded_host:
        server_name = forwarded_host
    elif server_port:
        server_name = "{}:{}".format(server_host, forwarded_port)
    else:
        server_name = "{}:{}".format(server_host, server_port)

    context.url_adapter.server_name = server_name
    return server_name
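The rewrite rule in isolation, outside Flask (host and port values are made up):
python
def new_server_name(server_name, scheme, forwarded_host=None, forwarded_port=None):
    # Split the current server name into host and port, defaulting by scheme.
    if ":" in server_name:
        host, port = server_name.split(":", 1)
    else:
        host, port = server_name, (443 if scheme == "https" else 80)
    if forwarded_host:
        return forwarded_host
    if forwarded_port:
        return "{}:{}".format(host, forwarded_port)
    return "{}:{}".format(host, port)

print(new_server_name("api.example.com", "https", forwarded_port=8443))
# -> api.example.com:8443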
java
public Formula getFormula() {
    Reagent[] reagents = new Reagent[] {KEY, VALUE, PARENT, EXPRESSION};
    final Formula rslt = new SimpleFormula(getClass(), reagents);
    return rslt;
}
python
def generateViewHierarchies(self):
    '''
    Wrapper method to create the view hierarchies.  Currently it just calls
    :func:`~exhale.graph.ExhaleRoot.generateClassView` and
    :func:`~exhale.graph.ExhaleRoot.generateDirectoryView` --- if you want to
    implement additional hierarchies, implement the additional hierarchy method
    and call it from here.  Then make sure to ``include`` it in
    :func:`~exhale.graph.ExhaleRoot.generateAPIRootBody`.
    '''
    # gather the class hierarchy data and write it out
    class_view_data = self.generateClassView()
    self.writeOutHierarchy(True, class_view_data)
    # gather the file hierarchy data and write it out
    file_view_data = self.generateDirectoryView()
    self.writeOutHierarchy(False, file_view_data)
java
public PdfName getVersionAsName(char version) {
    switch (version) {
    case PdfWriter.VERSION_1_2:
        return PdfWriter.PDF_VERSION_1_2;
    case PdfWriter.VERSION_1_3:
        return PdfWriter.PDF_VERSION_1_3;
    case PdfWriter.VERSION_1_4:
        return PdfWriter.PDF_VERSION_1_4;
    case PdfWriter.VERSION_1_5:
        return PdfWriter.PDF_VERSION_1_5;
    case PdfWriter.VERSION_1_6:
        return PdfWriter.PDF_VERSION_1_6;
    case PdfWriter.VERSION_1_7:
        return PdfWriter.PDF_VERSION_1_7;
    default:
        return PdfWriter.PDF_VERSION_1_4;
    }
}
java
private static void thresholdBlock(byte[] luminances, int xoffset, int yoffset,
                                   int threshold, int stride, BitMatrix matrix) {
    for (int y = 0, offset = yoffset * stride + xoffset; y < BLOCK_SIZE; y++, offset += stride) {
        for (int x = 0; x < BLOCK_SIZE; x++) {
            // Comparison needs to be <= so that black == 0 pixels are black even if the threshold is 0.
            if ((luminances[offset + x] & 0xFF) <= threshold) {
                matrix.set(xoffset + x, yoffset + y);
            }
        }
    }
}
python
def get_external_link(self, id, **kwargs):  # noqa: E501
    """Get a specific external link  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_external_link(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerExternalLink
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_external_link_with_http_info(id, **kwargs)  # noqa: E501
    else:
        (data) = self.get_external_link_with_http_info(id, **kwargs)  # noqa: E501
        return data
python
def put(self, item, block=True, timeout=None, size=None):
    """Put *item* into the queue.

    If the queue is currently full and *block* is True (the default), then
    wait up to *timeout* seconds for space to become available. If no timeout
    is specified, then wait indefinitely.

    If the queue is full and *block* is False or a timeout occurs, then raise
    a :class:`QueueFull` exception.

    The optional *size* argument may be used to specify a custom size for the
    item. The total :meth:`qsize` of the queue is the sum of the sizes of all
    the items. The default size for an item is 1.
    """
    if size is None:
        size = 1
    with self._lock:
        priority = self._get_item_priority(item)
        while self._size + size > self.maxsize > 0:
            if not block:
                raise QueueFull
            if not self._notfull.wait_for(lambda: self._size + size <= self.maxsize, timeout):
                raise QueueFull
        heapq.heappush(self._heap, (priority, size, item))
        self._size += size
        self._unfinished_tasks += 1
        self._notempty.notify()
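The heap stores (priority, size, item) tuples, so lower priorities pop first; a standalone illustration with heapq:
python
import heapq

heap = []
heapq.heappush(heap, (2, 1, 'low'))
heapq.heappush(heap, (1, 1, 'high'))
print(heapq.heappop(heap))  # (1, 1, 'high') -- smallest priority comes out first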
java
public static THttpService ofFormats(
        Object implementation,
        SerializationFormat defaultSerializationFormat,
        Iterable<SerializationFormat> otherAllowedSerializationFormats) {

    return new THttpService(ThriftCallService.of(implementation),
                            newAllowedSerializationFormats(defaultSerializationFormat,
                                                           otherAllowedSerializationFormats));
}
java
public void process(Iterator<String> text) {
    String nextToken = null, curToken = null;

    // Base case for the next token buffer to ensure we always have two
    // valid tokens present
    if (text.hasNext())
        nextToken = text.next();

    while (text.hasNext()) {
        curToken = nextToken;
        nextToken = text.next();

        // Only process bigrams where the two tokens weren't excluded by the
        // token filter
        if (!(excludeToken(curToken) || excludeToken(nextToken)))
            processBigram(curToken, nextToken);
    }
}
java
protected String columnNameToPropertyName(String columnName) {
    String normalized = columnName.replaceAll("[^a-zA-Z0-9]+", " ");
    String capitalized = WordUtils.capitalizeFully(normalized);
    String blankRemoved = StringUtils.remove(capitalized, ' ');
    return StringUtils.uncapitalize(blankRemoved);
}
python
def set_from_config_file(self, filename):
    """ Loads lint config from an ini-style config file """
    if not os.path.exists(filename):
        raise LintConfigError(u"Invalid file path: {0}".format(filename))
    self._config_path = os.path.abspath(filename)
    try:
        parser = ConfigParser()
        parser.read(filename)
        for section_name in parser.sections():
            for option_name, option_value in parser.items(section_name):
                self.set_option(section_name, option_name, ustr(option_value))
    except ConfigParserError as e:
        raise LintConfigError(ustr(e))
python
def rewrite_guides(self, guides):
    """
    Remove any ``<a:gd>`` element children of ``<a:avLst>`` and replace them
    with ones having (name, val) in *guides*.
    """
    self._remove_avLst()
    avLst = self._add_avLst()
    for name, val in guides:
        gd = avLst._add_gd()
        gd.name = name
        gd.fmla = 'val %d' % val
java
@Override
public EClass getIfcCsgPrimitive3D() {
    if (ifcCsgPrimitive3DEClass == null) {
        ifcCsgPrimitive3DEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
                .getEClassifiers().get(156);
    }
    return ifcCsgPrimitive3DEClass;
}
python
def _get_library_os_path_from_library_dict_tree(self, library_path, library_name):
    """Hand verified library os path from libraries dictionary tree."""
    if library_path is None or library_name is None:
        return None
    path_list = library_path.split(os.sep)
    target_lib_dict = self.libraries
    # go down the path to the correct library
    for path_element in path_list:
        if path_element not in target_lib_dict:  # Library cannot be found
            target_lib_dict = None
            break
        target_lib_dict = target_lib_dict[path_element]
    return None if target_lib_dict is None or library_name not in target_lib_dict \
        else target_lib_dict[library_name]
java
private boolean openTemplate(String filename) {
    InputStreamReader isr = null;
    try {
        isr = new InputStreamReader(IOUtil.newInputStream(filename), "UTF-8");
        BufferedReader br = new BufferedReader(isr);
        String line;
        while ((line = br.readLine()) != null) {
            if (line.length() == 0 || line.charAt(0) == ' ' || line.charAt(0) == '#') {
                continue;
            } else if (line.charAt(0) == 'U') {
                unigramTempls_.add(line.trim());
            } else if (line.charAt(0) == 'B') {
                bigramTempls_.add(line.trim());
            } else {
                System.err.println("unknown type: " + line);
            }
        }
        br.close();
        templs_ = makeTempls(unigramTempls_, bigramTempls_);
    } catch (Exception e) {
        if (isr != null) {
            try {
                isr.close();
            } catch (Exception e2) {
            }
        }
        e.printStackTrace();
        System.err.println("Error reading " + filename);
        return false;
    }
    return true;
}
java
public final void makeDocReversed(final Map<String, Object> pReqVars,
        final ADoc pReversing, final ADoc pReversed,
        final String pLangDef) throws Exception {
    pReversing.setIdDatabaseBirth(pReversed.getIdDatabaseBirth());
    pReversing.setReversedId(pReversed.getItsId());
    pReversing.setReversedIdDatabaseBirth(pReversed.getIdDatabaseBirth());
    pReversing.setItsDate(new Date(pReversed.getItsDate().getTime() + 1));
    pReversing.setItsTotal(pReversed.getItsTotal().negate());
    pReversing.setHasMadeAccEntries(false);
    getSrvOrm().insertEntity(pReqVars, pReversing);
    pReversing.setIsNew(false);
    String oldDescr = "";
    if (pReversed.getDescription() != null) {
        oldDescr = pReversed.getDescription();
    }
    pReversed.setDescription(oldDescr + " " + getSrvI18n()
            .getMsg("reversing_n", pLangDef) + pReversing.getIdDatabaseBirth()
            + "-" + pReversing.getItsId());
    pReversed.setReversedId(pReversing.getItsId());
    pReversed.setReversedIdDatabaseBirth(pReversing.getIdDatabaseBirth());
    getSrvOrm().updateEntity(pReqVars, pReversed);
    this.srvAccEntry.reverseEntries(pReqVars, pReversing, pReversed);
}
java
private static List<TransformationDescription> loadDescrtipionsFromXMLInputSource(
        InputSource source, String fileName) throws Exception {
    XMLReader xr = XMLReaderFactory.createXMLReader();
    TransformationDescriptionXMLReader reader = new TransformationDescriptionXMLReader(fileName);
    xr.setContentHandler(reader);
    xr.parse(source);
    return reader.getResult();
}
java
public String getClassPath() {
    String rawClassPath = buildClassPath();

    // Early return: the normalization pass below is currently disabled.
    if (true) return rawClassPath;

    char sep = CauchoUtil.getPathSeparatorChar();
    String[] splitClassPath = rawClassPath.split("[" + sep + "]");

    String javaHome = System.getProperty("java.home");
    PathImpl pwd = VfsOld.lookup(System.getProperty("user.dir"));

    ArrayList<String> cleanClassPath = new ArrayList<String>();
    StringBuilder sb = new StringBuilder();

    for (String pathName : splitClassPath) {
        PathImpl path = pwd.lookup(pathName);
        pathName = path.getNativePath();

        if (! pathName.startsWith(javaHome)
            && ! cleanClassPath.contains(pathName)) {
            cleanClassPath.add(pathName);
            if (sb.length() > 0)
                sb.append(sep);
            sb.append(pathName);
        }
    }

    return sb.toString();
}
java
public static <R extends Tuple, T extends Tuple> R summarize(DataSet<T> input) throws Exception {
    if (!input.getType().isTupleType()) {
        throw new IllegalArgumentException("summarize() is only implemented for DataSet's of Tuples");
    }
    final TupleTypeInfoBase<?> inType = (TupleTypeInfoBase<?>) input.getType();
    DataSet<TupleSummaryAggregator<R>> result = input.mapPartition(
            new MapPartitionFunction<T, TupleSummaryAggregator<R>>() {
        @Override
        public void mapPartition(Iterable<T> values, Collector<TupleSummaryAggregator<R>> out) throws Exception {
            TupleSummaryAggregator<R> aggregator = SummaryAggregatorFactory.create(inType);
            for (Tuple value : values) {
                aggregator.aggregate(value);
            }
            out.collect(aggregator);
        }
    }).reduce(new ReduceFunction<TupleSummaryAggregator<R>>() {
        @Override
        public TupleSummaryAggregator<R> reduce(TupleSummaryAggregator<R> agg1, TupleSummaryAggregator<R> agg2)
                throws Exception {
            agg1.combine(agg2);
            return agg1;
        }
    });
    return result.collect().get(0).result();
}
java
public String getOutputUserDisplayName(String inputVirtualRealm) {
    // initialize the return value
    String returnValue = getOutputMapping(inputVirtualRealm,
            Service.CONFIG_DO_USER_DISPLAY_NAME_MAPPING,
            USER_DISPLAY_NAME_DEFAULT);
    return returnValue;
}
java
public static void register() throws SQLException {
    if (isRegistered()) {
        throw new IllegalStateException(
                "Driver is already registered. It can only be registered once.");
    }
    CloudSpannerDriver registeredDriver = new CloudSpannerDriver();
    DriverManager.registerDriver(registeredDriver);
    CloudSpannerDriver.registeredDriver = registeredDriver;
}
python
def is_reached(self, uid=None):
    """
    is_reached is to be called for every object that counts towards the limit.

    - When called with no uid, the Limiter assumes this is a new object and
      unconditionally increments the counter (less CPU and memory usage).
    - When a given object can be passed multiple times, a uid must be provided to
      deduplicate calls. Only the first occurrence of a uid will increment the counter.

    :param uid: (optional) unique identifier of the object, to deduplicate calls
    :returns: boolean, true if limit exceeded
    """
    if self.reached_limit:
        return True

    if uid:
        if uid in self.seen:
            return False
        self.count += 1
        self.seen.add(uid)
    else:
        self.count += 1

    if self.count > self.limit:
        if self.warning:
            self.warning(
                "Check {} exceeded limit of {} {}, ignoring next ones".format(
                    self.check_name, self.limit, self.name
                )
            )
        self.reached_limit = True
        return True

    return False
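The dedup-then-count pattern in isolation — a standalone sketch, independent of the class above (the limit of 2 is arbitrary):
python
seen, count, limit = set(), 0, 2

def is_reached(uid=None):
    global count
    if uid is not None:
        if uid in seen:
            return False       # duplicate uid: never double-counted
        seen.add(uid)
    count += 1
    return count > limit

print([is_reached(u) for u in ("a", "a", "b", "c")])  # [False, False, False, True]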
java
@Override
protected Outlet createOutlet(final boolean append, final String encoding, final String name,
        final boolean overwrite, final String path) {
    Outlet _xifexpression = null;
    if (((Objects.equal(name, Generator.SRC_GEN) || Objects.equal(name, Generator.SRC_GEN_IDE))
            || Objects.equal(name, Generator.SRC_GEN_UI))) {
        _xifexpression = super.createOutlet(append, encoding, name, overwrite,
                this.getTmpFolder().getAbsolutePath());
    } else {
        _xifexpression = super.createOutlet(append, encoding, name, overwrite, path);
    }
    return _xifexpression;
}
java
public ServiceFuture<ExpressRouteGatewayInner> beginCreateOrUpdateAsync(String resourceGroupName,
        String expressRouteGatewayName, ExpressRouteGatewayInner putExpressRouteGatewayParameters,
        final ServiceCallback<ExpressRouteGatewayInner> serviceCallback) {
    return ServiceFuture.fromResponse(
            beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, expressRouteGatewayName,
                    putExpressRouteGatewayParameters),
            serviceCallback);
}
python
def checksum(thing):
    """
    Get the checksum of a calculation from the calculation ID (if already
    done) or from the job.ini/job.zip file (if not done yet). If `thing`
    is a source model logic tree file, get the checksum of the model by
    ignoring the job.ini, the gmpe logic tree file and possibly other files.
    """
    try:
        job_id = int(thing)
        job_file = None
    except ValueError:
        job_id = None
        job_file = thing
        if not os.path.exists(job_file):
            sys.exit('%s does not correspond to an existing file' % job_file)
    if job_id:
        dstore = util.read(job_id)
        checksum = dstore['/'].attrs['checksum32']
    elif job_file.endswith('.xml'):  # assume it is a smlt file
        inputs = {'source_model_logic_tree': job_file}
        checksum = readinput.get_checksum32(mock.Mock(inputs=inputs))
    else:
        oq = readinput.get_oqparam(job_file)
        checksum = readinput.get_checksum32(oq)
    print(checksum)
python
def _get_application_module(self, controller, application):
    """Return the module for an application. If it's an entry-point registered
    application name, return the module name from the entry points data. If
    not, the passed in application name is returned.

    :param str controller: The controller type
    :param str application: The application name or module
    :rtype: str
    """
    for pkg in self._get_applications(controller):
        if pkg.name == application:
            return pkg.module_name
    return application
java
public void add(Entry entry) throws LdapException {
    CoreSession session = this.service.getAdminSession();
    session.add(entry);
}
java
private void maxiEncodingModeComboActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_maxiEncodingModeComboActionPerformed
    // TODO add your handling code here:
    if (maxiEncodingModeCombo.getSelectedIndex() == 0
            || maxiEncodingModeCombo.getSelectedIndex() == 1) {
        maxiPrimaryData.setEnabled(true);
        maxiPrimaryDataLabel.setEnabled(true);
    } else {
        maxiPrimaryData.setEnabled(false);
        maxiPrimaryDataLabel.setEnabled(false);
    }
    encodeData();
}
java
public void deleteAddOn(final String planCode, final String addOnCode) {
    doDELETE(Plan.PLANS_RESOURCE + "/" + planCode + AddOn.ADDONS_RESOURCE + "/" + addOnCode);
}
python
def pks_from_objects(self, objects):
    """
    Extract all the primary key strings from the given objects. Objects may
    be Versionables, or bare primary keys.

    :rtype : set
    """
    return {o.pk if isinstance(o, Model) else o for o in objects}
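An illustration of the mixed-input behavior; a tiny stand-in class plays the role of a Django Model so the sketch runs standalone:
python
class FakeModel:
    def __init__(self, pk):
        self.pk = pk

# Mixed input: model instances and bare primary keys normalize to one pk set.
objects = [FakeModel('a1'), 'b2', FakeModel('c3')]
print({o.pk if isinstance(o, FakeModel) else o for o in objects})
# -> {'a1', 'b2', 'c3'}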
python
def build_journals_re_kb(fpath):
    """Load journals regexps knowledge base

    @see build_journals_kb
    """
    def make_tuple(match):
        regexp = match.group('seek')
        repl = match.group('repl')
        return regexp, repl

    kb = []

    with file_resolving(fpath) as fh:
        for rawline in fh:
            if rawline.startswith('#'):
                continue
            # Extract the seek->replace terms from this KB line:
            m_kb_line = re_kb_line.search(rawline)
            kb.append(make_tuple(m_kb_line))

    return kb
python
def replace_rep_after(text: str) -> str:
    "Replace repetitions at the character level in `text` after the repetition"
    def _replace_rep(m):
        c, cc = m.groups()
        return f"{c}{TK_REP}{len(cc)+1}"
    re_rep = re.compile(r"(\S)(\1{2,})")
    return re_rep.sub(_replace_rep, text)
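A runnable demonstration; the module-level names re and TK_REP are supplied here so the sketch stands alone, and the token string is illustrative:
python
import re

TK_REP = ' xxrep '  # illustrative repetition token

def replace_rep_after(text: str) -> str:
    def _replace_rep(m):
        c, cc = m.groups()
        return f"{c}{TK_REP}{len(cc)+1}"
    return re.compile(r"(\S)(\1{2,})").sub(_replace_rep, text)

print(replace_rep_after("soooo cool"))  # -> 'so xxrep 4 cool'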
python
def to_bytes(self):
    '''
    Create bytes from properties
    '''
    # Verify that the properties make sense
    self.sanitize()

    # Write the version
    bitstream = BitStream('uint:4=%d' % self.version)

    # Write the traffic class
    bitstream += BitStream('uint:8=%d' % self.traffic_class)

    # Write the flow label
    bitstream += BitStream('uint:20=%d' % self.flow_label)

    # Write the payload length
    payload_bytes = bytes(self.payload)
    payload_length = len(payload_bytes)
    bitstream += BitStream('uint:16=%d' % payload_length)

    # Write the next header type
    bitstream += BitStream('uint:8=%d' % self.next_header)

    # Write the hop limit
    bitstream += BitStream('uint:8=%d' % self.hop_limit)

    # Write the source and destination addresses
    bitstream += BitStream('uint:128=%d, '
                           'uint:128=%d' % (int(self.source),
                                            int(self.destination)))

    return bitstream.bytes + payload_bytes
python
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size):
    """ word embedding + LSTM Projected """
    state_names = []
    data = S.var('data')
    weight = S.var("encoder_weight", stype='row_sparse')
    embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size,
                               output_dim=num_embed, name='embed', sparse_grad=True)
    states = []
    outputs = S.Dropout(embed, p=dropout)
    for i in range(num_layers):
        prefix = 'lstmp%d_' % i
        init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero())
        init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero())
        state_names += [prefix + 'init_h', prefix + 'init_c']
        lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix)
        outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c],
                                            layout='NTC', merge_outputs=True)
        outputs = S.Dropout(outputs, p=dropout)
        states += [S.stop_gradient(s) for s in next_states]
    outputs = S.reshape(outputs, shape=(-1, num_proj))

    trainable_lstm_args = []
    for arg in outputs.list_arguments():
        if 'lstmp' in arg and 'init' not in arg:
            trainable_lstm_args.append(arg)

    return outputs, states, trainable_lstm_args, state_names
python
def convert(self, value, param, ctx):
    """
    ParamType.convert() is the actual processing method that takes a
    provided parameter and parses it.
    """
    # passthrough conditions: None or already processed
    if value is None or isinstance(value, tuple):
        return value

    # split the value on the first colon, leave the rest intact
    splitval = value.split(":", 1)

    # first element is the endpoint_id
    endpoint_id = click.UUID(splitval[0])

    # get the second element, defaulting to `None` if there was no colon in
    # the original value
    try:
        path = splitval[1]
    except IndexError:
        path = None
    # coerce path="" to path=None
    # means that we treat "endpoint_id" and "endpoint_id:" equivalently
    path = path or None

    if path is None and self.path_required:
        self.fail("The path component is required", param=param)

    return (endpoint_id, path)
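The colon-splitting rule in isolation (standalone; the UUID is made up):
python
import uuid

def split_endpoint(value):
    # Split "<endpoint_id>[:<path>]" on the first colon only.
    endpoint_id, _, path = value.partition(":")
    return uuid.UUID(endpoint_id), (path or None)

print(split_endpoint("0cf7a53f-0000-0000-0000-000000000000:/data/file.txt"))
print(split_endpoint("0cf7a53f-0000-0000-0000-000000000000:"))  # path coerced to None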
python
def _escape_parameters(self, char):
    """
    Parse parameters in an escape sequence. Parameters are a list of
    numbers in ascii (e.g. '12', '4', '42', etc) separated by a semicolon
    (e.g. "12;4;42").

    See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more
    details on the formatting of escape parameters.
    """
    if char == ";":
        self.params.append(int(self.current_param))
        self.current_param = ""
    elif char == "?":
        self.state = "mode"
    elif not char.isdigit():
        if len(self.current_param) > 0:
            self.params.append(int(self.current_param))
        # If we're in parameter parsing mode, but we see a non-numeric
        # value, it must be the end of the control sequence.
        self._end_escape_sequence(char)
    else:
        self.current_param += char
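The accumulation logic reduced to a standalone parser for a parameter string such as '12;4;42m', where the trailing 'm' stands in for the final command byte:
python
def parse_params(seq):
    params, current = [], ""
    for char in seq:
        if char == ";":            # separator: commit the number so far
            params.append(int(current))
            current = ""
        elif char.isdigit():       # accumulate digits
            current += char
        else:                      # any other byte ends the sequence
            if current:
                params.append(int(current))
            return params, char
    return params, None

print(parse_params("12;4;42m"))  # ([12, 4, 42], 'm')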
java
public static final void writeScript(Writer writer) throws IOException {
    String newline = IO.newline();
    boolean isWindows = System.getProperty("os.name").startsWith("Windows");

    String variablePrefix = "$C";
    String variableSuffix = File.pathSeparator;

    if (isWindows) {
        variablePrefix = "%C";
        variableSuffix = "%" + File.pathSeparator;
    }

    String setSyntax = isWindows ? "@set " : "export ";

    String classpath = System.getProperty(CLASSPATH_PROP);
    String cpath = null;

    HashMap<String, HashSet<Object>> map = new HashMap<String, HashSet<Object>>();
    String folderPath = null;
    HashSet<Object> folderPathClassPaths = null;

    for (StringTokenizer tok = new StringTokenizer(classpath, File.pathSeparator); tok.hasMoreTokens();) {
        // get path
        cpath = tok.nextToken();
        folderPath = IO.parseFolderPath(cpath);
        //System.out.println("folderPath="+folderPath);

        // get list for folder path
        folderPathClassPaths = (HashSet<Object>) map.get(folderPath);

        // create if needed
        if (folderPathClassPaths == null) {
            folderPathClassPaths = new HashSet<Object>();
        }

        folderPathClassPaths.add(cpath);

        // put in map
        map.put(folderPath, folderPathClassPaths);
    }

    // loop thru keys in map
    int cnt = 0;
    for (Map.Entry<String, HashSet<Object>> entry : map.entrySet()) {
        folderPath = entry.getKey();

        // get list of classpaths
        folderPathClassPaths = entry.getValue();

        // write variable
        writer.write(IO.newline());
        writer.write(setSyntax);
        writer.write(new StringBuilder(" C").append(cnt).append("=").toString());

        // loop thru paths
        int printedCnt = 0;
        String line = null;
        for (Iterator<Object> pathI = folderPathClassPaths.iterator(); pathI.hasNext();) {
            // limit number of entries per path
            if (printedCnt > limitPerPath) {
                // increment
                cnt++;
                writer.write(newline);
                writer.write(new StringBuilder(setSyntax).append(" C").append(cnt).append("=").toString());
                printedCnt = 0;
            }

            line = pathI.next().toString();
            writer.write(line);
            printedCnt += line.length();
            writer.write(File.pathSeparator);
        }
        writer.write(newline);
        writer.write(newline);
        writer.flush();
    }

    // print classpath
    writer.write(setSyntax);
    writer.write(" CLASSPATH=");
    for (int i = 0; i < cnt; i++) {
        writer.write(variablePrefix + i);
        writer.write(variableSuffix);
    }
    writer.write(newline);
    writer.write(newline);
    writer.write("java <CLASS> <ARG>");
    writer.flush();
    writer.write(newline);
    writer.write("java junit.textui.TestRunner <CLASS> <ARG>");
    writer.write(newline);
    writer.flush();

    //System.out.println("File.pathSeparator="+File.pathSeparator);
    //System.out.println("File.separator="+File.separator);
}
java
public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException { this.size = in.readLong(); this.nodeId = in.readInt(); }
python
def computer_desc(name): ''' Manage the computer's description field name The desired computer description ''' # Just in case someone decides to enter a numeric description name = six.text_type(name) ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Computer description already set to \'{0}\''.format(name)} before_desc = __salt__['system.get_computer_desc']() if before_desc == name: return ret if __opts__['test']: ret['result'] = None ret['comment'] = ('Computer description will be changed to \'{0}\'' .format(name)) return ret result = __salt__['system.set_computer_desc'](name) if result['Computer Description'] == name: ret['comment'] = ('Computer description successfully changed to \'{0}\'' .format(name)) ret['changes'] = {'old': before_desc, 'new': name} else: ret['result'] = False ret['comment'] = ('Unable to set computer description to ' '\'{0}\''.format(name)) return ret
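The check/test/apply/verify flow above is the standard shape of a Salt state function; here is a generic sketch of the same pattern with the execution-module calls abstracted into plain callables (all names are illustrative, not Salt APIs):

def managed_value_state(name, get_current, set_value, test=False):
    ret = {'name': name, 'changes': {}, 'result': True,
           'comment': "already set to '{0}'".format(name)}
    before = get_current()
    if before == name:
        return ret                        # nothing to do
    if test:
        ret['result'] = None              # None signals "would change" in test mode
        ret['comment'] = "would be changed to '{0}'".format(name)
        return ret
    if set_value(name):
        ret['comment'] = "changed to '{0}'".format(name)
        ret['changes'] = {'old': before, 'new': name}
    else:
        ret['result'] = False
        ret['comment'] = "unable to set '{0}'".format(name)
    return ret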
java
protected boolean validateLayout(Dimension dim) { if (majorAxis == X_AXIS) { majorRequest = getRequirements(X_AXIS, majorRequest); minorRequest = getRequirements(Y_AXIS, minorRequest); oldDimension.setSize(majorRequest.preferred, minorRequest.preferred); } else { majorRequest = getRequirements(Y_AXIS, majorRequest); minorRequest = getRequirements(X_AXIS, minorRequest); oldDimension.setSize(minorRequest.preferred, majorRequest.preferred); } majorReqValid = true; minorReqValid = true; majorAllocValid = true; minorAllocValid = true; return false; }
java
@Benchmark @BenchmarkMode(Mode.Throughput) public StackFrame stackWalkerWithLambda() { return StackWalker.getInstance().walk(stream -> stream.skip(1).findFirst().get()); }
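The closest Python analogue of skipping one frame and taking the first remaining one is sys._getframe with a depth argument (CPython-specific, hence the leading underscore; a sketch, not a benchmark):

import sys

def caller_frame():
    # depth 0 is this function, 1 is its caller (the walk() analogue),
    # 2 is the frame the Java stream.skip(1).findFirst() would return
    return sys._getframe(2)

def outer():
    return caller_frame()

# calling outer() from module level returns the module's frame object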
python
def get_weather(test=False): """ Returns weather reports from the dataset. """ if _Constants._TEST or test: rows = _Constants._DATABASE.execute("SELECT data FROM weather LIMIT {hardware}".format( hardware=_Constants._HARDWARE)) else: rows = _Constants._DATABASE.execute("SELECT data FROM weather") data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data)
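A quick, self-contained illustration of the unpack-and-decode steps, assuming the database rows arrive as 1-tuples holding JSON strings:

import json

rows = [('{"temp": 71}',), ('{"temp": 64}',)]
data = [json.loads(r[0]) for r in rows]
# data == [{'temp': 71}, {'temp': 64}]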
python
def update(self): ''' Updates LaserData. ''' if self.hasproxy(): laserD = LaserData() data = self.proxy.getLaserData() # scale raw readings by 1/1000 (mm -> m) laserD.values = [data.distanceData[i] / 1000.0 for i in range(data.numLaser)] laserD.maxAngle = data.maxAngle laserD.minAngle = data.minAngle laserD.maxRange = data.maxRange laserD.minRange = data.minRange with self.lock: self.laser = laserD
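The method builds the new reading outside the lock and only swaps the reference while holding it; a generic sketch of that pattern:

import threading

class LatestValue:
    def __init__(self):
        self._lock = threading.Lock()
        self._value = None

    def publish(self, raw):
        processed = [x / 1000.0 for x in raw]  # do the work before locking
        with self._lock:                       # hold the lock only for the swap
            self._value = processed

    def read(self):
        with self._lock:
            return self._value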
java
private static void print(Metadata metadata, String method) { System.out.println(); System.out.println("-------------------------------------------------"); System.out.print(' '); System.out.println(method); System.out.println("-------------------------------------------------"); System.out.println(); // // A Metadata object contains multiple Directory objects // for (Directory directory : metadata.getDirectories()) { // // Each Directory stores values in Tag objects // for (Tag tag : directory.getTags()) { System.out.println(tag); } // // Each Directory may also contain error messages // for (String error : directory.getErrors()) { System.err.println("ERROR: " + error); } } }
python
def get_gateway_id(self): """Return a unique id for the gateway.""" host, _ = self.server_address try: ip_address = ipaddress.ip_address(host) except ValueError: # Only hosts addressed by IP support a unique id. return None if ip_address.version == 6: mac = get_mac_address(ip6=host) else: mac = get_mac_address(ip=host) return mac
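Usage sketch, assuming the get_mac_address above comes from the getmac package (which accepts ip= and ip6= keyword arguments); the address is illustrative:

import ipaddress
from getmac import get_mac_address

host = "192.168.1.10"
ip = ipaddress.ip_address(host)
mac = get_mac_address(ip6=host) if ip.version == 6 else get_mac_address(ip=host)
# mac is a string like "aa:bb:cc:dd:ee:ff", or None if the lookup fails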
java
@Override Transformers.ResourceIgnoredTransformationRegistry createRegistry(OperationContext context, Resource remote, Set<String> remoteExtensions) { final ReadMasterDomainModelUtil.RequiredConfigurationHolder rc = new ReadMasterDomainModelUtil.RequiredConfigurationHolder(); final PathElement host = PathElement.pathElement(HOST, localHostName); final Resource hostModel = context.readResourceFromRoot(PathAddress.EMPTY_ADDRESS.append(host)); final Resource original = this.originalModel; // Process the required configuration using the remote model, to include content which may not be available locally ReadMasterDomainModelUtil.processHostModel(rc, remote, hostModel, parameters.getExtensionRegistry()); // Process the original model as well ReadMasterDomainModelUtil.processHostModel(rc, original, original.getChild(host), parameters.getExtensionRegistry()); final Transformers.ResourceIgnoredTransformationRegistry delegate = new Transformers.ResourceIgnoredTransformationRegistry() { @Override public boolean isResourceTransformationIgnored(PathAddress address) { return parameters.getIgnoredResourceRegistry().isResourceExcluded(address); } }; return ReadMasterDomainModelUtil.createServerIgnoredRegistry(rc, delegate); }
python
def image_predict_proba(self, X): """ Predicts class probabilities for the entire image. Parameters: ----------- X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands] Array of images to classify Returns: -------- probabilities: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_classes] Per-pixel class probabilities """ self._check_image(X) # flatten each image into a list of pixels, keeping the band axis if present new_shape = (X.shape[0] * X.shape[1] * X.shape[2],) if len(X.shape) == 4: new_shape += (X.shape[3],) pixels = X.reshape(new_shape) probabilities = self.classifier.predict_proba(self._transform_input(pixels)) # restore the image dimensions, with one probability per class in the last axis return probabilities.reshape(X.shape[0], X.shape[1], X.shape[2], probabilities.shape[1])
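The reshape round-trip in isolation, with numpy only and illustrative shapes (the random arrays stand in for real images and classifier output):

import numpy as np

X = np.random.rand(2, 4, 4, 3)              # 2 images, 4x4 pixels, 3 bands
pixels = X.reshape(2 * 4 * 4, 3)            # one row per pixel
proba = np.random.rand(pixels.shape[0], 5)  # stand-in for predict_proba, 5 classes
proba_img = proba.reshape(2, 4, 4, 5)       # back to image layout, classes last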
python
def bind_socket(self, config): """ :meth:`.WNetworkNativeTransportProto.bind_socket` method implementation """ address = config[self.__bind_socket_config.section][self.__bind_socket_config.address_option] port = config.getint(self.__bind_socket_config.section, self.__bind_socket_config.port_option) return WIPV4SocketInfo(address, port)
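How the two lookup styles behave against a plain configparser (section and option names here are made up; WIPV4SocketInfo simply pairs the results):

import configparser

config = configparser.ConfigParser()
config.read_string("[bind]\naddress = 0.0.0.0\nport = 8080\n")

address = config["bind"]["address"]    # mapping access -> "0.0.0.0" (str)
port = config.getint("bind", "port")   # typed accessor -> 8080 (int)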