language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def transform_symmop(self, symmop):
    # type: (Union[SymmOp, MagSymmOp]) -> Union[SymmOp, MagSymmOp]
    """
    Takes a symmetry operation and transforms it.
    :param symmop: SymmOp or MagSymmOp
    :return:
    """
    W = symmop.rotation_matrix
    w = symmop.translation_vector
    Q = np.linalg.inv(self.P)
    W_ = np.matmul(np.matmul(Q, W), self.P)
    I = np.identity(3)
    w_ = np.matmul(Q, (w + np.matmul(W - I, self.p)))
    if isinstance(symmop, MagSymmOp):
        return MagSymmOp.from_rotation_and_translation_and_time_reversal(
            rotation_matrix=W_, translation_vec=w_,
            time_reversal=symmop.time_reversal, tol=symmop.tol)
    elif isinstance(symmop, SymmOp):
        return SymmOp.from_rotation_and_translation(
            rotation_matrix=W_, translation_vec=w_, tol=symmop.tol)
python
def setUser(request):
    """In standalone mode, change the current user"""
    if not settings.PIAPI_STANDALONE or settings.PIAPI_REALUSERS:
        raise Http404
    request.session['plugit-standalone-usermode'] = request.GET.get('mode')
    return HttpResponse('')
java
@Override
public java.util.List<com.liferay.commerce.model.CommerceAddressRestriction>
        getCommerceAddressRestrictions(int start, int end) {
    return _commerceAddressRestrictionLocalService.getCommerceAddressRestrictions(start, end);
}
java
public static void makeAccessible(Constructor<?> ctor) {
    if ((!Modifier.isPublic(ctor.getModifiers()) ||
            !Modifier.isPublic(ctor.getDeclaringClass().getModifiers())) && !ctor.isAccessible()) {
        ctor.setAccessible(true);
    }
}
python
def init_app(self, app):
    """Flask application initialization.

    :param app: The Flask application.
    :returns: The :class:`invenio_pages.ext.InvenioPages` instance
        initialized.
    """
    self.init_config(app)
    self.wrap_errorhandler(app)
    app.extensions['invenio-pages'] = _InvenioPagesState(app)
    return app.extensions['invenio-pages']
java
Stream<String> writeMeter(Meter meter) {
    // Snapshot values should be used throughout this method as there are
    // chances for values to be changed in-between.
    List<Attribute> attributes = new ArrayList<>();
    for (Measurement measurement : meter.measure()) {
        double value = measurement.getValue();
        if (!Double.isFinite(value)) {
            continue;
        }
        attributes.add(new Attribute(measurement.getStatistic().getTagValueRepresentation(), value));
    }
    if (attributes.isEmpty()) {
        return Stream.empty();
    }
    return Stream.of(event(meter.getId(), attributes.toArray(new Attribute[0])));
}
python
def track(context, file_names):
    """Keep track of each file in list file_names.

    Tracking does not create or delete the actual file, it only tells the
    version control system whether to maintain versions (to keep track) of
    the file.
    """
    context.obj.find_repo_type()
    for fn in file_names:
        context.obj.call([context.obj.vc_name, 'add', fn])
python
def MI_get_item(self, key, index=0):
    'return list of item'
    index = _key_to_index_single(force_list(self.indices.keys()), index)
    if index != 0:
        key = self.indices[index][key]  # always use first index key
    # key must exist
    value = super(MIMapping, self).__getitem__(key)
    N = len(self.indices)
    if N == 1:
        return [key]
    if N == 2:
        value = [value]
    return [key] + value
python
def _isomorphism_rewrite_to_NECtree(q_s, qgraph):
    """ Neighborhood Equivalence Class tree (see Turbo_ISO paper) """
    qadj = qgraph.adj
    adjsets = lambda x: set(chain.from_iterable(qadj[x].values()))
    t = ([q_s], [])  # (NEC_set, children)
    visited = {q_s}
    vcur, vnext = [t], []
    while vcur:
        for (nec, children) in vcur:
            c = defaultdict(list)
            for u in nec:
                for sig, adjlist in qadj[u].items():
                    c[sig].extend(x for x, _, _ in adjlist if x not in visited)
            for sig, c_adjlist in c.items():
                visited.update(c_adjlist)
                # these are already grouped by label; now group by adjacents
                for key, grp in groupby(c_adjlist, key=adjsets):
                    grp = list(grp)
                    if len(grp) > 1:
                        children.append((list(grp), []))
                    else:
                        # NOTE: the paper says to look for mergeable things,
                        # but I don't know what else to merge by.
                        children.append((list(grp), []))
            vnext.extend(children)
        vcur, vnext = vnext, []
    return t
java
private static OptionalEntity<StemmerOverrideItem> getEntity(final CreateForm form) {
    switch (form.crudMode) {
    case CrudMode.CREATE:
        final StemmerOverrideItem entity = new StemmerOverrideItem(0, StringUtil.EMPTY, StringUtil.EMPTY);
        return OptionalEntity.of(entity);
    case CrudMode.EDIT:
        if (form instanceof EditForm) {
            return ComponentUtil.getComponent(StemmerOverrideService.class)
                    .getStemmerOverrideItem(form.dictId, ((EditForm) form).id);
        }
        break;
    default:
        break;
    }
    return OptionalEntity.empty();
}
java
private void doWakeUpWorker() {
    if (idleWorkers.get() == 0) {
        synchronized (workers) {
            if (workers.size() >= getMaximumPoolSize()) {
                return;
            }
            if (workers.isEmpty() || (idleWorkers.get() == 0)) {
                addWorkerUnsafe();
            }
        }
    }
}
python
def importPreflibFile(self, fileName):
    """
    Imports a preflib format file that contains all the information of a Profile.
    This function will completely override all members of the current Profile
    object. Currently, we assume that in an election where incomplete orderings
    are allowed, if a voter ranks only one candidate, then the voter did not
    prefer any candidates over another. This may lead to some discrepancies when
    importing and exporting a .toi preflib file or a .soi preflib file.

    :ivar str fileName: The name of the input file to be imported.
    """

    # Use the functionality found in io to read the file.
    elecFileObj = open(fileName, 'r')
    self.candMap, rankMaps, wmgMapsCounts, self.numVoters = prefpy_io.read_election_file(elecFileObj)
    elecFileObj.close()
    self.numCands = len(self.candMap.keys())

    # Go through the rankMaps and generate a wmgMap for each vote. Use the wmgMap
    # to create a Preference object.
    self.preferences = []
    for i in range(0, len(rankMaps)):
        wmgMap = self.genWmgMapFromRankMap(rankMaps[i])
        self.preferences.append(Preference(wmgMap, wmgMapsCounts[i]))
python
def insert(self, s):
    '''
    Insert string @s at the current cursor location.
    '''
    for c in s:
        self.text.insert(self.cursor_loc, c)
        self.cursor_loc += 1
java
@Override
public SetIdentityMailFromDomainResult setIdentityMailFromDomain(SetIdentityMailFromDomainRequest request) {
    request = beforeClientExecution(request);
    return executeSetIdentityMailFromDomain(request);
}
java
public boolean data_setCookie(Integer userId, CharSequence cookieName, CharSequence cookieValue,
        Long expiresTimestamp, CharSequence path) throws FacebookException, IOException {
    if (null == userId || 0 >= userId)
        throw new IllegalArgumentException("userId should be provided.");
    if (null == cookieName || null == cookieValue)
        throw new IllegalArgumentException("cookieName and cookieValue should be provided.");
    ArrayList<Pair<String, CharSequence>> params =
            new ArrayList<Pair<String, CharSequence>>(FacebookMethod.DATA_GET_COOKIES.numParams());
    params.add(new Pair<String, CharSequence>("uid", userId.toString()));
    params.add(new Pair<String, CharSequence>("name", cookieName));
    params.add(new Pair<String, CharSequence>("value", cookieValue));
    if (null != expiresTimestamp && expiresTimestamp >= 0L)
        params.add(new Pair<String, CharSequence>("expires", expiresTimestamp.toString()));
    if (null != path)
        params.add(new Pair<String, CharSequence>("path", path));
    return extractBoolean(this.callMethod(FacebookMethod.DATA_GET_COOKIES, params));
}
python
def get_predictions_under_minimal_repair(instance, repair_options, optimum):
    '''
    Computes the set of signs on edges/vertices that can be cautiously
    derived from [instance], minus those that are a direct consequence
    of obs_[ev]label predicates
    '''
    inst = instance.to_file()
    repops = repair_options.to_file()
    prg = [inst, repops, prediction_core_prg, repair_cardinality_prg]
    options = '--project --enum-mode cautious --opt-mode=optN --opt-bound=' + str(optimum)
    solver = GringoClasp(clasp_options=options)
    models = solver.run(prg, collapseTerms=True, collapseAtoms=False)
    os.unlink(inst)
    os.unlink(repops)
    return whatsnew(instance, models[0])
java
public final MessageSerializer getDefaultSerializer() {
    // Construct a default serializer if we don't have one
    if (null == this.defaultSerializer) {
        // Don't grab a lock unless we absolutely need it
        synchronized (this) {
            // Now we have a lock, double check there's still no serializer
            // and create one if so.
            if (null == this.defaultSerializer) {
                // As the serializers are intended to be immutable, creating
                // two due to a race condition should not be a problem, however
                // to be safe we ensure only one exists for each network.
                this.defaultSerializer = getSerializer(false);
            }
        }
    }
    return defaultSerializer;
}
python
def reflectance(self, band):
    """
    :param band: An optical band, i.e. 1-5, 7
    :return: At satellite reflectance, [-]
    """
    if band == 6:
        raise ValueError('LT5 reflectance must be other than band 6')
    rad = self.radiance(band)
    esun = self.ex_atm_irrad[band - 1]
    toa_reflect = (pi * rad * self.earth_sun_dist ** 2) / (esun * cos(self.solar_zenith_rad))
    return toa_reflect
java
public static <M extends Model> FXMLComponentBase loadFXML(final M model, final String fxmlPath) {
    return loadFXML(model, fxmlPath, null);
}
java
public RESTResponse runCommand(Command command) {
    if (this.applicationName != null) {
        command.setApplicationName(this.applicationName);
    }
    if (this.storageService != null) {
        command.setStorageService(this.storageService);
    }
    if (this.restMetadataJson != null) {
        command.setRestMetadataJson(this.restMetadataJson);
    }
    command.validate(restClient);
    return command.call(restClient);
}
java
@Override
protected boolean internalEquals(ValueData another) {
    if (another instanceof StreamPersistedValueData) {
        StreamPersistedValueData streamValue = (StreamPersistedValueData) another;
        if (file != null && file.equals(streamValue.file)) {
            return true;
        } else if (tempFile != null && tempFile.equals(streamValue.tempFile)) {
            return true;
        } else if (stream != null && stream == streamValue.stream) {
            return true;
        } else if (url != null && streamValue.url != null && url.getFile().equals(streamValue.url.getFile())) {
            return true;
        }
    }
    return false;
}
java
public Object getEnumValue(String name) {
    if (this.kind != Kind.ENUM)
        throw new RuntimeException("getEnumValue(..) can only be called on an enum");
    Object[] vals = getEnumValues();
    if (vals != null) {
        for (Object val : vals) {
            if (((Enum<?>) val).name().equals(name))
                return val;
        }
    }
    return null;
}
python
def element_not_contains(self, element_id, value):
    """
    Assert provided content is not contained within an element found by ``id``.
    """
    elem = world.browser.find_elements_by_xpath(str(
        'id("{id}")[contains(., "{value}")]'.format(
            id=element_id, value=value)))
    assert not elem, \
        "Expected element not to contain the given text."
java
public OvhPrivateLinkRoute serviceName_privateLink_peerServiceName_route_network_GET(String serviceName,
        String peerServiceName, String network) throws IOException {
    String qPath = "/router/{serviceName}/privateLink/{peerServiceName}/route/{network}";
    StringBuilder sb = path(qPath, serviceName, peerServiceName, network);
    String resp = exec(qPath, "GET", sb.toString(), null);
    return convertTo(resp, OvhPrivateLinkRoute.class);
}
java
private static void exportSymbols(SymbolTable syms, String filename) {
    if (syms == null) {
        return;
    }
    try (PrintWriter out = new PrintWriter(new FileWriter(filename))) {
        for (ObjectIntCursor<String> sym : syms) {
            out.println(sym.key + "\t" + sym.value);
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
java
@Override
public Vector<Object> marshallize() {
    Vector<Object> vector = super.marshallize();
    // replace the URL set by DocumentNode. This is to maintain backward
    // compatibility. Shifting the indexes would break everything.
    String url = null;
    if (vector.size() == 5) {
        url = (String) vector.set(NODE_REPOSITORY_UID_INDEX, repositoryUID);
    } else {
        vector.add(NODE_REPOSITORY_UID_INDEX, repositoryUID);
    }
    vector.add(NODE_SUT_NAME_INDEX, sutName);
    vector.add(NODE_SECTION_INDEX, StringUtils.defaultString(section));
    if (isNotBlank(url)) {
        vector.add(NODE_URL_INDEX, url);
    }
    return vector;
}
python
def clustering_coef_bu(G):
    '''
    The clustering coefficient is the fraction of triangles around a node
    (equiv. the fraction of a node's neighbors that are neighbors of each
    other).

    Parameters
    ----------
    G : NxN np.ndarray
        binary undirected connection matrix

    Returns
    -------
    C : Nx1 np.ndarray
        clustering coefficient vector
    '''
    n = len(G)
    C = np.zeros((n,))

    for u in range(n):
        V, = np.where(G[u, :])
        k = len(V)
        if k >= 2:  # degree must be at least 2
            S = G[np.ix_(V, V)]
            C[u] = np.sum(S) / (k * k - k)

    return C
java
protected boolean executeGenerator() throws MojoExecutionException {
    // Add resources and output directory to plugins classpath
    List<Object> classpathEntries = new ArrayList<Object>();
    classpathEntries.addAll(project.getResources());
    classpathEntries.add(project.getBuild().getOutputDirectory());
    extendPluginClasspath(classpathEntries);

    // Prepare properties for the code generator
    Properties generatorProperties = new Properties();

    // Set properties defined in the plugin's configuration
    if (properties != null) {
        for (String key : properties.keySet()) {
            generatorProperties.setProperty(key, properties.get(key));
        }
    }

    // Set properties with output slot paths
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_SRC", outletSrcOnceDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_RESOURCES", outletResOnceDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_GEN_SRC", outletSrcDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_GEN_RESOURCES", outletResDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_WEBROOT", outletWebrootDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_SRC_TEST", outletSrcTestOnceDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_RESOURCES_TEST", outletResTestOnceDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_GEN_SRC_TEST", outletSrcTestDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_GEN_RESOURCES_TEST", outletResTestDir.toString());
    generatorProperties.setProperty(OUTPUT_SLOT_PATH_PREFIX + "TO_DOC", outletDocDir.toString());

    // Execute commandline and retrieve list of generated files
    List<File> generatedFiles = doRunGenerator(generatorProperties);

    if (generatedFiles != null) {
        // If the code generation succeeded then write status file
        // (and refresh Eclipse workspace) else delete generated files
        if (isVerbose()) {
            for (File generatedFile : generatedFiles) {
                getLog().info("Generated: " + getProjectRelativePath(generatedFile));
            }
        }
        updateStatusFile(generatedFiles);
        if (generatedFiles.size() > 0) {
            refreshEclipseWorkspace();
        }
        getLog().info("Generated " + generatedFiles.size() + " files");
        return true;
    } else {
        getLog().error("Executing generator workflow failed");
    }
    return false;
}
java
public static String dateFormat(long ts, String format, TimeZone tz) {
    SimpleDateFormat formatter = FORMATTER_CACHE.get(format);
    formatter.setTimeZone(tz);
    Date dateTime = new Date(ts);
    return formatter.format(dateTime);
}
java
public void setWhitelistRules(java.util.Collection<InputWhitelistRuleCidr> whitelistRules) {
    if (whitelistRules == null) {
        this.whitelistRules = null;
        return;
    }
    this.whitelistRules = new java.util.ArrayList<InputWhitelistRuleCidr>(whitelistRules);
}
python
def configure(self, transport, auth, address, port):
    """
    Connect paramiko transport

    :param transport: paramiko transport object
    :type auth: :py:class:`margaritashotgun.auth.AuthMethods`
    :param auth: authentication object
    :type address: str
    :param address: remote server ip or hostname
    :type port: int
    :param port: remote server port
    """
    self.transport = transport
    self.username = auth.username
    self.address = address
    self.port = port
java
public XMLString fixWhiteSpace(boolean trimHead, boolean trimTail, boolean doublePunctuationSpaces) {
    // %OPT% !!!!!!!
    int len = this.length();
    char[] buf = new char[len];
    this.getChars(0, len, buf, 0);
    boolean edit = false;
    int s;
    for (s = 0; s < len; s++) {
        if (isSpace(buf[s])) {
            break;
        }
    }
    /* replace S to ' '. and ' '+ -> single ' '. */
    int d = s;
    boolean pres = false;
    for (; s < len; s++) {
        char c = buf[s];
        if (isSpace(c)) {
            if (!pres) {
                if (' ' != c) {
                    edit = true;
                }
                buf[d++] = ' ';
                if (doublePunctuationSpaces && (s != 0)) {
                    char prevChar = buf[s - 1];
                    if (!((prevChar == '.') || (prevChar == '!') || (prevChar == '?'))) {
                        pres = true;
                    }
                } else {
                    pres = true;
                }
            } else {
                edit = true;
                pres = true;
            }
        } else {
            buf[d++] = c;
            pres = false;
        }
    }
    if (trimTail && 1 <= d && ' ' == buf[d - 1]) {
        edit = true;
        d--;
    }
    int start = 0;
    if (trimHead && 0 < d && ' ' == buf[0]) {
        edit = true;
        start++;
    }
    XMLStringFactory xsf = XMLStringFactoryImpl.getFactory();
    return edit ? xsf.newstr(new String(buf, start, d - start)) : this;
}
python
def sample_distinct(self, n_to_sample, **kwargs):
    """Sample a sequence of items from the pool until a minimum number of
    distinct items are queried

    Parameters
    ----------
    n_to_sample : int
        number of distinct items to sample. If sampling with replacement,
        this number is not necessarily the same as the number of
        iterations.
    """
    # Record how many distinct items have not yet been sampled
    n_notsampled = np.sum(np.isnan(self.cached_labels_))

    if n_notsampled == 0:
        raise Exception("All distinct items have already been sampled.")

    if n_to_sample > n_notsampled:
        warnings.warn("Only {} distinct item(s) have not yet been sampled."
                      " Setting n_to_sample = {}.".format(n_notsampled,
                                                          n_notsampled))
        n_to_sample = n_notsampled

    n_sampled = 0  # number of distinct items sampled this round
    while n_sampled < n_to_sample:
        self.sample(1, **kwargs)
        n_sampled += self._queried_oracle[self.t_ - 1] * 1
java
private Iterator<Vector3> getConvexHullVerticesIterator() {
    List<Vector3> ans = new ArrayList<Vector3>();
    Triangle curr = this.startTriangleHull;
    boolean cont = true;
    double x0 = bbMin.x, x1 = bbMax.x;
    double y0 = bbMin.y, y1 = bbMax.y;
    boolean sx, sy;
    while (cont) {
        sx = curr.p1().x == x0 || curr.p1().x == x1;
        sy = curr.p1().y == y0 || curr.p1().y == y1;
        if ((sx && sy) || (!sx && !sy)) {
            ans.add(curr.p1());
        }
        if (curr.bcnext != null && curr.bcnext.halfplane)
            curr = curr.bcnext;
        if (curr == this.startTriangleHull)
            cont = false;
    }
    return ans.iterator();
}
python
def raw_repr(obj):
    '''Produce a representation using the default repr() regardless of
    whether the object provides an implementation of its own.'''
    if isproxy(obj):
        return '<%s with prime_id=%d>' % (obj.__class__.__name__, obj.prime_id)
    else:
        return repr(obj)
java
public String next(String ignoreZone) {
    if (entry == null)
        return null;
    entry = entry.next;
    if (entry.element.equals(ignoreZone)) {
        return entry.next.element;
    } else {
        return entry.element;
    }
}
java
@Override
public List<Scope> getScopesByCollector(ObjectId collectorId) {
    List<Scope> scopes = scopeRepository.findByCollectorId(collectorId);
    // clean up needed for < > characters
    for (Scope scope : scopes) {
        scope.setName(scope.getName().replaceAll("[<>]", ""));
        scope.setProjectPath(scope.getProjectPath().replaceAll("[<>]", ""));
    }
    return scopes;
}
java
private void registerServletDescriptor(ServletDescriptor servletDescriptor) {
    try {
        servletDescriptor.register(httpService);
    } catch (RuntimeException e) {
        LOG.error(
            "Registration of ServletDescriptor under mountpoint {} fails with unexpected RuntimeException!",
            servletDescriptor.getAlias(), e);
    } catch (ServletException e) {
        LOG.error(
            "Unable to mount servlet on mount point '{}', either it was already registered under the same alias or the init method throws an exception",
            servletDescriptor.getAlias(), e);
    } catch (NamespaceException e) {
        LOG.error(
            "Unable to mount servlet on mount point '{}', another resource is already bound to this alias",
            servletDescriptor.getAlias(), e);
    }
}
python
def add_routes(meteor_app, url_path=''):
    """
    Adds search and retrieval routes to a :class:`meteorpi_server.MeteorServer` instance

    :param MeteorApp meteor_app:
        The :class:`meteorpi_server.MeteorApp` to which routes should be added
    :param string url_path:
        The base path used for the query routes, defaults to ''
    """
    from meteorpi_server import MeteorApp
    app = meteor_app.app

    @app.after_request
    def after_request(response):
        response.headers.add('Accept-Ranges', 'bytes')
        return response

    # Return a list of all of the observatories which are registered in this repository
    # A dictionary of basic information is returned for each
    @app.route('{0}/obstories'.format(url_path), methods=['GET'])
    def get_obstories():
        db = meteor_app.get_db()
        obstories = db.get_obstory_ids()
        output = {}
        for o in obstories:
            output[o] = db.get_obstory_from_id(o)
            db.con.execute("SELECT m.time FROM archive_metadata m "
                           "INNER JOIN archive_observatories l ON m.observatory = l.uid "
                           "AND l.publicId = %s AND m.time>0 "
                           "ORDER BY m.time ASC LIMIT 1", (o,))
            first_seen = 0
            results = db.con.fetchall()
            if results:
                first_seen = results[0]['time']
            db.con.execute("SELECT m.time FROM archive_metadata m "
                           "INNER JOIN archive_observatories l ON m.observatory = l.uid "
                           "AND l.publicId = %s AND m.time>0 "
                           "ORDER BY m.time DESC LIMIT 1", (o,))
            last_seen = 0
            results = db.con.fetchall()
            if results:
                last_seen = results[0]['time']
            output[o]['firstSeen'] = first_seen
            output[o]['lastSeen'] = last_seen
        db.close_db()
        return jsonify(output)

    # Return a list of all of the metadata tags which have ever been set on a
    # particular observatory, with time stamp
    @app.route('{0}/obstory/<obstory_id>/metadata'.format(url_path), methods=['GET'])
    def get_obstory_status_all(obstory_id):
        db = meteor_app.get_db()
        search = mp.ObservatoryMetadataSearch(obstory_ids=[obstory_id], time_min=0, time_max=time.time())
        data = db.search_obstory_metadata(search)['items']
        data.sort(key=lambda x: x.time)
        output = [[i.time, i.key, i.value] for i in data]
        db.close_db()
        return jsonify({'status': output})

    # Return a list of all of the metadata which was valid for a particular
    # observatory at a particular time
    @app.route('{0}/obstory/<obstory_id>/statusdict'.format(url_path), methods=['GET'])
    @app.route('{0}/obstory/<obstory_id>/statusdict/<unix_time>'.format(url_path), methods=['GET'])
    def get_obstory_status_by_time(obstory_id, unix_time=None):
        db = meteor_app.get_db()
        if unix_time is None:
            unix_time = time.time()
        status = {}
        try:
            obstory_info = db.get_obstory_from_id(obstory_id)
            if obstory_info:
                obstory_name = obstory_info['name']
                status = db.get_obstory_status(obstory_name=obstory_name, time=float(unix_time))
        except ValueError:
            return jsonify({'error': 'No such observatory "%s".' % obstory_id})
        db.close_db()
        return jsonify({'status': status})

    # Search for observations using a YAML search string
    @app.route('{0}/obs/<search_string>'.format(url_path), methods=['GET'], strict_slashes=True)
    def search_events(search_string):
        db = meteor_app.get_db()
        try:
            search = mp.ObservationSearch.from_dict(safe_load(unquote(search_string)))
        except ValueError:
            return jsonify({'error': str(sys.exc_info()[1])})
        observations = db.search_observations(search)
        db.close_db()
        return jsonify({'obs': list(x.as_dict() for x in observations['obs']),
                        'count': observations['count']})

    # Search for files using a YAML search string
    @app.route('{0}/files/<search_string>'.format(url_path), methods=['GET'])
    def search_files(search_string):
        db = meteor_app.get_db()
        try:
            search = mp.FileRecordSearch.from_dict(safe_load(unquote(search_string)))
        except ValueError:
            return jsonify({'error': str(sys.exc_info()[1])})
        files = db.search_files(search)
        db.close_db()
        return jsonify({'files': list(x.as_dict() for x in files['files']),
                        'count': files['count']})

    # Return a list of sky clarity measurements for a particular observatory (scale 0-100)
    @app.route('{0}/skyclarity/<obstory_id>/<utc_min>/<utc_max>/<period>'.format(url_path), methods=['GET'])
    def get_skyclarity(obstory_id, utc_min, utc_max, period):
        db = meteor_app.get_db()
        utc_min = float(utc_min)
        utc_max = float(utc_max)
        period = float(period)
        count = 0
        output = []
        while count < 250:
            a = utc_min + period * count
            b = a + period
            count += 1
            db.con.execute("SELECT m.floatValue FROM archive_metadata m "
                           "INNER JOIN archive_files f ON m.fileId = f.uid "
                           "INNER JOIN archive_semanticTypes fs ON f.semanticType = fs.uid "
                           "INNER JOIN archive_metadataFields mf ON m.fieldId = mf.uid "
                           "INNER JOIN archive_observations o ON f.observationId = o.uid "
                           "INNER JOIN archive_observatories l ON o.observatory = l.uid "
                           "WHERE mf.metaKey='meteorpi:skyClarity' "
                           "AND l.publicId = %s "
                           "AND fs.name='meteorpi:timelapse/frame/bgrdSub/lensCorr' "
                           "AND f.fileTime>%s AND f.fileTime<%s "
                           "LIMIT 250", (obstory_id, a, b))
            results = db.con.fetchall()
            if len(results) > 0:
                output.append(sum([i['floatValue'] for i in results]) / len(results))
            else:
                output.append(0)
            if b >= utc_max:
                break
        db.close_db()
        return jsonify(output)

    # Return a list of the number of observations of a particular type in a sequence
    # of time intervals between utc_min and utc_max, with step size period
    @app.route('{0}/activity/<obstory_id>/<semantic_type>/<utc_min>/<utc_max>/<period>'.format(url_path),
               methods=['GET'])
    def get_activity(obstory_id, semantic_type, utc_min, utc_max, period):
        db = meteor_app.get_db()
        utc_min = float(utc_min)
        utc_max = float(utc_max)
        period = float(period)
        count = 0
        output = []
        while count < 250:
            a = utc_min + period * count
            b = a + period
            count += 1
            db.con.execute("SELECT COUNT(*) FROM archive_observations o "
                           "INNER JOIN archive_observatories l ON o.observatory = l.uid "
                           "INNER JOIN archive_semanticTypes s ON o.obsType = s.uid "
                           "WHERE l.publicId=%s AND s.name=%s AND o.obsTime>=%s AND o.obsTime<%s LIMIT 1",
                           (obstory_id, semantic_type, a, b))
            output.append(db.con.fetchone()['COUNT(*)'])
            if b >= utc_max:
                break
        db.close_db()
        return jsonify({"activity": output})

    # Return a thumbnail version of an image
    @app.route('{0}/thumbnail/<file_id>/<file_name>'.format(url_path), methods=['GET'])
    def get_thumbnail(file_id, file_name):
        db = meteor_app.get_db()
        record = db.get_file(repository_fname=file_id)
        if record is None:
            db.close_db()
            return MeteorApp.not_found(entity_id=file_id)
        if record.mime_type != "image/png":
            db.close_db()
            return MeteorApp.not_found(entity_id=file_id)
        file_path = db.file_path_for_id(record.id)
        thumb_path = os.path.join(db.file_store_path, "../thumbnails", record.id)
        if not os.path.exists(thumb_path):
            resize_tool = os.path.join(meteor_app.binary_path, "resize")
            os.system("%s %s 220 %s" % (resize_tool, file_path, thumb_path))
        db.close_db()
        return send_file(filename_or_fp=thumb_path, mimetype=record.mime_type)

    # Return a file from the repository
    @app.route('{0}/files/content/<file_id>/<file_name>'.format(url_path), methods=['GET'])
    @app.route('{0}/files/content/<file_id>'.format(url_path), methods=['GET'])
    def get_file_content(file_id, file_name=None):

        # http://blog.asgaard.co.uk/2012/08/03/http-206-partial-content-for-flask-python
        def send_file_partial(filename_or_fp, mimetype):
            range_header = request.headers.get('Range', None)
            if not range_header:
                return send_file(filename_or_fp, mimetype=record.mime_type)

            size = os.path.getsize(filename_or_fp)
            byte1, byte2 = 0, None

            m = re.search('(\d+)-(\d*)', range_header)
            g = m.groups()

            if g[0]:
                byte1 = int(g[0])
            if g[1]:
                byte2 = int(g[1])

            length = size - byte1
            if byte2 is not None:
                length = byte2 - byte1 + 1

            with open(filename_or_fp, 'rb') as f:
                f.seek(byte1)
                data = f.read(length)

            rv = Response(data, 206, mimetype=mimetype, direct_passthrough=True)
            rv.headers.add('Content-Range',
                           'bytes {0}-{1}/{2}'.format(byte1, byte1 + length - 1, size))
            return rv

        db = meteor_app.get_db()
        record = db.get_file(repository_fname=file_id)
        if record is None:
            db.close_db()
            return MeteorApp.not_found(entity_id=file_id)
        file_path = db.file_path_for_id(record.id)
        db.close_db()
        return send_file_partial(filename_or_fp=file_path, mimetype=record.mime_type)
python
def _is_multiframe_4d(dicom_input):
    """
    Use this function to detect if a dicom series is a philips multiframe 4D dataset
    """
    # check if it is multi frame dicom
    if not common.is_multiframe_dicom(dicom_input):
        return False

    header = dicom_input[0]

    # check if there are multiple stacks
    number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
    number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
    if number_of_stacks <= 1:
        return False

    return True
java
public static Resource temporaryDirectory() {
    File tempDir = new File(SystemInfo.JAVA_IO_TMPDIR);
    String baseName = System.currentTimeMillis() + "-";

    for (int i = 0; i < 1_000_000; i++) {
        File tmp = new File(tempDir, baseName + i);
        if (tmp.mkdir()) {
            return new FileResource(tmp);
        }
    }
    throw new RuntimeException("Unable to create temp directory");
}
python
def load_components(*paths, **kwargs):
    """ Loads all components on the paths. Each path should be a package or
    module. All components beneath a path are loaded.

    Args:
        paths (str): A package or module to load

    Keyword Args:
        include (str): A regular expression of packages and modules to
            include. Defaults to '.*'
        exclude (str): A regular expression of packages and modules to
            exclude. Defaults to 'test'
        continue_on_error (bool): If True, continue importing even if
            something raises an ImportError. If False, raise the first
            ImportError.

    Returns:
        int: The total number of modules loaded.

    Raises:
        ImportError
    """
    num_loaded = 0
    for path in paths:
        num_loaded += _load_components(path, **kwargs)
    return num_loaded
java
private String resolveName(String localName, String qualifiedName) {
    if ((localName == null) || (localName.length() == 0)) {
        return qualifiedName;
    } else {
        return localName;
    }
}
python
def addFeature(self, feature):
    '''Appends Feature'''
    if isinstance(feature, Feature):
        self.features.append(feature)
    else:
        raise TypeError(
            'feature Type should be Feature, not %s' % type(feature))
java
@Override
protected void defineWidgets() {
    super.defineWidgets();

    // widgets to display in first block (like edit view)
    addWidget(new CmsWidgetDialogParameter(getSearchIndexIndex(), "name", PAGES[0], new CmsDisplayWidget()));
    addWidget(new CmsWidgetDialogParameter(getSearchIndexIndex(), "rebuildMode", PAGES[0], new CmsDisplayWidget()));
    addWidget(new CmsWidgetDialogParameter(getSearchIndexIndex(), "locale", PAGES[0], new CmsDisplayWidget()));
    addWidget(new CmsWidgetDialogParameter(getSearchIndexIndex(), "project", PAGES[0], new CmsDisplayWidget()));
    addWidget(new CmsWidgetDialogParameter(
        getSearchIndexIndex(), "fieldConfigurationName", PAGES[0], new CmsDisplayWidget()));
}
python
def cal_frame_according_boundaries(left, right, top, bottom, parent_size, gaphas_editor=True, group=True):
    """ Generate margin and relative position and size from the handed boundary parameters and parent size """
    # print("parent_size ->", parent_size)
    margin = cal_margin(parent_size)
    # Add margin and ensure that the upper left corner is within the state
    if group:
        # frame of grouped state
        rel_pos = max(left - margin, 0), max(top - margin, 0)
        # Add margin and ensure that the lower right corner is within the state
        size = (min(right - left + 2 * margin, parent_size[0] - rel_pos[0]),
                min(bottom - top + 2 * margin, parent_size[1] - rel_pos[1]))
    else:
        # frame inside of state
        # rel_pos = max(margin, 0), max(margin, 0)
        rel_pos = left, top
        size = right - left, bottom - top
    return margin, rel_pos, size
python
def metadata(ctx, archive_name):
    '''
    Get an archive's metadata
    '''
    _generate_api(ctx)
    var = ctx.obj.api.get_archive(archive_name)
    click.echo(pprint.pformat(var.get_metadata()))
python
def remove_edges(self, from_idx, to_idx, symmetric=False, copy=False):
    '''Removes all from->to and to->from edges.
    Note: the symmetric kwarg is unused.'''
    flat_inds = self._pairs.dot((self._num_vertices, 1))
    # convert to sorted order and flatten
    to_remove = (np.minimum(from_idx, to_idx) * self._num_vertices +
                 np.maximum(from_idx, to_idx))
    mask = np.in1d(flat_inds, to_remove, invert=True)
    res = self.copy() if copy else self
    res._pairs = res._pairs[mask]
    res._offdiag_mask = res._offdiag_mask[mask]
    return res
python
def update(self, **params):
    """
    Sends locally staged mutations to Riak.

    :param w: W-value, wait for this many partitions to respond before
        returning to client.
    :type w: integer
    :param dw: DW-value, wait for this many partitions to confirm the
        write before returning to client.
    :type dw: integer
    :param pw: PW-value, require this many primary partitions to be
        available before performing the put
    :type pw: integer
    :param return_body: if the newly stored object should be retrieved,
        defaults to True
    :type return_body: bool
    :param include_context: whether to return the new opaque context when
        `return_body` is `True`
    :type include_context: bool
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :rtype: a subclass of :class:`~riak.datatypes.Datatype`
    """
    if not self.modified:
        raise ValueError("No operation to perform")

    params.setdefault('return_body', True)
    self.bucket._client.update_datatype(self, **params)
    self.clear()

    return self
java
protected boolean objectVisibleToUser(String objectTenancyPath, String userTenancyPath) {
    // if in "same hierarchy"
    return objectTenancyPath.startsWith(userTenancyPath)
            || userTenancyPath.startsWith(objectTenancyPath);
}
java
public static MemberUpdater updater(final String pathAccountSid, final String pathQueueSid,
        final String pathCallSid, final URI url, final HttpMethod method) {
    return new MemberUpdater(pathAccountSid, pathQueueSid, pathCallSid, url, method);
}
python
def peek_all(self, model_class):
    """Return a list of models from the local cache.

    Args:
        model_class (:class:`cinder_data.model.CinderModel`): A subclass of
            :class:`cinder_data.model.CinderModel` of your chosen model.

    Returns:
        list: A list of instances of your model_class or an empty list.
    """
    if self._cache:
        return self._cache.get_records(model_class.__name__)
    else:
        return []
java
private JButton getHelpButton() {
    if (btnHelp == null) {
        btnHelp = new JButton();
        btnHelp.setBorder(null);
        btnHelp.setIcon(new ImageIcon(
            AbstractParamContainerPanel.class.getResource("/resource/icon/16/201.png")));  // help icon
        btnHelp.addActionListener(getShowHelpAction());
        btnHelp.setToolTipText(Constant.messages.getString("menu.help"));
    }
    return btnHelp;
}
python
def limit_current(self, curr):
    """Sets the current limit on the Grizzly. The units are in amps.
    The internal default value is 5 amps."""
    if curr <= 0:
        raise ValueError("Current limit must be a positive number. You provided: %s" % str(curr))
    current = int(curr * (1024.0 / 5.0) * (66.0 / 1000.0))
    self._set_as_int(Addr.CurrentLimit, current, 2)
python
def setSpeed(self, personID, speed):
    """setSpeed(string, double) -> None

    Sets the maximum speed in m/s for the named person for subsequent step.
    """
    self._connection._sendDoubleCmd(
        tc.CMD_SET_PERSON_VARIABLE, tc.VAR_SPEED, personID, speed)
python
def echo_attributes(request, config_loader_path=None, template='djangosaml2/echo_attributes.html'):
    """Example view that echoes the SAML attributes of a user"""
    state = StateCache(request.session)
    conf = get_config(config_loader_path, request)

    client = Saml2Client(conf, state_cache=state,
                         identity_cache=IdentityCache(request.session))
    subject_id = _get_subject_id(request.session)
    try:
        identity = client.users.get_identity(subject_id, check_not_on_or_after=False)
    except AttributeError:
        return HttpResponse("No active SAML identity found. Are you sure you have logged in via SAML?")

    return render(request, template, {'attributes': identity[0]})
java
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
    switch (featureID) {
    case AfplibPackage.BSG__REG_NAME:
        return getREGName();
    case AfplibPackage.BSG__TRIPLETS:
        return getTriplets();
    }
    return super.eGet(featureID, resolve, coreType);
}
java
@Nullable
public static CurrentLegAnnotation createCurrentAnnotation(CurrentLegAnnotation currentLegAnnotation,
        RouteLeg leg, double legDistanceRemaining) {
    LegAnnotation legAnnotation = leg.annotation();
    if (legAnnotation == null) {
        return null;
    }
    List<Double> distanceList = legAnnotation.distance();
    if (distanceList == null || distanceList.isEmpty()) {
        return null;
    }
    CurrentLegAnnotation.Builder annotationBuilder = CurrentLegAnnotation.builder();
    int annotationIndex = findAnnotationIndex(
        currentLegAnnotation, annotationBuilder, leg, legDistanceRemaining, distanceList
    );

    annotationBuilder.distance(distanceList.get(annotationIndex));
    List<Double> durationList = legAnnotation.duration();
    if (durationList != null) {
        annotationBuilder.duration(durationList.get(annotationIndex));
    }
    List<Double> speedList = legAnnotation.speed();
    if (speedList != null) {
        annotationBuilder.speed(speedList.get(annotationIndex));
    }
    List<MaxSpeed> maxspeedList = legAnnotation.maxspeed();
    if (maxspeedList != null) {
        annotationBuilder.maxspeed(maxspeedList.get(annotationIndex));
    }
    List<String> congestionList = legAnnotation.congestion();
    if (congestionList != null) {
        annotationBuilder.congestion(congestionList.get(annotationIndex));
    }
    annotationBuilder.index(annotationIndex);
    return annotationBuilder.build();
}
python
def connect(host, username, password, port=443, verify=False, debug=False):
    '''
    Connect to a vCenter via the API

    :param host: Hostname or IP of the vCenter
    :type host: str or unicode
    :param username: Username
    :type username: str or unicode
    :param password: Password
    :type password: str or unicode
    :param port: Port on which the vCenter API is running (default: 443)
    :type port: int
    :param verify: Whether to verify SSL certs upon connection (default: False)
    :type verify: bool
    :param debug: Debug option (default: False)
    :type debug: bool
    :return: Content
    :rtype: vim.ServiceInstanceContent
    '''
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    if not verify:
        # Disable warnings about unsigned certificates
        context.verify_mode = ssl.CERT_NONE
        requests.packages.urllib3.disable_warnings()

    try:
        si = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            port=port,
            sslContext=context
        )
        # Register auto disconnect
        atexit.register(Disconnect, si)
        # Return content
        return si.RetrieveContent()
    except IOError as e:
        print('I/O error({0}): {1}'.format(e.errno, e.strerror))
    except vmodl.MethodFault as e:
        print('Connection could not be established', file=sys.stderr)
        print('Caught vmodl fault: ', e.msg, file=sys.stderr)
        if debug:
            traceback.print_exc()
        raise ConnectionError('Connection could not be established')
    except Exception as e:
        print('Caught exception:', str(e), file=sys.stderr)
        if debug:
            traceback.print_exc()
java
public boolean endsWith(String str, String suffix) {
    return (str == null || suffix == null) ? (str == suffix) : str.endsWith(suffix);
}
python
def change_name(self, new_name):
    """Change the name of the shell, possibly updating the maximum name length"""
    if not new_name:
        name = self.hostname
    else:
        name = new_name.decode()
    self.display_name = display_names.change(self.display_name, name)
python
def marshal_dict(obj, types, method=None, fields=None, **m_kwargs):
    """
    Recursively marshal a Python object to a dict that can be passed to
    json.{dump,dumps}, a web client, or a web server, document database,
    etc...

    Args:
        obj: object, Its members can be nested Python objects which will be
            converted to dictionaries
        types: tuple-of-types, The primitive types that can be serialized
        method: None-or-str, None to use 'marshal_dict' recursively, or a
            str that corresponds to the name of a class method to use. Any
            nested types that are not an instance of @types must have this
            method defined.
        fields: None-list-of-str, Explicitly marshal only these fields
        m_kwargs: Keyword arguments to pass to @method

    Returns:
        dict
    """
    has_slots, d = _get_dict(obj)

    if fields:
        for field in fields:
            assert field in d
        return {
            k: v if isinstance(v, types) else (
                getattr(v, method)(**m_kwargs) if method
                else marshal_dict(v, types)
            )
            for k, v in d.items() if k in fields
        }

    excl = getattr(obj, '_marshal_exclude', [])

    if has_slots or getattr(obj, '_marshal_only_init_args', False):
        args = init_args(obj)
        excl.extend([x for x in d if x not in args])

    if getattr(obj, '_marshal_exclude_none', False):
        excl.extend(k for k, v in d.items() if v is None)
    else:
        none_keys = getattr(obj, '_marshal_exclude_none_keys', [])
        if none_keys:
            excl.extend(x for x in none_keys if d.get(x) is None)

    return {
        k: v if isinstance(v, types) else (
            getattr(v, method)(**m_kwargs) if method
            else marshal_dict(v, types)
        )
        for k, v in d.items() if k not in excl
    }
java
public static DZcs cs_symperm(DZcs A, int[] pinv, boolean values)
{
    int i, j, p, q, i2, j2, n, Ap[], Ai[], Cp[], Ci[], w[] ;
    DZcsa Cx = new DZcsa(), Ax = new DZcsa() ;
    DZcs C ;
    if (!CS_CSC (A)) return (null) ;	/* check inputs */
    n = A.n ; Ap = A.p ; Ai = A.i ; Ax.x = A.x ;
    C = cs_spalloc (n, n, Ap[n], values && (Ax.x != null), false) ;	/* alloc result*/
    w = new int [n] ;			/* get workspace */
    Cp = C.p ; Ci = C.i ; Cx.x = C.x ;
    for (j = 0 ; j < n ; j++)		/* count entries in each column of C */
    {
        j2 = pinv != null ? pinv [j] : j ;	/* column j of A is column j2 of C */
        for (p = Ap [j] ; p < Ap [j + 1] ; p++)
        {
            i = Ai [p] ;
            if (i > j) continue ;		/* skip lower triangular part of A */
            i2 = pinv != null ? pinv [i] : i ;	/* row i of A is row i2 of C */
            w [Math.max(i2, j2)]++ ;	/* column count of C */
        }
    }
    cs_cumsum (Cp, w, n) ;		/* compute column pointers of C */
    for (j = 0 ; j < n ; j++)
    {
        j2 = pinv != null ? pinv [j] : j ;	/* column j of A is column j2 of C */
        for (p = Ap [j] ; p < Ap [j + 1] ; p++)
        {
            i = Ai [p] ;
            if (i > j) continue ;		/* skip lower triangular part of A*/
            i2 = pinv != null ? pinv [i] : i ;	/* row i of A is row i2 of C */
            Ci [q = w [Math.max(i2, j2)]++] = Math.min(i2, j2) ;
            if (Cx.x != null) Cx.set(q, (i2 <= j2) ? Ax.get(p) : cs_conj(Ax.get(p))) ;
        }
    }
    return (C) ;
}
python
def _line_format(line):
    """Determine the column format pattern for a line in an ASCII segment file.
    """
    for pat in (FOUR_COL_REGEX, THREE_COL_REGEX, TWO_COL_REGEX):
        if pat.match(line):
            return pat
    raise ValueError("unable to parse segment from line {!r}".format(line))
java
public MavenInstallation getMaven() {
    for (MavenInstallation i : getDescriptor().getInstallations()) {
        if (mavenName != null && mavenName.equals(i.getName()))
            return i;
    }
    return null;
}
python
def PenForNode(self, node, depth=0):
    """Determine the pen to use to display the given node"""
    if node == self.selectedNode:
        return self.SELECTED_PEN
    return self.DEFAULT_PEN
java
@Override
public ListHsmsResult listHsms(ListHsmsRequest request) {
    request = beforeClientExecution(request);
    return executeListHsms(request);
}
java
public void marshall(NodeOverrides nodeOverrides, ProtocolMarshaller protocolMarshaller) {
    if (nodeOverrides == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(nodeOverrides.getNumNodes(), NUMNODES_BINDING);
        protocolMarshaller.marshall(nodeOverrides.getNodePropertyOverrides(), NODEPROPERTYOVERRIDES_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def find_prepositions(chunked):
    """ The input is a list of [token, tag, chunk]-items.
        The output is a list of [token, tag, chunk, preposition]-items.
        PP-chunks followed by NP-chunks make up a PNP-chunk.
    """
    # Tokens that are not part of a preposition just get the O-tag.
    for ch in chunked:
        ch.append("O")
    for i, chunk in enumerate(chunked):
        if chunk[2].endswith("PP") and chunk[-1] == "O":
            # Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
            if i < len(chunked) - 1 and \
               (chunked[i + 1][2].endswith(("NP", "PP")) or
                    chunked[i + 1][1] in ("VBG", "VBN")):
                chunk[-1] = "B-PNP"
                pp = True
                for ch in chunked[i + 1:]:
                    if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
                        break
                    if ch[2].endswith("PP") and pp:
                        ch[-1] = "I-PNP"
                    if not ch[2].endswith("PP"):
                        ch[-1] = "I-PNP"
                        pp = False
    return chunked
python
def grid(lon_edges, lat_edges, values, cmap, alpha=255, vmin=None, vmax=None,
         levels=10, colormap_scale='lin', show_colorbar=True):
    """
    Values on a uniform grid

    :param lon_edges: longitude edges
    :param lat_edges: latitude edges
    :param values: matrix representing values on the grid
    :param cmap: colormap name
    :param alpha: color alpha
    :param vmin: minimum value for the colormap
    :param vmax: maximum value for the colormap
    :param levels: number of levels for the colormap
    :param colormap_scale: colormap scale
    :param show_colorbar: show the colorbar in the UI
    """
    from geoplotlib.layers import GridLayer
    _global_config.layers.append(
        GridLayer(lon_edges, lat_edges, values, cmap, alpha, vmin, vmax,
                  levels, colormap_scale, show_colorbar))
python
def _fetch_app_role_token(vault_url, role_id, secret_id):
    """Get a Vault token, using the RoleID and SecretID"""
    url = _url_joiner(vault_url, 'v1/auth/approle/login')
    resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
    resp.raise_for_status()
    data = resp.json()
    if data.get('errors'):
        raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
    return data['auth']['client_token']
python
def map_equal_contributions(contributors):
    """assign numeric values to each unique equal-contrib id"""
    equal_contribution_map = {}
    equal_contribution_keys = []
    for contributor in contributors:
        if contributor.get("references") and "equal-contrib" in contributor.get("references"):
            for key in contributor["references"]["equal-contrib"]:
                if key not in equal_contribution_keys:
                    equal_contribution_keys.append(key)
    # Do a basic sort
    equal_contribution_keys = sorted(equal_contribution_keys)
    # Assign keys based on sorted values
    for i, equal_contribution_key in enumerate(equal_contribution_keys):
        equal_contribution_map[equal_contribution_key] = i + 1
    return equal_contribution_map
python
def compile_file(self, path, incl_search_paths=None):
    """
    Parse & compile a single file and append it to RDLCompiler's root
    namespace.

    If any exceptions (:class:`~systemrdl.RDLCompileError` or other) occur
    during compilation, then the RDLCompiler object should be discarded.

    Parameters
    ----------
    path:str
        Path to an RDL source file

    incl_search_paths:list
        List of additional paths to search to resolve includes.
        If unset, defaults to an empty list.

        Relative include paths are resolved in the following order:

        1. Search each path specified in ``incl_search_paths``.
        2. Path relative to the source file performing the include.

    Raises
    ------
    :class:`~systemrdl.RDLCompileError`
        If any fatal compile error is encountered.
    """
    if incl_search_paths is None:
        incl_search_paths = []

    fpp = preprocessor.FilePreprocessor(self.env, path, incl_search_paths)
    preprocessed_text, seg_map = fpp.preprocess()
    input_stream = preprocessor.PreprocessedInputStream(preprocessed_text, seg_map)

    lexer = SystemRDLLexer(input_stream)
    lexer.removeErrorListeners()
    lexer.addErrorListener(messages.RDLAntlrErrorListener(self.msg))
    token_stream = CommonTokenStream(lexer)
    parser = SystemRDLParser(token_stream)
    parser.removeErrorListeners()
    parser.addErrorListener(messages.RDLAntlrErrorListener(self.msg))

    # Run Antlr parser on input
    parsed_tree = parser.root()
    if self.msg.had_error:
        self.msg.fatal("Parse aborted due to previous errors")

    # Traverse parse tree with RootVisitor
    self.visitor.visit(parsed_tree)

    # Reset default property assignments from namespace.
    # They should not be shared between files since that would be confusing.
    self.namespace.default_property_ns_stack = [{}]

    if self.msg.had_error:
        self.msg.fatal("Compile aborted due to previous errors")
python
def get_img_heatmap(orig_img, activation_map):
    """Draw a heatmap on top of the original image using intensities from activation_map"""
    heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    img_heatmap = np.float32(heatmap) + np.float32(orig_img)
    img_heatmap = img_heatmap / np.max(img_heatmap)
    img_heatmap *= 255
    return img_heatmap.astype(int)
python
def compute_samples(channels, nsamples=None):
    '''
    create a generator which computes the samples.

    essentially it creates a sequence of the sum of each function in the
    channel at each sample in the file for each channel.
    '''
    return islice(izip(*(imap(sum, izip(*channel)) for channel in channels)), nsamples)
java
protected String encrypt(String randomStr, String plainText) {
    ByteGroup byteCollector = new ByteGroup();
    byte[] randomStringBytes = randomStr.getBytes(CHARSET);
    byte[] plainTextBytes = plainText.getBytes(CHARSET);
    byte[] bytesOfSizeInNetworkOrder = number2BytesInNetworkOrder(plainTextBytes.length);
    byte[] appIdBytes = appidOrCorpid.getBytes(CHARSET);

    // randomStr + networkBytesOrder + text + appid
    byteCollector.addBytes(randomStringBytes);
    byteCollector.addBytes(bytesOfSizeInNetworkOrder);
    byteCollector.addBytes(plainTextBytes);
    byteCollector.addBytes(appIdBytes);

    // ... + pad: pad the plaintext using the custom padding scheme
    byte[] padBytes = PKCS7Encoder.encode(byteCollector.size());
    byteCollector.addBytes(padBytes);

    // obtain the final, not yet encrypted byte stream
    byte[] unencrypted = byteCollector.toBytes();

    try {
        // set the cipher to AES in CBC mode
        Cipher cipher = Cipher.getInstance("AES/CBC/NoPadding");
        SecretKeySpec keySpec = new SecretKeySpec(aesKey, "AES");
        IvParameterSpec iv = new IvParameterSpec(aesKey, 0, 16);
        cipher.init(Cipher.ENCRYPT_MODE, keySpec, iv);

        // encrypt
        byte[] encrypted = cipher.doFinal(unencrypted);

        // Base64-encode the encrypted bytes
        String base64Encrypted = base64.encodeToString(encrypted);
        return base64Encrypted;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
python
def task_property_present_predicate(service, task, prop):
    """ True if the json_element passed is present for the task specified.
    """
    response = None
    try:
        response = get_service_task(service, task)
    except Exception:
        pass

    return (response is not None) and (prop in response)
java
JsMsgPart getPart(int accessor, JMFSchema schema) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "getPart", new Object[]{accessor, schema});
    JsMsgPart result = null;
    try {
        if (jmfPart.isPresent(accessor))
            result = new JsMsgPart(jmfPart.getNativePart(accessor, schema));
    } catch (JMFException e) {
        FFDCFilter.processException(e, "getPart", "208", this);
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            SibTr.debug(this, tc, "getPart failed: " + e);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "getPart", result);
    return result;
}
python
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order.  This method is a
    generator.
    """
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            for subsubpart in subpart.walk():
                yield subsubpart
python
def get_configs(self):
    """
    :return: overall index, agent config, loadout config in that order
    """
    loadout_config = self.loadout_preset.config.copy()
    config_path = None
    if self.agent_preset.config_path is not None:
        # Might be none if preset was never saved to disk.
        config_path = os.path.dirname(self.agent_preset.config_path)
    config = self.agent_preset.config.copy()
    config.set_value(BOT_CONFIG_MODULE_HEADER, BOT_NAME_KEY, self.ingame_name)
    config_bundle = BotConfigBundle(config_path, config, os.path.basename(self.agent_preset.config_path))
    return self.overall_index, config_bundle, loadout_config
python
def read_file(file_path, default_content=''):
    """
    Read file at the specified path.
    If file doesn't exist, it will be created with default-content.
    Returns the file content.
    """
    if not os.path.exists(file_path):
        write_file(file_path, default_content)

    handler = open(file_path, 'r')
    content = handler.read()
    handler.close()
    return content or default_content
java
@Override
public void tryUnchecked(ActionToTry<E> action) {
    tryUnchecked(noParam -> {
        action.execute();
        return null;
    }, null);
}
python
def locations_for(self, city_name, country=None, matching='nocase'):
    """
    Returns a list of Location objects corresponding to the int IDs and
    relative toponyms and 2-chars country of the cities matching the
    provided city name. The rule for identifying matchings is according
    to the provided `matching` parameter value. If `country` is provided,
    the search is restricted to the cities of the specified country.

    :param country: two character str representing the country where to
        search for the city. Defaults to `None`, which means: search in
        all countries.
    :param matching: str among `exact` (literal, case-sensitive matching),
        `nocase` (literal, case-insensitive matching) and `like` (matches
        cities whose name contains as a substring the string fed to the
        function, no matter the case). Defaults to `nocase`.
    :raises ValueError if the value for `matching` is unknown
    :return: list of `weatherapi25.location.Location` objects
    """
    if not city_name:
        return []
    if matching not in self.MATCHINGS:
        raise ValueError("Unknown type of matching: "
                         "allowed values are %s" % ", ".join(self.MATCHINGS))
    if country is not None and len(country) != 2:
        raise ValueError("Country must be a 2-char string")
    splits = self._filter_matching_lines(city_name, country, matching)
    return [Location(item[0], float(item[3]), float(item[2]),
                     int(item[1]), item[4]) for item in splits]
java
public static base_response update(nitro_service client, clusternodegroup resource) throws Exception {
    clusternodegroup updateresource = new clusternodegroup();
    updateresource.name = resource.name;
    updateresource.strict = resource.strict;
    return updateresource.update_resource(client);
}
python
def read_file(self, infile):
    """Read a reST file into a string.
    """
    try:
        with open(infile, 'rt') as file:
            return file.read()
    except UnicodeDecodeError as e:
        err_exit('Error reading %s: %s' % (infile, e))
    except (IOError, OSError) as e:
        err_exit('Error reading %s: %s' % (infile, e.strerror or e))
python
def read_from(value):
    """Read file and return contents."""
    path = normalized_path(value)
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError("%s is not a valid path." % path)
    LOG.debug("%s exists.", path)
    with open(path, 'r') as reader:
        read = reader.read()
    return read
python
def auto_type(s):
    """
    Gets an XML response value and tries to convert it to a Python base object
    """
    if isinstance(s, bool):
        return s
    elif s is None:
        return ''
    elif s == 'TRUE':
        return True
    elif s == 'FALSE':
        return False
    else:
        try:
            try:
                # telephone numbers may be wrongly interpreted as ints
                if s.startswith('+'):
                    return s
                else:
                    return int(s)
            except ValueError:
                return float(s)
        except ValueError:
            return s
python
def get_last_components_by_type(component_types, topic_id, db_conn=None):
    """For each component type of a topic, get the last one."""
    db_conn = db_conn or flask.g.db_conn
    schedule_components_ids = []
    for ct in component_types:
        where_clause = sql.and_(models.COMPONENTS.c.type == ct,
                                models.COMPONENTS.c.topic_id == topic_id,
                                models.COMPONENTS.c.export_control == True,
                                models.COMPONENTS.c.state == 'active')  # noqa
        query = (sql.select([models.COMPONENTS.c.id])
                 .where(where_clause)
                 .order_by(sql.desc(models.COMPONENTS.c.created_at)))
        cmpt_id = db_conn.execute(query).fetchone()

        if cmpt_id is None:
            msg = 'Component of type "%s" not found or not exported.' % ct
            raise dci_exc.DCIException(msg, status_code=412)

        cmpt_id = cmpt_id[0]
        if cmpt_id in schedule_components_ids:
            msg = ('Component types %s malformed: type %s duplicated.' %
                   (component_types, ct))
            raise dci_exc.DCIException(msg, status_code=412)
        schedule_components_ids.append(cmpt_id)
    return schedule_components_ids
java
public String[] getContext(int i, String[] toks, String[] tags, String[] preds) {
    List<String> e = new ArrayList<String>();

    if (isWiderContext)
        createWindowFeats(i, toks, tags, preds, e);
    else
        create3WindowFeats(i, toks, tags, preds, e);

    if (i > 0)
        wrappWindowFeatures("prev_", i - 1, toks, tags, preds, e);
    wrappWindowFeatures("", i, toks, tags, preds, e);
    if (i < toks.length - 1)
        wrappWindowFeatures("nxt_", i + 1, toks, tags, preds, e);

    String[] context = e.toArray(new String[e.size()]);
    return context;
}
java
@Override
public String getDataQuery(QueryFilter filter) {
    String query = getQuery();
    String whereClause = filter.getWhereClause();
    String result = null;
    if (whereClause != null && whereClause.length() > 0) {
        result = query.replaceAll(SUBSTITUTION_STRING, " AND " + whereClause);
    } else {
        result = query.replaceAll(SUBSTITUTION_STRING, "");
    }
    return result;
}
python
def _get_content(self, response):
    """Checks for errors in the response. Returns response content, in bytes.

    :param response: response object
    :raise:
        :UnexpectedResponse: if the server responded with an unexpected response
    :return:
        - ServiceNow response content
    """
    method = response.request.method
    self.last_response = response

    server_error = {
        'summary': None,
        'details': None
    }

    try:
        content_json = response.json()
        if 'error' in content_json:
            e = content_json['error']
            if 'message' in e:
                server_error['summary'] = e['message']
            if 'detail' in e:
                server_error['details'] = e['detail']
    except ValueError:
        content_json = {}

    if method == 'DELETE':
        # Make sure the delete operation returned the expected response
        if response.status_code == 204:
            return {'success': True}
        else:
            raise UnexpectedResponse(
                204, response.status_code, method,
                server_error['summary'], server_error['details'])
    # Make sure the POST operation returned the expected response
    elif method == 'POST' and response.status_code != 201:
        raise UnexpectedResponse(
            201, response.status_code, method,
            server_error['summary'], server_error['details'])

    # It seems that Helsinki and later returns status 200 instead of 404 on empty result sets
    if ('result' in content_json and len(content_json['result']) == 0) or response.status_code == 404:
        if self.raise_on_empty is True:
            raise NoResults('Query yielded no results')
    elif 'error' in content_json:
        raise UnexpectedResponse(
            200, response.status_code, method,
            server_error['summary'], server_error['details'])

    if 'result' not in content_json:
        raise MissingResult("The request was successful but the content didn't contain the expected 'result'")

    return content_json['result']
python
def _prepare_inputs(self, X, y=None, type_of_inputs='classic', **kwargs):
    """Initializes the preprocessor and processes inputs. See `check_input`
    for more details.

    Parameters
    ----------
    X : array-like
        The input data array to check.

    y : array-like
        The input labels array to check.

    type_of_inputs : `str` {'classic', 'tuples'}
        The type of inputs to check. If 'classic', the input should be a 2D
        array-like of points or a 1D array like of indicators of points. If
        'tuples', the input should be a 3D array-like of tuples or a 2D
        array-like of indicators of tuples.

    **kwargs : dict
        Arguments to pass to check_input.

    Returns
    -------
    X : `numpy.ndarray`
        The checked input data array.

    y : `numpy.ndarray` (optional)
        The checked input labels array.
    """
    self.check_preprocessor()
    return check_input(X, y,
                       type_of_inputs=type_of_inputs,
                       preprocessor=self.preprocessor_,
                       estimator=self,
                       tuple_size=getattr(self, '_tuple_size', None),
                       **kwargs)
java
@Override
public DescribeFleetUtilizationResult describeFleetUtilization(DescribeFleetUtilizationRequest request) {
    request = beforeClientExecution(request);
    return executeDescribeFleetUtilization(request);
}
java
public T get(final ProcessCase processCase) {
    if (!isPresent(processCase)) {
        throw new NoSuchElementException(
            String.format("No value present in processCase '%s'.", processCase.name()));
    }
    return value.get();
}
python
def query_term(self, term, verbose=False):
    """Given a GO ID, return GO object."""
    if term not in self:
        sys.stderr.write("Term %s not found!\n" % term)
        return

    rec = self[term]
    if verbose:
        print(rec)
        sys.stderr.write("all parents: {}\n".format(
            repr(rec.get_all_parents())))
        sys.stderr.write("all children: {}\n".format(
            repr(rec.get_all_children())))
    return rec
python
def _chk_truncate(self):
    '''
    Checks whether the frame should be truncated. If so, slices the frame up.
    '''
    # Column of which first element is used to determine width of a dot col
    self.tr_size_col = -1

    # Cut the data to the information actually printed
    max_cols = self.max_cols
    max_rows = self.max_rows

    if max_cols == 0 or max_rows == 0:
        # assume we are in the terminal (why else = 0)
        (w, h) = get_terminal_size()
        self.w = w
        self.h = h
        if self.max_rows == 0:
            dot_row = 1
            prompt_row = 1
            if self.show_dimensions:
                show_dimension_rows = 3
            n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
            # rows available to fill with actual data
            max_rows_adj = self.h - n_add_rows
            self.max_rows_adj = max_rows_adj

        # Format only rows and columns that could potentially fit the screen
        if max_cols == 0 and len(self.frame.columns) > w:
            max_cols = w
        if max_rows == 0 and len(self.frame) > h:
            max_rows = h

    if not hasattr(self, 'max_rows_adj'):
        self.max_rows_adj = max_rows
    if not hasattr(self, 'max_cols_adj'):
        self.max_cols_adj = max_cols

    max_cols_adj = self.max_cols_adj
    max_rows_adj = self.max_rows_adj

    truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
    truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)

    frame = self.frame
    if truncate_h:
        if max_cols_adj == 0:
            col_num = len(frame.columns)
        elif max_cols_adj == 1:
            frame = frame[:, :max_cols]
            col_num = max_cols
        else:
            col_num = (max_cols_adj // 2)
            frame = frame[:, :col_num].concat(frame[:, -col_num:], axis=1)
        self.tr_col_num = col_num
    if truncate_v:
        if max_rows_adj == 0:
            row_num = len(frame)
        elif max_rows_adj == 1:
            row_num = max_rows
            frame = frame[:max_rows, :]
        else:
            row_num = max_rows_adj // 2
            frame = frame[:row_num, :].concat(frame[-row_num:, :])
        self.tr_row_num = row_num

    self.tr_frame = frame
    self.truncate_h = truncate_h
    self.truncate_v = truncate_v
    self.is_truncated = self.truncate_h or self.truncate_v
java
public static <J extends Job<J, R>, R extends Run<J, R>> RunList<R> fromJobs(Iterable<? extends J> jobs) {
    List<Iterable<R>> runLists = new ArrayList<>();
    for (Job j : jobs)
        runLists.add(j.getBuilds());
    return new RunList<>(combine(runLists));
}
java
@SuppressWarnings("static-method")
public List<Integer> findIntValues(JvmAnnotationReference reference) {
    assert reference != null;
    final List<Integer> values = new ArrayList<>();
    for (final JvmAnnotationValue value : reference.getValues()) {
        if (value instanceof JvmIntAnnotationValue) {
            for (final Integer intValue : ((JvmIntAnnotationValue) value).getValues()) {
                if (intValue != null) {
                    values.add(intValue);
                }
            }
        }
    }
    return values;
}
python
def sidereal_time(t):
    """Compute Greenwich sidereal time at the given ``Time``."""

    # Compute the Earth Rotation Angle. Time argument is UT1.
    theta = earth_rotation_angle(t.ut1)

    # The equinox method. See Circular 179, Section 2.6.2.
    # Precession-in-RA terms in mean sidereal time taken from third
    # reference, eq. (42), with coefficients in arcseconds.
    t = (t.tdb - T0) / 36525.0
    st = (0.014506 +
          ((((-0.0000000368 * t
              - 0.000029956) * t
             - 0.00000044) * t
            + 1.3915817) * t
           + 4612.156534) * t)

    # Form the Greenwich sidereal time.
    return (st / 54000.0 + theta * 24.0) % 24.0
java
private boolean checkChecksumISBN10(final String isbn) {
    int sum = 0;
    for (int i = 0; i < isbn.length() - 1; i++) {
        sum += (isbn.charAt(i) - '0') * (i + 1);
    }
    final char checkSum = isbn.charAt(9);
    return sum % 11 == (checkSum == 'X' ? 10 : checkSum - '0');
}