Columns: language (stringclasses, 2 values) | func_code_string (stringlengths, 63–466k)
python
def control(self):
    """the type of control this player exhibits"""
    if self.isComputer:
        value = c.COMPUTER
    else:
        value = c.PARTICIPANT
    return c.PlayerControls(value)
python
def get_submissions():
    """API endpoint to get submissions in JSON format"""
    print(request.args.to_dict())
    print(request.args.get('search[value]'))
    print(request.args.get('draw', 1))
    # submissions = session.query(Submission).all()
    if request.args.get('correct_filter', 'all') == 'all':
        correct_filter = [True, False]
    elif request.args['correct_filter'] == 'correct':
        correct_filter = [True]
    else:
        correct_filter = [False]
    if request.args.get('order[0][column]', '0') == '0':
        column = 'id'
    elif request.args['order[0][column]'] == '1':
        column = 'text'
    else:
        column = 'primary_error'
    order_str = "{} {}".format(column, request.args.get('order[0][dir]', 'desc'))
    search_val = request.args.get('search[value]')
    draw = request.args.get('draw', 1)
    filtered_len = session.query(Submission)\
        .filter(Submission.text.startswith(search_val))\
        .filter(Submission.correct.in_(correct_filter))\
        .count()
    subs = session.query(Submission)\
        .filter(Submission.text.startswith(search_val))\
        .filter(Submission.correct.in_(correct_filter))\
        .order_by(order_str)\
        .offset(request.args.get('start', 0))\
        .limit(request.args.get('length', 10))\
        .all()
    submissions = {'draw': draw, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': []}
    for submission in subs:
        submissions['data'].append([submission.id, submission.text,
                                    submission.primary_error, submission.correct])
    submissions['recordsTotal'] = session.query(Submission).count()
    submissions['recordsFiltered'] = filtered_len
    return jsonify(submissions)
java
public Vector3d transform(Vector3dc v, Vector3d dest) {
    double sin = Math.sin(angle);
    double cos = Math.cosFromSin(sin, angle);
    double dot = x * v.x() + y * v.y() + z * v.z();
    dest.set(v.x() * cos + sin * (y * v.z() - z * v.y()) + (1.0 - cos) * dot * x,
             v.y() * cos + sin * (z * v.x() - x * v.z()) + (1.0 - cos) * dot * y,
             v.z() * cos + sin * (x * v.y() - y * v.x()) + (1.0 - cos) * dot * z);
    return dest;
}
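The method above applies Rodrigues' axis-angle rotation term by term. A hedged NumPy cross-check of the same identity, assuming (x, y, z) is a unit axis and `angle` is in radians (all names here are illustrative):

    # v*cos + sin*(axis x v) + (1 - cos)*(axis . v)*axis, as in the Java method above
    import numpy as np

    def rotate(axis, angle, v):
        axis, v = np.asarray(axis, dtype=float), np.asarray(v, dtype=float)
        cos, sin = np.cos(angle), np.sin(angle)
        return v * cos + sin * np.cross(axis, v) + (1.0 - cos) * np.dot(axis, v) * axis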
java
public Observable<Page<GeoRegionInner>> listGeoRegionsAsync() {
    return listGeoRegionsWithServiceResponseAsync()
            .map(new Func1<ServiceResponse<Page<GeoRegionInner>>, Page<GeoRegionInner>>() {
                @Override
                public Page<GeoRegionInner> call(ServiceResponse<Page<GeoRegionInner>> response) {
                    return response.body();
                }
            });
}
python
def simulate(self, timepoints):
    """
    Simulate initialised solver for the specified timepoints

    :param timepoints: timepoints that will be returned from simulation
    :return: a list of trajectories for each of the equations in the problem.
    """
    solver = self._solver
    last_timepoint = timepoints[-1]
    try:
        simulated_timepoints, simulated_values = solver.simulate(last_timepoint,
                                                                 ncp_list=timepoints)
    except (Exception, self._solver_exception_class) as e:
        # The exceptions thrown by solvers are usually hiding the real cause, try to see if it is
        # our right_hand_side_as_function that is broken first
        try:
            self._problem.right_hand_side_as_function(self._initial_conditions, self._parameters)
        except:
            # If it is broken, throw that exception instead
            raise
        else:
            # If it is not, handle the original exception
            self._handle_solver_exception(e)
    trajectories = self._results_to_trajectories(simulated_timepoints, simulated_values)
    return trajectories
python
def find_ontology(self, txt):
    """
    Top-level function used for new data processing which attempts
    to find a level in a hierarchy and return the key and filename.

    usage: res = FindOntology('file')  # returns 'SYSTEM-PC-FILE'
    """
    totFound = 0
    searchString = txt.upper()
    match = []
    if searchString != '':
        for i in self.lst_subj:
            if searchString in i:
                totFound += 1
                match.append(i)
    if len(match) == 0:
        match.append('_TOP')
    return match
java
public CmsResource createResource(
        String resourcename,
        I_CmsResourceType type,
        byte[] content,
        List<CmsProperty> properties)
        throws CmsException, CmsIllegalArgumentException {

    return type.createResource(this, m_securityManager, resourcename, content, properties);
}
java
private String expandLHS(final String lhs, int lineOffset) {
    substitutions = new ArrayList<Map<String, String>>();
    // logger.info( "*** LHS>" + lhs + "<" );
    final StringBuilder buf = new StringBuilder();
    // since we assembled the string, we know line breaks are \n
    final String[] lines = lhs.split( (lhs.indexOf("\r\n") >= 0 ? "\r\n" : "\n"), -1 );
    final String[] expanded = new String[lines.length]; // buffer for expanded lines
    int lastExpanded = -1;
    int lastPattern = -1;
    for ( int i = 0; i < lines.length - 1; i++ ) {
        final String trimmed = lines[i].trim();
        expanded[++lastExpanded] = lines[i];
        if ( trimmed.length() == 0 || trimmed.startsWith( "#" ) || trimmed.startsWith( "//" ) ) {
            // comments - do nothing
        } else if ( trimmed.startsWith( ">" ) ) {
            // passthrough code - simply remove the passthrough mark character
            expanded[lastExpanded] = lines[i].replaceFirst( ">", " " );
            lastPattern = lastExpanded;
        } else {
            // regular expansion - expand the expression
            expanded[lastExpanded] =
                    substitute( expanded[lastExpanded], this.condition, i + lineOffset, useWhen, showSteps );
            // do we need to report errors for that?
            if ( lines[i].equals( expanded[lastExpanded] ) ) {
                // report error
                this.addError( new ExpanderException( "Unable to expand: "
                        + lines[i].replaceAll( "[\n\r]", "" ).trim(), i + lineOffset ) );
            }
            // but if the original starts with a "-", it means we need to add it
            // as a constraint to the previous pattern
            if ( trimmed.startsWith( "-" ) && (!lines[i].equals( expanded[lastExpanded] )) ) {
                if ( lastPattern >= 0 ) {
                    ConstraintInformation c =
                            ConstraintInformation.findConstraintInformationInPattern( expanded[lastPattern] );
                    if ( c.start > -1 ) {
                        // rebuilding previous pattern structure
                        expanded[lastPattern] = expanded[lastPattern].substring( 0, c.start )
                                + c.constraints
                                + ((c.constraints.trim().length() == 0) ? "" : ", ")
                                + expanded[lastExpanded].trim()
                                + expanded[lastPattern].substring( c.end );
                    } else {
                        // error, pattern not found to add constraint to
                        this.addError( new ExpanderException( "No pattern was found to add the constraint to: "
                                + lines[i].trim(), i + lineOffset ) );
                    }
                }
                lastExpanded--;
            } else {
                lastPattern = lastExpanded;
            }
        }
    }
    for ( int i = 0; i <= lastExpanded; i++ ) {
        buf.append( expanded[i] );
        buf.append( nl );
    }
    return buf.toString();
}
python
def query(cls, project=None, visibility=None, q=None, id=None, offset=None,
          limit=None, api=None):
    """
    Query (List) apps.

    :param project: Source project.
    :param visibility: private|public for private or public apps.
    :param q: List containing search terms.
    :param id: List contains app ids. Fetch apps with specific ids.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: collection object
    """
    if project:
        project = Transform.to_project(project)
    api = api or cls._API
    return super(App, cls)._query(url=cls._URL['query'], project=project,
                                  visibility=visibility, q=q, id=id,
                                  offset=offset, limit=limit, api=api)
python
def rename_dfa_states(dfa: dict, suffix: str):
    """ Side effect on input! Renames all the states of the DFA by adding the
    given **suffix** string at the beginning of each state name (despite the
    parameter name, the string is prepended).

    It is a utility function used to avoid two automata having states with
    names in common. Avoid strings that can lead to special names like
    "as", "and", ...

    :param dict dfa: input DFA.
    :param str suffix: string to be added at beginning of each state name.
    """
    conversion_dict = dict()
    new_states = set()
    new_accepting = set()
    for state in dfa['states']:
        conversion_dict[state] = suffix + state
        new_states.add(suffix + state)
        if state in dfa['accepting_states']:
            new_accepting.add(suffix + state)
    dfa['states'] = new_states
    dfa['initial_state'] = suffix + dfa['initial_state']
    dfa['accepting_states'] = new_accepting
    new_transitions = dict()
    for transition in dfa['transitions']:
        new_transitions[conversion_dict[transition[0]], transition[1]] = \
            conversion_dict[dfa['transitions'][transition]]
    dfa['transitions'] = new_transitions
    return dfa
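A minimal usage sketch for the function above, assuming the dict-based DFA layout it expects; the example DFA is illustrative:

    dfa = {
        'states': {'q0', 'q1'},
        'initial_state': 'q0',
        'accepting_states': {'q1'},
        'transitions': {('q0', 'a'): 'q1'},
    }
    rename_dfa_states(dfa, 'A_')
    assert dfa['states'] == {'A_q0', 'A_q1'}
    assert dfa['transitions'] == {('A_q0', 'a'): 'A_q1'}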
java
public List<JsonObject> executeParametrizedStatement(String statement, List<String> parameterNames,
        List<Object> parameterValues) {
    JsonObject namedParams = JsonObject.create();
    for (int param = 0; param < parameterNames.size(); param++) {
        namedParams.put(parameterNames.get(param), parameterValues.get(param));
    }
    ParameterizedN1qlQuery query = N1qlQuery.parameterized(statement, namedParams);
    return executeQuery(query);
}
java
public static Map<String, String> mergeDefaults(
        CmsObject cms,
        CmsResource resource,
        Map<String, String> properties,
        Locale locale,
        ServletRequest request) {

    Map<String, CmsXmlContentProperty> propertyConfig = null;
    if (CmsResourceTypeXmlContent.isXmlContent(resource)) {
        I_CmsFormatterBean formatter = null;
        // check formatter configuration setting
        for (Entry<String, String> property : properties.entrySet()) {
            if (property.getKey().startsWith(CmsFormatterConfig.FORMATTER_SETTINGS_KEY)
                    && CmsUUID.isValidUUID(property.getValue())) {
                formatter = OpenCms.getADEManager().getCachedFormatters(
                        cms.getRequestContext().getCurrentProject().isOnlineProject()).getFormatters().get(
                                new CmsUUID(property.getValue()));
                break;
            }
        }
        try {
            if (formatter != null) {
                propertyConfig = OpenCms.getADEManager().getFormatterSettings(
                        cms, formatter, resource, locale, request);
            } else {
                // fall back to schema configuration
                propertyConfig = CmsXmlContentDefinition.getContentHandlerForResource(cms, resource).getSettings(
                        cms, resource);
            }
        } catch (CmsException e) {
            // should never happen
            LOG.error(e.getLocalizedMessage(), e);
        }
    }
    return mergeDefaults(cms, propertyConfig, properties);
}
java
@Override
public boolean add(final CustomizationSupplier<T> entry) {
    if (list.contains(entry)) {
        throw new IllegalArgumentException("duplicate entry");
    }
    return list.add(entry);
}
java
public void createTenant(String tenantId) {
    TenantBean tenant = new TenantBean(tenantId);
    try {
        URL endpoint = serverUrl.toURI().resolve("tenants").toURL(); //$NON-NLS-1$
        Request request = new Request.Builder()
                .url(endpoint)
                .post(toBody(tenant))
                .header("Hawkular-Tenant", tenantId) //$NON-NLS-1$
                .build();
        Response response = httpClient.newCall(request).execute();
        if (response.code() >= 400) {
            throw hawkularMetricsError(response);
        }
    } catch (URISyntaxException | IOException e) {
        throw new RuntimeException(e);
    }
}
python
def accelerator_experiments(self, key, value):
    """Populate the ``accelerator_experiments`` key."""
    result = []
    a_value = force_single_element(value.get('a'))
    e_values = [el for el in force_list(value.get('e')) if el != '-']
    zero_values = force_list(value.get('0'))
    if a_value and not e_values:
        result.append({'accelerator': a_value})
    # XXX: we zip only when they have the same length, otherwise
    # we might match a value with the wrong recid.
    if len(e_values) == len(zero_values):
        for e_value, zero_value in zip(e_values, zero_values):
            result.append({
                'legacy_name': e_value,
                'record': get_record_ref(zero_value, 'experiments'),
            })
    else:
        for e_value in e_values:
            result.append({'legacy_name': e_value})
    return result
python
def call(self, cleanup_protecteds):
    '''
    Start the actual minion as a caller minion.

    cleanup_protecteds is a list of yard host addresses that should not be
    cleaned up; this is to fix a race condition when the salt-caller minion
    starts up.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).start()

    NOTE: Run any required code before calling `super()`.
    '''
    try:
        self.prepare()
        if check_user(self.config['user']):
            self.minion.opts['__role'] = kinds.APPL_KIND_NAMES[kinds.applKinds.caller]
            self.minion.call_in()
    except (KeyboardInterrupt, SaltSystemExit) as exc:
        self.action_log_info('Stopping')
        if isinstance(exc, KeyboardInterrupt):
            log.warning('Exiting on Ctrl-c')
            self.shutdown()
        else:
            log.error(exc)
            self.shutdown(exc.code)
java
protected boolean canScroll(View v, boolean checkV, int dy, int x, int y) {
    if (v instanceof ViewGroup) {
        final ViewGroup group = (ViewGroup) v;
        final int scrollX = v.getScrollX();
        final int scrollY = v.getScrollY();
        final int count = group.getChildCount();
        // Count backwards - let topmost views consume scroll distance first.
        for (int i = count - 1; i >= 0; i--) {
            // TODO: Add versioned support here for transformed views.
            // This will not work for transformed views in Honeycomb+
            final View child = group.getChildAt(i);
            if (y + scrollY >= child.getTop() && y + scrollY < child.getBottom()
                    && x + scrollX >= child.getLeft() && x + scrollX < child.getRight()
                    && canScroll(child, true, dy, x + scrollX - child.getLeft(),
                            y + scrollY - child.getTop())) {
                return true;
            }
        }
    }
    return checkV && ViewCompat.canScrollVertically(v, -dy);
}
java
@Override
public List<CPDefinition> findByCPTaxCategoryId(long CPTaxCategoryId) {
    return findByCPTaxCategoryId(CPTaxCategoryId, QueryUtil.ALL_POS, QueryUtil.ALL_POS, null);
}
python
def _set_amz_headers(self):
    """
    Sets x-amz-* error response fields from response headers.
    """
    if self._response.headers:
        # keeping x-amz-id-2 as part of amz_host_id.
        if 'x-amz-id-2' in self._response.headers:
            self.host_id = self._response.headers['x-amz-id-2']
        if 'x-amz-request-id' in self._response.headers:
            self.request_id = self._response.headers['x-amz-request-id']
        # This is a new undocumented field, set only if available.
        if 'x-amz-bucket-region' in self._response.headers:
            self.region = self._response.headers['x-amz-bucket-region']
java
private synchronized Worker createWorker(int i) throws IOException {
    Preconditions.checkState(mState == State.STARTED,
            "Must be in a started state to create workers");
    File confDir = new File(mWorkDir, "conf-worker" + i);
    File logsDir = new File(mWorkDir, "logs-worker" + i);
    File ramdisk = new File(mWorkDir, "ramdisk" + i);
    logsDir.mkdirs();
    ramdisk.mkdirs();
    int rpcPort = getNewPort();
    int dataPort = getNewPort();
    int webPort = getNewPort();

    Map<PropertyKey, String> conf = new HashMap<>();
    conf.put(PropertyKey.LOGGER_TYPE, "WORKER_LOGGER");
    conf.put(PropertyKey.CONF_DIR, confDir.getAbsolutePath());
    conf.put(PropertyKey.Template.WORKER_TIERED_STORE_LEVEL_DIRS_PATH.format(0),
            ramdisk.getAbsolutePath());
    conf.put(PropertyKey.LOGS_DIR, logsDir.getAbsolutePath());
    conf.put(PropertyKey.WORKER_RPC_PORT, Integer.toString(rpcPort));
    conf.put(PropertyKey.WORKER_WEB_PORT, Integer.toString(webPort));

    Worker worker = mCloser.register(new Worker(logsDir, conf));
    mWorkers.add(worker);
    LOG.info("Created worker with (rpc, data, web) ports ({}, {}, {})", rpcPort, dataPort,
            webPort);
    return worker;
}
python
def evaluate(self, X, *args, return_values_of="auto", return_as_dictionary=False, **kwargs):
    """
    Evaluate the given problem.

    The function values are set as defined in the function. The constraint
    values are meant to be positive if infeasible: a higher positive value
    means "more" infeasible. If they are 0 or negative, they are considered
    feasible, whatever their value is.

    Parameters
    ----------
    X : np.array
        A two-dimensional matrix where each row is a point to evaluate and
        each column a variable.
    return_as_dictionary : bool
        If this is true, only one object, a dictionary, is returned. It
        contains all the results defined by return_values_of. Otherwise, by
        default, a tuple as defined is returned.
    return_values_of : list of strings
        You can provide a list of strings which defines the values that are
        returned. By default it is set to "auto", which means depending on the
        problem the function values or additionally the constraint violation
        (if the problem has constraints) are returned. Otherwise, you can
        provide a list of values to be returned. Allowed is
        ["F", "CV", "G", "dF", "dG", "dCV", "hF", "hG", "hCV", "feasible"]
        where d stands for derivative and h stands for hessian matrix.

    Returns
    -------
    A dictionary, if return_as_dictionary is enabled, or a list of values as
    defined in return_values_of.
    """
    # make the array at least 2-d - even if only one row should be evaluated
    only_single_value = len(np.shape(X)) == 1
    X = np.atleast_2d(X)

    # check the dimensionality of the problem and the given input
    if X.shape[1] != self.n_var:
        raise Exception('Input dimension %s are not equal to n_var %s!' % (X.shape[1], self.n_var))

    # automatically return the function values and CV if it has constraints, if not defined otherwise
    if type(return_values_of) == str and return_values_of == "auto":
        return_values_of = ["F"]
        if self.n_constr > 0:
            return_values_of.append("CV")

    # create the output dictionary for _evaluate to be filled
    out = {}
    for val in return_values_of:
        out[val] = None

    # all values that are set in the evaluation function
    values_not_set = [val for val in return_values_of if val not in self.evaluation_of]

    # have a look if gradients are not set and try to use autograd to calculate them if implemented using it
    gradients_not_set = [val for val in values_not_set if val.startswith("d")]

    # if no autograd is necessary for evaluation just traditionally use the evaluation method
    if len(gradients_not_set) == 0:
        self._evaluate(X, out, *args, **kwargs)
        at_least2d(out)
    # otherwise try to use autograd to calculate the gradient for this problem
    else:
        # calculate the function value by tracing all the calculations
        root, _ = run_and_trace(self._evaluate, X, *[out])
        at_least2d(out)

        # the dictionary where the values are stored
        deriv = {}

        # check whether each result was calculated to be derivable
        for key, val in out.items():
            # skip entries that already are derivatives
            if key.startswith("d"):
                continue
            name = "d" + key
            is_derivable = (type(val) == autograd.numpy.numpy_boxes.ArrayBox)
            # if it should be returned AND was not calculated yet AND is derivable using autograd
            if name in return_values_of and out.get(name) is None and is_derivable:
                # calculate the jacobian matrix and set it - (ignore warnings of autograd here)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    if "h" + key not in out:
                        jac = calc_jacobian(root, val)
                    else:
                        def calc_gradient(X):
                            _out = {}
                            _root, _ = run_and_trace(self._evaluate, X, *[_out])
                            at_least2d(_out)
                            jac = calc_jacobian(_root, _out[key])
                            return jac

                        _root, jac = run_and_trace(calc_gradient, X)
                        hessian = []
                        for k in range(jac.shape[1]):
                            _hessian = calc_jacobian(_root, jac[:, k])
                            hessian.append(_hessian[:, None, ...])
                        hessian = np.concatenate(hessian, axis=1)
                        deriv["h" + key] = hessian
                    deriv[name] = jac

        # merge to the output
        out = {**out, **deriv}

        # convert back to conventional numpy arrays - no array box as return type
        for key in out.keys():
            if type(out[key]) == autograd.numpy.numpy_boxes.ArrayBox:
                out[key] = out[key]._value

    # if constraint violation should be returned as well
    if self.n_constr == 0:
        CV = np.zeros([X.shape[0], 1])
    else:
        CV = Problem.calc_constraint_violation(out["G"])
    if "CV" in return_values_of:
        out["CV"] = CV

    # if an additional boolean flag for feasibility should be returned
    if "feasible" in return_values_of:
        out["feasible"] = (CV <= 0)

    # remove the first dimension of the output - in case input was a 1d vector
    if only_single_value:
        for key in out.keys():
            if out[key] is not None:
                out[key] = out[key][0, :]

    if return_as_dictionary:
        return out
    else:
        # if just a single value do not return a tuple
        if len(return_values_of) == 1:
            return out[return_values_of[0]]
        else:
            return tuple([out[val] for val in return_values_of])
python
def spendables_for_address(self, address):
    """
    Return a list of Spendable objects for the given bitcoin address.
    """
    spendables = []
    url_append = "?unspentOnly=true&token=%s&includeScript=true" % self.api_key
    url = self.base_url("addrs/%s%s" % (address, url_append))
    result = json.loads(urlopen(url).read().decode("utf8"))
    for txn in result.get("txrefs", []):
        coin_value = txn.get("value")
        script = h2b(txn.get("script"))
        previous_hash = h2b_rev(txn.get("tx_hash"))
        previous_index = txn.get("tx_output_n")
        spendables.append(Tx.Spendable(coin_value, script, previous_hash, previous_index))
    return spendables
python
def infer_newX(model, Y_new, optimize=True, init='L2'):
    """
    Infer the distribution of X for the new observed data *Y_new*.

    :param model: the GPy model used in inference
    :type model: GPy.core.Model
    :param Y_new: the new observed data for inference
    :type Y_new: numpy.ndarray
    :param optimize: whether to optimize the location of new X (True by default)
    :type optimize: boolean
    :return: a tuple containing the estimated posterior distribution of X and
        the model that optimizes X
    :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model)
    """
    infr_m = InferenceX(model, Y_new, init=init)
    if optimize:
        infr_m.optimize()
    return infr_m.X, infr_m
java
public boolean insert(int newIndex) {
    // Ignore objects which do not belong in this quad tree
    INDArray point = data.slice(newIndex);
    if (!boundary.containsPoint(point))
        return false;
    cumSize++;
    double mult1 = (double) (cumSize - 1) / (double) cumSize;
    double mult2 = 1.0 / (double) cumSize;
    centerOfMass.muli(mult1);
    centerOfMass.addi(point.mul(mult2));

    // If there is space in this quad tree and it is a leaf, add the object here
    if (isLeaf() && size < QT_NODE_CAPACITY) {
        index[size] = newIndex;
        size++;
        return true;
    }

    // duplicate point
    if (size > 0) {
        for (int i = 0; i < size; i++) {
            INDArray compPoint = data.slice(index[i]);
            if (point.getDouble(0) == compPoint.getDouble(0)
                    && point.getDouble(1) == compPoint.getDouble(1))
                return true;
        }
    }

    // If this Node has already been subdivided just add the elements to the
    // appropriate cell
    if (!isLeaf()) {
        QuadTree index = findIndex(point);
        index.insert(newIndex);
        return true;
    }

    if (isLeaf())
        subDivide();

    boolean ret = insertIntoOneOf(newIndex);
    return ret;
}
java
public File getGeneratedCssFile(String path) {
    String rootDir = tempDir + JawrConstant.CSS_SMARTSPRITES_TMP_DIR;
    String fPath = null;
    if (jawrConfig.isWorkingDirectoryInWebApp()) {
        fPath = jawrConfig.getContext().getRealPath(rootDir + getCssPath(path));
    } else {
        fPath = rootDir + getCssPath(path);
    }
    return new File(fPath);
}
java
public Observable<HierarchicalChildEntity> getHierarchicalEntityChildAsync(UUID appId,
        String versionId, UUID hEntityId, UUID hChildId) {
    return getHierarchicalEntityChildWithServiceResponseAsync(appId, versionId, hEntityId, hChildId)
            .map(new Func1<ServiceResponse<HierarchicalChildEntity>, HierarchicalChildEntity>() {
                @Override
                public HierarchicalChildEntity call(ServiceResponse<HierarchicalChildEntity> response) {
                    return response.body();
                }
            });
}
java
public static Trades adaptTrades(List<BTCTurkTrades> btcTurkTrades, CurrencyPair currencyPair) {
    List<Trade> trades = new ArrayList<>();
    BigDecimal lastTradeId = new BigDecimal("0");
    for (BTCTurkTrades btcTurkTrade : btcTurkTrades) {
        if (btcTurkTrade.getTid().compareTo(lastTradeId) > 0) {
            lastTradeId = btcTurkTrade.getTid();
        }
        trades.add(adaptTrade(btcTurkTrade, currencyPair));
    }
    return new Trades(trades, lastTradeId.longValue(), Trades.TradeSortType.SortByID);
}
java
@PostConstruct
public final void init() {
    long timeToKeepAfterAccessInMillis = this.jobQueue.getTimeToKeepAfterAccessInMillis();
    if (timeToKeepAfterAccessInMillis >= 0) {
        if (TimeUnit.SECONDS.toMillis(this.abandonedTimeout) >= timeToKeepAfterAccessInMillis) {
            final String msg = String.format("%s abandonTimeout must be smaller than %s timeToKeepAfterAccess",
                    getClass().getName(), this.jobQueue.getClass().getName());
            throw new IllegalStateException(msg);
        }
        if (TimeUnit.SECONDS.toMillis(this.timeout) >= timeToKeepAfterAccessInMillis) {
            final String msg = String.format("%s timeout must be smaller than %s timeToKeepAfterAccess",
                    getClass().getName(), this.jobQueue.getClass().getName());
            throw new IllegalStateException(msg);
        }
    }
    CustomizableThreadFactory threadFactory = new CustomizableThreadFactory();
    threadFactory.setDaemon(true);
    threadFactory.setThreadNamePrefix("PrintJobManager-");

    PriorityBlockingQueue<Runnable> queue = new PriorityBlockingQueue<>(
            this.maxNumberOfWaitingJobs,
            (o1, o2) -> {
                if (o1 instanceof JobFutureTask<?> && o2 instanceof JobFutureTask<?>) {
                    Callable<?> callable1 = ((JobFutureTask<?>) o1).getCallable();
                    Callable<?> callable2 = ((JobFutureTask<?>) o2).getCallable();
                    if (callable1 instanceof PrintJob) {
                        if (callable2 instanceof PrintJob) {
                            return ThreadPoolJobManager.this.jobPriorityComparator
                                    .compare((PrintJob) callable1, (PrintJob) callable2);
                        }
                        return 1;
                    } else if (callable2 instanceof PrintJob) {
                        return -1;
                    }
                }
                return 0;
            });

    /* The ThreadPoolExecutor uses an unbounded queue (though we are enforcing a limit in `submit()`).
     * Because of that, the executor creates only `corePoolSize` threads. But to use all threads,
     * we set both `corePoolSize` and `maximumPoolSize` to `maxNumberOfRunningPrintJobs`. As a
     * consequence, the `maxIdleTime` will be ignored, idle threads will not be terminated. */
    this.executor = new ThreadPoolExecutor(this.maxNumberOfRunningPrintJobs, this.maxNumberOfRunningPrintJobs,
            this.maxIdleTime, TimeUnit.SECONDS, queue, threadFactory) {
        @Override
        protected <T> RunnableFuture<T> newTaskFor(final Callable<T> callable) {
            return new JobFutureTask<>(callable);
        }

        @Override
        protected void beforeExecute(final Thread t, final Runnable runnable) {
            if (!ThreadPoolJobManager.this.clustered && runnable instanceof JobFutureTask<?>) {
                JobFutureTask<?> task = (JobFutureTask<?>) runnable;
                if (task.getCallable() instanceof PrintJob) {
                    PrintJob printJob = (PrintJob) task.getCallable();
                    try {
                        ThreadPoolJobManager.this.jobQueue
                                .start(printJob.getEntry().getReferenceId());
                    } catch (RuntimeException e) {
                        LOGGER.error("failed to mark job as running", e);
                    } catch (NoSuchReferenceException e) {
                        LOGGER.error("tried to mark non-existing job as 'running': {}",
                                printJob.getEntry().getReferenceId(), e);
                    }
                }
            }
            super.beforeExecute(t, runnable);
        }
    };

    this.timer = Executors.newScheduledThreadPool(1, timerTask -> {
        final Thread thread = new Thread(timerTask, "Post result to registry");
        thread.setDaemon(true);
        return thread;
    });
    this.timer.scheduleAtFixedRate(new RegistryTask(), RegistryTask.CHECK_INTERVAL,
            RegistryTask.CHECK_INTERVAL, TimeUnit.MILLISECONDS);

    if (this.oldFileCleanUp) {
        this.cleanUpTimer = Executors.newScheduledThreadPool(1, timerTask -> {
            final Thread thread = new Thread(timerTask, "Clean up old files");
            thread.setDaemon(true);
            return thread;
        });
        this.cleanUpTimer.scheduleAtFixedRate(
                this.workingDirectories.getCleanUpTask(), 0,
                this.oldFileCleanupInterval, TimeUnit.SECONDS);
    }
}
java
private static CustomPropertyDescriptor tryBuildFinal(Field field, Class<?> clazz)
        throws IntrospectionException {
    String name = field.getName();
    String readMethodName;
    log.debug("Attempting to build a property descriptor for final field {}", name);
    if (Boolean.class.isAssignableFrom(field.getType())) {
        log.debug("Field is of boolean type");
        if (name.startsWith("is")) {
            readMethodName = name;
        } else {
            readMethodName = "is" + StringUtils.toFirstUpperCase(name);
        }
    } else {
        log.debug("Field is not of boolean type");
        readMethodName = "get" + StringUtils.toFirstUpperCase(name);
    }
    log.debug("Guessed read method name for final field {} to be {}", name, readMethodName);
    return convert(field, new PropertyDescriptor(name, clazz, readMethodName, null), clazz);
}
java
static SelectStatement forSelection(CFMetaData cfm, Selection selection) {
    return new SelectStatement(cfm, 0, defaultParameters, selection, null);
}
python
def search(self, search_text=None, response_type=None, params=None):
    """
    Function to request economic data series that match search text.
    `<https://research.stlouisfed.org/docs/api/fred/series_search.html>`_

    :arg str search_text: The words to match against economic data series. Required.
    :arg str response_type: File extension of response. Options are 'xml', 'json',
        'dict', 'df', 'numpy', 'csv', 'tab', 'pipe'. Required.
    :arg str search_type: Determines the type of search to perform.
        Options are 'full_text', 'series_id'
    :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
    :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
    :arg int limit: The maximum number of results to return. Options 1 to 1000
    :arg int offset: Data offset. Options >=0
    :arg str order_by: Order results by values of the specified attribute. Options are
        'search_rank', 'series_id', 'title', 'units', 'frequency',
        'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
        'observation_start', 'observation_end', 'popularity'
    :arg str sort_order: Sort results for attribute values specified by order_by.
        Options are 'asc', 'desc'
    :arg str filter_variable: The attribute to filter results by. Options are
        'frequency', 'units', 'seasonal_adjustment'
    :arg str filter_value: The value of the filter_variable attribute to filter results by.
    :arg str tag_names: Tag names used to match series. Separate with semicolon as in "income;bea"
    :arg str exclude_tag_names: Tag names used to exclude series. Separate with semicolon
        as in "income;bea"
    :arg bool ssl_verify: To verify HTTPs.
    """
    path = '/series/search?'
    params = params if params is not None else {}  # guard: the original crashed when params was left as None
    params['search_text'] = search_text
    response_type = response_type if response_type else self.response_type
    if response_type != 'xml':
        params['file_type'] = 'json'
    response = _get_request(self.url_root, self.api_key, path, response_type,
                            params, self.ssl_verify)
    return response
python
def assign_authorization_to_vault(self, authorization_id, vault_id):
    """Adds an existing ``Authorization`` to a ``Vault``.

    arg:    authorization_id (osid.id.Id): the ``Id`` of the ``Authorization``
    arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
    raise:  AlreadyExists - ``authorization_id`` is already assigned to ``vault_id``
    raise:  NotFound - ``authorization_id`` or ``vault_id`` not found
    raise:  NullArgument - ``authorization_id`` or ``vault_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
    mgr = self._get_provider_manager('AUTHORIZATION', local=True)
    lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
    lookup_session.get_vault(vault_id)  # to raise NotFound
    self._assign_object_to_catalog(authorization_id, vault_id)
python
def free_params(self, value):
    """Set the free parameters.

    Note that this bypasses enforce_bounds.
    """
    value = scipy.asarray(value, dtype=float)
    self.K_up_to_date = False
    self.k.free_params = value[:self.k.num_free_params]
    self.w.free_params = value[self.k.num_free_params:
                               self.k.num_free_params + self.w.num_free_params]
java
public synchronized OtpErlangPid createPid() {
    final OtpErlangPid p = new OtpErlangPid(node, pidCount, serial, creation);

    pidCount++;
    if (pidCount > 0x7fff) {
        pidCount = 0;

        serial++;
        if (serial > 0x1fff) { /* 13 bits */
            serial = 0;
        }
    }

    return p;
}
python
def rewind(self):
    '''Return the uncompressed stream file position indicator to the
    beginning of the file'''
    if self.mode != READ:
        raise OSError("Can't rewind in write mode")
    self.fileobj.seek(0)
    self._new_member = True
    self.extrabuf = b""
    self.extrasize = 0
    self.extrastart = 0
    self.offset = 0
java
public java.util.Map captureStatistics() {
    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.entry(this, cclass, "captureStatistics");

    java.util.Map statistics = new java.util.HashMap();
    statistics.put("maximumAvailableSize", Long.toString(maximumAvailableSize));

    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.exit(this, cclass, "captureStatistics", new Object[] { statistics });
    return statistics;
}
java
private static <T extends Annotation> Class<? extends Annotation>
        getRepeatableAnnotationContainerClassFor(Class<T> annotationClass) {
    Repeatable repeatableAnnotation = annotationClass.getDeclaredAnnotation(Repeatable.class);
    return (repeatableAnnotation == null) ? null : repeatableAnnotation.value();
}
java
@Override
public SubsetMove getRandomMove(SubsetSolution solution, Random rnd) {
    // get set of candidate IDs for deletion (fixed IDs are discarded)
    Set<Integer> delCandidates = getRemoveCandidates(solution);
    // compute maximum number of deletions
    int curMaxDel = maxDeletions(delCandidates, solution);
    // return null if no removals are possible
    if (curMaxDel == 0) {
        return null;
    }
    // pick number of deletions (in [1, curMaxDel])
    int numDel = rnd.nextInt(curMaxDel) + 1;
    // pick random IDs to remove from selection
    Set<Integer> del = SetUtilities.getRandomSubset(delCandidates, numDel, rnd);
    // create and return move
    return new GeneralSubsetMove(Collections.emptySet(), del);
}
python
def display_image(self, reset=1):
    """Utility routine used to display an updated frame from a framebuffer.
    """
    try:
        fb = self.server.controller.get_frame(self.frame)
    except KeyError:
        # the selected frame does not exist, create it
        fb = self.server.controller.init_frame(self.frame)

    # derive width/height in all cases (the original left them undefined
    # when fb.height was already set, raising NameError below)
    width = fb.width
    if not fb.height:
        height = int(len(fb.buffer) / width)
        fb.height = height
    else:
        height = fb.height

    # display the image
    if (len(fb.buffer) > 0) and (height > 0):
        self.server.controller.display(self.frame, width, height, True)
    else:
        self.server.controller.display(self.frame, fb.width, fb.height, False)
java
@CheckReturnValue
@SchedulerSupport(SchedulerSupport.NONE)
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> Maybe<T> merge(MaybeSource<? extends MaybeSource<? extends T>> source) {
    ObjectHelper.requireNonNull(source, "source is null");
    return RxJavaPlugins.onAssembly(new MaybeFlatten(source, Functions.identity()));
}
java
@SuppressWarnings("unchecked")
public EList<IfcRelCoversSpaces> getCoversSpaces() {
    return (EList<IfcRelCoversSpaces>) eGet(
            Ifc2x3tc1Package.Literals.IFC_COVERING__COVERS_SPACES, true);
}
java
public BatchOptions jitterDuration(final int jitterDuration) {
    BatchOptions clone = getClone();
    clone.jitterDuration = jitterDuration;
    return clone;
}
python
def save_yaml_file(file, val):
    """
    Save data to yaml file

    :param file: Writable object or path to file
    :type file: FileIO | str | unicode
    :param val: Value or struct to save
    :type val: None | int | float | str | unicode | list | dict
    """
    opened = False
    if not hasattr(file, "write"):
        file = io.open(file, "w", encoding="utf-8")
        opened = True
    try:
        yaml.dump(val, file)
    finally:
        if opened:
            file.close()
python
def qr_code(self, instance):
    """
    Display picture of QR-code from used secret
    """
    try:
        return self._qr_code(instance)
    except Exception as err:
        if settings.DEBUG:
            import traceback
            return "<pre>%s</pre>" % traceback.format_exc()
java
@Override
public Set<KamNode> getAdjacentNodes(KamNode kamNode, EdgeDirectionType edgeDirection) {
    return wrapNodes(kam.getAdjacentNodes(kamNode, edgeDirection));
}
python
def traverse(self):
    """Enumerate children and build associated objects
    """
    builder = self.child_builder
    for child in self._children:
        with pushd(str(child)):
            yield child, builder(child)
python
def update_node(cls, cluster_id_label, command, private_dns, parameters=None):
    """
    Add a node to an existing cluster
    """
    conn = Qubole.agent(version=Cluster.api_version)
    parameters = {} if not parameters else parameters
    data = {"command": command, "private_dns": private_dns, "parameters": parameters}
    return conn.put(cls.element_path(cluster_id_label) + "/nodes", data)
java
public Observable<OperationStatus> deleteHierarchicalEntityAsync(UUID appId, String versionId,
        UUID hEntityId) {
    return deleteHierarchicalEntityWithServiceResponseAsync(appId, versionId, hEntityId)
            .map(new Func1<ServiceResponse<OperationStatus>, OperationStatus>() {
                @Override
                public OperationStatus call(ServiceResponse<OperationStatus> response) {
                    return response.body();
                }
            });
}
java
@Override
public BatchDeleteConnectionResult batchDeleteConnection(BatchDeleteConnectionRequest request) {
    request = beforeClientExecution(request);
    return executeBatchDeleteConnection(request);
}
java
public double errorMembership(double[] sampleA) {
    double[] eig = sampleToEigenSpace(sampleA);
    double[] reproj = eigenToSampleSpace(eig);

    double total = 0;
    for (int i = 0; i < reproj.length; i++) {
        double d = sampleA[i] - reproj[i];
        total += d * d;
    }

    return Math.sqrt(total);
}
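The method above measures membership by reprojection error: project the sample into the eigenspace, map it back, and take the residual norm. A hedged NumPy sketch of the same idea; the (mean, components) layout is an assumption, not the library's actual API:

    import numpy as np

    def error_membership(sample, mean, components):
        # components: (k, n) orthonormal rows spanning the eigenspace
        coeffs = components @ (sample - mean)   # sample -> eigenspace
        reproj = mean + components.T @ coeffs   # eigenspace -> sample space
        return np.linalg.norm(sample - reproj)  # Euclidean reconstruction error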
python
def update_settings(self, settings, force=False, timeout=-1):
    """
    Updates interconnect settings on the logical interconnect. Changes to
    interconnect settings are asynchronously applied to all managed
    interconnects.
    (This method is not available from API version 600 onwards)

    Args:
        settings: Interconnect settings
        force: If set to true, the operation completes despite any problems with
            network connectivity or errors on the resource itself. The default is false.
        timeout: Timeout in seconds. Wait for task completion by default. The timeout
            does not abort the operation in OneView; it just stops waiting for its completion.

    Returns:
        dict: Logical Interconnect
    """
    data = settings.copy()
    if 'ethernetSettings' in data:
        ethernet_default_values = self._get_default_values(self.SETTINGS_ETHERNET_DEFAULT_VALUES)
        data['ethernetSettings'] = merge_resources(data['ethernetSettings'],
                                                   ethernet_default_values)
    uri = "{}/settings".format(self.data["uri"])
    default_values = self._get_default_values(self.SETTINGS_DEFAULT_VALUES)
    data = self._helper.update_resource_fields(data, default_values)
    return self._helper.update(data, uri=uri, force=force, timeout=timeout)
java
public static <Value extends Comparable<Value>> int nullSafeCompare(final Value first,
        final Value second) {
    if (first == null) {
        return second == null ? EQUAL_COMPARE_RESULT : LOWER_THAN_COMPARE_RESULT;
    }
    return second == null ? GREATER_THAN_COMPARE_RESULT : first.compareTo(second);
}
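A hedged Python mirror of the same null-safe ordering (None sorts before any value), for comparison:

    def null_safe_compare(a, b):
        if a is None:
            return 0 if b is None else -1
        return 1 if b is None else (a > b) - (a < b)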
java
private String acquireString() throws IOException, JspException {
    if (isAbsoluteUrl) {
        // for absolute URLs, delegate to our peer
        BufferedReader r = new BufferedReader(acquireReader());
        StringBuffer sb = new StringBuffer();
        int i;
        // under JIT, testing seems to show this simple loop is as fast
        // as any of the alternatives
        while ((i = r.read()) != -1) {
            sb.append((char) i);
        }
        return sb.toString();
    } else {
        // handle relative URLs ourselves
        // URL is relative, so we must be an HTTP request
        if (!(pageContext.getRequest() instanceof HttpServletRequest
                && pageContext.getResponse() instanceof HttpServletResponse)) {
            throw new JspTagException(
                    Resources.getMessage("IMPORT_REL_WITHOUT_HTTP"));
        }
        // retrieve an appropriate ServletContext
        ServletContext c = null;
        String targetUrl = targetUrl();
        if (context != null) {
            c = pageContext.getServletContext().getContext(context);
        } else {
            c = pageContext.getServletContext();
            // normalize the URL if we have an HttpServletRequest
            if (!targetUrl.startsWith("/")) {
                String sp = ((HttpServletRequest) pageContext.getRequest()).getServletPath();
                targetUrl = sp.substring(0, sp.lastIndexOf('/')) + '/' + targetUrl;
            }
        }
        if (c == null) {
            throw new JspTagException(
                    Resources.getMessage("IMPORT_REL_WITHOUT_DISPATCHER", context, targetUrl));
        }
        // from this context, get a dispatcher
        RequestDispatcher rd = c.getRequestDispatcher(stripSession(targetUrl));
        if (rd == null) {
            throw new JspTagException(stripSession(targetUrl));
        }
        // Wrap the response so we capture the output.
        // This relies on the underlying container to return content even if this is a HEAD
        // request. Some containers (e.g. Tomcat versions without the fix for
        // https://bz.apache.org/bugzilla/show_bug.cgi?id=57601 ) may not do that.
        ImportResponseWrapper irw =
                new ImportResponseWrapper((HttpServletResponse) pageContext.getResponse());
        // spec mandates specific error handling from include()
        try {
            rd.include(pageContext.getRequest(), irw);
        } catch (IOException ex) {
            throw new JspException(ex);
        } catch (RuntimeException ex) {
            throw new JspException(ex);
        } catch (ServletException ex) {
            Throwable rc = ex.getRootCause();
            while (rc instanceof ServletException) {
                rc = ((ServletException) rc).getRootCause();
            }
            if (rc == null) {
                throw new JspException(ex);
            } else {
                throw new JspException(rc);
            }
        }
        // disallow inappropriate response codes per JSTL spec
        if (irw.getStatus() < 200 || irw.getStatus() > 299) {
            throw new JspTagException(irw.getStatus() + " " + stripSession(targetUrl));
        }
        // recover the response String from our wrapper
        return irw.getString();
    }
}
java
public String toFormattedString(boolean includeColumnNames) {
    final int MAX_PRINTABLE_CHARS = 30;
    // choose print width for the geography column such that it can print a polygon
    // in an aligned manner, for a polygon composed of up to 4 vertices + 1 repeat vertex,
    // one ring, with each vertex coordinate taking 5 digits including the sign of lng/lat
    final int MAX_PRINTABLE_CHARS_GEOGRAPHY = 74;
    final String ELLIPSIS = "...";
    final String DECIMAL_FORMAT = "%01.12f";

    StringBuffer sb = new StringBuffer();

    int columnCount = getColumnCount();
    int[] padding = new int[columnCount];
    String[] fmt = new String[columnCount];
    // start with minimum padding based on length of column names. this gets
    // increased later as needed
    for (int i = 0; i < columnCount; i++) {
        padding[i] = getColumnName(i).length(); // min value to be increased later
    }
    resetRowPosition();

    // Compute the padding needed for each column of the table (note: must
    // visit every row)
    while (advanceRow()) {
        for (int i = 0; i < columnCount; i++) {
            VoltType colType = getColumnType(i);
            Object value = get(i, colType);
            int width;
            if (wasNull()) {
                width = 4;
            } else if (colType == VoltType.DECIMAL) {
                BigDecimal bd = (BigDecimal) value;
                String valueStr = String.format(DECIMAL_FORMAT, bd.doubleValue());
                width = valueStr.length();
            }
            // crop long strings and such
            else {
                if (colType == VoltType.VARBINARY) {
                    width = ((byte[]) value).length * 2;
                } else {
                    width = value.toString().length();
                }
                if (((colType == VoltType.GEOGRAPHY) && (width > MAX_PRINTABLE_CHARS_GEOGRAPHY))
                        || ((colType != VoltType.GEOGRAPHY) && (width > MAX_PRINTABLE_CHARS))) {
                    width = (colType == VoltType.GEOGRAPHY)
                            ? MAX_PRINTABLE_CHARS_GEOGRAPHY : MAX_PRINTABLE_CHARS;
                }
            }

            // Adjust the max width for each column
            if (width > padding[i]) {
                padding[i] = width;
            }
        }
    }

    String pad = ""; // no pad before first column header.
    // calculate formatting space based on columns.
    // Append column names and separator line to buffer
    for (int i = 0; i < columnCount; i++) {
        padding[i] += 1;
        // Determine the formatting string for each column
        VoltType colType = getColumnType(i);
        String justification = (colType.isVariableLength()
                || colType == VoltType.TIMESTAMP
                || colType == VoltType.GEOGRAPHY_POINT) ? "-" : "";
        fmt[i] = "%1$" + justification + padding[i] + "s";
        if (includeColumnNames) {
            // Serialize the column headers
            sb.append(pad).append(String.format("%1$-" + padding[i] + "s", getColumnName(i)));
            pad = " ";
        }
    }
    if (includeColumnNames) {
        // construct separator to be used between column name header and table values
        sb.append("\n");
        // Serialize the separator between the column headers and the rows of data
        pad = "";
        for (int i = 0; i < columnCount; i++) {
            char[] underline_array = new char[padding[i]];
            Arrays.fill(underline_array, '-');
            sb.append(pad).append(new String(underline_array));
            pad = " ";
        }
        sb.append("\n");
    }

    // Serialize each formatted row of data.
    resetRowPosition();
    while (advanceRow()) {
        pad = "";
        for (int i = 0; i < columnCount; i++) {
            VoltType colType = getColumnType(i);
            Object value = get(i, colType);
            String valueStr;
            if (wasNull()) {
                valueStr = "NULL";
            } else if (colType == VoltType.DECIMAL) {
                BigDecimal bd = (BigDecimal) value;
                valueStr = String.format(DECIMAL_FORMAT, bd.doubleValue());
            } else {
                if (colType == VoltType.VARBINARY) {
                    valueStr = Encoder.hexEncode((byte[]) value);
                    // crop long varbinaries
                    if (valueStr.length() > MAX_PRINTABLE_CHARS) {
                        valueStr = valueStr.substring(0, MAX_PRINTABLE_CHARS - ELLIPSIS.length()) + ELLIPSIS;
                    }
                } else {
                    valueStr = value.toString();
                }
            }
            sb.append(pad).append(String.format(fmt[i], valueStr));
            pad = " ";
        }
        sb.append("\n");
    }

    // Idempotent. Reset the row position for the next guy...
    resetRowPosition();

    return sb.toString();
}
java
@SuppressWarnings("unchecked")
private void siftDown(int k, E x) {
    int half = size >>> 1;
    while (k < half) { // loop while a non-leaf
        int child = (k << 1) + 1; // assume left child is least
        E c = (E) queue[child];
        int right = child + 1;
        if (right < size && c.compareTo((E) queue[right]) > 0)
            c = (E) queue[child = right];
        if (x.compareTo(c) <= 0)
            break;
        queue[k] = c;
        k = child;
    }
    queue[k] = x;
}
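For reference, a minimal Python sketch of the same sift-down invariant on a 0-indexed, list-backed min-heap; a sketch under those assumptions, not the class above:

    def sift_down(queue, size, k, x):
        half = size >> 1
        while k < half:                       # stop once k is a leaf
            child = 2 * k + 1                 # assume the left child is least
            right = child + 1
            if right < size and queue[right] < queue[child]:
                child = right
            if x <= queue[child]:
                break
            queue[k] = queue[child]           # promote the smaller child
            k = child
        queue[k] = x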
java
public void configure(LifecycleEnvironment environment, MetricRegistry registry) {
    for (ReporterFactory reporter : reporters) {
        try {
            final ScheduledReporterManager manager = new ScheduledReporterManager(
                    reporter.build(registry),
                    reporter.getFrequency().orElseGet(this::getFrequency),
                    reportOnStop);
            environment.manage(manager);
        } catch (Exception e) {
            LOGGER.warn("Failed to create reporter, metrics may not be properly reported.", e);
        }
    }
}
python
def _clean_body_df(self, df):
    """Format the dataframe, remove empty rows, and add units attribute."""
    if self.suffix == '-drvd.txt':
        df = df.dropna(subset=('temperature', 'reported_relative_humidity',
                               'u_wind', 'v_wind'), how='all').reset_index(drop=True)
        df.units = {'pressure': 'hPa',
                    'reported_height': 'meter',
                    'calculated_height': 'meter',
                    'temperature': 'Kelvin',
                    'temperature_gradient': 'Kelvin / kilometer',
                    'potential_temperature': 'Kelvin',
                    'potential_temperature_gradient': 'Kelvin / kilometer',
                    'virtual_temperature': 'Kelvin',
                    'virtual_potential_temperature': 'Kelvin',
                    'vapor_pressure': 'Pascal',
                    'saturation_vapor_pressure': 'Pascal',
                    'reported_relative_humidity': 'percent',
                    'calculated_relative_humidity': 'percent',
                    'u_wind': 'meter / second',
                    'u_wind_gradient': '(meter / second) / kilometer',
                    'v_wind': 'meter / second',
                    'v_wind_gradient': '(meter / second) / kilometer',
                    'refractive_index': 'unitless'}
    else:
        df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
                                                         np.deg2rad(df['direction']))
        df['u_wind'] = np.round(df['u_wind'], 1)
        df['v_wind'] = np.round(df['v_wind'], 1)
        df = df.dropna(subset=('temperature', 'direction', 'speed',
                               'dewpoint_depression', 'u_wind', 'v_wind'),
                       how='all').reset_index(drop=True)
        df['dewpoint'] = df['temperature'] - df['dewpoint_depression']
        df.drop('dewpoint_depression', axis=1, inplace=True)
        df.units = {'etime': 'second',
                    'pressure': 'hPa',
                    'height': 'meter',
                    'temperature': 'degC',
                    'dewpoint': 'degC',
                    'direction': 'degrees',
                    'speed': 'meter / second',
                    'u_wind': 'meter / second',
                    'v_wind': 'meter / second'}
    return df
java
@Override
public void run() {
    /* Wait for connections... */
    while (true) {
        // Accept requests from clients.
        try {
            clientSocket = serverSocket.accept();
            /* Create a process for the communication and start it */
            final AbstractClientHandler clientHandler = newClientHandler(clientSocket);
            final Thread thread = new Thread(clientHandler);
            thread.start();
        } catch (final IOException e) {
            /* Log the error of the server if IO fails. Something bad has happened */
            logger.error("Could not accept " + e);
        }
    }
}
python
def guess_cls(self):
    """Guess the packet class that must be used on the interface"""
    # Get the data link type
    try:
        ret = fcntl.ioctl(self.ins, BIOCGDLT, struct.pack('I', 0))
        ret = struct.unpack('I', ret)[0]
    except IOError:
        cls = conf.default_l2
        warning("BIOCGDLT failed: unable to guess type. Using %s !", cls.name)
        return cls
    # Retrieve the corresponding class
    try:
        return conf.l2types[ret]
    except KeyError:
        cls = conf.default_l2
        warning("Unable to guess type (type %i). Using %s", ret, cls.name)
        return cls  # fall back to the default class (return was missing in the original)
java
public OffsetTime withOffsetSameInstant(ZoneOffset offset) {
    if (offset.equals(this.offset)) {
        return this;
    }
    int difference = offset.getTotalSeconds() - this.offset.getTotalSeconds();
    LocalTime adjusted = time.plusSeconds(difference);
    return new OffsetTime(adjusted, offset);
}
python
def find_home_config_files(filetype=['json', 'yaml']):
    """Return configs of ``.vcspull.{yaml,json}`` in user's home directory."""
    configs = []

    yaml_config = os.path.expanduser('~/.vcspull.yaml')
    has_yaml_config = os.path.exists(yaml_config)
    json_config = os.path.expanduser('~/.vcspull.json')
    has_json_config = os.path.exists(json_config)

    if not has_yaml_config and not has_json_config:
        log.debug(
            'No config file found. Create a .vcspull.yaml or .vcspull.json'
            ' in your $HOME directory. http://vcspull.git-pull.com for a'
            ' quickstart.'
        )
    else:
        if has_yaml_config and has_json_config:
            raise exc.MultipleConfigWarning()
        if has_yaml_config:
            configs.append(yaml_config)
        if has_json_config:
            configs.append(json_config)

    return configs
java
public static KeyChain configureKeyChain(Name deviceName)
        throws net.named_data.jndn.security.SecurityException {
    // access key chain in ~/.ndn; creates if necessary
    PrivateKeyStorage keyStorage = new FilePrivateKeyStorage();
    IdentityStorage identityStorage = new BasicIdentityStorage();
    KeyChain keyChain = new KeyChain(new IdentityManager(identityStorage, keyStorage),
            new SelfVerifyPolicyManager(identityStorage));

    // create keys, certs if necessary
    if (!identityStorage.doesIdentityExist(deviceName)) {
        Name certificateName = keyChain.createIdentityAndCertificate(deviceName);
        Name keyName = IdentityCertificate.certificateNameToPublicKeyName(certificateName);
        keyChain.setDefaultKeyForIdentity(keyName);
    }

    // set default identity
    keyChain.getIdentityManager().setDefaultIdentity(deviceName);
    return keyChain;
}
python
def stripascii(string):
    """Return string truncated at last byte that is 7-bit ASCII.

    Clean NULL separated and terminated TIFF strings.

    >>> stripascii(b'string\\x00string\\n\\x01\\x00')
    b'string\\x00string\\n'
    >>> stripascii(b'\\x00')
    b''

    """
    # TODO: pythonize this
    i = len(string)
    while i:
        i -= 1
        if 8 < byte2int(string[i]) < 127:
            break
    else:
        i = -1
    return string[:i+1]
python
def _get_view_infos(self, trimmed=False):
    """query the sherlock-catalogues database view metadata
    """
    self.log.debug('starting the ``_get_view_infos`` method')

    sqlQuery = u"""
        SELECT v.*, t.description as "master table"
        FROM crossmatch_catalogues.tcs_helper_catalogue_views_info as v,
             crossmatch_catalogues.tcs_helper_catalogue_tables_info AS t
        where v.legacy_view = 0
            and v.view_name not like "legacy%%"
            and t.id = v.table_id
        order by number_of_rows desc
    """ % locals()
    viewInfo = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
        quiet=False
    )

    if trimmed:
        cleanTable = []
        for r in viewInfo:
            orow = collections.OrderedDict(sorted({}.items()))
            for c in self.basicColumns:
                if c in r:
                    orow[c] = r[c]
            cleanTable.append(orow)
        viewInfo = cleanTable

    self.log.debug('completed the ``_get_view_infos`` method')
    return viewInfo
python
def inflate(self):
    """Load the resource from the server, if not already loaded."""
    if not self._is_inflated:
        if self._is_inflating:
            # catch infinite recursion when attempting to inflate
            # an object that doesn't have enough data to inflate
            msg = ("There is not enough data to inflate this object. "
                   "Need either an href: {} or a {}: {}")
            msg = msg.format(self._href, self.primary_key,
                             self._data.get(self.primary_key))
            raise exceptions.ClientError(msg)

        self._is_inflating = True

        try:
            params = self.searchParameters if hasattr(self, 'searchParameters') else {}
            # To keep the method same as the original request. The default is GET
            self.load(self.client.request(self.method, self.url, **params))
        except Exception:
            self.load(self._data)

        self._is_inflated = True
        self._is_inflating = False
    return self
python
def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
                 proportion=0.75, plot=True):
    """Identify epochs within a multivariate time series where at least a
    certain proportion of channels are "stationary", based on a previously
    computed variability measure.

    (Note: This requires an IPython cluster to be started first,
     e.g. on a workstation type 'ipcluster start')

    Args:
      ts  Timeseries of m variables, shape (n, m).
      variability  (optional) Timeseries of shape (n, m), giving a scalar
        measure of the variability of timeseries `ts` near each point in time.
        (if None, we will use variability_fp())
      threshold  The maximum variability permitted in stationary epochs.
      minlength  Shortest acceptable epoch length (in seconds)
      proportion  Require at least this fraction of channels to be "stationary"
      plot  bool  Whether to display the output

    Returns:
      (variability, joint_epochs)
      joint_epochs: list of tuples
      A list of tuples (start, end) that give the starting and ending indices
      of time epochs that are stationary for at least `proportion` of channels.
      (epochs are inclusive of start point but not the end point)
    """
    variability, allchannels_epochs = ts.epochs_distributed(
        variability, threshold, minlength, plot=False)
    orig_ndim = ts.ndim
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
        allchannels_epochs = [allchannels_epochs]
        variability = variability[:, np.newaxis]
    channels = ts.shape[1]
    dt = (1.0 * ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
    starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
    ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
    all = sorted(starts + ends)
    joint_epochs = []
    in_joint_epoch = False
    joint_start = 0.0
    inside_count = 0
    for bound in all:
        inside_count += bound[1]
        if not in_joint_epoch and 1.0 * inside_count / channels >= proportion:
            in_joint_epoch = True
            joint_start = bound[0]
        if in_joint_epoch and 1.0 * inside_count / channels < proportion:
            in_joint_epoch = False
            joint_end = bound[0]
            if (joint_end - joint_start) * dt >= minlength:
                joint_epochs.append((joint_start, joint_end))
    if plot:
        joint_epochs_repeated = [joint_epochs] * channels
        _plot_variability(ts, variability, threshold, joint_epochs_repeated)
    return (variability, joint_epochs)
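The joint-epoch computation above is an endpoint sweep: each per-channel epoch contributes a (+1) event at its start and a (-1) event at its end, and a running sum counts how many channels are stationary at once. A stripped-down sketch of just that counting step (the interval data is illustrative):

    intervals = [(0, 10), (5, 20), (8, 12)]
    events = sorted([(s, 1) for s, _ in intervals] + [(e, -1) for _, e in intervals])
    inside = 0
    for pos, delta in events:
        inside += delta  # number of intervals covering `pos` after this event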
java
public static DescriptorExtensionList<FileSystemProvisioner, FileSystemProvisionerDescriptor> all() {
    return Jenkins.getInstance()
            .<FileSystemProvisioner, FileSystemProvisionerDescriptor>getDescriptorList(FileSystemProvisioner.class);
}
java
public HybridRunbookWorkerGroupInner get(String resourceGroupName, String automationAccountName,
        String hybridRunbookWorkerGroupName) {
    return getWithServiceResponseAsync(resourceGroupName, automationAccountName,
            hybridRunbookWorkerGroupName).toBlocking().single().body();
}
python
def import_items(item_seq, dest_model, batch_len=500, clear=False, dry_run=True, start_batch=0, end_batch=None,
                 overwrite=True, run_update=False, ignore_related=True, ignore_errors=False, verbosity=1):
    """Import a sequence (queryset.values(), generator, tuple, list) of dicts into the given model"""
    if isinstance(dest_model, (djmodels.query.QuerySet, djmodels.Manager)):
        dest_qs = dest_model.all()
        dest_model = get_model(dest_qs)
    else:
        dest_qs = dest_model.objects.all()
    stats = collections.Counter()
    try:
        try:
            src_qs = item_seq.objects.all()
        except AttributeError:
            src_qs = item_seq.all()
        N = src_qs.count()
        item_seq = src_qs.values()
    except AttributeError:
        # item_seq is a plain sequence (tuple, list, queryset.values()) of dicts
        src_qs = item_seq
        N = len(item_seq)
    if not N:
        if verbosity > 0:
            print('No records found in %r' % src_qs)
        return N
    # make sure there's a valid last batch number so the verbose messages will make sense
    end_batch = end_batch or int(N / float(batch_len))
    if clear and not dry_run:
        if N < dest_qs.count():
            if verbosity > 0:
                print('WARNING: There are %d %r records in the destination queryset, which is more than the %d records in the source data, so no records will be deleted/cleared in the destination!' % (dest_qs.count(), dest_model, N))
        else:
            if verbosity > 0:
                print('WARNING: Deleting %d records from %r to make room for %d new records!' % (dest_qs.count(), dest_model, N))
            num_deleted = delete_in_batches(dest_qs)
            if verbosity > 0:
                print('Finished deleting %d records in %r.' % (num_deleted, dest_model))
    if verbosity > 0:
        print('Loading %r records from sequence provided...' % N)
    widgets = [pb.Counter(), '/%d rows: ' % (N or 1), pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(), ' ', pb.ETA()]
    pbar = pb.ProgressBar(widgets=widgets, maxval=N)
    for batch_num, dict_batch in enumerate(util.generate_slices(item_seq, batch_len=batch_len, start_batch=start_batch)):
        if start_batch + batch_num > end_batch:
            if verbosity > 1:
                print('Stopping before batch {0} because it is not between {1} and {2}'.format(start_batch + batch_num, start_batch, end_batch))
            break
        if verbosity > 2:
            print('-------- dict batch ------')
            print(repr((start_batch + batch_num, len(dict_batch), batch_len)))
        item_batch = []
        total_len_batches = 0
        # convert an iterable of Django ORM record dictionaries into a list of Django ORM objects
        for d in dict_batch:
            if verbosity > 2:
                print('-------- dict of source obj ------')
                print(repr(d))
            obj = dest_model()
            row_errors = collections.Counter()  # so `stats += row_errors` below is always defined
            try:
                # if the model has an import_item method then use it
                obj.import_item(d, verbosity=verbosity)
            except Exception:
                if verbosity > 2:
                    print('------ Creating a new %r instance --------' % dest_model)
                obj, row_errors = django_object_from_row(d, dest_model, ignore_related=ignore_related, verbosity=verbosity)
                if verbosity > 2:
                    print('new obj.__dict__: %r' % obj.__dict__)
            if run_update:
                try:
                    if verbosity > 2:
                        print('------ Updating FKs with overwrite=%r --------' % overwrite)
                    obj._update(save=False, overwrite=overwrite)
                except Exception:
                    if verbosity > 0:
                        print_exc()
                        print('ERROR: Unable to update record: %r' % obj)
            item_batch += [obj]
            stats += row_errors
        del dict_batch
        if verbosity and verbosity < 2:
            if batch_num:
                pbar.update(batch_num * batch_len + len(item_batch))
            else:
                # don't start the progress bar until at least one batch has been loaded
                pbar.start()
        elif verbosity > 1:
            print('Writing {0} items (of type {1}) from batch {2}. Will stop at batch {3}, which is record {4}...'.format(
                len(item_batch), dest_model, start_batch + batch_num, end_batch,
                min(batch_len * (start_batch + end_batch), N)))
        total_len_batches += len(item_batch)
        # use bulk_create to make fast DB insertions. Note: any custom save() or _update() methods will *NOT* be run
        if not dry_run:
            try:
                dest_model.objects.bulk_create(item_batch)
            except UnicodeDecodeError as err:
                from django.db import transaction
                transaction.rollback()
                if verbosity > 0:
                    print('%s' % err)
                    print('Now attempting to save objects one at a time instead of as a batch...')
                for obj in item_batch:
                    try:
                        obj.save()
                        stats += collections.Counter(['batch_UnicodeDecodeError'])
                    except Exception:
                        transaction.rollback()
                        stats += collections.Counter(['save_UnicodeDecodeError'])
                        print(str(obj))
                        print(repr(obj.__dict__))
                        if not ignore_errors:
                            print_exc()
                            raise
            except Exception as err:
                from django.db import transaction
                transaction.rollback()
                if verbosity > 0:
                    print('%s' % err)
                    print('Now attempting to save objects one at a time instead of as a batch...')
                for obj in item_batch:
                    try:
                        obj.save()
                        stats += collections.Counter(['batch_Error'])
                    except Exception:
                        transaction.rollback()
                        print(str(obj))
                        print(repr(obj.__dict__))
                        print_exc()
                        stats += collections.Counter(['save_Error'])
                        if not ignore_errors:
                            raise
        if batch_num < end_batch:
            if len(item_batch) != batch_len:
                stats += collections.Counter(['batch_len={0}'.format(len(item_batch))])
            if verbosity > 0:
                print('Retrieving {0} {1} items for the next batch, batch number {2}...'.format(
                    batch_len, dest_model, batch_num + 1))
        del item_batch
    if verbosity > 0:
        pbar.finish()
    return stats
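A minimal usage sketch for import_items; `Contact` is a stand-in Django model and `rows` a stand-in source sequence, neither of which is part of the original module:

rows = [{'name': 'Alice'}, {'name': 'Bob'}]
# dry_run defaults to True, so writes must be requested explicitly
stats = import_items(rows, Contact, batch_len=100, dry_run=False, verbosity=1)
print(stats)  # a collections.Counter of per-row and per-batch outcomes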
python
def default_ms(name, tabdesc=None, dminfo=None): """ Creates a default Measurement Set called name. Any Table Description elements in tabdesc will overwrite the corresponding element in a default Measurement Set Table Description (columns, hypercolumns and keywords). In practice, you probably want to specify columns such as DATA, MODEL_DATA and CORRECTED_DATA (and their associated keywords and hypercolumns) in tabdesc. """ # Default to empty dictionaries if tabdesc is None: tabdesc = {} if dminfo is None: dminfo = {} # Wrap the Table object return table(_default_ms(name, tabdesc, dminfo), _oper=3)
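A hedged example of calling default_ms, assuming the enclosing module is importable as shown; the table methods used are standard casacore table calls:

ms = default_ms('example.ms')   # create an empty default Measurement Set
print(ms.colnames())            # inspect the default columns
ms.close()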
python
def user_parse(data): """Parse information from provider.""" id_ = data.get('id') yield 'id', id_ yield 'email', data.get('email') yield 'first_name', data.get('first_name') yield 'last_name', data.get('last_name') yield 'username', data.get('name') yield 'picture', 'http://graph.facebook.com/{0}/picture?' \ 'type=large'.format(id_) yield 'link', data.get('link') yield 'locale', data.get('locale') yield 'gender', data.get('gender') location = data.get('location', {}).get('name') if location: split_location = location.split(', ') yield 'city', split_location[0].strip() if len(split_location) > 1: yield 'country', split_location[1].strip()
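Because user_parse is a generator of (key, value) pairs, callers typically collapse it into a dict; a sketch with made-up provider data:

data = {'id': '42', 'email': 'ada@example.com', 'first_name': 'Ada',
        'last_name': 'Lovelace', 'name': 'ada', 'link': None, 'locale': 'en_US',
        'gender': None, 'location': {'name': 'London, United Kingdom'}}
profile = dict(user_parse(data))
assert profile['city'] == 'London' and profile['country'] == 'United Kingdom'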
python
def check_error(status): """Set a generic function as the restype attribute of all OpenJPEG functions that return a BOOL_TYPE value. This way we do not have to check for error status in each wrapping function and an exception will always be appropriately raised. """ global ERROR_MSG_LST if status != 1: if len(ERROR_MSG_LST) > 0: # clear out the existing error message so that we don't pick up # a bad one next time around. msg = '\n'.join(ERROR_MSG_LST) ERROR_MSG_LST = [] raise OpenJPEGLibraryError(msg) else: raise OpenJPEGLibraryError("OpenJPEG function failure.")
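A sketch of the wiring the docstring describes, using ctypes' support for assigning a plain callable as restype on an int-returning function; the library and function names below are placeholders, not confirmed OpenJPEG symbols:

import ctypes
_lib = ctypes.CDLL('libopenjp2.so')          # placeholder library name
_lib.opj_end_compress.restype = check_error  # ctypes calls check_error with the raw int status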
java
public void marshall(ListDevicesRequest listDevicesRequest, ProtocolMarshaller protocolMarshaller) { if (listDevicesRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(listDevicesRequest.getArn(), ARN_BINDING); protocolMarshaller.marshall(listDevicesRequest.getNextToken(), NEXTTOKEN_BINDING); protocolMarshaller.marshall(listDevicesRequest.getFilters(), FILTERS_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def _critical_point_for( self, mount: top_types.Mount, cp_override: CriticalPoint = None) -> top_types.Point: """ Return the current critical point of the specified mount. The mount's critical point is the position of the mount itself, if no pipette is attached, or the pipette's critical point (which depends on tip status). If `cp_override` is specified, and that critical point actually exists, it will be used instead. Invalid `cp_override`s are ignored. """ pip = self._attached_instruments[mount] if pip is not None and cp_override != CriticalPoint.MOUNT: return pip.critical_point(cp_override) else: # TODO: The smoothie’s z/a home position is calculated to provide # the offset for a P300 single. Here we should decide whether we # implicitly accept this as correct (by returning a null offset) # or not (by returning an offset calculated to move back up the # length of the P300 single). return top_types.Point(0, 0, 0)
python
def FromString(cls, string_rep):
    """Create a DataStreamSelector from a string.

    The format of the string should either be:

    all <type>
    OR
    <type> <id>

    Where type is [system] <stream type>, with <stream type>
    defined as in DataStream

    Args:
        rep (str): The string representation to convert to a DataStreamSelector
    """

    rep = str(string_rep)

    # Strip the plural form first, otherwise removing 'node' from 'nodes'
    # leaves a stray 's' behind that breaks the type lookup below.
    rep = rep.replace(u'nodes', '')
    rep = rep.replace(u'node', '')

    if rep.startswith(u'all'):
        parts = rep.split()

        spec_string = u''

        if len(parts) == 3:
            spec_string = parts[1]
            stream_type = parts[2]
        elif len(parts) == 2:
            stream_type = parts[1]
        else:
            raise ArgumentError("Invalid wildcard stream selector", string_rep=string_rep)

        try:
            # Remove pluralization that can come with e.g. 'all system outputs'
            if stream_type.endswith(u's'):
                stream_type = stream_type[:-1]

            stream_type = DataStream.StringToType[stream_type]
        except KeyError:
            raise ArgumentError("Invalid stream type given", stream_type=stream_type, known_types=DataStream.StringToType.keys())

        stream_spec = DataStreamSelector.SpecifierNames.get(spec_string, None)
        if stream_spec is None:
            raise ArgumentError("Invalid stream specifier given (should be system, user, combined or blank)", string_rep=string_rep, spec_string=spec_string)

        return DataStreamSelector(stream_type, None, stream_spec)

    # If we're not matching a wildcard stream type, then the match is exactly
    # the same as a DataStream identifier, so use that to match it.

    stream = DataStream.FromString(rep)
    return DataStreamSelector.FromStream(stream)
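A few illustrative selector strings; the exact stream-type names live in DataStream.StringToType, which is not shown here, so 'output' is an assumption:

sel1 = DataStreamSelector.FromString('all outputs')         # combined wildcard
sel2 = DataStreamSelector.FromString('all system outputs')  # system-only wildcard
sel3 = DataStreamSelector.FromString('output 1')            # exact stream match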
java
private JsonObject getDefaultWatermark() { Schema schema = new Schema(); String dataType; String columnName = "derivedwatermarkcolumn"; schema.setColumnName(columnName); WatermarkType wmType = WatermarkType.valueOf( this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_WATERMARK_TYPE, "TIMESTAMP").toUpperCase()); switch (wmType) { case TIMESTAMP: dataType = "timestamp"; break; case DATE: dataType = "date"; break; default: dataType = "int"; break; } String elementDataType = "string"; List<String> mapSymbols = null; JsonObject newDataType = this.convertDataType(columnName, dataType, elementDataType, mapSymbols); schema.setDataType(newDataType); schema.setWaterMark(true); schema.setPrimaryKey(0); schema.setLength(0); schema.setPrecision(0); schema.setScale(0); schema.setNullable(false); schema.setFormat(null); schema.setComment("Default watermark column"); schema.setDefaultValue(null); schema.setUnique(false); String jsonStr = gson.toJson(schema); JsonObject obj = gson.fromJson(jsonStr, JsonObject.class).getAsJsonObject(); return obj; }
java
private Map<Object, Object> getClassPathManifestAttributes() { Map<Object, Object> manifestAttributes = null; try { LOGGER.debug("Using Manifest file:{}", getClass().getClassLoader().getResource(MANIFEST).getPath()); Manifest manifest = new Manifest(getClass().getClassLoader().getResourceAsStream(MANIFEST)); manifestAttributes = manifest.getMainAttributes(); } catch (IOException e) { LOGGER.warn("Unable to read the manifest from the classpath"); LOGGER.debug("Unable to read the manifest from the classpath", e); } return manifestAttributes; }
python
def on_start(self): """ start the service """ LOGGER.debug("natsd.Service.on_start") self.service = threading.Thread(target=self.run_event_loop, name=self.serviceQ + " service thread") self.service.start() while not self.is_started: time.sleep(0.01)
java
private TreeNode insert(TreeNode node, NumberVector nv) { // Find closest child: ClusteringFeature[] cfs = node.children; assert (cfs[0] != null) : "Unexpected empty node!"; // Find the best child: ClusteringFeature best = cfs[0]; double bestd = distance.squaredDistance(nv, best); for(int i = 1; i < cfs.length; i++) { ClusteringFeature cf = cfs[i]; if(cf == null) { break; } double d2 = distance.squaredDistance(nv, cf); if(d2 < bestd) { best = cf; bestd = d2; } } // Leaf node: if(!(best instanceof TreeNode)) { // Threshold constraint satisfied? if(absorption.squaredCriterion(best, nv) <= thresholdsq) { best.addToStatistics(nv); node.addToStatistics(nv); return null; } best = new ClusteringFeature(nv.getDimensionality()); best.addToStatistics(nv); ++leaves; if(add(node.children, best)) { node.addToStatistics(nv); // Update statistics return null; } return split(node, best); } assert (best instanceof TreeNode) : "Node is neither child nor inner?"; TreeNode newchild = insert((TreeNode) best, nv); if(newchild == null || add(node.children, newchild)) { node.addToStatistics(nv); // Update statistics return null; } return split(node, newchild); }
python
def split_predicate(ex: Extraction) -> Extraction: """ Ensure single word predicate by adding "before-predicate" and "after-predicate" arguments. """ rel_toks = ex.toks[char_to_word_index(ex.rel.span[0], ex.sent) \ : char_to_word_index(ex.rel.span[1], ex.sent) + 1] if not rel_toks: return ex verb_inds = [tok_ind for (tok_ind, tok) in enumerate(rel_toks) if tok.tag_.startswith('VB')] last_verb_ind = verb_inds[-1] if verb_inds \ else (len(rel_toks) - 1) rel_parts = [element_from_span([rel_toks[last_verb_ind]], 'V')] before_verb = rel_toks[ : last_verb_ind] after_verb = rel_toks[last_verb_ind + 1 : ] if before_verb: rel_parts.append(element_from_span(before_verb, "BV")) if after_verb: rel_parts.append(element_from_span(after_verb, "AV")) return Extraction(ex.sent, ex.toks, ex.arg1, rel_parts, ex.args2, ex.confidence)
python
def destroy(cls, url):
    """
    This operation deletes an existing endpoint from the list of all endpoints,
    and makes the server stop listening on the endpoint.

    *Note*: deleting and disconnecting an endpoint is allowed in the system database
    only. Calling this action in any other database will make the server return
    an error.

    Furthermore, the last remaining endpoint cannot be deleted as this would
    render the server unusable.

    :param url: The endpoint to delete, e.g. tcp://127.0.0.1:8529.
    """
    api = Client.instance().api
    api.endpoint(url).delete()
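A hedged call sketch; `Endpoint` is an assumed name for the class that owns this classmethod, and the URL is the docstring's own example:

# must be run against the _system database, per the note above
Endpoint.destroy('tcp://127.0.0.1:8529')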
java
private static List<String> filterDelimeterElement(List<String> arr, char[] delimeter) {
    List<String> list = new ArrayList<String>();
    for (String s : arr) {
        if (s == null || s.isEmpty()) {
            continue;
        }
        if (s.length() > 1) {
            list.add(s);
            continue;
        }
        char strChar = s.charAt(0);
        boolean found = false;
        for (char c : delimeter) {
            if (c == strChar) {
                found = true;
                break;
            }
        }
        if (!found) {
            list.add(s);
        }
    }
    return list;
}
java
public int insertMetaBeanById(String tableName, MicroMetaBean microMetaBean) {
    //JdbcTemplate jdbcTemplate = (JdbcTemplate) MicroDbHolder.getDbSource(dbName);
    JdbcTemplate jdbcTemplate = getMicroJdbcTemplate();
    final MicroMetaBean insertBean = microMetaBean;
    String timeName = getTimeName();
    String sql = "insert into " + tableName
            + "(id,meta_content,meta_key,meta_name,meta_type,remark,create_time,update_time) values(?,?,?,?,?,?," + timeName + "," + timeName + ") ";
    List<Object> paramList = new ArrayList<Object>();
    paramList.add(insertBean.getId());
    paramList.add(insertBean.getMeta_content());
    paramList.add(insertBean.getMeta_key());
    paramList.add(insertBean.getMeta_name());
    paramList.add(insertBean.getMeta_type());
    paramList.add(insertBean.getRemark());
    logger.debug(sql);
    // log the parameter values rather than the array object's identity hash
    logger.debug(java.util.Arrays.toString(paramList.toArray()));
    Integer retStatus = jdbcTemplate.update(sql, paramList.toArray());
    return retStatus;
}
java
@Override public double getStandardDeviation(RandomVariable probabilities) { if(isDeterministic()) { return 0.0; } if(size() == 0) { return Double.NaN; } return Math.sqrt(getVariance(probabilities)); }
java
public Traverson startWith(final String uriTemplate, final Map<String, Object> vars) { return startWith(fromTemplate(uriTemplate).expand(vars)); }
java
public void writeThisClass(String strClassName, String strRecordType) { Record recFileHdr = this.getRecord(FileHdr.FILE_HDR_FILE); try { recFileHdr.addNew(); recFileHdr.getField(FileHdr.FILE_NAME).setString(strClassName); recFileHdr.setKeyArea(FileHdr.FILE_NAME_KEY); if (recFileHdr.seek(DBConstants.EQUALS)) // Is there a file with this name? strRecordType = "Record"; // Just to be sure } catch (DBException ex) { ex.printStackTrace(); } if (strRecordType.length() == 0) strRecordType = " "; if (m_ClassNameList.addName(strClassName) == false) return; WriteClass writeClass = null; if (strRecordType.equalsIgnoreCase("Record")) writeClass = new WriteRecordClass(this.getTask(), null, null); else if (strRecordType.equalsIgnoreCase("Resource")) writeClass = new WriteResourceClass(this.getTask(), null, null); else writeClass = new WriteClass(this.getTask(), null, null); if (writeClass != null) { writeClass.writeClass(strClassName, CodeType.THICK); writeClass.free(); } }
python
def later_than(after, before):
    """ True if `after` is later than or equal to `before` """
    if isinstance(after, str):
        after = str_to_time(after)
    elif isinstance(after, int):
        after = time.gmtime(after)

    if isinstance(before, str):
        before = str_to_time(before)
    elif isinstance(before, int):
        before = time.gmtime(before)

    return after >= before
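A quick demonstration of the mixed input types the helper normalizes (string inputs depend on str_to_time's expected format, so only ints and struct_time values are shown):

import time
assert later_than(time.gmtime(200), time.gmtime(100))  # struct_time comparison
assert later_than(200, 100)                            # epoch ints are converted first
assert later_than(150, 150)                            # equality counts as 'later or equal'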
python
def get_pac(url=None, js=None, from_os_settings=True, from_dns=True, timeout=2, allowed_content_types=None, **kwargs): """ Convenience function for finding and getting a parsed PAC file (if any) that's ready to use. :param str url: Download PAC from a URL. If provided, `from_os_settings` and `from_dns` are ignored. :param str js: Parse the given string as a PAC file. If provided, `from_os_settings` and `from_dns` are ignored. :param bool from_os_settings: Look for a PAC URL or filesystem path from the OS settings, and use it if present. Doesn't do anything on non-Windows or non-macOS/OSX platforms. :param bool from_dns: Look for a PAC file using the WPAD protocol. :param timeout: Time to wait for host resolution and response for each URL. :param allowed_content_types: If the response has a ``Content-Type`` header, then consider the response to be a PAC file only if the header is one of these values. If not specified, the allowed types are ``application/x-ns-proxy-autoconfig`` and ``application/x-javascript-config``. :return: The first valid parsed PAC file according to the criteria, or `None` if nothing was found. :rtype: PACFile|None :raises MalformedPacError: If something that claims to be a PAC file was obtained but could not be parsed. """ if url: downloaded_pac = download_pac([url], timeout=timeout, allowed_content_types=allowed_content_types) if not downloaded_pac: return return PACFile(downloaded_pac, **kwargs) if js: return PACFile(js, **kwargs) # Deprecated in 0.8.2 from_registry = kwargs.get('from_registry') if from_registry is not None: import warnings warnings.warn('from_registry is deprecated, use from_os_settings instead.') from_os_settings = from_registry if from_os_settings: if ON_WINDOWS: path = autoconfig_url_from_registry() elif ON_DARWIN: path = autoconfig_url_from_preferences() else: path = None if path and path.lower().startswith('file://'): path = file_url_to_local_path(path) if path and os.path.isfile(path): with open(path) as f: return PACFile(f.read(), **kwargs) pac_candidate_urls = collect_pac_urls(from_os_settings=True, from_dns=from_dns) downloaded_pac = download_pac(pac_candidate_urls, timeout=timeout, allowed_content_types=allowed_content_types) if not downloaded_pac: return return PACFile(downloaded_pac, **kwargs)
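Typical call patterns per the docstring; the PAC URL below is a placeholder:

pac = get_pac(url='http://internal.example.com/proxy.pac', timeout=5)
if pac is None:
    pac = get_pac()  # fall back to OS settings plus WPAD discovery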
python
def to_netcdf(ds, *args, **kwargs): """ Store the given dataset as a netCDF file This functions works essentially the same as the usual :meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time units Parameters ---------- ds: xarray.Dataset The dataset to store %(xarray.Dataset.to_netcdf.parameters)s """ to_update = {} for v, obj in six.iteritems(ds.variables): units = obj.attrs.get('units', obj.encoding.get('units', None)) if units == 'day as %Y%m%d.%f' and np.issubdtype( obj.dtype, np.datetime64): to_update[v] = xr.Variable( obj.dims, AbsoluteTimeEncoder(obj), attrs=obj.attrs.copy(), encoding=obj.encoding) to_update[v].attrs['units'] = units if to_update: ds = ds.copy() ds.update(to_update) return xarray_api.to_netcdf(ds, *args, **kwargs)
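A hedged sketch of the absolute-time path; the dataset below is illustrative, and the branch only triggers for datetime64 variables whose units are 'day as %Y%m%d.%f':

import numpy as np
import xarray as xr
ds = xr.Dataset({'time': ('time', np.array(['2000-01-01'], dtype='datetime64[ns]'))})
ds['time'].attrs['units'] = 'day as %Y%m%d.%f'
to_netcdf(ds, 'out.nc')  # 'time' is written through AbsoluteTimeEncoder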
java
public void execute() throws BuildException { try { log( "Generating test cases for inputDef=" + options_.getInputDef()); File logFile = getLogFile(); if( logFile != null) { System.setProperty( "tcases.log.file", logFile.getAbsolutePath()); log( "For details, see " + logFile.getAbsolutePath()); } TcasesCommand.run( options_); } catch( Exception e) { throw new BuildException( e, getLocation()); } }
java
protected DocumentReference createDocument(String docType, Object document, String ownerType, Long ownerId, Long processInstanceId, String searchKey1, String searchKey2) throws EventHandlerException { ListenerHelper helper = new ListenerHelper(); return helper.createDocument(docType, document, getPackage(), ownerType, ownerId); }
java
public Observable<ServiceResponse<Page<JobExecutionInner>>> listByJobExecutionWithServiceResponseAsync(final String resourceGroupName, final String serverName, final String jobAgentName, final String jobName, final UUID jobExecutionId) { return listByJobExecutionSinglePageAsync(resourceGroupName, serverName, jobAgentName, jobName, jobExecutionId) .concatMap(new Func1<ServiceResponse<Page<JobExecutionInner>>, Observable<ServiceResponse<Page<JobExecutionInner>>>>() { @Override public Observable<ServiceResponse<Page<JobExecutionInner>>> call(ServiceResponse<Page<JobExecutionInner>> page) { String nextPageLink = page.body().nextPageLink(); if (nextPageLink == null) { return Observable.just(page); } return Observable.just(page).concatWith(listByJobExecutionNextWithServiceResponseAsync(nextPageLink)); } }); }
python
def authorize(self, request, *args, **kwargs): """ authorization logic raises PermissionDenied if user is not authorized """ user = request.user if not user.is_authenticated or not user.socialaccount_set.exists(): raise PermissionDenied()
python
def imshow(image, format, **kwargs): """Draw an image in the current context figure. Parameters ---------- image: image data Image data, depending on the passed format, can be one of: - an instance of an ipywidgets Image - a file name - a raw byte string format: {'widget', 'filename', ...} Type of the input argument. If not 'widget' or 'filename', must be a format supported by the ipywidgets Image. options: dict (default: {}) Options for the scales to be created. If a scale labeled 'x' is required for that mark, options['x'] contains optional keyword arguments for the constructor of the corresponding scale type. axes_options: dict (default: {}) Options for the axes to be created. If an axis labeled 'x' is required for that mark, axes_options['x'] contains optional keyword arguments for the constructor of the corresponding axis type. """ if format == 'widget': ipyimage = image elif format == 'filename': with open(image, 'rb') as f: data = f.read() ipyimage = ipyImage(value=data) else: ipyimage = ipyImage(value=image, format=format) kwargs['image'] = ipyimage kwargs.setdefault('x', [0., 1.]) kwargs.setdefault('y', [0., 1.]) return _draw_mark(Image, **kwargs)
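Brief sketches of the three input formats the docstring lists ('photo.png' is a placeholder path):

mark = imshow('photo.png', format='filename')  # read image data from disk
with open('photo.png', 'rb') as f:
    mark = imshow(f.read(), format='png')      # raw bytes plus an ipywidgets-supported format
# an existing ipywidgets Image can also be passed with format='widget'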
java
private Object setElementCollection(Object entity, Object thriftColumnValue, MetamodelImpl metaModel, Attribute attribute) { String cqlColumnMetadata = null; Map<ByteBuffer, String> schemaTypes = this.clientBase.getCqlMetadata().getValue_types(); for (Map.Entry<ByteBuffer, String> schemaType : schemaTypes.entrySet()) { String key = UTF8Serializer.instance.deserialize((schemaType.getKey())); if (key.equals(((AbstractAttribute) attribute).getJPAColumnName())) { cqlColumnMetadata = schemaType.getValue(); break; } } Field field = (Field) ((AbstractAttribute) attribute).getJavaMember(); Class embeddedClass = ((AbstractAttribute) attribute).getBindableJavaType(); if (List.class.isAssignableFrom(((Field) attribute.getJavaMember()).getType())) { ListType listType = null; try { listType = ListType.getInstance(new TypeParser(cqlColumnMetadata.substring( cqlColumnMetadata.indexOf("("), cqlColumnMetadata.length()))); } catch (ConfigurationException | SyntaxException e) { log.error(e.getMessage()); throw new KunderaException("Error while getting instance of ListType " + e); } return setElementCollectionList(listType, ByteBuffer.wrap((byte[]) thriftColumnValue), entity, field, metaModel, embeddedClass, true); } else if (Set.class.isAssignableFrom(((Field) attribute.getJavaMember()).getType())) { SetType setType = null; try { setType = SetType.getInstance(new TypeParser(cqlColumnMetadata.substring( cqlColumnMetadata.indexOf("("), cqlColumnMetadata.length()))); } catch (ConfigurationException | SyntaxException e) { log.error(e.getMessage()); throw new KunderaException("Error while getting instance of SetType " + e); } return setElementCollectionSet(setType, ByteBuffer.wrap((byte[]) thriftColumnValue), entity, field, metaModel, embeddedClass, true); } else if (Map.class.isAssignableFrom(((Field) attribute.getJavaMember()).getType())) { MapType mapType = null; try { mapType = MapType.getInstance(new TypeParser(cqlColumnMetadata.substring( cqlColumnMetadata.indexOf("("), cqlColumnMetadata.length()))); } catch (ConfigurationException | SyntaxException e) { log.error(e.getMessage()); throw new KunderaException("Error while getting instance of MapType " + e); } return setElementCollectionMap(mapType, ByteBuffer.wrap((byte[]) thriftColumnValue), entity, field, metaModel, embeddedClass, true); } return entity; }
java
private HttpLocalFormat getFormat() { HttpLocalFormat format = threadStorage.get(); if (null == format) { format = new HttpLocalFormat(); threadStorage.set(format); } return format; }
python
def inspect_mem(self, mem):
    """
    Get the values in a memory during the current simulation cycle.

    :param mem: the memory to inspect
    :return: {address: value}

    Note that this returns the current memory state. Modifying the
    dictionary will also modify the state in the simulator.
    """
    if isinstance(mem, RomBlock):
        raise PyrtlError("ROM blocks are not stored in the simulation object")
    return self.mems[self._mem_varname(mem)]
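A usage sketch; `sim` is assumed to be a running Simulation and `mem` a MemBlock declared elsewhere:

state = sim.inspect_mem(mem)
print(state.get(0))  # value stored at address 0, if any
state[0] = 0xFF      # mutates simulator state, per the note above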
python
def get_sigma(database_file_name='', e_min=np.NaN, e_max=np.NaN, e_step=np.NaN, t_kelvin=None):
    """retrieve the energy and sigma axes for the given isotope

    :param database_file_name: path/to/file with extension
    :type database_file_name: string
    :param e_min: left edge of the energy range in eV for the interpolated data
    :type e_min: float
    :param e_max: right edge of the energy range in eV for the interpolated data
    :type e_max: float
    :param e_step: energy step in eV for interpolation
    :type e_step: float
    :param t_kelvin: temperature in Kelvin
    :type t_kelvin: float
    :return: {'energy_eV': np.array, 'sigma_b': np.array}
    :rtype: dict
    """
    file_extension = os.path.splitext(database_file_name)[1]
    if t_kelvin is None:
        # '.csv' files
        if file_extension != '.csv':
            raise IOError("Cross-section file type must be '.csv'")
        _df = get_database_data(file_name=database_file_name)
        _dict = get_interpolated_data(df=_df, e_min=e_min, e_max=e_max,
                                      e_step=e_step)
        return {'energy_eV': _dict['x_axis'],
                'sigma_b': _dict['y_axis']}
    else:
        raise ValueError("Doppler-broadened cross-sections are not yet supported in the current version.")
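An interpolation call over a 1-1000 eV window; the file path is a placeholder:

result = get_sigma(database_file_name='Co.csv', e_min=1.0, e_max=1000.0, e_step=0.1)
energies, sigmas = result['energy_eV'], result['sigma_b']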
python
def get_default_val(self): """Helper to expand default value (support callables).""" val = self.default while callable(val): val = val() return val
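Because the while loop keeps calling until a non-callable appears, a default can be a factory that returns another factory; a small demonstration with a stand-in class (not from the source):

class Field(object):
    def __init__(self, default):
        self.default = default

Field.get_default_val = get_default_val  # attach the helper as a method
f = Field(default=lambda: (lambda: 42))  # factory returning a factory
assert f.get_default_val() == 42         # both layers unwrapped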
java
private void checkNotClosed() throws SISessionUnavailableException { if (tc.isEntryEnabled()) SibTr.entry(tc, "checkNotClosed"); // Check that the consumer session isn't closed _consumerSession.checkNotClosed(); // Now check that this consumer hasn't closed. synchronized (this) { if(_closed) { SISessionUnavailableException e = new SISessionUnavailableException( nls.getFormattedMessage( "CONSUMER_CLOSED_ERROR_CWSIP0177", new Object[] { _localConsumerPoint.getConsumerManager().getDestination().getName(), _localConsumerPoint.getConsumerManager().getMessageProcessor().getMessagingEngineName()}, null)); if (tc.isEntryEnabled()) SibTr.exit(tc, "checkNotClosed", "consumer closed"); throw e; } } if (tc.isEntryEnabled()) SibTr.exit(tc, "checkNotClosed"); }