Dataset schema: language (string, 2 values: python, java) · func_code_string (string, 63 to 466k characters)
python
import numpy as np

def rotateAboutVectorMatrix(vec, theta_deg):
    """Construct the matrix that rotates a vector about the axis ``vec``
    by an angle of theta_deg degrees.

    Taken from
    http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Input:
    vec        (array) Axis to rotate about.
    theta_deg  (float) Angle through which vectors should be rotated, in degrees.

    Returns:
    A matrix

    To rotate a vector, premultiply by this matrix.
    To rotate the coord sys underneath the vector, post multiply.
    """
    ct = np.cos(np.radians(theta_deg))
    st = np.sin(np.radians(theta_deg))

    # Ensure vector has unit length (note: normalizes vec in place)
    vec /= np.linalg.norm(vec)
    assert np.all(np.isfinite(vec))

    # compute the three terms
    term1 = ct * np.eye(3)

    ucross = np.zeros((3, 3))
    ucross[0] = [0, -vec[2], vec[1]]
    ucross[1] = [vec[2], 0, -vec[0]]
    ucross[2] = [-vec[1], vec[0], 0]
    term2 = st * ucross

    ufunny = np.zeros((3, 3))
    for i in range(0, 3):
        for j in range(i, 3):
            ufunny[i, j] = vec[i] * vec[j]
            ufunny[j, i] = ufunny[i, j]
    term3 = (1 - ct) * ufunny

    return term1 + term2 + term3
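A quick sanity check (a minimal sketch, assuming numpy is imported as above): rotating the x unit vector by 90 degrees about the z axis should give the y unit vector.

axis = np.array([0.0, 0.0, 1.0])  # rotate about z; pass floats, since vec is normalized in place
R = rotateAboutVectorMatrix(axis, 90.0)
v = R.dot(np.array([1.0, 0.0, 0.0]))
assert np.allclose(v, [0.0, 1.0, 0.0])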
python
def show_all(self, as_string=True):
    """Show all registered patterns; python2 will not show flags."""
    result = []
    for item in self.container:
        pattern = str(item[0])[10:] if PY3 else item[0].pattern
        instances = item[2] or []
        value = (
            '%s "%s"' % (item[1].__name__, (item[1].__doc__ or ""))
            if callable(item[1])
            else str(item[1])
        )
        value = "%s %s" % (type(item[1]), value)
        result.append(" => ".join((pattern, ",".join(instances), value)))
    return "\n".join(result) if as_string else result
python
def ExecQuery(self, QueryLanguage, Query, namespace=None, **extra):
    # pylint: disable=invalid-name
    """
    Execute a query in a namespace.

    This method performs the ExecQuery operation (see :term:`DSP0200`).
    See :ref:`WBEM operations` for a list of all methods performing
    such operations.

    If the operation succeeds, this method returns. Otherwise, this
    method raises an exception.

    Parameters:

      QueryLanguage (:term:`string`):
        Name of the query language used in the `Query` parameter, e.g.
        "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
        Language.

      Query (:term:`string`):
        Query string in the query language specified in the
        `QueryLanguage` parameter.

      namespace (:term:`string`):
        Name of the CIM namespace to be used (case independent).

        Leading and trailing slash characters will be stripped. The
        lexical case will be preserved.

        If `None`, the default namespace of the connection object will
        be used.

      **extra :
        Additional keyword arguments are passed as additional operation
        parameters to the WBEM server. Note that :term:`DSP0200` does
        not define any additional parameters for this operation.

    Returns:

        A list of :class:`~pywbem.CIMInstance` objects that represents
        the query result.

        These instances have their `path` attribute set to identify
        their creation class and the target namespace of the query,
        but they are not addressable instances.

    Raises:

        Exceptions described in :class:`~pywbem.WBEMConnection`.
    """
    exc = None
    instances = None
    method_name = 'ExecQuery'

    if self._operation_recorders:
        self.operation_recorder_reset()
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            QueryLanguage=QueryLanguage,
            Query=Query,
            namespace=namespace,
            **extra)

    try:
        stats = self.statistics.start_timer(method_name)
        namespace = self._iparam_namespace_from_namespace(namespace)
        result = self._imethodcall(
            method_name,
            namespace,
            QueryLanguage=QueryLanguage,
            Query=Query,
            **extra)

        if result is None:
            instances = []
        else:
            instances = [x[2] for x in result[0][2]]

        for instance in instances:
            # The ExecQuery CIM-XML operation returns instances as any of
            # (VALUE.OBJECT | VALUE.OBJECTWITHLOCALPATH |
            # VALUE.OBJECTWITHPATH), i.e. classes or instances with or
            # without path which may or may not contain a namespace.
            # TODO: Fix current impl. that assumes instance with path.
            instance.path.namespace = namespace

        return instances

    except (CIMXMLParseError, XMLParseError) as exce:
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        exc = exce
        raise
    finally:
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(instances, exc)
java
@Deprecated
public void delete(String job, String instance) throws IOException {
    delete(job, Collections.singletonMap("instance", instance));
}
python
def delete(context, force, yes, analysis_id):
    """Delete an analysis log from the database."""
    analysis_obj = context.obj['store'].analysis(analysis_id)
    if analysis_obj is None:
        print(click.style('analysis log not found', fg='red'))
        context.abort()

    print(click.style(f"{analysis_obj.family}: {analysis_obj.status}"))

    if analysis_obj.is_temp:
        if yes or click.confirm(f"remove analysis log?"):
            analysis_obj.delete()
            context.obj['store'].commit()
            print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
    else:
        if analysis_obj.is_deleted:
            print(click.style(f"{analysis_obj.family}: already deleted", fg='red'))
            context.abort()

        if Path(analysis_obj.out_dir).exists():
            root_dir = context.obj['store'].families_dir
            family_dir = analysis_obj.out_dir
            if not force and (len(family_dir) <= len(root_dir) or
                              root_dir not in family_dir):
                print(click.style(f"unknown analysis output dir: {analysis_obj.out_dir}", fg='red'))
                print(click.style("use '--force' to override"))
                context.abort()

            if yes or click.confirm(f"remove analysis output: {analysis_obj.out_dir}?"):
                shutil.rmtree(analysis_obj.out_dir, ignore_errors=True)
                analysis_obj.is_deleted = True
                context.obj['store'].commit()
                print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
        else:
            print(click.style(f"analysis output doesn't exist: {analysis_obj.out_dir}", fg='red'))
            context.abort()
java
void notifyListenersOnStateChange() {
    LOGGER.debug("Notifying connection listeners about state change to {}", state);
    for (ConnectionListener listener : connectionListeners) {
        switch (state) {
            case CONNECTED:
                listener.onConnectionEstablished(connection);
                break;
            case CONNECTING:
                listener.onConnectionLost(connection);
                break;
            case CLOSED:
                listener.onConnectionClosed(connection);
                break;
            default:
                break;
        }
    }
}
java
public CmsClientSitemapEntry removeSubEntry(CmsUUID entryId) {
    CmsClientSitemapEntry removed = null;
    int position = -1;
    if (!m_subEntries.isEmpty()) {
        for (int i = 0; i < m_subEntries.size(); i++) {
            if (m_subEntries.get(i).getId().equals(entryId)) {
                position = i;
            }
        }
        if (position != -1) {
            removed = m_subEntries.remove(position);
            updatePositions(position);
        }
    }
    return removed;
}
java
@NullSafe
public static boolean isWaiting(Thread thread) {
    return (thread != null && Thread.State.WAITING.equals(thread.getState()));
}
python
def register_transport_ready_event(self, user_cb, user_arg):
    """
    Register for transport ready events. The `transport ready` event is
    raised via a user callback. If the endpoint is configured as a source,
    then the user may then call :py:meth:`write_transport` in order to
    send data to the associated sink. Otherwise, if the endpoint is
    configured as a sink, then the user may call :py:meth:`read_transport`
    to read from the associated source instead.

    :param func user_cb: User defined callback function. It must take one
        parameter which is the user's callback argument.
    :param user_arg: User defined callback argument.
    :return:

    See also: :py:meth:`unregister_transport_ready_event`
    """
    self.user_cb = user_cb
    self.user_arg = user_arg
java
public String getString(String name, String defaultValue) {
    JsonValue value = get(name);
    return value != null ? value.asString() : defaultValue;
}
python
def tandem(args):
    """
    %prog tandem blast_file cds_file bed_file [options]

    Find tandem gene clusters that are separated by N genes, based on
    filtered blast_file by enforcing alignments between any two genes at
    least 50% (or user specified value) of either gene.

    pep_file can also be used in same manner.
    """
    p = OptionParser(tandem.__doc__)
    p.add_option("--tandem_Nmax", dest="tandem_Nmax", type="int", default=3,
                 help="merge tandem genes within distance [default: %default]")
    p.add_option("--percent_overlap", type="int", default=50,
                 help="tandem genes have >=x% aligned sequence, x=0-100 "
                      "[default: %default]")
    p.set_align(evalue=.01)
    p.add_option("--not_self", default=False, action="store_true",
                 help="provided is not self blast file [default: %default]")
    p.add_option("--strip_gene_name", dest="sep", type="string", default=".",
                 help="strip alternative splicing. Use None for no stripping. "
                      "[default: %default]")
    p.add_option("--genefamily", dest="genefam", action="store_true",
                 help="compile gene families based on similarity [default: %default]")
    p.set_outfile()

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blast_file, cds_file, bed_file = args
    N = opts.tandem_Nmax
    P = opts.percent_overlap
    is_self = not opts.not_self
    sep = opts.sep
    ofile = opts.outfile

    tandem_main(blast_file, cds_file, bed_file, N=N, P=P, is_self=is_self,
                evalue=opts.evalue, strip_name=sep, ofile=ofile,
                genefam=opts.genefam)
java
protected String toBase64(String user, String password) {
    StringBuffer stringBuffer = new StringBuffer();
    stringBuffer.append(user);
    stringBuffer.append(":");
    stringBuffer.append(password);
    return Base64Utility.encode(stringBuffer.toString().getBytes());
}
java
public static Calendar popCalendar() {
    Calendar result;
    Deque<Calendar> calendars = CALENDARS.get();
    if (calendars.isEmpty()) {
        result = Calendar.getInstance();
    } else {
        result = calendars.pop();
    }
    return result;
}
java
@Conditioned
@Lorsque("Je mets à jour la case à cocher '(.*)-(.*)' avec '(.*)'[\\.|\\?]")
@Then("I update checkbox '(.*)-(.*)' with '(.*)'[\\.|\\?]")
public void selectCheckbox(String page, String elementKey, boolean value, List<GherkinStepCondition> conditions) throws TechnicalException, FailureException {
    selectCheckbox(Page.getInstance(page).getPageElementByKey('-' + elementKey), value);
}
java
public static Runnable writeTerminusMarker(final String nonce, final NodeSettings paths, final VoltLogger logger) {
    final File f = new File(paths.getVoltDBRoot(), VoltDB.TERMINUS_MARKER);
    return new Runnable() {
        @Override
        public void run() {
            try (PrintWriter pw = new PrintWriter(new FileWriter(f), true)) {
                pw.println(nonce);
            } catch (IOException e) {
                throw new RuntimeException("Failed to create .complete file for " + f.getName(), e);
            }
        }
    };
}
java
public static CmsEntityAttribute createEntityAttribute(String name, List<CmsEntity> values) {
    CmsEntityAttribute result = new CmsEntityAttribute();
    result.m_name = name;
    result.m_entityValues = Collections.unmodifiableList(values);
    return result;
}
java
private AbstractPlanNode addProjection(AbstractPlanNode rootNode) {
    assert (m_parsedSelect != null);
    assert (m_parsedSelect.m_displayColumns != null);

    // Build the output schema for the projection based on the display columns
    NodeSchema proj_schema = m_parsedSelect.getFinalProjectionSchema();
    for (SchemaColumn col : proj_schema) {
        // Adjust the differentiator fields of TVEs, since they need to
        // reflect the inlined projection node in scan nodes.
        AbstractExpression colExpr = col.getExpression();
        Collection<TupleValueExpression> allTves =
                ExpressionUtil.getTupleValueExpressions(colExpr);
        for (TupleValueExpression tve : allTves) {
            if ( ! tve.needsDifferentiation()) {
                // PartitionByPlanNode and a following OrderByPlanNode
                // can have an internally generated RANK column.
                // These do not need to have their differentiator updated,
                // since it's only used for disambiguation in some
                // combinations of "SELECT *" and subqueries.
                // In fact attempting to adjust this special column will
                // cause failed assertions. The tve for this expression
                // will be marked as not needing differentiation,
                // so we just ignore it here.
                continue;
            }
            rootNode.adjustDifferentiatorField(tve);
        }
    }

    ProjectionPlanNode projectionNode = new ProjectionPlanNode();
    projectionNode.setOutputSchemaWithoutClone(proj_schema);

    // If the projection can be done inline, then add the
    // projection node inline.
    if (rootNode instanceof AbstractScanPlanNode) {
        rootNode.addInlinePlanNode(projectionNode);
        return rootNode;
    }

    projectionNode.addAndLinkChild(rootNode);
    return projectionNode;
}
java
private static void formatBadLogData(LogData data, StringBuilder out) {
    out.append("  original message: ");
    if (data.getTemplateContext() == null) {
        out.append(data.getLiteralArgument());
    } else {
        // We know that there's at least one argument to display here.
        out.append(data.getTemplateContext().getMessage());
        out.append("\n  original arguments:");
        for (Object arg : data.getArguments()) {
            out.append("\n    ").append(SimpleMessageFormatter.safeToString(arg));
        }
    }
    Metadata metadata = data.getMetadata();
    if (metadata.size() > 0) {
        out.append("\n  metadata:");
        for (int n = 0; n < metadata.size(); n++) {
            out.append("\n    ");
            out.append(metadata.getKey(n).getLabel()).append(": ").append(metadata.getValue(n));
        }
    }
    out.append("\n  level: ").append(data.getLevel());
    out.append("\n  timestamp (nanos): ").append(data.getTimestampNanos());
    out.append("\n  class: ").append(data.getLogSite().getClassName());
    out.append("\n  method: ").append(data.getLogSite().getMethodName());
    out.append("\n  line number: ").append(data.getLogSite().getLineNumber());
}
python
def from_string(contents):
    """
    Creates GaussianInput from a string.

    Args:
        contents: String representing a Gaussian input file.

    Returns:
        GaussianInput object
    """
    lines = [l.strip() for l in contents.split("\n")]

    link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
    link0_dict = {}
    for i, l in enumerate(lines):
        if link0_patt.match(l):
            m = link0_patt.match(l)
            link0_dict[m.group(1).strip("=")] = m.group(2)

    route_patt = re.compile(r"^#[sSpPnN]*.*")
    route = ""
    route_index = None
    for i, l in enumerate(lines):
        if route_patt.match(l):
            route += " " + l
            route_index = i
        # This condition allows for route cards spanning multiple lines
        elif (l == "" or l.isspace()) and route_index:
            break

    functional, basis_set, route_paras, dieze_tag = read_route_line(route)

    ind = 2
    title = []
    while lines[route_index + ind].strip():
        title.append(lines[route_index + ind].strip())
        ind += 1
    title = ' '.join(title)

    ind += 1
    toks = re.split(r"[,\s]+", lines[route_index + ind])
    charge = int(toks[0])
    spin_mult = int(toks[1])

    coord_lines = []
    spaces = 0
    input_paras = {}
    ind += 1
    for i in range(route_index + ind, len(lines)):
        if lines[i].strip() == "":
            spaces += 1
        if spaces >= 2:
            d = lines[i].split("=")
            if len(d) == 2:
                input_paras[d[0]] = d[1]
        else:
            coord_lines.append(lines[i].strip())

    mol = GaussianInput._parse_coords(coord_lines)
    mol.set_charge_and_spin(charge, spin_mult)

    return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
                         title=title, functional=functional,
                         basis_set=basis_set, route_parameters=route_paras,
                         input_parameters=input_paras,
                         link0_parameters=link0_dict, dieze_tag=dieze_tag)
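A usage sketch, assuming this is pymatgen's GaussianInput.from_string (the H2 input string below is a made-up minimal example):

from pymatgen.io.gaussian import GaussianInput

gau = GaussianInput.from_string("""%mem=1000MB
#P B3LYP/6-31G(d)

H2 molecule

0 1
H 0.0 0.0 0.0
H 0.0 0.0 0.74
""")
print(gau.charge, gau.spin_multiplicity, gau.title)  # 0 1 H2 molecule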
python
def permission_required(perm, fn=None, login_url=None, raise_exception=False,
                        redirect_field_name=REDIRECT_FIELD_NAME):
    """
    View decorator that checks for the given permissions before allowing the
    view to execute. Use it like this::

        from django.shortcuts import get_object_or_404
        from rules.contrib.views import permission_required
        from posts.models import Post

        def get_post_by_pk(request, post_id):
            return get_object_or_404(Post, pk=post_id)

        @permission_required('posts.change_post', fn=get_post_by_pk)
        def post_update(request, post_id):
            # ...

    ``perm`` is either a permission name as a string, or a list of permission
    names.

    ``fn`` is an optional callback that receives the same arguments as those
    passed to the decorated view and must return the object to check
    permissions against. If omitted, the decorator behaves just like Django's
    ``permission_required`` decorator, i.e. checks for model-level
    permissions.

    ``raise_exception`` is a boolean specifying whether to raise a
    ``django.core.exceptions.PermissionDenied`` exception if the check fails.
    You will most likely want to set this argument to ``True`` if you have
    specified a custom 403 response handler in your urlconf. If ``False``,
    the user will be redirected to the URL specified by ``login_url``.

    ``login_url`` is an optional custom URL to redirect the user to if
    permissions check fails. If omitted or empty, ``settings.LOGIN_URL`` is
    used.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # Normalize to a list of permissions
            if isinstance(perm, six.string_types):
                perms = (perm,)
            else:
                perms = perm

            # Get the object to check permissions against
            if callable(fn):
                obj = fn(request, *args, **kwargs)
            else:  # pragma: no cover
                obj = fn

            # Get the user
            user = request.user

            # Check for permissions and return a response
            if not user.has_perms(perms, obj):
                # User does not have a required permission
                if raise_exception:
                    raise PermissionDenied()
                else:
                    return _redirect_to_login(request, view_func.__name__,
                                              login_url, redirect_field_name)
            else:
                # User has all required permissions -- allow the view to execute
                return view_func(request, *args, **kwargs)
        return _wrapped_view
    return decorator
java
public final Intent updateIntent(Intent intent, String languageCode, FieldMask updateMask) {
    UpdateIntentRequest request =
        UpdateIntentRequest.newBuilder()
            .setIntent(intent)
            .setLanguageCode(languageCode)
            .setUpdateMask(updateMask)
            .build();
    return updateIntent(request);
}
java
public static boolean isAllBlank(final CharSequence... css) {
    if (ArrayUtils.isEmpty(css)) {
        return true;
    }
    for (final CharSequence cs : css) {
        if (isNotBlank(cs)) {
            return false;
        }
    }
    return true;
}
java
public Set<TypedEdge<T>> getAdjacencyList(int vertex) {
    SparseTypedEdgeSet<T> edges = vertexToEdges.get(vertex);
    return (edges == null)
        ? Collections.<TypedEdge<T>>emptySet()
        : new EdgeListWrapper(edges);
}
java
public static String unpackString(Object obj) {
    Object value = unpack(obj);
    return value == null ? null : value.toString();
}
java
void rbbiSymtablePrint() {
    System.out.print("Variable Definitions\n"
            + "Name Node Val String Val\n"
            + "----------------------------------------------------------------------\n");

    RBBISymbolTableEntry[] syms = fHashTable.values().toArray(new RBBISymbolTableEntry[0]);

    for (int i = 0; i < syms.length; i++) {
        RBBISymbolTableEntry s = syms[i];
        System.out.print("  " + s.key + "  "); // TODO: format output into columns.
        System.out.print("  " + s.val + "  ");
        System.out.print(s.val.fLeftChild.fText);
        System.out.print("\n");
    }

    System.out.println("\nParsed Variable Definitions\n");
    for (int i = 0; i < syms.length; i++) {
        RBBISymbolTableEntry s = syms[i];
        System.out.print(s.key);
        s.val.fLeftChild.printTree(true);
        System.out.print("\n");
    }
}
java
@PostMapping(path = SamlIdPConstants.ENDPOINT_SAML2_SOAP_ATTRIBUTE_QUERY)
protected void handlePostRequest(final HttpServletResponse response,
                                 final HttpServletRequest request) {
    val ctx = decodeSoapRequest(request);
    val query = (AttributeQuery) ctx.getMessage();
    try {
        val issuer = query.getIssuer().getValue();
        val service = verifySamlRegisteredService(issuer);
        val adaptor = getSamlMetadataFacadeFor(service, query);
        if (adaptor.isEmpty()) {
            throw new UnauthorizedServiceException(UnauthorizedServiceException.CODE_UNAUTHZ_SERVICE,
                "Cannot find metadata linked to " + issuer);
        }

        val facade = adaptor.get();
        verifyAuthenticationContextSignature(ctx, request, query, facade);

        val attrs = new LinkedHashMap<String, Object>();
        if (query.getAttributes().isEmpty()) {
            val id = getSamlProfileHandlerConfigurationContext().getSamlAttributeQueryTicketFactory()
                .createTicketIdFor(query.getSubject().getNameID().getValue());
            val ticket = getSamlProfileHandlerConfigurationContext().getTicketRegistry()
                .getTicket(id, SamlAttributeQueryTicket.class);
            val authentication = ticket.getTicketGrantingTicket().getAuthentication();
            val principal = authentication.getPrincipal();
            val authnAttrs = authentication.getAttributes();
            val principalAttrs = principal.getAttributes();
            query.getAttributes().forEach(a -> {
                if (authnAttrs.containsKey(a.getName())) {
                    attrs.put(a.getName(), authnAttrs.get(a.getName()));
                } else if (principalAttrs.containsKey(a.getName())) {
                    attrs.put(a.getName(), principalAttrs.get(a.getName()));
                }
            });
        }

        val casAssertion = buildCasAssertion(issuer, service, attrs);
        getSamlProfileHandlerConfigurationContext().getResponseBuilder()
            .build(query, request, response, casAssertion, service, facade,
                   SAMLConstants.SAML2_SOAP11_BINDING_URI, ctx);
    } catch (final Exception e) {
        LOGGER.error(e.getMessage(), e);
        request.setAttribute(SamlIdPConstants.REQUEST_ATTRIBUTE_ERROR, e.getMessage());
        getSamlProfileHandlerConfigurationContext().getSamlFaultResponseBuilder()
            .build(query, request, response, null, null, null,
                   SAMLConstants.SAML2_SOAP11_BINDING_URI, ctx);
    }
}
java
static int parseEtcResolverFirstNdots(File etcResolvConf) throws IOException {
    FileReader fr = new FileReader(etcResolvConf);
    BufferedReader br = null;
    try {
        br = new BufferedReader(fr);
        String line;
        while ((line = br.readLine()) != null) {
            if (line.startsWith(OPTIONS_ROW_LABEL)) {
                int i = line.indexOf(NDOTS_LABEL);
                if (i >= 0) {
                    i += NDOTS_LABEL.length();
                    final int j = line.indexOf(' ', i);
                    return Integer.parseInt(line.substring(i, j < 0 ? line.length() : j));
                }
                break;
            }
        }
    } finally {
        if (br == null) {
            fr.close();
        } else {
            br.close();
        }
    }
    return DEFAULT_NDOTS;
}
python
def parse_attrlist_0(str_, avs_sep=":", vs_sep=",", as_sep=";"):
    """
    Simple parser to parse expressions in the form of
    [ATTR1:VAL0,VAL1,...;ATTR2:VAL0,VAL2,..].

    :param str_: input string
    :param avs_sep: char to separate attribute and values
    :param vs_sep: char to separate values
    :param as_sep: char to separate attributes

    :return:
        a list of tuples of (key, value | [value])
        where key = (Int | String | ...),
        value = (Int | Bool | String | ...) | [Int | Bool | String | ...]

    >>> parse_attrlist_0("a:1")
    [('a', 1)]
    >>> parse_attrlist_0("a:1;b:xyz")
    [('a', 1), ('b', 'xyz')]
    >>> parse_attrlist_0("requires:bash,zsh")
    [('requires', ['bash', 'zsh'])]
    >>> parse_attrlist_0("obsoletes:sysdata;conflicts:sysdata-old")
    [('obsoletes', 'sysdata'), ('conflicts', 'sysdata-old')]
    """
    return [(a, vs) for a, vs in attr_val_itr(str_, avs_sep, vs_sep, as_sep)]
java
@SuppressWarnings("unchecked") @SneakyThrows public void init(final SQLStatementRuleDefinitionEntity dialectRuleDefinitionEntity, final ExtractorRuleDefinition extractorRuleDefinition) { for (SQLStatementRuleEntity each : dialectRuleDefinitionEntity.getRules()) { SQLStatementRule sqlStatementRule = new SQLStatementRule(each.getContext(), (Class<? extends SQLStatement>) Class.forName(each.getSqlStatementClass()), (SQLStatementOptimizer) newClassInstance(each.getOptimizerClass())); sqlStatementRule.getExtractors().addAll(createExtractors(each.getExtractorRuleRefs(), extractorRuleDefinition)); rules.put(getContextClassName(each.getContext()), sqlStatementRule); } }
java
public Map getAsMap() {
    if (mMap != null) {
        return mMap;
    } else {
        Map m = convertToMap();
        if (!isMutable()) {
            mMap = m;
        }
        return m;
    }
}
python
def com_google_fonts_check_metadata_subsets_order(family_metadata):
    """METADATA.pb subsets should be alphabetically ordered."""
    expected = list(sorted(family_metadata.subsets))

    if list(family_metadata.subsets) != expected:
        yield FAIL, ("METADATA.pb subsets are not sorted "
                     "in alphabetical order: Got ['{}']"
                     " and expected ['{}']").format("', '".join(family_metadata.subsets),
                                                    "', '".join(expected))
    else:
        yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
java
public static FilesIterator<File> getRelativeIterator(String baseDir) {
    return new FilesIterator<File>(new File(baseDir), Strategy.RELATIVE_FILES);
}
python
def parse(self, data, extent, desc_tag):
    # type: (bytes, int, UDFTag) -> None
    '''
    Parse the passed in data into a UDF Implementation Use Volume
    Descriptor.

    Parameters:
     data - The data to parse.
     extent - The extent that this descriptor currently lives at.
     desc_tag - A UDFTag object that represents the Descriptor Tag.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'UDF Implementation Use Volume Descriptor already initialized')

    (tag_unused, self.vol_desc_seqnum, impl_ident,
     impl_use) = struct.unpack_from(self.FMT, data, 0)

    self.desc_tag = desc_tag

    self.impl_ident = UDFEntityID()
    self.impl_ident.parse(impl_ident)
    if self.impl_ident.identifier[:12] != b'*UDF LV Info':
        raise pycdlibexception.PyCdlibInvalidISO(
            "Implementation Use Identifier not '*UDF LV Info'")

    self.impl_use = UDFImplementationUseVolumeDescriptorImplementationUse()
    self.impl_use.parse(impl_use)

    self.orig_extent_loc = extent
    self._initialized = True
python
def add_scene(self, animation_id, name, color, velocity, config):
    """Add a new scene, returns Scene ID"""
    # check arguments
    if animation_id < 0 or animation_id >= len(self.state.animationClasses):
        err_msg = "Requested to register scene with invalid Animation ID. Out of range."
        logging.info(err_msg)
        return (False, 0, err_msg)

    if self.state.animationClasses[animation_id].check_config(config) is False:
        err_msg = "Requested to register scene with invalid configuration."
        logging.info(err_msg)
        return (False, 0, err_msg)

    self.state.sceneIdCtr += 1
    self.state.scenes[self.state.sceneIdCtr] = Scene(animation_id, name, color, velocity, config)
    sequence_number = self.zmq_publisher.publish_scene_add(
        self.state.sceneIdCtr, animation_id, name, color, velocity, config)
    logging.debug("Registered new scene.")

    # set this scene as active scene if none is configured yet
    if self.state.activeSceneId is None:
        self.set_scene_active(self.state.sceneIdCtr)

    return (True, sequence_number, "OK")
java
public DMatrixRMaj getQ() {
    Equation eq = new Equation();
    DMatrixRMaj Q = CommonOps_DDRM.identity(QR.numRows);
    DMatrixRMaj u = new DMatrixRMaj(QR.numRows, 1);

    int N = Math.min(QR.numCols, QR.numRows);
    eq.alias(u, "u", Q, "Q", QR, "QR", QR.numRows, "r");

    // compute Q by first extracting the householder vectors from the
    // columns of QR and then applying it to Q
    for (int j = N - 1; j >= 0; j--) {
        eq.alias(j, "j", gammas[j], "gamma");

        eq.process("u(j:,0) = [1 ; QR((j+1):,j)]");
        eq.process("Q=(eye(r)-gamma*u*u')*Q");
    }

    return Q;
}
python
def check_resource(resource):
    '''
    Check a resource availability against a linkchecker backend

    The linkchecker used can be configured on a resource basis by setting
    the `resource.extras['check:checker']` attribute with a key that
    points to a valid `udata.linkcheckers` entrypoint. If not set, it will
    fallback on the default linkchecker defined by the configuration
    variable `LINKCHECKING_DEFAULT_LINKCHECKER`.

    Returns
    -------
    dict or (dict, int)
        Check results dict and status code (if error).
    '''
    linkchecker_type = resource.extras.get('check:checker')
    LinkChecker = get_linkchecker(linkchecker_type)
    if not LinkChecker:
        return {'error': 'No linkchecker configured.'}, 503

    if is_ignored(resource):
        return dummy_check_response()

    result = LinkChecker().check(resource)
    if not result:
        return {'error': 'No response from linkchecker'}, 503
    elif result.get('check:error'):
        return {'error': result['check:error']}, 500
    elif not result.get('check:status'):
        return {'error': 'No status in response from linkchecker'}, 503

    # store the check result in the resource's extras
    # XXX maybe this logic should be in the `Resource` model?
    previous_status = resource.extras.get('check:available')
    check_keys = _get_check_keys(result, resource, previous_status)
    resource.extras.update(check_keys)
    resource.save(signal_kwargs={'ignores': ['post_save']})  # Prevent signal triggering on dataset

    return result
python
def _reduce_data(self):
    """Private method to reduce data dimension.

    If data is dense, uses randomized PCA. If data is sparse, uses
    randomized SVD.
    TODO: should we subtract and store the mean?

    Returns
    -------
    Reduced data matrix
    """
    if self.n_pca is not None and self.n_pca < self.data.shape[1]:
        tasklogger.log_start("PCA")
        if sparse.issparse(self.data):
            if isinstance(self.data, sparse.coo_matrix) or \
                    isinstance(self.data, sparse.lil_matrix) or \
                    isinstance(self.data, sparse.dok_matrix):
                self.data = self.data.tocsr()
            self.data_pca = TruncatedSVD(self.n_pca,
                                         random_state=self.random_state)
        else:
            self.data_pca = PCA(self.n_pca,
                                svd_solver='randomized',
                                random_state=self.random_state)
        self.data_pca.fit(self.data)
        data_nu = self.data_pca.transform(self.data)
        tasklogger.log_complete("PCA")
        return data_nu
    else:
        data_nu = self.data
        if sparse.issparse(data_nu) and not isinstance(
                data_nu, (sparse.csr_matrix,
                          sparse.csc_matrix,
                          sparse.bsr_matrix)):
            data_nu = data_nu.tocsr()
        return data_nu
python
def make_symbol_table(use_numpy=True, **kws):
    """Create a default symbol table, taking dict of user-defined symbols.

    Arguments
    ---------
    use_numpy : bool, optional
       whether to include symbols from numpy
    kws : optional
       additional symbol name, value pairs to include in symbol table

    Returns
    --------
    symbol_table : dict
       a symbol table that can be used in `asteval.Interpreter`
    """
    symtable = {}

    for sym in FROM_PY:
        if sym in builtins:
            symtable[sym] = builtins[sym]

    for sym in FROM_MATH:
        if hasattr(math, sym):
            symtable[sym] = getattr(math, sym)

    if HAS_NUMPY and use_numpy:
        for sym in FROM_NUMPY:
            if hasattr(numpy, sym):
                symtable[sym] = getattr(numpy, sym)
        for name, sym in NUMPY_RENAMES.items():
            if hasattr(numpy, sym):
                symtable[name] = getattr(numpy, sym)

    symtable.update(LOCALFUNCS)
    symtable.update(kws)

    return symtable
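This mirrors asteval's make_symbol_table; a usage sketch assuming the asteval package is installed (mydouble is a made-up example symbol):

from asteval import Interpreter, make_symbol_table

symtable = make_symbol_table(use_numpy=False, mydouble=lambda x: 2 * x)
aeval = Interpreter(symtable=symtable)
print(aeval("mydouble(21)"))  # -> 42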
python
def user_data(self, access_token, *args, **kwargs):
    """Loads user data from service"""
    url = '{}/userinfo'.format(self.BASE_URL)
    response = self.get_json(
        url,
        headers={'Authorization': 'Bearer ' + access_token},
    )
    self.check_correct_audience(response['audience'])

    userdata = response['user']
    return userdata
python
def get_or_create(self, log_name, bucket_size):
    """
    Gets or creates a log.

    :rtype: Timebucketedlog
    """
    try:
        return self[log_name]
    except RepositoryKeyError:
        return start_new_timebucketedlog(log_name, bucket_size=bucket_size)
python
def _reset_build(self, sources):
    """Remove partition datafiles and reset the datafiles to the INGESTED state"""
    from ambry.orm.exc import NotFoundError

    for p in self.dataset.partitions:
        if p.type == p.TYPE.SEGMENT:
            self.log("Removing old segment partition: {}".format(p.identity.name))
            try:
                self.wrap_partition(p).local_datafile.remove()
                self.session.delete(p)
            except NotFoundError:
                pass

    for s in sources:
        # Don't delete partitions from other bundles!
        if s.reftype == 'partition':
            continue

        p = s.partition
        if p:
            try:
                self.wrap_partition(p).local_datafile.remove()
                self.session.delete(p)
            except NotFoundError:
                pass

        if s.state in (self.STATES.BUILDING, self.STATES.BUILT):
            s.state = self.STATES.INGESTED

    self.commit()
java
private static ActivationFunction natural(ErrorFunction error, int k) {
    if (error == ErrorFunction.CROSS_ENTROPY) {
        if (k == 1) {
            return ActivationFunction.LOGISTIC_SIGMOID;
        } else {
            return ActivationFunction.SOFTMAX;
        }
    } else {
        return ActivationFunction.LOGISTIC_SIGMOID;
    }
}
java
static void init(ServletContext servletContext) {
    String webPath = servletContext.getRealPath("/");
    if (webPath == null) {
        try {
            // Support weblogic: http://www.jfinal.com/feedback/1994
            webPath = servletContext.getResource("/").getPath();
        } catch (java.net.MalformedURLException e) {
            com.jfinal.kit.LogKit.error(e.getMessage(), e);
        }
    }
    properties.setProperty(Velocity.FILE_RESOURCE_LOADER_PATH, webPath);
    properties.setProperty(Velocity.ENCODING_DEFAULT, getEncoding());
    properties.setProperty(Velocity.INPUT_ENCODING, getEncoding());
    properties.setProperty(Velocity.OUTPUT_ENCODING, getEncoding());
}
java
public void enableBlending() {
    GlStateManager.enableBlend();
    GlStateManager.tryBlendFuncSeparate(GL11.GL_SRC_ALPHA, GL11.GL_ONE_MINUS_SRC_ALPHA, GL11.GL_ONE, GL11.GL_ZERO);
    GlStateManager.alphaFunc(GL11.GL_GREATER, 0.0F);
    GlStateManager.shadeModel(GL11.GL_SMOOTH);
    GlStateManager.enableColorMaterial();
}
python
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):
    """Returns QNM damping time for the given mass and spin and mode.

    Parameters
    ----------
    final_mass : float or array
        Mass of the black hole (in solar masses).
    final_spin : float or array
        Dimensionless spin of the final black hole.
    l : int or array, optional
        l-index of the harmonic. Default is 2.
    m : int or array, optional
        m-index of the harmonic. Default is 2.
    nmodes : int, optional
        The number of overtones to generate. Default is 1.

    Returns
    -------
    float or array
        The damping time of the QNM(s), in seconds. If only a single mode
        is requested (and mass, spin, l, and m are not arrays), this will
        be a float. If multiple modes requested, will be an array with
        shape ``[input shape x] nmodes``, where ``input shape`` is the
        broadcasted shape of the inputs.
    """
    return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]
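A usage sketch, assuming this function is exposed as pycbc.conversions.tau_from_final_mass_spin (the mass and spin values are illustrative):

from pycbc import conversions

# damping time of the fundamental l=m=2 quasi-normal mode of a
# 65-solar-mass remnant with dimensionless spin 0.69
tau = conversions.tau_from_final_mass_spin(65.0, 0.69)
print(tau)  # seconds, on the order of milliseconds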
python
async def _connect(self):
    """Start asynchronous reconnect loop."""
    self.waiting = True
    await self.client.start(self.ip)
    self.waiting = False
    if self.client.protocol is None:
        raise IOError("Could not connect to '{}'.".format(self.ip))
    self.open = True
java
public com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.NumericalStatsResult
    getNumericalStatsResult() {
  if (resultCase_ == 3) {
    return (com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.NumericalStatsResult) result_;
  }
  return com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.NumericalStatsResult
      .getDefaultInstance();
}
java
public ServiceFuture<List<OutputInner>> listByStreamingJobAsync(final String resourceGroupName, final String jobName, final ListOperationCallback<OutputInner> serviceCallback) {
    return AzureServiceFuture.fromPageResponse(
        listByStreamingJobSinglePageAsync(resourceGroupName, jobName),
        new Func1<String, Observable<ServiceResponse<Page<OutputInner>>>>() {
            @Override
            public Observable<ServiceResponse<Page<OutputInner>>> call(String nextPageLink) {
                return listByStreamingJobNextSinglePageAsync(nextPageLink);
            }
        },
        serviceCallback);
}
python
def weld_str_get(array, i):
    """Retrieve character at index i.

    Parameters
    ----------
    array : numpy.ndarray or WeldObject
        Input data.
    i : int
        Index of character to retrieve. If greater than length of string,
        returns None.

    Returns
    -------
    WeldObject
        Representation of this computation.
    """
    obj_id, weld_obj = create_weld_object(array)
    index_literal = to_weld_literal(i, WeldLong())
    missing_literal = default_missing_data_literal(WeldVec(WeldChar()))
    missing_literal_id = get_weld_obj_id(weld_obj, missing_literal)

    weld_template = """map(
    {array},
    |e: vec[i8]|
        let lenString = len(e);
        if({i} >= lenString,
            {missing},
            if({i} > 0L,
                result(merge(appender[i8], lookup(slice(e, 0L, lenString), {i}))),
                result(merge(appender[i8], lookup(slice(e, lenString, {i}), {i})))
            )
        )
)"""

    weld_obj.weld_code = weld_template.format(array=obj_id,
                                              i=index_literal,
                                              missing=missing_literal_id)

    return weld_obj
python
def _create_sequences(self):
    '''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''

    # Create the Rosetta sequences and the maps from the Rosetta sequences
    # to the ATOM sequences
    try:
        self.pdb.construct_pdb_to_rosetta_residue_map(
            self.rosetta_scripts_path,
            rosetta_database_path=self.rosetta_database_path,
            cache_dir=self.cache_dir)
    except PDBMissingMainchainAtomsException:
        self.pdb_to_rosetta_residue_map_error = True

    # Get all the Sequences
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
    else:
        self.uniparc_sequences = self.sifts.get_uniparc_sequences()

    self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
    self.seqres_sequences = self.pdb.seqres_sequences
    self.atom_sequences = self.pdb.atom_sequences

    if self.pdb_to_rosetta_residue_map_error:
        self.rosetta_sequences = {}
        for c in self.atom_sequences.keys():
            self.rosetta_sequences[c] = Sequence()
    else:
        self.rosetta_sequences = self.pdb.rosetta_sequences

    # Update the chain types for the UniParc sequences
    uniparc_pdb_chain_mapping = {}
    if self.pdb_id not in do_not_use_the_sequence_aligner:
        for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems():
            if matches:
                # we are not guaranteed to have a match e.g. the short
                # chain J in 1A2C, chimeras, etc.
                uniparc_chain_id = matches.keys()[0]
                assert(len(matches) == 1)
                uniparc_pdb_chain_mapping[uniparc_chain_id] = \
                    uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
    else:
        for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems():
            for uniparc_chain_id in uniparc_chain_ids:
                uniparc_pdb_chain_mapping[uniparc_chain_id] = \
                    uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
                uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)

    for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems():
        sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
        assert(len(sequence_type) == 1)
        sequence_type = sequence_type.pop()
        assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
        self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
        for p in pdb_chain_ids:
            self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id

    # Update the chain types for the FASTA sequences
    for chain_id, sequence in self.seqres_sequences.iteritems():
        self.fasta_sequences[chain_id].set_type(sequence.sequence_type)
python
def project_add_tags(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /project-xxxx/addTags API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2FaddTags
    """
    return DXHTTPRequest('/%s/addTags' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
python
def collapse_substrings(variant_sequences):
    """
    Combine shorter sequences which are fully contained in longer sequences.

    Parameters
    ----------
    variant_sequences : list
       List of VariantSequence objects

    Returns a (potentially shorter) list without any contained subsequences.
    """
    if len(variant_sequences) <= 1:
        # if we don't have at least two VariantSequences then just
        # return the input
        return variant_sequences

    # dictionary mapping VariantSequence objects to sets of reads
    # they absorb from substring VariantSequences
    extra_reads_from_substrings = defaultdict(set)
    result_list = []
    # sort by longest to shortest total length
    for short_variant_sequence in sorted(
            variant_sequences,
            key=lambda seq: -len(seq)):
        found_superstring = False
        for long_variant_sequence in result_list:
            found_superstring = long_variant_sequence.contains(short_variant_sequence)
            if found_superstring:
                extra_reads_from_substrings[long_variant_sequence].update(
                    short_variant_sequence.reads)
        if not found_superstring:
            result_list.append(short_variant_sequence)

    # add to each VariantSequence the reads it absorbed from dropped
    # substrings and then return
    return [
        variant_sequence.add_reads(
            extra_reads_from_substrings[variant_sequence])
        for variant_sequence in result_list
    ]
python
def val_to_mrc(code, val):
    """
    Convert one single `val` to MRC. This function may be used for
    control fields in MARC records.

    Args:
        code (str): Code of the field.
        val (str): Value of the field.

    Returns:
        str: Correctly padded MRC line with field.
    """
    code = str(code)

    if len(code) < 3:
        code += (3 - len(code)) * " "

    return "%s L %s" % (code, val)
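For example (the field code and value below are hypothetical):

print(val_to_mrc("001", "cpk20051492030"))  # -> '001 L cpk20051492030'
print(val_to_mrc("1", "x"))                 # short codes are right-padded: '1   L x'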
python
def change_format(self, new_format):
    """Change the format label of this content.

    Note that this does NOT actually alter the format of the content,
    only the label.
    """
    self._load_raw_content()
    self._format = new_format
    self.get_filename(renew=True)
    self.get_filepath(renew=True)
    return
java
@FromString
public static Days parseDays(String periodStr) {
    if (periodStr == null) {
        return Days.ZERO;
    }
    Period p = PARSER.parsePeriod(periodStr);
    return Days.days(p.getDays());
}
python
def tidied(name, age=0, matches=None, rmdirs=False, size=0, **kwargs):
    '''
    Remove unwanted files based on specific criteria. Multiple criteria are
    OR’d together, so a file that is too large but is not old enough will
    still get tidied.

    If neither age nor size is given all files which match a pattern in
    matches will be removed.

    name
        The directory tree that should be tidied

    age
        Maximum age in days after which files are considered for removal

    matches
        List of regular expressions to restrict what gets removed.
        Default: ['.*']

    rmdirs
        Whether or not it's allowed to remove directories

    size
        Maximum allowed file size. Files greater or equal to this size are
        removed. Doesn't apply to directories or symbolic links

    .. code-block:: yaml

        cleanup:
          file.tidied:
            - name: /tmp/salt_test
            - rmdirs: True
            - matches:
              - foo
              - b.*r
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Check preconditions
    if not os.path.isabs(name):
        return _error(ret, 'Specified file {0} is not an absolute path'.format(name))
    if not os.path.isdir(name):
        return _error(ret, '{0} does not exist or is not a directory.'.format(name))

    # Define some variables
    todelete = []
    today = date.today()

    # Compile regular expressions
    if matches is None:
        matches = ['.*']
    progs = []
    for regex in matches:
        progs.append(re.compile(regex))

    # Helper to match a given name against one or more pre-compiled
    # regular expressions
    def _matches(name):
        for prog in progs:
            if prog.match(name):
                return True
        return False

    # Iterate over given directory tree, depth-first
    for root, dirs, files in os.walk(top=name, topdown=False):
        # Check criteria for the found files and directories
        for elem in files + dirs:
            myage = 0
            mysize = 0
            deleteme = True
            path = os.path.join(root, elem)
            if os.path.islink(path):
                # Get age of symlink (not symlinked file)
                myage = abs(today - date.fromtimestamp(os.lstat(path).st_atime))
            elif elem in dirs:
                # Get age of directory, check if directories should be deleted at all
                myage = abs(today - date.fromtimestamp(os.path.getatime(path)))
                deleteme = rmdirs
            else:
                # Get age and size of regular file
                myage = abs(today - date.fromtimestamp(os.path.getatime(path)))
                mysize = os.path.getsize(path)

            # Verify against given criteria, collect all elements that
            # should be removed
            if (mysize >= size or myage.days >= age) and _matches(name=elem) and deleteme:
                todelete.append(path)

    # Now delete the stuff
    if todelete:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = '{0} is set for tidy'.format(name)
            ret['changes'] = {'removed': todelete}
            return ret
        ret['changes']['removed'] = []
        # Iterate over collected items
        try:
            for path in todelete:
                __salt__['file.remove'](path, force=True)
                # Remember what we've removed, will appear in the summary
                ret['changes']['removed'].append(path)
        except CommandExecutionError as exc:
            return _error(ret, '{0}'.format(exc))
        # Set comment for the summary
        ret['comment'] = 'Removed {0} files or directories from directory {1}'.format(
            len(todelete), name)
    else:
        # Set comment in case there was nothing to remove
        ret['comment'] = 'Nothing to remove from directory {0}'.format(name)
    return ret
python
def mode(dev, target):
    """Gets or sets the active mode."""
    click.echo("Current mode: %s" % dev.mode_readable)
    if target:
        click.echo("Setting mode: %s" % target)
        dev.mode = target
java
private int compareStarts(Interval<T> other) {
    if (start == null && other.start == null) {
        return 0;
    }
    if (start == null) {
        return -1;
    }
    if (other.start == null) {
        return 1;
    }
    int compare = start.compareTo(other.start);
    if (compare != 0) {
        return compare;
    }
    if (isStartInclusive ^ other.isStartInclusive) {
        return isStartInclusive ? -1 : 1;
    }
    return 0;
}
java
public static String escapeName(String name) {
    if (StringUtils.isBlank(name)) {
        throw new IllegalArgumentException("Blank or null is not a valid name.");
    } else if (java.util.regex.Pattern.matches(NAME, name)) {
        return name;
    } else {
        return name.replaceAll("[^A-Za-z0-9\\.\\-_]", "-");
    }
}
python
def dir2fn(ofn, ifn, suffix) -> Union[None, Path]:
    """
    ofn = output filename, or an output directory in which case a filename
          is created based on ifn
    ifn = input filename (don't overwrite!)
    suffix = desired file extension e.g. .h5
    """
    if not ofn:  # no output file desired
        return None

    ofn = Path(ofn).expanduser()
    ifn = Path(ifn).expanduser()
    assert ifn.is_file()

    if ofn.suffix == suffix:  # must already be a filename
        pass
    else:  # must be a directory
        assert ofn.is_dir(), f'create directory {ofn}'
        ofn = ofn / ifn.with_suffix(suffix).name

    try:
        assert not ofn.samefile(ifn), f'do not overwrite input file! {ifn}'
    except FileNotFoundError:
        # a good thing: the output file doesn't exist, hence it's not the input file
        pass

    return ofn
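A usage sketch with throwaway files (the paths are hypothetical):

import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
src = tmp / "data.dat"
src.touch()

print(dir2fn(tmp, src, ".h5"))  # <tmp>/data.h5 -- directory + input stem + new suffix
print(dir2fn("", src, ".h5"))   # None -- falsy ofn means no output file desired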
python
def _create_syns(b, needed_syns):
    """Create empty synthetics

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter list needed_syns: list of dictionaries containing kwargs to
        access the dataset (dataset, component, kind)
    :return: :class:`phoebe.parameters.parameters.ParameterSet` of all new
        parameters
    """
    # needs_mesh = {info['dataset']: info['kind'] for info in needed_syns if info['needs_mesh']}

    params = []
    for needed_syn in needed_syns:
        # print "*** _create_syns needed_syn", needed_syn
        # used to be {}_syn
        syn_kind = '{}'.format(needed_syn['kind'])
        # if needed_syn['kind'] == 'mesh':
        #     parameters.dataset.mesh will handle creating the necessary columns
        #     needed_syn['dataset_fields'] = needs_mesh
        #     needed_syn['columns'] = b.get_value(qualifier='columns', dataset=needed_syn['dataset'], context='dataset')
        #     datasets = b.get_value(qualifier='datasets', dataset=needed_syn['dataset'], context='dataset')
        #     needed_syn['datasets'] = {ds: b.filter(datset=ds, context='dataset').exclude(kind='*_dep').kind for ds in datasets}

        # phoebe will compute everything sorted - even if the input times array
        # is out of order, so let's make sure the exposed times array is in
        # the correct (sorted) order
        if 'times' in needed_syn.keys():
            needed_syn['times'].sort()

        needed_syn['empty_arrays_len'] = len(needed_syn['times'])

        these_params, these_constraints = getattr(_dataset, "{}_syn".format(syn_kind.lower()))(**needed_syn)
        # TODO: do we need to handle constraints?
        these_params = these_params.to_list()
        for param in these_params:
            if param._dataset is None:
                # dataset may be set for mesh columns
                param._dataset = needed_syn['dataset']

            param._kind = syn_kind
            param._component = needed_syn['component']
            # reset copy_for... model Parameters should never copy
            param._copy_for = {}

            # context, model, etc will be handled by the bundle once these are returned
        params += these_params

    return ParameterSet(params)
java
public int convertExternalToInternal(Object recordOwner) {
    // Step 4 - Convert the JAXB objects to my internal format
    Object root = this.convertToMessage();
    if (root == null) {
        return DBConstants.ERROR_RETURN;
    }
    return this.getConvertToMessage().convertMarshallableObjectToInternal(root, (RecordOwner) recordOwner);
}
python
def variable_device(device, name):
    """Fix the variable device to colocate its ops."""
    if callable(device):
        var_name = tf.get_variable_scope().name + '/' + name
        var_def = tf.NodeDef(name=var_name, op='Variable')
        device = device(var_def)
    if device is None:
        device = ''
    return device
java
public void setScope(java.util.Collection<String> scope) {
    if (scope == null) {
        this.scope = null;
        return;
    }
    this.scope = new com.amazonaws.internal.SdkInternalList<String>(scope);
}
java
public static SimpleFeatureSource readFeatureSource(String path) throws Exception {
    File shapeFile = new File(path);
    FileDataStore store = FileDataStoreFinder.getDataStore(shapeFile);
    SimpleFeatureSource featureSource = store.getFeatureSource();
    return featureSource;
}
python
def posterior_marginal(self, idx_param=0, res=100, smoothing=0,
                       range_min=None, range_max=None):
    """
    Returns an estimate of the marginal distribution of a given model
    parameter, based on taking the derivative of the interpolated cdf.

    :param int idx_param: Index of parameter to be marginalized.
    :param int res: Resolution of the axis.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth; same units as parameter.
    :param float range_min: Minimum range of the output axis.
    :param float range_max: Maximum range of the output axis.

    .. seealso::

        :meth:`SMCUpdater.plot_posterior_marginal`
    """
    # We need to sort the particles to get cumsum to make sense.
    # interp1d would do it anyways (using argsort, too), so it's not a waste
    s = np.argsort(self.particle_locations[:, idx_param])
    locs = self.particle_locations[s, idx_param]

    # relevant axis discretization
    r_min = np.min(locs) if range_min is None else range_min
    r_max = np.max(locs) if range_max is None else range_max
    ps = np.linspace(r_min, r_max, res)

    # interpolate the cdf of the marginal distribution using cumsum
    interp = scipy.interpolate.interp1d(
        np.append(locs, r_max + np.abs(r_max - r_min)),
        np.append(np.cumsum(self.particle_weights[s]), 1),
        # kind='cubic',
        bounds_error=False,
        fill_value=0,
        assume_sorted=True
    )

    # get distribution from derivative of cdf, and smooth it
    pr = np.gradient(interp(ps), ps[1] - ps[0])
    if smoothing > 0:
        gaussian_filter1d(pr, res * smoothing / (np.abs(r_max - r_min)), output=pr)

    del interp
    return ps, pr
python
def full_name(self):
    """Get the name of the day of the week.

    Returns
    -------
    StringValue
        The name of the day of the week
    """
    import ibis.expr.operations as ops
    return ops.DayOfWeekName(self.op().arg).to_expr()
python
def advance(self, height, ignore_overflow=False):
    """Advance the cursor by `height`. If this would cause the cursor
    to point beyond the bottom of the container, a
    :class:`ContainerOverflow` exception is raised."""
    if height <= self.remaining_height:
        self._self_cursor.grow(height)
    elif ignore_overflow:
        self._self_cursor.grow(float(self.remaining_height))
    else:
        raise ContainerOverflow(self.page.number)
java
@Override
public CPDefinitionLocalization findByCPDefinitionId_Last(
        long CPDefinitionId,
        OrderByComparator<CPDefinitionLocalization> orderByComparator)
    throws NoSuchCPDefinitionLocalizationException {

    CPDefinitionLocalization cpDefinitionLocalization =
        fetchByCPDefinitionId_Last(CPDefinitionId, orderByComparator);

    if (cpDefinitionLocalization != null) {
        return cpDefinitionLocalization;
    }

    StringBundler msg = new StringBundler(4);

    msg.append(_NO_SUCH_ENTITY_WITH_KEY);

    msg.append("CPDefinitionId=");
    msg.append(CPDefinitionId);

    msg.append("}");

    throw new NoSuchCPDefinitionLocalizationException(msg.toString());
}
java
private void addEntryTableToLayout(
    List<CmsAccessControlEntry> entries,
    VerticalLayout layout,
    boolean editable,
    boolean inheritedRes) {

    final CmsPermissionViewTable table = new CmsPermissionViewTable(
        m_cms,
        entries,
        editable,
        inheritedRes,
        m_parents,
        this);
    HorizontalLayout hl = new HorizontalLayout();
    Label label = new Label(
        CmsVaadinUtils.getMessageText(
            Messages.GUI_PERMISSION_COUNT_1,
            new Integer(table.getContainerDataSource().size())));
    label.addStyleName("o-report");
    hl.addComponent(label);
    TextField tableFilter = new TextField();
    tableFilter.setIcon(FontOpenCms.FILTER);
    tableFilter.setInputPrompt(
        CmsVaadinUtils.getMessageText(org.opencms.ui.apps.Messages.GUI_EXPLORER_FILTER_0));
    tableFilter.addStyleName(ValoTheme.TEXTFIELD_INLINE_ICON);
    tableFilter.setWidth("200px");
    tableFilter.addTextChangeListener(new TextChangeListener() {

        private static final long serialVersionUID = 1L;

        public void textChange(TextChangeEvent event) {

            table.filterTable(event.getText());
        }
    });
    hl.addComponent(tableFilter);
    hl.setWidth("100%");
    hl.setExpandRatio(label, 1);
    hl.setMargin(true);
    hl.setComponentAlignment(tableFilter, com.vaadin.ui.Alignment.MIDDLE_RIGHT);
    if (table.getContainerDataSource().size() == 0) {
        layout.addComponent(CmsVaadinUtils.getInfoLayout(Messages.GUI_PERMISSION_EMPTY_0));
    } else {
        layout.addComponent(hl);
        layout.addComponent(table);
        CmsVaadinUtils.centerWindow(this);
    }
}
python
def hide(*keys):
    """
    Hide a set of request and/or response fields from logs.

    Example:

        @hide("id")
        def create_foo():
            return Foo(id=uuid4())

    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            g.hide_request_fields = keys
            g.hide_response_fields = keys
            return func(*args, **kwargs)
        return wrapper
    return decorator
java
private void loadStaticField(XField staticField, Instruction obj) {
    if (RLE_DEBUG) {
        System.out.println("[loadStaticField for field " + staticField + " in instruction " + handle);
    }

    ValueNumberFrame frame = getFrame();

    AvailableLoad availableLoad = new AvailableLoad(staticField);
    ValueNumber[] loadedValue = frame.getAvailableLoad(availableLoad);

    if (loadedValue == null) {
        // Make the load available
        int numWordsProduced = getNumWordsProduced(obj);
        loadedValue = getOutputValues(EMPTY_INPUT_VALUE_LIST, numWordsProduced);

        frame.addAvailableLoad(availableLoad, loadedValue);

        if (RLE_DEBUG) {
            System.out.println("[making load of " + staticField + " available]");
        }
    } else {
        if (RLE_DEBUG) {
            System.out.println("[found available load of " + staticField + "]");
        }
    }

    if (VERIFY_INTEGRITY) {
        checkConsumedAndProducedValues(obj, EMPTY_INPUT_VALUE_LIST, loadedValue);
    }

    pushOutputValues(loadedValue);
}
python
def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """
    Wrapper for GsshaPyFileObjectBase write method
    """
    if self.raster is not None or self.rasterText is not None:
        super(RasterMapFile, self).write(session, directory, name,
                                         replaceParamFile, **kwargs)
python
def beginningPage(R):
    """Since pages may not be given as numbers, returning the first token
    before any space, hyphen, or semicolon is the most accurate this
    function can be."""
    p = R['PG']
    if p.startswith('suppl '):
        p = p[6:]
    return p.split(' ')[0].split('-')[0].replace(';', '')
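For example, with minimal record dicts (the PG values are made up):

print(beginningPage({'PG': '123-35'}))        # -> '123'
print(beginningPage({'PG': 'suppl 12; 14'}))  # -> '12'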
java
public static Locale parseLocale(String localeString) {
    if (localeString == null || localeString.length() == 0) {
        return Locale.getDefault();
    }

    localeString = localeString.trim();
    if (localeString == null) {
        return null;
    }

    String language = "";
    String country = "";
    String variant = "";

    // language
    int start = 0;
    int index = localeString.indexOf("_");
    if (index >= 0) {
        language = localeString.substring(start, index).trim();

        // country
        start = index + 1;
        index = localeString.indexOf("_", start);
        if (index >= 0) {
            country = localeString.substring(start, index).trim();

            // variant
            variant = localeString.substring(index + 1).trim();
        } else {
            country = localeString.substring(start).trim();
        }
    } else {
        language = localeString.substring(start).trim();
    }

    return new Locale(language, country, variant);
}
python
def remove_members_outside_rank_in(self, leaderboard_name, rank):
    '''
    Remove members from the named leaderboard in a given rank range.

    @param leaderboard_name [String] Name of the leaderboard.
    @param rank [int] the rank (inclusive) which we should keep.
    @return the total member count which was removed.
    '''
    if self.order == self.DESC:
        rank = -(rank) - 1
        return self.redis_connection.zremrangebyrank(
            leaderboard_name, 0, rank)
    else:
        return self.redis_connection.zremrangebyrank(
            leaderboard_name, rank, -1)
java
public static long queryNumEntries(SQLiteDatabase db, String table, String selection) {
    return queryNumEntries(db, table, selection, null);
}
python
def unmap_volume_from_sdc(self, volObj, sdcObj=None, **kwargs):
    """
    Unmap a Volume from SDC or all SDCs

    :param volObj: ScaleIO Volume object
    :param sdcObj: ScaleIO SDC object
    :param \**kwargs:
    :Keyword Arguments:
        *disableMapAllSdcs* (``bool``) -- True to disable all SDCs mapping
    :return: POST request response
    :rtype: Requests POST response object
    :raise RuntimeError: If failure happen during communication with REST
        Gateway - Need to be cleaned up and made consistent to return
        understandable errors
    """
    # TODO:
    # Check if object parameters are the correct ones, otherwise throw error

    # ADD logic for unmapping ALL SDCs: for all SDCs the request body is
    # {'allSdcs': 'True'}; 'False' can be used to disable the mapping.
    self.conn.connection._check_login()
    if kwargs:
        for key, value in kwargs.iteritems():
            if key == 'enableMapAllSdcs' and value == False:
                # Check if allSdcs is True before continuing
                if self.get_volume_all_sdcs_mapped(volObj):
                    unmapVolumeFromSdcDict = {'allSdcs': 'False'}
    else:
        unmapVolumeFromSdcDict = {'sdcId': sdcObj.id}
    try:
        response = self.conn.connection._do_post(
            "{}/{}{}/{}".format(self.conn.connection._api_url,
                                "instances/Volume::", volObj.id,
                                'action/removeMappedSdc'),
            json=unmapVolumeFromSdcDict)
    except:
        raise RuntimeError("unmap_volume_from_sdc() - Cannot unmap volume")
    return response
python
def _confirm_prompt(message, prompt="\nAre you sure? [y/yes (default: no)]: ",
                    affirmations=("Y", "Yes", "yes", "y")):
    """
    Display a message, then confirmation prompt, and return true
    if the user responds with one of the affirmations.
    """
    answer = input(message + prompt)
    return answer in affirmations
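A usage sketch (the message text is arbitrary):

if _confirm_prompt("This will drop the staging tables."):
    print("confirmed, proceeding")
else:
    print("aborted")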
java
@Override
public void deserialize(String jsonString) {
    final GsonBuilder builder = new GsonBuilder();
    builder.excludeFieldsWithoutExposeAnnotation();
    final Gson gson = builder.create();

    Logo w = gson.fromJson(jsonString, Logo.class);
    this.nid = w.nid;
    this.brand = w.brand;
}
python
def is_multi_target(target):
    """
    Determine if pipeline manager's run target is multiple.

    :param None or str or Sequence of str target: 0, 1, or multiple targets
    :return bool: Whether there are multiple targets
    :raise TypeError: if the argument is neither None nor string nor Sequence
    """
    if target is None or isinstance(target, str):
        return False
    elif isinstance(target, Sequence):
        return len(target) > 1
    else:
        raise TypeError("Could not interpret argument as a target: {} ({})".
                        format(target, type(target)))
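For example (assuming Sequence here is collections.abc.Sequence):

assert not is_multi_target(None)
assert not is_multi_target("all")            # one string is a single target
assert not is_multi_target(["all"])          # a one-element list is still single
assert is_multi_target(["align", "call"])    # two or more -> multiple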
java
public String n1qlToRawJson(N1qlQuery query) {
    return Blocking.blockForSingle(async.n1qlToRawJson(query),
        env.queryTimeout(), TimeUnit.MILLISECONDS);
}
python
def swf2png(swf_path, png_path, swfrender_path="swfrender"):
    """Convert SWF slides into a PNG image

    Raises:
        OSError is raised if swfrender is not available.
        ConversionError is raised if image cannot be created.
    """
    # Currently rely on swftools
    #
    # Would be great to have a native python dependency to convert swf
    # into png or jpg. However it seems that pyswf isn't flawless. Some
    # graphical elements (like the text!) are lost during the export.
    try:
        cmd = [swfrender_path, swf_path, '-o', png_path]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise ConversionError("Failed to convert SWF file %s.\n"
                              "\tCommand: %s\n"
                              "\tExit status: %s.\n"
                              "\tOutput:\n%s"
                              % (swf_path, " ".join(cmd), e.returncode, e.output))
java
private void processChildEvent(WatchedEvent event) throws Exception {
    HashMap<Integer, LeaderCallBackInfo> cacheCopy =
        new HashMap<Integer, LeaderCallBackInfo>(m_publicCache);
    ByteArrayCallback cb = new ByteArrayCallback();
    m_zk.getData(event.getPath(), m_childWatch, cb, null);
    try {
        // cb.get() and cb.getPath() throw KeeperException
        byte payload[] = cb.get();
        String data = new String(payload, "UTF-8");
        LeaderCallBackInfo info = LeaderCache.buildLeaderCallbackFromString(data);
        Integer partitionId = getPartitionIdFromZKPath(cb.getPath());
        cacheCopy.put(partitionId, info);
    } catch (KeeperException.NoNodeException e) {
        // rtb: I think result's path is the same as cb.getPath()?
        Integer partitionId = getPartitionIdFromZKPath(event.getPath());
        cacheCopy.remove(partitionId);
    }
    m_publicCache = ImmutableMap.copyOf(cacheCopy);
    if (m_cb != null) {
        m_cb.run(m_publicCache);
    }
}
python
def get_item(self, **kwargs):
    """ Get collection item taking into account generated queryset
    of parent view.

    This method allows working with nested resources properly: an item
    returned by this method will belong to its parent view's queryset,
    filtering out objects that don't belong to the parent object.

    Returns an object from the applicable ACL. If the ACL wasn't applied,
    it is applied explicitly.
    """
    if six.callable(self.context):
        self.reload_context(es_based=False, **kwargs)

    objects = self._parent_queryset()
    if objects is not None and self.context not in objects:
        raise JHTTPNotFound('{}({}) not found'.format(
            self.Model.__name__,
            self._get_context_key(**kwargs)))

    return self.context
python
def get_params_from_list(params_list):
    """Transform params list to dictionary.
    """
    params = {}
    key = None
    for item in params_list:
        if '=' not in item:
            # bare values append to the most recent key's value list
            if key is None:
                raise ValueError(
                    'Pass parameters like `key1=a key2=b c d key3=...`.')
            if not isinstance(params[key], list):
                params[key] = [params[key]]
            params[key] += [item]
        else:
            key, val = item.split('=', 1)
            params[key] = convert_string(val)
    return params
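A hypothetical usage sketch; `convert_string` is the module's own coercion helper and is assumed here to turn '0.1' into a float, '2' into an int, and leave plain words as strings:

params = get_params_from_list(['lr=0.1', 'layers=2', 'tags=a', 'b', 'c'])
# -> {'lr': 0.1, 'layers': 2, 'tags': ['a', 'b', 'c']}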
java
public static <T> T getBundle(Class<T> type) {
    return getBundle(type, Locale.getDefault());
}
java
Expression XreadAllTypesValueExpressionPrimary(boolean boole) {

    Expression e = null;

    switch (token.tokenType) {

        case Tokens.EXISTS :
        case Tokens.UNIQUE :
            if (boole) {
                return XreadPredicate();
            }
            break;

        case Tokens.ROW :
            if (boole) {
                break;
            }
            read();
            readThis(Tokens.OPENBRACKET);

            e = XreadRowElementList(true);

            readThis(Tokens.CLOSEBRACKET);
            break;

        default :
            e = XreadSimpleValueExpressionPrimary();
    }

    if (e == null && token.tokenType == Tokens.OPENBRACKET) {
        read();

        e = XreadRowElementList(true);

        readThis(Tokens.CLOSEBRACKET);
    }

    if (boole && e != null) {
        e = XreadPredicateRightPart(e);
    }

    return e;
}
python
def unset(self, section, option):
    """Remove option from section."""
    with self._lock:
        if not self._config.has_section(section):
            return
        if self._config.has_option(section, option):
            self._config.remove_option(section, option)
            self._dirty = True
        if not self._config.options(section):
            self._config.remove_section(section)
            self._dirty = True
python
def to_utf8(path, output_path=None):
    """Convert any text file to utf8 encoding.
    """
    if output_path is None:
        basename, ext = os.path.splitext(path)
        output_path = basename + "-UTF8Encode" + ext
    text = smartread(path)
    write(text, output_path)
java
public void add(final String string) {
    checkNotNull(string);
    candidates.put(string, candidate(string));
}
java
public final EObject entryRuleNotExpression() throws RecognitionException {
    EObject current = null;

    EObject iv_ruleNotExpression = null;

    try {
        // InternalSimpleAntlr.g:1078:2: (iv_ruleNotExpression= ruleNotExpression EOF )
        // InternalSimpleAntlr.g:1079:2: iv_ruleNotExpression= ruleNotExpression EOF
        {
            if ( state.backtracking==0 ) {
                newCompositeNode(grammarAccess.getNotExpressionRule());
            }
            pushFollow(FOLLOW_1);
            iv_ruleNotExpression=ruleNotExpression();

            state._fsp--;
            if (state.failed) return current;
            if ( state.backtracking==0 ) {
                current =iv_ruleNotExpression;
            }
            match(input,EOF,FOLLOW_2); if (state.failed) return current;
        }
    }
    catch (RecognitionException re) {
        recover(input,re);
        appendSkippedTokens();
    }
    finally {
    }
    return current;
}
java
private Iterator detailChildrenIterator(Detail detail) {
    /*
    sb.append("<ns2:AccessDeniedWebServiceException xmlns:ns2=\"http://exceptionthrower.system.services.v4_0.soap.server.nameapi.org/\">");
    sb.append("<blame>CLIENT</blame>");
    sb.append("<errorCode>2101</errorCode>");
    sb.append("<faultCause>AccessDenied</faultCause>");
    */
    DetailEntry firstDetailEntry = getFirstDetailEntry(detail);
    if (firstDetailEntry != null) {
        String localName = firstDetailEntry.getElementName().getLocalName();
        if (localName.endsWith("Exception")) {
            // got a subtag
            return firstDetailEntry.getChildElements();
        }
    }
    return detail.getDetailEntries();
}
java
public static double matthewsCorrelation(long tp, long fp, long fn, long tn) {
    double numerator = ((double) tp) * tn - ((double) fp) * fn;
    double denominator = Math.sqrt(((double) tp + fp) * (tp + fn) * (tn + fp) * (tn + fn));
    return numerator / denominator;
}
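A hedged Python sketch of the same Matthews correlation formula; the zero-denominator guard (returning 0.0 instead of NaN when a margin is all zeros) is an added convention, not something the Java version does:

import math

def matthews_correlation(tp, fp, fn, tn):
    numerator = tp * tn - fp * fn
    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / denominator if denominator else 0.0

print(matthews_correlation(90, 10, 5, 95))  # ~0.851 for a strong classifier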
python
def _clone(self):
    """Make a (shallow) copy of the set.

    There is a 'clone protocol' that subclasses of this class
    should use. To make a copy, first call your super's _clone()
    method, and use the object returned as the new instance. Then
    make shallow copies of the attributes defined in the subclass.

    This protocol allows us to write the set algorithms that
    return new instances (e.g. union) once, and keep using them in
    subclasses.
    """
    cls = self.__class__
    obj = cls.__new__(cls)
    obj.items = list(self.items)
    return obj
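An illustrative sketch of the clone protocol the docstring describes; `ItemSet` and `TaggedItemSet` are hypothetical stand-ins, not classes from the source:

class ItemSet:
    """Minimal base following the clone protocol above."""
    def __init__(self, items=()):
        self.items = list(items)

    def _clone(self):
        cls = self.__class__
        obj = cls.__new__(cls)
        obj.items = list(self.items)
        return obj

class TaggedItemSet(ItemSet):
    def __init__(self, items=(), tags=()):
        super().__init__(items)
        self.tags = set(tags)

    def _clone(self):
        obj = super()._clone()      # base class copies `items`
        obj.tags = set(self.tags)   # then copy this subclass's attribute
        return obj

copy_ = TaggedItemSet([1, 2], {"a"})._clone()
print(copy_.items, copy_.tags)  # [1, 2] {'a'}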
java
@Override
protected I_CmsSearchDocument appendCategories(
    I_CmsSearchDocument document,
    CmsObject cms,
    CmsResource resource,
    I_CmsExtractionResult extractionResult,
    List<CmsProperty> properties,
    List<CmsProperty> propertiesSearched) {

    Document doc = (Document)document.getDocument();

    // add the category of the file (this is searched so the value can also be attached on a folder)
    String value = CmsProperty.get(
        CmsPropertyDefinition.PROPERTY_SEARCH_CATEGORY,
        propertiesSearched).getValue();
    if (CmsStringUtil.isNotEmpty(value)) {
        // all categories are internally stored lower case
        value = value.trim().toLowerCase();
        if (value.length() > 0) {
            Field field = new StringField(CmsSearchField.FIELD_CATEGORY, value, Field.Store.YES);
            // field.setBoost(0);
            doc.add(field);
        }
    }
    return document;
}
python
def fasta_verifier(entries, ambiguous=False):
    """Raises error if invalid FASTA format detected

    Args:
        entries (list): A list of FastaEntry instances

        ambiguous (bool): Permit ambiguous bases, i.e. permit non-ACGTU bases

    Raises:
        FormatError: Error when FASTA format incorrect with descriptive message

    Example:
        >>> from bio_utils.iterators import fasta_iter
        >>> import os
        >>> entries = r'>entry1{0}AAGGATTCG{0}' \
        ...           r'>entry{0}AGGTCCCCCG{0}' \
        ...           r'>entry3{0}GCCTAGC{0}'.format(os.linesep)
        >>> fasta_entries = fasta_iter(iter(entries.split(os.linesep)))
        >>> fasta_verifier(fasta_entries)
    """
    if ambiguous:
        regex = r'^>.+{0}[ACGTURYKMSWBDHVNX]+{0}$'.format(os.linesep)
    else:
        regex = r'^>.+{0}[ACGTU]+{0}$'.format(os.linesep)
    delimiter = r'{0}'.format(os.linesep)

    for entry in entries:
        try:
            entry_verifier([entry.write()], regex, delimiter)
        except FormatError as error:
            if error.part == 0:
                msg = 'Unknown Header Error with {0}'.format(entry.id)
                raise FormatError(message=msg)
            elif error.part == 1 and ambiguous:
                msg = '{0} contains a base not in ' \
                      '[ACGTURYKMSWBDHVNX]'.format(entry.id)
                raise FormatError(message=msg)
            elif error.part == 1 and not ambiguous:
                msg = '{0} contains a base not in ' \
                      '[ACGTU]'.format(entry.id)
                raise FormatError(message=msg)
            else:
                msg = '{0}: Unknown Error: Likely a Bug'.format(entry.id)
                raise FormatError(message=msg)
java
public static void runPipeline(File scriptFile, List<String> cliArgs)
        throws IOException, UIMAException, ParseException {

    if (!scriptFile.exists()) {
        throw new IOException("Script file does not exist ("
                + scriptFile.getAbsolutePath() + ")");
    }
    LOG.info("Parsing pipeline script at '{}'", scriptFile.getAbsolutePath()
            + " \n with CLI parameters: " + join(cliArgs, ", "));
    Pipeline pipeline = null;
    try {
        pipeline = PipelineScriptParser.parse(scriptFile, cliArgs);
    } catch (ParseException e) {
        throw new ParseException("\nERROR parsing '" + scriptFile.getName()
                + "'\n" + e.getMessage()
                + "\n(see the README.txt for the pipeline script format)",
                e.getErrorOffset());
    }
    LOG.info("Successfully parsed pipeline script, now starting pipeline...");
    LOG.info("*************************************************************");
    pipeline.run();

    // will be printed if no exception.
    // used in pipeline tests, do not change
    System.out.println(OK_MESSAGE);
}
python
def load(cls, path_to_file):
    """
    Loads the image data from a file on disk and tries to guess the
    image MIME type

    :param path_to_file: path to the source file
    :type path_to_file: str
    :return: a `pyowm.image.Image` instance
    """
    import mimetypes
    mimetypes.init()
    mime = mimetypes.guess_type('file://%s' % path_to_file)[0]
    img_type = ImageTypeEnum.lookup_by_mime_type(mime)
    with open(path_to_file, 'rb') as f:
        data = f.read()
    return Image(data, image_type=img_type)
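A quick sanity check of the MIME guess used above (hedged: results depend on the local mimetypes registry; `guess_type` works purely from the file extension, so the URL-style prefix is harmless):

import mimetypes

mimetypes.init()
print(mimetypes.guess_type('file:///tmp/portrait.png')[0])  # 'image/png'
print(mimetypes.guess_type('file:///tmp/shot.jpeg')[0])     # 'image/jpeg'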
java
@Override
public CacheConfiguration<K, V> setTypes(Class<K> keyType, Class<V> valueType) {
    if (keyType == null || valueType == null) {
        throw new NullPointerException("keyType and/or valueType can't be null");
    }
    setKeyType(keyType);
    setValueType(valueType);
    return this;
}