Dataset columns: language (string, 2 classes: java, python) · func_code_string (string, lengths 63 to 466k characters)
java
public void removeAll() { if (m_gridList != null) m_gridList.removeAll(); if (m_gridBuffer != null) m_gridBuffer.removeAll(); if (m_gridNew != null) m_gridNew.removeAll(); m_iEndOfFileIndex = UNKNOWN_POSITION; m_iPhysicalFilePosition = UNKNOWN_POSITION; m_iLogicalFilePosition = UNKNOWN_POSITION; }
python
def roles_accepted(*role_names):
    """| This decorator ensures that the current user is logged in,
    | and has *at least one* of the specified roles (OR operation).

    Example::

        @route('/edit_article')
        @roles_accepted('Writer', 'Editor')
        def edit_article():  # User must be 'Writer' OR 'Editor'
            ...

    | Calls unauthenticated_view() when the user is not logged in
        or when user has not confirmed their email address.
    | Calls unauthorized_view() when the user does not have the required roles.
    | Calls the decorated view otherwise.
    """
    # convert the list to a list containing that list.
    # Because roles_required(a, b) requires A AND B
    # while roles_required([a, b]) requires A OR B
    def wrapper(view_function):
        @wraps(view_function)    # Tells debuggers that this is a function wrapper
        def decorator(*args, **kwargs):
            user_manager = current_app.user_manager

            # User must be logged in with a confirmed email address
            allowed = _is_logged_in_with_confirmed_email(user_manager)
            if not allowed:
                # Redirect to unauthenticated page
                return user_manager.unauthenticated_view()

            # User must have the required roles
            # NB: roles_required would call has_roles(*role_names): ('A', 'B') --> ('A', 'B')
            # But: roles_accepted must call has_roles(role_names): ('A', 'B') --> (('A', 'B'),)
            if not current_user.has_roles(role_names):
                # Redirect to the unauthorized page
                return user_manager.unauthorized_view()

            # It's OK to call the view
            return view_function(*args, **kwargs)

        return decorator

    return wrapper
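For context, a minimal usage sketch (assumes a Flask app with Flask-User initialized; the route and view are illustrative, not from the source):

# Hypothetical usage; Flask-User's setup (UserManager, models) is assumed.
from flask import Flask
from flask_user import roles_accepted

app = Flask(__name__)

@app.route('/edit_article')
@roles_accepted('Writer', 'Editor')  # user needs the Writer OR the Editor role
def edit_article():
    return 'editing'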
python
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    C = self.COEFFS[imt]

    M = rup.mag - 6
    R = np.sqrt(dists.rjb ** 2 + C['h'] ** 2)

    # In the original formulation of the GMPE, distinction is only made
    # between rock and soil sites, which I assumed separated by the Vs30
    # value of 910 m/s (see equation 5 of the paper)
    gamma = np.array([0 if v > 910. else 1 for v in sites.vs30])

    mean = np.zeros_like(R)
    mean += C['b1'] + \
        C['b2'] * M + \
        C['b3'] * M ** 2 + \
        C['b5'] * np.log10(R) + \
        C['b6'] * gamma

    # Convert from base 10 to base e
    mean /= np.log10(np.e)

    # Converting PSV to PSA
    if imt != PGA() and imt != PGV():
        omega = 2. * np.pi / imt.period
        mean += np.log(omega / (gravity * 100))

    # Computing standard deviation
    stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape[0])

    # Convert from base 10 to base e
    stddevs = [sd / np.log10(np.e) for sd in stddevs]

    return mean, stddevs
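The two divisions by np.log10(np.e) above rely on the change-of-base identity log10(x) / log10(e) = ln(x); a quick self-contained check:

import numpy as np

x = 123.4
# dividing a base-10 logarithm by log10(e) yields the natural logarithm
assert np.isclose(np.log10(x) / np.log10(np.e), np.log(x))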
java
private String bytesToString(byte[] bytes, boolean truncate) {
    CharsetDecoder decoder = Charset.defaultCharset()
            .newDecoder()
            .onMalformedInput(CodingErrorAction.REPLACE)
            .onUnmappableCharacter(CodingErrorAction.REPLACE);
    ByteBuffer byteBuffer = ByteBuffer.wrap(bytes);
    CharBuffer charBuffer;
    try {
        charBuffer = decoder.decode(byteBuffer);
    } catch (CharacterCodingException e) {
        // Shouldn't happen due to choosing REPLACE above, but Java makes us catch it anyway.
        throw new RuntimeException(e);
    }
    String s = charBuffer.toString();
    if (truncate && s.length() > TRUNCATED_STRING_MAX_LEN) {
        return new StringBuilder().append(s, 0, TRUNCATED_STRING_MAX_LEN).append("[...]").toString();
    } else {
        return s;
    }
}
python
def xpathNewNodeSet(self): """Create a new xmlXPathObjectPtr of type NodeSet and initialize it with the single Node @val """ ret = libxml2mod.xmlXPathNewNodeSet(self._o) if ret is None:raise xpathError('xmlXPathNewNodeSet() failed') return xpathObjectRet(ret)
python
def check_debug():
    """Check that Django's template debugging is enabled.

    Django's built-in "template debugging" records information the plugin
    needs to do its work. Check that the setting is correct, and raise an
    exception if it is not.

    Returns True if the debug check was performed, False otherwise
    """
    from django.conf import settings

    if not settings.configured:
        return False

    # I _think_ this check is all that's needed and the 3 "hasattr" checks
    # below can be removed, but it's not clear how to verify that
    from django.apps import apps
    if not apps.ready:
        return False

    # django.template.backends.django gets loaded lazily, so return false
    # until they've been loaded
    if not hasattr(django.template, "backends"):
        return False
    if not hasattr(django.template.backends, "django"):
        return False
    if not hasattr(django.template.backends.django, "DjangoTemplates"):
        raise DjangoTemplatePluginException("Can't use non-Django templates.")

    for engine in django.template.engines.all():
        if not isinstance(engine, django.template.backends.django.DjangoTemplates):
            raise DjangoTemplatePluginException(
                "Can't use non-Django templates."
            )
        if not engine.engine.debug:
            raise DjangoTemplatePluginException(
                "Template debugging must be enabled in settings."
            )

    return True
java
public MethodRefConstant addMethodRef(String className, String name, String type) { MethodRefConstant entry = getMethodRef(className, name, type); if (entry != null) return entry; ClassConstant classEntry = addClass(className); NameAndTypeConstant typeEntry = addNameAndType(name, type); entry = new MethodRefConstant(this, _entries.size(), classEntry.getIndex(), typeEntry.getIndex()); addConstant(entry); return entry; }
java
public static Query logical(LogOp op, Collection<Query> expressions) { Query q = new Query(true); for (Query x : expressions) { ((ArrayNode) q.node).add(x.toJson()); } Query a = new Query(false); a.add(op.toString(), q.toJson()); return a; }
java
public Observable<List<ModelInfoResponse>> listModelsAsync(UUID appId, String versionId, ListModelsOptionalParameter listModelsOptionalParameter) { return listModelsWithServiceResponseAsync(appId, versionId, listModelsOptionalParameter).map(new Func1<ServiceResponse<List<ModelInfoResponse>>, List<ModelInfoResponse>>() { @Override public List<ModelInfoResponse> call(ServiceResponse<List<ModelInfoResponse>> response) { return response.body(); } }); }
java
protected void closeConnection(ThriftConnectionHandle<T> connection) { if (connection != null) { try { connection.internalClose(); } catch (Throwable t) { logger.error("Destroy connection exception", t); } finally { this.thriftConnectionPool.postDestroyConnection(connection); } } }
python
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    # pylint: disable=too-many-arguments
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for specification of input and result values.

    Implements the following equations:

    Equation (8) on p. 203 for the bedrock ground motion:

    ``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - lnR - c4*R + ln(ε_br)``

    Equation (9) on p. 207 gives the site amplification factor:

    ``ln(F_s) = a1*y_br + a2 + ln(δ_site)``

    Equation (10) on p. 207 for the ground motion at a given site:

    ``y_site = y_br*F_s``

    Equation (11) on p. 207 for total standard error at a given site:

    ``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
    """
    # obtain coefficients for required intensity measure type
    coeffs = self.COEFFS_BEDROCK[imt].copy()

    # obtain site-class specific coefficients
    a_1, a_2, sigma_site = self._get_site_coeffs(sites, imt)
    coeffs.update({'a1': a_1, 'a2': a_2, 'sigma_site': sigma_site})

    # compute bedrock motion, equation (8)
    ln_mean = (self._compute_magnitude_terms(rup, coeffs) +
               self._compute_distance_terms(dists, coeffs))

    # adjust for site class, equation (10)
    ln_mean += self._compute_site_amplification(ln_mean, coeffs)

    # No need to convert to g since "In [equation (8)], y_br = (SA/g)"
    ln_stddevs = self._get_stddevs(coeffs, stddev_types)

    return ln_mean, [ln_stddevs]
python
def initialize(self): """ Initializes network by calling Connection.initialize() and Layer.initialize(). self.count is set to zero. """ print("Initializing '%s' weights..." % self.name, end=" ", file=sys.stderr) if self.sharedWeights: raise AttributeError("shared weights broken") self.count = 0 for connection in self.connections: connection.initialize() for layer in self.layers: layer.initialize()
java
public java.lang.String getProtocol() { java.lang.Object ref = protocol_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); protocol_ = s; return s; } }
java
public void enableFollowLocation() {
    mIsFollowing = true;

    // set initial location when enabled
    if (isMyLocationEnabled()) {
        Location location = mMyLocationProvider.getLastKnownLocation();
        if (location != null) {
            setLocation(location);
        }
    }

    // Update the screen to see changes take effect
    if (mMapView != null) {
        mMapView.postInvalidate();
    }
}
python
def convert_type(self, type):
    """Convert type to BigQuery
    """

    # Mapping
    mapping = {
        'any': 'STRING',
        'array': None,
        'boolean': 'BOOLEAN',
        'date': 'DATE',
        'datetime': 'DATETIME',
        'duration': None,
        'geojson': None,
        'geopoint': None,
        'integer': 'INTEGER',
        'number': 'FLOAT',
        'object': None,
        'string': 'STRING',
        'time': 'TIME',
        'year': 'INTEGER',
        'yearmonth': None,
    }

    # Not supported type
    if type not in mapping:
        message = 'Type %s is not supported' % type
        raise tableschema.exceptions.StorageError(message)

    return mapping[type]
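A hedged usage sketch (`mapper` stands for whatever object this method is bound to; the calls just mirror the mapping above):

# mapper.convert_type('integer')   # -> 'INTEGER'
# mapper.convert_type('geojson')   # -> None (known type, no BigQuery analogue)
# mapper.convert_type('uuid')      # raises tableschema.exceptions.StorageError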
python
from math import cos, log10, sin, tan


def friction_plate_Martin_VDI(Re, plate_enlargement_factor):
    r'''Calculates Darcy friction factor for single-phase flow in a
    Chevron-style plate heat exchanger according to [1]_.

    .. math::
        \frac{1}{\sqrt{f_d}} = \frac{\cos \phi}{\sqrt{0.28\tan\phi
        + 0.36\sin\phi + f_0/\cos(\phi)}} + \frac{1-\cos\phi}{\sqrt{3.8f_1}}

    .. math::
        f_0 = 64/Re \text{ for } Re < 2000

    .. math::
        f_0 = (1.8\log_{10} Re - 1.5)^{-2} \text{ for } Re \ge 2000

    .. math::
        f_1 = \frac{597}{Re} + 3.85 \text{ for } Re < 2000

    .. math::
        f_1 = \frac{39}{Re^{0.289}} \text{ for } Re \ge 2000

    Parameters
    ----------
    Re : float
        Reynolds number with respect to the hydraulic diameter of the
        channels, [-]
    plate_enlargement_factor : float
        The extra surface area multiplier as compared to a flat plate
        caused by the corrugations, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    Based on experimental data for Re from 200 to 10000, and enhancement
    factors calculated with chevron angles of 0 to 80 degrees. See
    `PlateExchanger` for further clarification on the definitions.

    The length the friction factor gets multiplied by is not the flow path
    length, but rather the straight path length from port to port as if
    there were no chevrons.

    Note there is a discontinuity at Re = 2000 for the transition from
    laminar to turbulent flow, although the literature suggests the
    transition is actually smooth.

    This is a revision of Martin's earlier model, adjusted to predict higher
    friction factors. There are three parameters in this model, a, b and c;
    it is possible to adjust them to better fit a known exchanger's
    pressure drop.

    See Also
    --------
    friction_plate_Martin_1999

    Examples
    --------
    >>> friction_plate_Martin_VDI(Re=20000, plate_enlargement_factor=1.15)
    2.702534119024076

    References
    ----------
    .. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York: Springer, 2010.
    '''
    phi = plate_enlargement_factor

    if Re < 2000.:
        f0 = 64./Re
        f1 = 597./Re + 3.85
    else:
        f0 = (1.8*log10(Re) - 1.5)**-2
        f1 = 39.*Re**-0.289

    a, b, c = 3.8, 0.28, 0.36

    rhs = cos(phi)*(b*tan(phi) + c*sin(phi) + f0/cos(phi))**-0.5
    rhs += (1. - cos(phi))*(a*f1)**-0.5
    return rhs**-2.0
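The Notes call out a discontinuity at Re = 2000; evaluating just either side of the laminar/turbulent switch makes it visible (assumes the function defined above is in scope):

# friction factor just below and just above the laminar/turbulent boundary
f_lam = friction_plate_Martin_VDI(Re=1999.0, plate_enlargement_factor=1.15)
f_turb = friction_plate_Martin_VDI(Re=2001.0, plate_enlargement_factor=1.15)
print(f_lam, f_turb)  # the two branches do not meet at Re = 2000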
java
private void actionWrite() throws PageException {
    if (output == null) throw new ApplicationException("attribute output is not defined for tag file");
    checkFile(pageContext, securityManager, file, serverPassword, createPath, true, false, true);
    if (file.exists()) {
        // Error
        if (nameconflict == NAMECONFLICT_ERROR) throw new ApplicationException("destination file [" + file + "] already exists");
        // SKIP
        else if (nameconflict == NAMECONFLICT_SKIP) return;
        // OVERWRITE
        else if (nameconflict == NAMECONFLICT_OVERWRITE) file.delete();
    }
    setACL(pageContext, file, acl);
    try {
        if (output instanceof InputStream) {
            IOUtil.copy((InputStream) output, file, false);
        }
        else if (Decision.isCastableToBinary(output, false)) {
            IOUtil.copy(new ByteArrayInputStream(Caster.toBinary(output)), file, true);
        }
        else {
            String content = Caster.toString(output);
            if (fixnewline) content = doFixNewLine(content);
            if (addnewline) content += SystemUtil.getOSSpecificLineSeparator();
            IOUtil.write(file, content, CharsetUtil.toCharset(charset), false);
        }
    }
    catch (UnsupportedEncodingException e) {
        throw new ApplicationException("Unsupported Charset Definition [" + charset + "]", e.getMessage());
    }
    catch (IOException e) {
        throw new ApplicationException("can't write file " + file.getAbsolutePath(), e.getMessage());
    }
    setMode(file, mode);
    setAttributes(file, attributes);
}
python
def write_data(self, variable_id, value, task):
    """
    write values to the device
    """
    variable = self._variables[variable_id]
    if task.property_name != '':
        # write the value to a VariableProperty and use that for later reads
        vp = VariableProperty.objects.update_or_create_property(
            variable=variable, name=task.property_name.upper(),
            value=value, value_class='FLOAT64')
        return True
    if variable.visavariable.variable_type == 0:  # configuration
        # only write to configuration variables
        pass
    else:
        return False
python
def snapshot(self): """Snapshot current state.""" self._snapshot = { 'muted': self.muted, 'volume': self.volume, 'stream': self.stream } _LOGGER.info('took snapshot of current state of %s', self.friendly_name)
java
@Override
public AuditUtilityReturnCodes handleTask(ConsoleWrapper stdin, PrintStream stdout, PrintStream stderr, String[] args) throws Exception {
    Map<String, String> argMap = parseArgumentList(args);
    Map<String, String> props = convertToProperties(argMap);
    if (props.isEmpty()) {
        throw new IllegalArgumentException(getMessage("insufficientArgs"));
    }

    if (isDebug) {
        Handler[] handlers = Logger.getLogger("").getHandlers();
        for (int index = 0; index < handlers.length; index++) {
            handlers[index].setLevel(Level.FINE);
        }
        theLogger.setLevel(Level.FINE);
    }

    if (isDebug) {
        theLogger.fine("Arguments passed in: " + argMap.toString());
    }

    // validate that the --encrypted argument value, if specified, is either true or false
    if (argMap.containsKey(ARG_ENCRYPTED) &&
        (!argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("true") && (!argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("false")))) {
        if (isDebug)
            theLogger.fine("Invalid value, " + argMap.get(ARG_ENCRYPTED) + ", specified for the --encrypted argument. Value must be either true or false.");
        throw new IllegalArgumentException(getMessage("invalidValue", argMap.get(ARG_ENCRYPTED), ARG_ENCRYPTED));
    }

    // validate that the --signed argument value, if specified, is either true or false
    if (argMap.containsKey(ARG_SIGNED) &&
        (!argMap.get(ARG_SIGNED).equalsIgnoreCase("true") && (!argMap.get(ARG_SIGNED).equalsIgnoreCase("false")))) {
        if (isDebug)
            theLogger.fine("Invalid value, " + argMap.get(ARG_SIGNED) + ", specified for the --signed argument. Value must be either true or false.");
        throw new IllegalArgumentException(getMessage("invalidValue", argMap.get(ARG_SIGNED), ARG_SIGNED));
    }

    // if --encrypted=true, ensure the keystore type specified for the encryption keystore is not CMS or PKCS11
    if (argMap.containsKey(ARG_ENCRYPTED) && argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("true") &&
        (argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_TYPE))) {
        if (argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE).equalsIgnoreCase("PKCS11") || argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE).equals("CMS")) {
            if (isDebug)
                theLogger.fine("The keystore type, " + argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE) +
                               ", specified for --encKeyStoreType is not supported. Only JKS, JCEKS and PKCS12 are supported.");
            throw new IllegalArgumentException(getMessage("security.audit.UnsupportedKeyStoreType", argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE), ARG_ENCRYPTION_KEYSTORE_TYPE));
        }
    }

    // if --signed=true, ensure the keystore type specified for the signing keystore is not CMS or PKCS11
    if (argMap.containsKey(ARG_SIGNED) && argMap.get(ARG_SIGNED).equalsIgnoreCase("true") &&
        (argMap.containsKey(ARG_SIGNING_KEYSTORE_TYPE))) {
        if (argMap.get(ARG_SIGNING_KEYSTORE_TYPE).equalsIgnoreCase("PKCS11") || argMap.get(ARG_SIGNING_KEYSTORE_TYPE).equals("CMS")) {
            if (isDebug)
                theLogger.fine("The keystore type, " + argMap.get(ARG_SIGNING_KEYSTORE_TYPE) +
                               ", specified for --signingKeyStoreType is not supported. Only JKS, JCEKS and PKCS12 are supported.");
            throw new IllegalArgumentException(getMessage("security.audit.UnsupportedKeyStoreType", argMap.get(ARG_SIGNING_KEYSTORE_TYPE), ARG_SIGNING_KEYSTORE_TYPE));
        }
    }

    // ensure the --auditFileLocation is specified (required argument)
    if (!argMap.containsKey(ARG_AUDIT_FILE_LOCATION)) {
        if (isDebug)
            theLogger.fine("Missing --auditFileLocation argument");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_AUDIT_FILE_LOCATION));
    }

    // ensure the audit log specified in the --auditFileLocation argument exists
    if (argMap.containsKey(ARG_AUDIT_FILE_LOCATION)) {
        String afl = argMap.get(ARG_AUDIT_FILE_LOCATION);
        File f = new File(afl);
        if (!f.exists()) {
            if (isDebug)
                theLogger.fine("Specified location for the audit log does not exist");
            throw new IllegalArgumentException(getMessage("security.audit.FileNotFound", afl));
        }
    }

    // ensure the --outputFileLocation is specified (required argument)
    if (!argMap.containsKey(ARG_OUTPUT_FILE_LOCATION)) {
        if (isDebug)
            theLogger.fine("Missing --outputFileLocation argument");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_OUTPUT_FILE_LOCATION));
    }

    // check that the location of the audit log is not the same as the location of the output log
    if (argMap.get(ARG_AUDIT_FILE_LOCATION).equals(argMap.get(ARG_OUTPUT_FILE_LOCATION))) {
        if (isDebug)
            theLogger.fine("The input audit file location, " + argMap.get(ARG_AUDIT_FILE_LOCATION) +
                           ", cannot be the same as the output audit file location, " + argMap.get(ARG_OUTPUT_FILE_LOCATION) + ".");
        throw new IllegalArgumentException(getMessage("invalidFileLocations", argMap.get(ARG_AUDIT_FILE_LOCATION), argMap.get(ARG_OUTPUT_FILE_LOCATION)));
    }

    // check that the output file location specified is not a directory
    if (argMap.containsKey(ARG_OUTPUT_FILE_LOCATION)) {
        String afl = argMap.get(ARG_OUTPUT_FILE_LOCATION);
        File f = new File(afl);
        if (f.isDirectory()) {
            if (isDebug)
                theLogger.fine("Specified location for the output audit log does not exist. It is specified but is a directory.");
            throw new IllegalArgumentException(getMessage("security.audit.FileNotFound", afl));
        } else {
            if (isDebug)
                theLogger.fine("Specified location for the output audit log exists and is not a directory.");
        }
        if (afl.endsWith("/") || afl.endsWith("\\")) {
            if (isDebug)
                theLogger.fine("Specified location for the output audit log does not exist. It is specified but is a non-existent directory.");
            throw new IllegalArgumentException(getMessage("security.audit.FileNotFound", afl));
        }
        if (f.createNewFile()) {
            if (!f.canWrite()) {
                if (isDebug)
                    theLogger.fine("Specified location for the output audit log is a non-writable file. Ensure that the location for the output file is writable");
                throw new IllegalArgumentException(getMessage("audit.NonWriteableOuputFile", afl));
            }
        }
        boolean fd = false;
        if (f.exists() && f.canWrite())
            fd = f.delete();
    }

    // if --encrypted=true, ensure that the encryption keystore password is specified.
    if (argMap.containsKey(ARG_ENCRYPTED) && argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("true") && !argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_PASSWORD)) {
        if (isDebug)
            theLogger.fine("The --encrypted argument is true, but nothing was specified for --encKeyStorePassword.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_ENCRYPTION_KEYSTORE_PASSWORD));
    }

    // if --encrypted=true, ensure that the encryption keystore type is specified.
    if (argMap.containsKey(ARG_ENCRYPTED) && argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("true") && !argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_TYPE)) {
        if (isDebug)
            theLogger.fine("The --encrypted argument is true, but nothing was specified for --encKeyStoreType.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_ENCRYPTION_KEYSTORE_TYPE));
    }

    // if --encrypted=true, and an encryption keystore location is specified, ensure it 1) has a URL prefix, 2) exists, and 3) is not CMS or PKCS11
    if (argMap.containsKey(ARG_ENCRYPTED) && argMap.get(ARG_ENCRYPTED).equalsIgnoreCase("true") && argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_LOCATION)) {
        String loc = argMap.get(ARG_ENCRYPTION_KEYSTORE_LOCATION);
        File f = new File(loc);
        if (!f.exists()) {
            if (isDebug)
                theLogger.fine("Specified location for the encryption keystore does not exist");
            throw new IllegalArgumentException(getMessage("security.audit.FileNotFound", loc));
        }
        if (loc.endsWith(".CMS") || loc.endsWith(".cms") || loc.endsWith(".pkce11") || loc.endsWith(".PKCE11")) {
            if (isDebug)
                theLogger.fine("The keystore type, " + argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE) +
                               ", specified for --encKeyStoreType is not supported. Only JKS, JCEKS and PKCS12 are supported.");
            throw new IllegalArgumentException(getMessage("security.audit.UnsupportedKeyStoreType", argMap.get(ARG_ENCRYPTION_KEYSTORE_TYPE), ARG_ENCRYPTION_KEYSTORE_TYPE));
        }
    }

    // if --signed=true, ensure that the signing keystore password is specified.
    if (argMap.containsKey(ARG_SIGNED) && argMap.get(ARG_SIGNED).equalsIgnoreCase("true") && !argMap.containsKey(ARG_SIGNING_KEYSTORE_PASSWORD)) {
        if (isDebug)
            theLogger.fine("The --signed argument is true, but nothing was specified for --signingKeyStorePassword.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_SIGNING_KEYSTORE_PASSWORD));
    }

    // if --signed=true, ensure that the signing keystore type is specified.
    if (argMap.containsKey(ARG_SIGNED) && argMap.get(ARG_SIGNED).equalsIgnoreCase("true") && !argMap.containsKey(ARG_SIGNING_KEYSTORE_TYPE)) {
        if (isDebug)
            theLogger.fine("The --signed argument is true, but nothing was specified for --signingKeyStoreType.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_SIGNING_KEYSTORE_TYPE));
    }

    // if --signed=true, and a signing keystore location is specified, ensure it 1) has a URL prefix, 2) exists, and 3) is not CMS or PKCS11
    if (argMap.containsKey(ARG_SIGNED) && argMap.get(ARG_SIGNED).equalsIgnoreCase("true") && argMap.containsKey(ARG_SIGNING_KEYSTORE_LOCATION)) {
        String loc = argMap.get(ARG_SIGNING_KEYSTORE_LOCATION);
        File f = new File(loc);
        if (!f.exists()) {
            if (isDebug)
                theLogger.fine("Specified location for the signing keystore does not exist");
            throw new IllegalArgumentException(getMessage("security.audit.FileNotFound", loc));
        }
        if (loc.endsWith(".CMS") || loc.endsWith(".cms") || loc.endsWith(".pkce11") || loc.endsWith(".PKCE11")) {
            if (isDebug)
                theLogger.fine("The keystore type, " + argMap.get(ARG_SIGNING_KEYSTORE_TYPE) +
                               ", specified for --signingKeyStoreType is not supported. Only JKS, JCEKS and PKCS12 are supported.");
            throw new IllegalArgumentException(getMessage("security.audit.UnsupportedKeyStoreType", argMap.get(ARG_SIGNING_KEYSTORE_TYPE), ARG_SIGNING_KEYSTORE_TYPE));
        }
    }

    if ((argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_PASSWORD) || argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_TYPE) || argMap.containsKey(ARG_ENCRYPTION_KEYSTORE_LOCATION)) &&
        (!argMap.containsKey(ARG_ENCRYPTED))) {
        if (isDebug)
            theLogger.fine("The --encKeyStoreLocation, --encKeyStoreType, and --encKeyStorePassword were specified, but nothing was specified for the --encrypted argument.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_ENCRYPTED));
    }

    if ((argMap.containsKey(ARG_SIGNING_KEYSTORE_PASSWORD) || argMap.containsKey(ARG_SIGNING_KEYSTORE_TYPE) || argMap.containsKey(ARG_SIGNING_KEYSTORE_LOCATION)) &&
        (!argMap.containsKey(ARG_SIGNED))) {
        if (isDebug)
            theLogger.fine("The --signingKeyStoreLocation, --signingKeyStoreType, and --signingKeyStorePassword were specified, but nothing was specified for the --signed argument.");
        throw new IllegalArgumentException(getMessage("missingArg", ARG_SIGNED));
    }

    try {
        auditReader(stderr, props);
    } catch (Exception e) {
        throw e;
    }

    return AuditUtilityReturnCodes.OK;
}
java
public void getAllAchievementGroupID(Callback<List<String>> callback) throws NullPointerException { gw2API.getAllAchievementGroupIDs().enqueue(callback); }
python
def end_frequency(self, index): """ Return the end frequency of the waveform at the given index value """ from pycbc.waveform.waveform import props return pycbc.waveform.get_waveform_end_frequency( self.table[index], approximant=self.approximant(index), **self.extra_args)
python
def rename(self, channel_name, new_name): """ https://api.slack.com/methods/channels.rename """ channel_id = self.get_channel_id(channel_name) self.params.update({ 'channel': channel_id, 'name': new_name, }) return FromUrl('https://slack.com/api/channels.rename', self._requests)(data=self.params).post()
python
def _comment_type_from_line(line): """Return the "comment header" (' * ', '# ', 'rem ', '// ', '/* '). This header goes before the content of a start of the line in a replacement. """ regex = re.compile(r"^{0}".format(_ALL_COMMENT)) match = regex.match(line) if match: return "{0} ".format(line[match.start():match.end()]) raise RuntimeError("Unable to find comment header for {0}".format(line))
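`_ALL_COMMENT` is private to the helper's module, so here is a self-contained sketch of the same idea with an assumed pattern (the real alternation may differ):

import re

# guessed alternation of comment leaders; an assumption, not the real _ALL_COMMENT
_ALL_COMMENT_GUESS = r"(?:#|//|/\*|\*|rem)"

def comment_type_from_line(line):
    match = re.match(r"^{0}".format(_ALL_COMMENT_GUESS), line)
    if match:
        return "{0} ".format(line[match.start():match.end()])
    raise RuntimeError("Unable to find comment header for {0}".format(line))

print(repr(comment_type_from_line("# hello")))   # '# '
print(repr(comment_type_from_line("// hello")))  # '// '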
python
def reshape_axes(axes, shape, newshape, unknown=None): """Return axes matching new shape. By default, unknown dimensions are labelled 'Q'. >>> reshape_axes('YXS', (219, 301, 1), (219, 301)) 'YX' >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1)) 'QQYQXQ' """ shape = tuple(shape) newshape = tuple(newshape) if len(axes) != len(shape): raise ValueError('axes do not match shape') size = product(shape) newsize = product(newshape) if size != newsize: raise ValueError('cannot reshape %s to %s' % (shape, newshape)) if not axes or not newshape: return '' lendiff = max(0, len(shape) - len(newshape)) if lendiff: newshape = newshape + (1,) * lendiff i = len(shape)-1 prodns = 1 prods = 1 result = [] for ns in newshape[::-1]: prodns *= ns while i > 0 and shape[i] == 1 and ns != 1: i -= 1 if ns == shape[i] and prodns == prods*shape[i]: prods *= shape[i] result.append(axes[i]) i -= 1 elif unknown: result.append(unknown) else: unknown = 'Q' result.append(unknown) return ''.join(reversed(result[lendiff:]))
java
public static Single<Boolean> exists(String key, Object member) { return exists(CacheService.CACHE_CONFIG_BEAN, key, member); }
java
public Set<Integer> getUnstoppedSlots(Set<Integer> aliveTasks, Map<String, SupervisorInfo> supInfos, Assignment existAssignment) {
    Set<Integer> ret = new HashSet<>();

    Set<ResourceWorkerSlot> oldWorkers = existAssignment.getWorkers();
    Set<String> aliveSupervisors = supInfos.keySet();

    for (ResourceWorkerSlot worker : oldWorkers) {
        for (Integer taskId : worker.getTasks()) {
            if (!aliveTasks.contains(taskId)) {
                // task is dead
                continue;
            }

            String oldTaskSupervisorId = worker.getNodeId();
            if (!aliveSupervisors.contains(oldTaskSupervisorId)) {
                // supervisor is dead
                ret.add(taskId);
            }
        }
    }

    return ret;
}
python
def create_data_element(self, mapped_class=None): """ Returns a new data element for the given mapped class. :returns: object implementing :class:`IResourceDataElement`. """ if not mapped_class is None and mapped_class != self.__mapped_cls: mp = self.__mp_reg.find_or_create_mapping(mapped_class) data_el = mp.create_data_element() else: data_el = self.__de_cls.create() return data_el
python
def highlight_matches(self): """Highlight found results""" if self.is_code_editor and self.highlight_button.isChecked(): text = self.search_text.currentText() words = self.words_button.isChecked() regexp = self.re_button.isChecked() self.editor.highlight_found_results(text, words=words, regexp=regexp)
java
@Override public MPSubscription getSubscription() throws SIDurableSubscriptionNotFoundException { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "getSubscription"); ConsumerDispatcher cd = (ConsumerDispatcher) _localConsumerPoint.getConsumerManager(); MPSubscription mpSubscription = cd.getMPSubscription(); if (mpSubscription == null) { if (TraceComponent.isAnyTracingEnabled() && CoreSPIConsumerSession.tc.isEntryEnabled()) SibTr.exit(CoreSPIConsumerSession.tc, "getSubscription", "SIDurableSubscriptionNotFoundException"); throw new SIDurableSubscriptionNotFoundException( nls.getFormattedMessage( "SUBSCRIPTION_DOESNT_EXIST_ERROR_CWSIP0146", new Object[] { null, _messageProcessor.getMessagingEngineName() }, null)); } if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "getSubscription", mpSubscription); return mpSubscription; }
python
def get_paths(self, key): ''' Retrieve a set of environment paths from the config Parameters: key (str): The section name to grab from the environment Returns: self.environ[newkey] (OrderedDict): An ordered dict containing all of the paths from the specified section, as key:val = name:path ''' newkey = key if key in self.environ else key.upper() if key.upper() \ in self.environ else None if newkey: return self.environ[newkey] else: raise KeyError('Key {0} not found in tree environment'.format(key))
java
@Override
public void initialize(JsMEConfig config) throws Exception {
    String thisMethodName = CLASS_NAME + ".initialize(Object)";
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, thisMethodName, "");
    }

    meConfig = config;
    createMessageEngine(meConfig);

    // Initialize the ME's we created
    Enumeration meEnum = _messagingEngines.elements();
    while (meEnum.hasMoreElements()) {
        Object o = meEnum.nextElement();
        Object c = ((MessagingEngine) o).getRuntime();
        if (c instanceof BaseMessagingEngineImpl) {
            try {
                ((BaseMessagingEngineImpl) c).initialize(null);
                setAttributes((BaseMessagingEngineImpl) c);
            } catch (Exception e) {
                FFDCFilter.processException(e, thisMethodName, "1:656:1.108", this);
                SibTr.exception(tc, e);
                SibTr.error(tc, "INTERNAL_ERROR_SIAS0003", e);
                SibTr.error(tc, "ME_ERROR_REPORTED_SIAS0029", ((BaseMessagingEngineImpl) c).getName());
            }
        }
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, thisMethodName);
    }
}
python
def simplify(self): """Return a simplified expression.""" node = self.node.simplify() if node is self.node: return self else: return _expr(node)
java
public static double[] minusTimesEquals(final double[] v1, final double[] v2, final double s2) { assert v1.length == v2.length : ERR_VEC_DIMENSIONS; for(int i = 0; i < v1.length; i++) { v1[i] -= v2[i] * s2; } return v1; }
java
public JsonLdModule configure(ConfigParam param, String value) { Objects.requireNonNull(param); configuration.set(param, value); return this; }
java
@FFDCIgnore({ SSLPeerUnverifiedException.class, Exception.class }) private Subject tryToAuthenticate(SSLSession session) throws SASException { Subject transportSubject = null; try { transportSubject = authenticateWithCertificateChain(session); } catch (SSLPeerUnverifiedException e) { throwExceptionIfClientCertificateAuthenticationIsRequired(e); if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "The peer could not be verified, but ignoring because client certificate authentication is not required. The exception is: " + e.getMessage()); } } catch (Exception e) { /* * All the possible exceptions, including AuthenticationException, CredentialExpiredException * and CredentialDestroyedException are caught here, and re-thrown */ throwExceptionIfClientCertificateAuthenticationIsRequired(e); } return transportSubject; }
python
def send(self, sender=None, senders=None, key=None, keys=None, **kwargs):
    '''
    *This method is a coroutine.*

    Schedules connected callbacks for execution. You may optionally supply
    ``senders`` and/or ``keys`` to include callbacks that were connected
    with one or more matching ``senders`` and/or ``keys``.

    Each callback will receive the following keyword arguments when called:

    :signal: the signal that scheduled the execution of the callback
    :senders: a :class:`set` of ``senders``
    :keys: a :class:`set` of ``keys``
    :\*\*kwargs: the additional kwargs supplied when the signal was created

    :param kwargs: keyword pairs to send to the callbacks. these override
        the defaults set when the signal was initiated. You can only include
        keywords set when the signal was created.

    :Returns: the number of callbacks that received the signal
    '''
    default_kwargs = self._default_kwargs.copy()
    for keyword in kwargs:
        if keyword not in default_kwargs:
            raise ValueError('You can not add new kwargs to an existing signal.')
    default_kwargs.update(kwargs)

    if senders is not None:
        senders = set(senders)
    else:
        senders = set()
    if sender is not None:
        senders.add(sender)

    if keys is not None:
        keys = set(keys)
    else:
        keys = set()
    if key is not None:
        keys.add(key)

    live_callbacks = set()

    # collect callbacks connected to all send calls
    with (yield from self._lock_all):
        all_callbacks = yield from self._get_callbacks(self._all)
        live_callbacks = live_callbacks | all_callbacks

    # collect sender filtered callbacks
    sender_callbacks = set()
    for sender in senders:
        id_ = yield from self._make_id(sender)
        if id_ in self._by_senders:
            sender_lock = self._get_lock(self._locks_senders, id_)
            with (yield from sender_lock):
                new_sender_callbacks = yield from self._get_callbacks(self._by_senders[id_])
                if not new_sender_callbacks:
                    with (yield from self._lock_by_senders):
                        # Do some pruning
                        del(self._by_senders[id_])
                        del(self._locks_senders[id_])
                else:
                    sender_callbacks = sender_callbacks | new_sender_callbacks
    live_callbacks = live_callbacks | sender_callbacks

    # collect key filtered callbacks
    key_callbacks = set()
    for key in keys:
        if key in self._by_keys:
            key_lock = self._get_lock(self._locks_keys, key)
            with (yield from key_lock):
                new_key_callbacks = yield from self._get_callbacks(self._by_keys[key])
                if not new_key_callbacks:
                    # Do some pruning
                    with (yield from self._lock_by_keys):
                        del(self._by_keys[key])
                        del(self._locks_keys[key])
                else:
                    key_callbacks = key_callbacks | new_key_callbacks
    live_callbacks = live_callbacks | key_callbacks

    # schedule all collected callbacks
    for callback in live_callbacks:
        yield from self._call_callback(callback, senders, keys, **default_kwargs)

    return len(live_callbacks)
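A hedged usage sketch in the same yield-from coroutine style (the constructor and `connect()` signature are assumed from context, not confirmed):

# @asyncio.coroutine
# def demo(signal, my_callback, source):
#     yield from signal.connect(my_callback)                       # assumed connect() API
#     n = yield from signal.send(sender=source, key='route-1', spam='eggs')
#     print(n, 'callbacks were scheduled')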
python
def create_deployment(applicationName=None, deploymentGroupName=None, revision=None, deploymentConfigName=None, description=None, ignoreApplicationStopFailures=None, targetInstances=None, autoRollbackConfiguration=None, updateOutdatedInstancesOnly=None, fileExistsBehavior=None): """ Deploys an application revision through the specified deployment group. See also: AWS API Documentation :example: response = client.create_deployment( applicationName='string', deploymentGroupName='string', revision={ 'revisionType': 'S3'|'GitHub', 's3Location': { 'bucket': 'string', 'key': 'string', 'bundleType': 'tar'|'tgz'|'zip', 'version': 'string', 'eTag': 'string' }, 'gitHubLocation': { 'repository': 'string', 'commitId': 'string' } }, deploymentConfigName='string', description='string', ignoreApplicationStopFailures=True|False, targetInstances={ 'tagFilters': [ { 'Key': 'string', 'Value': 'string', 'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE' }, ], 'autoScalingGroups': [ 'string', ] }, autoRollbackConfiguration={ 'enabled': True|False, 'events': [ 'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST', ] }, updateOutdatedInstancesOnly=True|False, fileExistsBehavior='DISALLOW'|'OVERWRITE'|'RETAIN' ) :type applicationName: string :param applicationName: [REQUIRED] The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account. :type deploymentGroupName: string :param deploymentGroupName: The name of the deployment group. :type revision: dict :param revision: The type and location of the revision to deploy. revisionType (string) --The type of application revision: S3: An application revision stored in Amazon S3. GitHub: An application revision stored in GitHub. s3Location (dict) --Information about the location of application artifacts stored in Amazon S3. bucket (string) --The name of the Amazon S3 bucket where the application revision is stored. key (string) --The name of the Amazon S3 object that represents the bundled artifacts for the application revision. bundleType (string) --The file type of the application revision. Must be one of the following: tar: A tar archive file. tgz: A compressed tar archive file. zip: A zip archive file. version (string) --A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision. If the version is not specified, the system will use the most recent version by default. eTag (string) --The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision. If the ETag is not specified as an input parameter, ETag validation of the object will be skipped. gitHubLocation (dict) --Information about the location of application artifacts stored in GitHub. repository (string) --The GitHub account and repository pair that stores a reference to the commit that represents the bundled artifacts for the application revision. Specified as account/repository. commitId (string) --The SHA1 commit ID of the GitHub commit that represents the bundled artifacts for the application revision. :type deploymentConfigName: string :param deploymentConfigName: The name of a deployment configuration associated with the applicable IAM user or AWS account. If not specified, the value configured in the deployment group will be used as the default. If the deployment group does not have a deployment configuration associated with it, then CodeDeployDefault.OneAtATime will be used by default. :type description: string :param description: A comment about the deployment. 
:type ignoreApplicationStopFailures: boolean :param ignoreApplicationStopFailures: If set to true, then if the deployment causes the ApplicationStop deployment lifecycle event to an instance to fail, the deployment to that instance will not be considered to have failed at that point and will continue on to the BeforeInstall deployment lifecycle event. If set to false or not specified, then if the deployment causes the ApplicationStop deployment lifecycle event to fail to an instance, the deployment to that instance will stop, and the deployment to that instance will be considered to have failed. :type targetInstances: dict :param targetInstances: Information about the instances that will belong to the replacement environment in a blue/green deployment. tagFilters (list) --The tag filter key, type, and value used to identify Amazon EC2 instances in a replacement environment for a blue/green deployment. (dict) --Information about an EC2 tag filter. Key (string) --The tag filter key. Value (string) --The tag filter value. Type (string) --The tag filter type: KEY_ONLY: Key only. VALUE_ONLY: Value only. KEY_AND_VALUE: Key and value. autoScalingGroups (list) --The names of one or more Auto Scaling groups to identify a replacement environment for a blue/green deployment. (string) -- :type autoRollbackConfiguration: dict :param autoRollbackConfiguration: Configuration information for an automatic rollback that is added when a deployment is created. enabled (boolean) --Indicates whether a defined automatic rollback configuration is currently enabled. events (list) --The event type or types that trigger a rollback. (string) -- :type updateOutdatedInstancesOnly: boolean :param updateOutdatedInstancesOnly: Indicates whether to deploy to all instances or only to instances that are not running the latest application revision. :type fileExistsBehavior: string :param fileExistsBehavior: Information about how AWS CodeDeploy handles files that already exist in a deployment target location but weren't part of the previous successful deployment. The fileExistsBehavior parameter takes any of the following values: DISALLOW: The deployment fails. This is also the default behavior if no option is specified. OVERWRITE: The version of the file from the application revision currently being deployed replaces the version already on the instance. RETAIN: The version of the file already on the instance is kept and used as part of the new deployment. :rtype: dict :return: { 'deploymentId': 'string' } """ pass
python
def splitword(self, originalword, *newwords, **kwargs): """TODO: Write documentation""" if isstring(originalword): originalword = self.doc[u(originalword)] return self.correctwords([originalword], newwords, **kwargs)
python
import os
import time


def safe_makedir(dname):
    """Make a directory if it doesn't exist, handling concurrent race conditions.
    """
    if not dname:
        return dname
    num_tries = 0
    max_tries = 5
    while not os.path.exists(dname):
        # we could get an error here if multiple processes are creating
        # the directory at the same time. Grr, concurrency.
        try:
            os.makedirs(dname)
        except OSError:
            if num_tries > max_tries:
                raise
            num_tries += 1
            time.sleep(2)
    return dname
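Typical use: several worker processes race to create the same output directory, and whoever loses the `os.makedirs` race just retries until the winner's directory exists:

# the path is illustrative; any nested output directory works the same way
out_dir = safe_makedir("results/sample1/qc")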
python
def get_outgroup(self): """Generates the outgroup line from the voucher code specified by the user. """ if self.outgroup is not None: outgroup_taxonomy = '' for i in self.data.seq_records: if self.outgroup == i.voucher_code: outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'], i.taxonomy['species']) break outgroup = '\noutgroup {0}_{1};'.format(self.outgroup, outgroup_taxonomy) else: outgroup = '' return outgroup
python
def main():
    """
    The entry point for this module
    """
    # first, check we can import aeneas package, exiting on failure
    if check_import():
        sys.exit(1)

    # import and run the built-in diagnostics
    from aeneas.diagnostics import Diagnostics
    errors, warnings, c_ext_warnings = Diagnostics.check_all()
    if errors:
        sys.exit(1)
    if c_ext_warnings:
        print_warning(u"All required dependencies are met but at least one Python C extension is not available")
        print_warning(u"You can still run aeneas but it will be slower")
        print_warning(u"Enjoy running aeneas!")
        sys.exit(2)
    else:
        print_success(u"All required dependencies are met and all available Python C extensions are working")
        print_success(u"Enjoy running aeneas!")
        sys.exit(0)
python
def metadata_to_intermediary(metadata): """ Transforms SQLAlchemy metadata to the intermediary representation. """ tables = [table_to_intermediary(table) for table in metadata.tables.values()] relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys] return tables, relationships
java
public RequestEvent waitNotify(long timeout) { initErrorInfo(); synchronized (this) { if (reqEvents.isEmpty()) { try { LOG.trace("about to block, waiting"); this.wait(timeout); LOG.trace("we've come out of the block"); } catch (Exception ex) { setException(ex); setErrorMessage("Exception: " + ex.getClass().getName() + ": " + ex.getMessage()); setReturnCode(SipSession.EXCEPTION_ENCOUNTERED); return null; } } LOG.trace("either we got the request, or timed out"); if (reqEvents.isEmpty()) { String err = "*** NOTIFY REQUEST ERROR *** (" + targetUri + ") - The maximum amount of time to wait for a NOTIFY message has elapsed."; synchronized (eventErrors) { eventErrors.addLast(err); } LOG.trace(err); setReturnCode(SipSession.TIMEOUT_OCCURRED); setErrorMessage(err); return null; } return (RequestEvent) reqEvents.removeFirst(); } }
python
async def whois(self, nickname):
    """
    Return information about user.
    This is a blocking asynchronous method: it has to be called from a
    coroutine, as follows:

        info = await self.whois('Nick')
    """
    # Some IRCDs are wonky and send strange responses for spaces in nicknames.
    # We just check if there's a space in the nickname -- if there is,
    # then we immediately set the future's result to None and don't bother checking.
    if protocol.ARGUMENT_SEPARATOR.search(nickname) is not None:
        result = self.eventloop.create_future()
        result.set_result(None)
        return result

    if nickname not in self._pending['whois']:
        await self.rawmsg('WHOIS', nickname)
        self._whois_info[nickname] = {
            'oper': False,
            'idle': 0,
            'away': False,
            'away_message': None,
        }

        # Create a future for when the WHOIS requests succeeds.
        self._pending['whois'][nickname] = self.eventloop.create_future()

    return await self._pending['whois'][nickname]
java
@Benchmark @BenchmarkMode(Mode.SampleTime) @OutputTimeUnit(TimeUnit.NANOSECONDS) public Status codeDecode() { return Status.CODE_KEY.parseBytes("15".getBytes(Charset.forName("US-ASCII"))); }
python
def integrate_gamma(v, v0, gamma0, q0, q1, theta0):
    """
    internal function to calculate Debye temperature

    :param v: unit-cell volume in A^3
    :param v0: unit-cell volume in A^3 at 1 bar
    :param gamma0: Gruneisen parameter at 1 bar
    :param q0: logarithmic derivative of Gruneisen parameter
    :param q1: logarithmic derivative of Gruneisen parameter
    :param theta0: Debye temperature at 1 bar in K (not used here; applied by the caller)
    :return: the integral of gamma/v from v0 to v, from which the caller
        obtains the Debye temperature
    """
    def f_integrand(v):
        gamma = gamma0 * np.exp(q0 / q1 * ((v / v0) ** q1 - 1.))
        return gamma / v

    theta_term = quad(f_integrand, v0, v)[0]
    return theta_term
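A worked call with made-up, roughly MgO-like values (assumes the helper above is in scope with its `np` and `quad` imports); the final line applies the conventional relation θ(V) = θ0·exp(−∫ γ/V dV), which is assumed to be what the caller does:

import numpy as np
from scipy.integrate import quad

term = integrate_gamma(v=70.0, v0=74.7, gamma0=1.5, q0=1.0, q1=1.0, theta0=773.)
theta = 773. * np.exp(-term)  # compression (v < v0) gives term < 0, so theta > theta0
print(theta)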
python
def set_options(self, force_read=False, force_fulltext=False): """Set the options for this run.""" self.options['force_read'] = force_read self.options['force_fulltext'] = force_fulltext return
python
def com_google_fonts_check_name_postscriptname(ttFont, style, familyname):
    """ Check name table: POSTSCRIPT_NAME entries. """
    from fontbakery.utils import name_entry_id

    failed = False
    for name in ttFont['name'].names:
        if name.nameID == NameID.POSTSCRIPT_NAME:
            expected_value = f"{familyname}-{style}"
            string = name.string.decode(name.getEncoding()).strip()
            if string != expected_value:
                failed = True
                yield FAIL, ("Entry {} on the 'name' table: "
                             "Expected '{}' "
                             "but got '{}'.").format(name_entry_id(name),
                                                     expected_value,
                                                     string)
    if not failed:
        yield PASS, "POSTSCRIPT_NAME entries are all good."
python
def visit_grouping(self, grouping, asfrom=False, **kwargs): """" TODO: """ return { 'type': 'grouping', 'grouping': grouping.element._compiler_dispatch(self, **kwargs) }
java
public static HttpResponse parseMessage(byte[] in) throws IOException { HttpResponse m = new HttpResponse(); m.parse(new ByteArrayInputStream(in)); return m; }
python
def add_particles_ascii(self, s): """ Adds particles from an ASCII string. Parameters ---------- s : string One particle per line. Each line should include particle's mass, radius, position and velocity. """ for l in s.split("\n"): r = l.split() if len(r): try: r = [float(x) for x in r] p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7]) self.add(p) except: raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).")
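A hedged usage sketch (a REBOUND-style simulation object is assumed; each line carries the eight floats m, r, x, y, z, vx, vy, vz):

# sim = rebound.Simulation()
# sim.add_particles_ascii("""1.0  0.0  0.0 0.0 0.0  0.0 0.0 0.0
# 1e-3 0.0  1.0 0.0 0.0  0.0 1.0 0.0""")   # a star plus one orbiting body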
python
def to_one(dest_class, type=RelationType.DIRECT, resource_classes=None,
           reverse=None, reverse_type=RelationType.DIRECT,
           writable=False):
    """Create a one to one relation to a given target :class:`Resource`.

    Args:
        dest_class(Resource): The *target* class for the relationship

    Keyword Args:
        type(RelationType): The relationship approach to use.
        reverse(to_many or to_one): An *optional* reverse relationship.
        reverse_type(RelationType): The reverse relationship approach.
        resource_classes(Resource): The kinds of Resources to expect
            in the relationship

    Returns:
        A builder function which, given a source class creates a
        one-to-one relationship with the target

    A one to one relationship means that you can get the associated
    target object from the object on which the ``to_one`` was declared.

    .. code-block:: python

        @to_one(Organization)
        def User(Resource):
            pass

    Declares that a User is associated with *one* Organization. The
    decorator automatically adds a method to fetch the associated
    organization:

    .. code-block:: python

        org = user.organization()
    """
    def method_builder(cls):
        dest_resource_type = dest_class._resource_type()
        dest_method_name = dest_resource_type.replace('-', '_')
        doc_variables = {
            'from_class': cls.__name__,
            'to_class': dest_class.__name__,
            'to_name': dest_method_name
        }

        fetch_method_doc = """Fetch the {2} associated with this :class:`{0}`.

        Returns:
            {1}: The :class:`{1}` of this :class:`{0}`
        """.format(cls.__name__, dest_class.__name__, dest_method_name)

        def _fetch_relationship_included(self):
            session = self._session
            include = self._include
            if include is None or dest_class not in include:
                # You requested an included relationship that was
                # not originally included
                error = "{} was not included".format(dest_class.__name__)
                raise AttributeError(error)
            included = self._included.get(dest_resource_type)
            if len(included) == 0:
                return None
            mk_one = dest_class._mk_one(session,
                                        resource_classes=resource_classes)
            return mk_one({
                'data': included[0]
            })

        def fetch_relationship_direct(self, use_included=False):
            if use_included:
                return _fetch_relationship_included(self)
            session = self._session
            id = None if self.is_singleton() else self.id
            url = session._build_url(self._resource_path(), id,
                                     dest_resource_type)
            process = dest_class._mk_one(session,
                                         resource_classes=resource_classes)
            return session.get(url, CB.json(200, process))

        def fetch_relationship_include(self, use_included=False):
            if use_included:
                return _fetch_relationship_included(self)
            session = self._session
            id = None if self.is_singleton() else self.id
            url = session._build_url(self._resource_path(), id)
            params = build_request_include([dest_class], None)

            def _process(json):
                included = json.get('included')
                if len(included) == 0:
                    return None
                mk_one = dest_class._mk_one(session,
                                            resource_classes=resource_classes)
                return mk_one({
                    'data': included[0]
                })

            return session.get(url, CB.json(200, _process), params=params)

        if type == RelationType.DIRECT:
            fetch_relationship = fetch_relationship_direct
        elif type == RelationType.INCLUDE:
            fetch_relationship = fetch_relationship_include
        else:  # pragma: no cover
            raise ValueError("Invalid RelationType: {}".format(type))

        fetch_relationship.__doc__ = fetch_method_doc

        def update_method(self, resource):
            """Set the {to_name} for this :class:`{from_class}`.

            Args:
                resource: The :class:`{to_class}` to set

            Returns:
                True if successful
            """
            session, url, json = _build_relatonship(self, dest_resource_type,
                                                    resource)
            return session.patch(url, CB.boolean(200), json=json)

        methods = [(dest_method_name, fetch_relationship)]
        if writable:
            methods.extend([
                ('update_{}'.format(dest_method_name), update_method)
            ])
        for name, method in methods:
            method.__doc__ = method.__doc__.format(**doc_variables)
            setattr(cls, name, method)

        if reverse is not None:
            reverse(cls, type=reverse_type)(dest_class)

        return cls

    return method_builder
java
public void setCharset(Charset charset) { mutationCheck(); if (charset == null || charset.equals(ASCII) || charset.equals(UTF8)) { myCharset = charset; } else { throw new IllegalArgumentException("Unsupported Charset " + charset); } }
java
@Before("external() && access()") public void lazilyInjectField(JoinPoint thisJoinPoint) { FieldSignature fs = (FieldSignature) thisJoinPoint.getSignature(); Field field = fs.getField(); Object target = thisJoinPoint.getTarget(); injector.lazilyInject(target, field); }
java
@SuppressWarnings("checkstyle:magicnumber") void writeLong(long value) { if (value == Long.MIN_VALUE) { write(STR_LONG_MIN_VALUE); return; } if (value < 0) { write('-'); value = -value; } int digitsWithoutComma = 0; tmpSb.setLength(0); do { digitsWithoutComma++; if (digitsWithoutComma == 4) { tmpSb.append(','); digitsWithoutComma = 1; } int mod = (int) (value % 10); tmpSb.append(DIGITS[mod]); value = value / 10; } while (value > 0); for (int k = tmpSb.length() - 1; k >= 0; k--) { char c = tmpSb.charAt(k); write(c); } }
python
def sendReset(self, sequenceId=0): """ Sends a reset signal to the network. """ for col in xrange(self.numColumns): self.sensorInputs[col].addResetToQueue(sequenceId) self.network.run(1)
java
public NamedStoredProcedureQuery<T> resultSetMapping(String ... values) { if (values != null) { for(String name: values) { childNode.createChild("result-set-mapping").text(name); } } return this; }
java
@Pure public static GeodesicPosition L3_WSG84(double x, double y) { final Point2d ntfLambdaPhi = NTFLambert_NTFLambdaPhi(x, y, LAMBERT_3_N, LAMBERT_3_C, LAMBERT_3_XS, LAMBERT_3_YS); return NTFLambdaPhi_WSG84(ntfLambdaPhi.getX(), ntfLambdaPhi.getY()); }
python
def before_request(request, tracer=None):
    """
    Attempts to extract a tracing span from incoming request.
    If no tracing context is passed in the headers, or the data
    cannot be parsed, a new root span is started.

    :param request: HTTP request with `.headers` property exposed
        that satisfies a regular dictionary interface
    :param tracer: optional tracer instance to use. If not specified
        the global opentracing.tracer will be used.
    :return: returns a new, already started span.
    """
    if tracer is None:  # pragma: no cover
        tracer = opentracing.tracer

    # we need to prepare tags upfront, mainly because RPC_SERVER tag must be
    # set when starting the span, to support Zipkin's one-span-per-RPC model
    tags_dict = {
        tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
        tags.HTTP_URL: request.full_url,
    }

    remote_ip = request.remote_ip
    if remote_ip:
        tags_dict[tags.PEER_HOST_IPV4] = remote_ip

    caller_name = request.caller_name
    if caller_name:
        tags_dict[tags.PEER_SERVICE] = caller_name

    remote_port = request.remote_port
    if remote_port:
        tags_dict[tags.PEER_PORT] = remote_port

    operation = request.operation
    try:
        carrier = {}
        for key, value in six.iteritems(request.headers):
            carrier[key] = value
        parent_ctx = tracer.extract(
            format=Format.HTTP_HEADERS, carrier=carrier
        )
    except Exception as e:
        logging.exception('trace extract failed: %s' % e)
        parent_ctx = None

    span = tracer.start_span(
        operation_name=operation,
        child_of=parent_ctx,
        tags=tags_dict)
    return span
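A hedged usage sketch: wrap an incoming request before handling it (`request` must expose the `.full_url`, `.remote_ip`, `.caller_name`, `.remote_port`, `.operation`, and `.headers` attributes this helper reads; `handle` is a placeholder):

# span = before_request(request, tracer=opentracing.tracer)
# try:
#     handle(request)
# finally:
#     span.finish()   # always close the server-side span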
java
@Override public void close() { try { xmlSerializer.close(); } catch (Exception e) { e.printStackTrace(); throw new KriptonRuntimeException(e); } }
python
def check_signature(params):
    """
    Verify the signature of the parameters in an OTP v2.0 verify request.

    Returns ValResultBool, Key
    """
    if 'id' in params:
        try:
            id_int = int(params['id'][0])
        except:
            my_log_message(args, syslog.LOG_INFO, "Non-numerical client id (%s) in request." % (params['id'][0]))
            return False, None
        key = client_ids.get(id_int)
        if key:
            if 'h' in params:
                sig = params['h'][0]
                good_sig = make_signature(params, key)
                if sig == good_sig:
                    #my_log_message(args, syslog.LOG_DEBUG, "Good signature (client id '%i')" % id_int)
                    return True, key
                else:
                    my_log_message(args, syslog.LOG_INFO, "Bad signature from client id '%i' (%s, expected %s)." \
                                       % (id_int, sig, good_sig))
            else:
                my_log_message(args, syslog.LOG_INFO, "Client id (%i) but no HMAC in request." % (id_int))
            return False, key
        else:
            my_log_message(args, syslog.LOG_INFO, "Unknown client id '%i'" % (id_int))
            return False, None
    return True, None
python
def getChangeSets(self): """Get all the ChangeSets of this workitem :return: a :class:`list` contains all the :class:`rtcclient.models.ChangeSet` objects :rtype: list """ changeset_tag = ("rtc_cm:com.ibm.team.filesystem.workitems." "change_set.com.ibm.team.scm.ChangeSet") return (self.rtc_obj ._get_paged_resources("ChangeSet", workitem_id=self.identifier, customized_attr=changeset_tag, page_size="10"))
java
public void process(final String beanName, final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
    InputStream is = request.getInputStream();
    OutputStream os = response.getOutputStream();

    Hessian2Input in = new Hessian2Input(is);
    AbstractHessianOutput out;

    SerializerFactory serializerFactory = new SerializerFactory();
    serializerFactory.setAllowNonSerializable(true);
    serializerFactory.addFactory(new JdonSerializerFactory());
    in.setSerializerFactory(serializerFactory);

    int code = in.read();
    if (code != 'c') {
        // XXX: deflate
        throw new IOException("expected 'c' in hessian input at " + code);
    }

    int major = in.read();
    in.read();

    if (major >= HESSIAN_PROTOCOL_MAJOR_VERSION) {
        out = new Hessian2Output(os);
    } else {
        out = new HessianOutput(os);
    }
    out.setSerializerFactory(serializerFactory);

    // backward compatibility for some frameworks that don't read
    // the call type first
    in.skipOptionalCall();

    out.startReply();

    readHeaders(in); // read headers from call

    try {
        out.writeObject(makeCall(in, beanName, request));
    } catch (Exception e) {
        writeException(out, e);
    }

    // The complete call needs to be after the invoke to handle a
    // trailing InputStream
    in.completeCall();

    out.completeReply();
    out.close();
}
java
protected final String replaceMacro(String macroName, String text, String replacement, ReplacementType type, File sourceFile, int sourceLine) { return replaceMacro(macroName, text, replacement, type, false, sourceFile, sourceLine); }
java
static synchronized Mutator mutatorFor(Class<?> type, String name, InheritingConfiguration configuration) { PropertyInfoKey key = new PropertyInfoKey(type, name, configuration); if (!MUTATOR_CACHE.containsKey(key) || !FIELD_CACHE.containsKey(key)) { @SuppressWarnings("unchecked") Class<Object> uncheckedType = (Class<Object>) type; for (Entry<String, Mutator> entry : TypeInfoRegistry.typeInfoFor(uncheckedType, configuration).getMutators().entrySet()) { if (entry.getValue().getMember() instanceof Method) mutatorFor(type, (Method) entry.getValue().getMember(), configuration, entry.getKey()); else if (entry.getValue().getMember() instanceof Field) fieldPropertyFor(type, (Field) entry.getValue().getMember(), configuration, entry.getKey()); } } if (MUTATOR_CACHE.containsKey(key)) return MUTATOR_CACHE.get(key); return FIELD_CACHE.get(key); }
java
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "measure") public JAXBElement<MeasureType> createMeasure(MeasureType value) { return new JAXBElement<MeasureType>(_Measure_QNAME, MeasureType.class, null, value); }
python
def match_all_concepts(self, string):
    '''Returns a sorted list of all :class:`Concept` objects matching ``string``'''
    # NB: `multipliers` is defined but never used in this method; match
    # ranking is delegated to sort_matches() below.
    multipliers = {'exact': 10**5, 'fname': 10**4, 'fuzzy': 10**2, 'fuzzy_fragment': 1}
    matches = []
    for concept in self.vocab:
        matches += concept.matches(string, self.fuzzy, self.fname_match,
                                   self.fuzzy_fragment, self.guess)
    return sort_matches(matches)
python
def update(self, batch_size, ignore_stale_grad=False):
    """Makes one step of parameter update.

    Should be called after `autograd.backward()` and outside of `record()` scope,
    and after `trainer.allreduce_grads()`.

    For normal parameter updates, `step()` should be used, which internally calls
    `allreduce_grads()` and then `update()`. However, if you need to get the reduced
    gradients to perform certain transformation, such as in gradient clipping, then
    you may want to manually call `allreduce_grads()` and `update()` separately.

    Parameters
    ----------
    batch_size : int
        Batch size of data processed. Gradient will be normalized by `1/batch_size`.
        Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
    ignore_stale_grad : bool, optional, default=False
        If true, ignores Parameters with stale gradient (gradient that has not
        been updated by `backward` after last step) and skips the update.
    """
    if not self._kv_initialized:
        self._init_kvstore()
    if self._params_to_init:
        self._init_params()
    assert not (self._kvstore and self._update_on_kvstore), \
            'update() when parameters are updated on kvstore ' \
            'is not supported. Try setting `update_on_kvstore` ' \
            'to False when creating trainer.'

    self._check_and_rescale_grad(self._scale / batch_size)
    self._update(ignore_stale_grad)
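A sketch of the manual clip-then-update flow the docstring describes, assuming an MXNet Gluon `net` and a `trainer` created with update_on_kvstore=False; `net`, `data`, and the max_norm value are placeholders:

from mxnet import autograd, gluon

with autograd.record():
    loss = net(data).mean()    # hypothetical network and batch
loss.backward()

trainer.allreduce_grads()                          # reduce gradients first
grads = [p.grad() for p in net.collect_params().values()
         if p.grad_req != 'null']
gluon.utils.clip_global_norm(grads, max_norm=1.0)  # transform reduced grads
trainer.update(1)                                  # then apply the update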
java
public PolylineMarkers addPolylineToMapAsMarkers(GoogleMap map, PolylineOptions polylineOptions, MarkerOptions polylineMarkerOptions, PolylineOptions globalPolylineOptions) { PolylineMarkers polylineMarkers = new PolylineMarkers(this); if (globalPolylineOptions != null) { polylineOptions.color(globalPolylineOptions.getColor()); polylineOptions.geodesic(globalPolylineOptions.isGeodesic()); polylineOptions.visible(globalPolylineOptions.isVisible()); polylineOptions.zIndex(globalPolylineOptions.getZIndex()); polylineOptions.width(globalPolylineOptions.getWidth()); } Polyline polyline = addPolylineToMap(map, polylineOptions); polylineMarkers.setPolyline(polyline); List<Marker> markers = addPointsToMapAsMarkers(map, polylineOptions.getPoints(), polylineMarkerOptions, false); polylineMarkers.setMarkers(markers); return polylineMarkers; }
java
private int getStartingPoint(String buffer) { for (int i = 0; i < buffer.length(); i++) { if (!Character.isWhitespace(buffer.charAt(i))) { return i; } } return buffer.length(); }
python
def activate(self, key):
    """ Activates a new registrant on the LEX with the given activation key

    :rtype: None
    """
    url = self._base + 'user/activate'
    r = requests.get(url, params={
        'activation_key': key
    })
    r.raise_for_status()
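A hypothetical call site (the client object and key value are invented); raise_for_status() turns a failed activation into a requests.HTTPError:

import requests

try:
    lex.activate('3f2a9c-activation-key')   # `lex` stands in for the client instance
except requests.HTTPError as err:
    print('activation rejected:', err.response.status_code)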
java
@Override public List<Object> getServices(Class<?> root,Class<?> transactionType){ Map<Class<?>, List<Object>> map = mapBusinessProvider.get(root); if(map != null){ List<Object> list = map.get(transactionType); if(list != null){ return Collections.unmodifiableList(list); } } return null; }
java
protected ResourceAdapterMBeanImpl setResourceAdapterChild(String key, ResourceAdapterMBeanImpl ra) { return raMBeanChildrenList.put(key, ra); }
python
def import_env(*envs): 'import environment variables from host' for env in envs: parts = env.split(':', 1) if len(parts) == 1: export_as = env else: env, export_as = parts env_val = os.environ.get(env) if env_val is not None: yield '{}={}'.format(export_as, shlex.quote(env_val))
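A minimal usage sketch, assuming import_env above is in scope; the variable names are illustrative:

import os

os.environ.setdefault('HOME', '/home/demo')   # make the demo deterministic
# Forward $PATH unchanged and re-export $HOME under the name REMOTE_HOME.
for assignment in import_env('PATH', 'HOME:REMOTE_HOME'):
    print(assignment)   # e.g. PATH=/usr/bin:... and REMOTE_HOME=/home/demo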
java
private void redimNodeArrays(int newsize) { int len = newsize < lo.length ? newsize : lo.length; char[] na = new char[newsize]; System.arraycopy(lo, 0, na, 0, len); lo = na; na = new char[newsize]; System.arraycopy(hi, 0, na, 0, len); hi = na; na = new char[newsize]; System.arraycopy(eq, 0, na, 0, len); eq = na; na = new char[newsize]; System.arraycopy(sc, 0, na, 0, len); sc = na; }
python
def get_sari_score(source_ids, prediction_ids, list_of_targets, max_gram_size=4, beta_for_deletion=0): """Compute the SARI score for a single prediction and one or more targets. Args: source_ids: a list / np.array of SentencePiece IDs prediction_ids: a list / np.array of SentencePiece IDs list_of_targets: a list of target ID lists / np.arrays max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, bigrams, and trigrams) beta_for_deletion: beta for deletion F score. Returns: the SARI score and its three components: add, keep, and deletion scores """ addition_scores = [] keep_scores = [] deletion_scores = [] for n in range(1, max_gram_size + 1): source_counts = _get_ngram_counter(source_ids, n) prediction_counts = _get_ngram_counter(prediction_ids, n) # All ngrams in the targets with count 1. target_counts = collections.Counter() # All ngrams in the targets with count r/num_targets, where r is the number # of targets where the ngram occurs. weighted_target_counts = collections.Counter() num_nonempty_targets = 0 for target_ids_i in list_of_targets: target_counts_i = _get_ngram_counter(target_ids_i, n) if target_counts_i: weighted_target_counts += target_counts_i num_nonempty_targets += 1 for gram in weighted_target_counts.keys(): weighted_target_counts[gram] /= num_nonempty_targets target_counts[gram] = 1 keep_scores.append(get_keep_score(source_counts, prediction_counts, weighted_target_counts)) deletion_scores.append(get_deletion_score(source_counts, prediction_counts, weighted_target_counts, beta_for_deletion)) addition_scores.append(get_addition_score(source_counts, prediction_counts, target_counts)) avg_keep_score = sum(keep_scores) / max_gram_size avg_addition_score = sum(addition_scores) / max_gram_size avg_deletion_score = sum(deletion_scores) / max_gram_size sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0 return sari, avg_keep_score, avg_addition_score, avg_deletion_score
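A toy invocation with made-up token IDs, assuming the helper functions the implementation references (_get_ngram_counter, get_keep_score, get_deletion_score, get_addition_score) are in scope alongside get_sari_score:

source = [5, 6, 7, 8]
prediction = [5, 6, 9]
targets = [[5, 6, 9], [5, 6, 10]]
sari, keep, add, delete = get_sari_score(source, prediction, targets)
print('SARI: %.4f (keep %.4f, add %.4f, del %.4f)' % (sari, keep, add, delete))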
java
public static void isNotBlankThen(String str, Consumer<String> consumer) { notBankAccept(str, Function.identity(), consumer); }
java
protected void actionBack() { if (m_dialogIndex <= 0) { return; } m_dialogIndex--; m_uploadPropertyPanel.getPropertyEditor().getForm().validateAndSubmit(); m_nextAction = new Runnable() { public void run() { loadDialogBean(m_resources.get(m_dialogIndex)); } }; }
java
public static void writeProteinSequence(OutputStream outputStream, Collection<ProteinSequence> proteinSequences) throws Exception { GenbankWriter<ProteinSequence, AminoAcidCompound> genbankWriter = new GenbankWriter<ProteinSequence, AminoAcidCompound>( outputStream, proteinSequences, new GenericGenbankHeaderFormat<ProteinSequence, AminoAcidCompound>()); genbankWriter.process(); }
java
public static String[] partitionOptions(String[] options) { for (int i = 0; i < options.length; i++) { if (options[i].equals("--")) { options[i++] = ""; String[] result = new String [options.length - i]; for (int j = i; j < options.length; j++) { result[j - i] = options[j]; options[j] = ""; } return result; } } return new String [0]; }
python
def _update(self, data): """Update the object with new data.""" for k, v in six.iteritems(data): new_value = v if isinstance(v, dict): new_value = type(self)(v) elif isinstance(v, list): new_value = [(type(self)(e) if isinstance(e, dict) else e) for e in v] setattr(self, k, new_value)
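Since only the method is shown, a hypothetical host class (an assumption, not the source project's) illustrates the recursive dict-to-attribute mapping:

import six

class AttrObject(object):
    """Hypothetical host class; the entry above only shows _update itself."""
    def __init__(self, data=None):
        if data:
            self._update(data)
    _update = _update   # bind the function above as a method for the demo

obj = AttrObject({'name': 'job', 'meta': {'owner': 'ada'}, 'runs': [{'id': 1}]})
print(obj.name, obj.meta.owner, obj.runs[0].id)   # job ada 1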
java
public OpportunitiesTasksResponse getOpportunitiesTasksTaskId(Integer taskId, String datasource, String ifNoneMatch) throws ApiException { ApiResponse<OpportunitiesTasksResponse> resp = getOpportunitiesTasksTaskIdWithHttpInfo(taskId, datasource, ifNoneMatch); return resp.getData(); }
python
def get_workunit(self, ignore_list=None): """ Gets a new unit of work. Args: ignore_list: list(str) A list of filenames which should be ignored. Defaults to None. Returns: new_workunit: WorkUnit A new unit of work that has not yet been processed. A lock on it has been acquired. Raises: NoAvailableWorkException There is no more work available. """ if ignore_list is None: ignore_list = [] potential_files = self.get_potential_files(ignore_list) while len(potential_files) > 0: potential_file = self.select_potential_file(potential_files) potential_files.remove(potential_file) if self._filter(potential_file): continue if self.directory_context.get_file_size(potential_file) == 0: continue if self.progress_manager.is_done(potential_file): self._done.append(potential_file) continue else: try: self.progress_manager.lock(potential_file) except FileLockedException: continue self._already_fetched.append(potential_file) return self.builder.build_workunit( self.directory_context.get_full_path(potential_file)) logger.info("No eligible workunits remain to be fetched.") raise NoAvailableWorkException()
python
def _generate_command(self, func, name=None, **kwargs):
    """Generates a command parser for the given func.

    :param func: function to generate a command parser for
    :type func: function
    :param name: command name
    :type name: str
    :param **kwargs: keyword arguments passed through to
        :py:class:``argparse.ArgumentParser.add_parser``
    :type **kwargs: dict
    """
    func_pointer = name or func.__name__
    storm_config = get_storm_config()
    aliases = None
    if 'aliases' in storm_config:
        for command, alias_list in \
                six.iteritems(storm_config.get("aliases")):
            if func_pointer == command:
                aliases = alias_list
                break

    func_help = func.__doc__ and func.__doc__.strip()
    subparser = self.subparsers.add_parser(name or func.__name__,
                                           aliases=aliases,
                                           help=func_help)
    spec = inspect.getargspec(func)
    opts = reversed(list(izip_longest(reversed(spec.args or []),
                                      reversed(spec.defaults or []),
                                      fillvalue=self._POSITIONAL())))
    for k, v in opts:
        argopts = getattr(func, 'argopts', {})
        args, kwargs = argopts.get(k, ([], {}))
        args = list(args)
        is_positional = isinstance(v, self._POSITIONAL)
        options = [arg for arg in args if arg.startswith('-')]
        if isinstance(v, list):
            kwargs.update({
                'action': 'append',
            })
        if is_positional:
            if options:
                args = options
                kwargs.update({'required': True, 'dest': k})
            else:
                args = [k]
        else:
            args = options or ['--%s' % k]
            kwargs.update({'default': v, 'dest': k})
        subparser.add_argument(*args, **kwargs)

    subparser.set_defaults(**{self._COMMAND_FLAG: func})
    return func
java
public static <T> Id<T> newId(final TypeToken<T> type) { return (Id<T>) Ids.newId(type.getType()); }
java
public int getPredictedClass(){ if(predictedClass == -1){ if(classPredictions.rank() == 1){ predictedClass = classPredictions.argMax().getInt(0); } else { // ravel in case we get a column vector, or rank 2 row vector, etc predictedClass = classPredictions.ravel().argMax().getInt(0); } } return predictedClass; }
python
def logout(self): """Logout from anaconda cloud.""" logger.debug('Logout') method = self._anaconda_client_api.remove_authentication return self._create_worker(method)
python
def parse_session_cookie(cookie_to_cook):
    """ Extract the remi session id from an HTTP 'cookie' header value.

    cookie_to_cook = http_header['cookie']
    """
    #print("cookie_to_cook: %s"%str(cookie_to_cook))
    session_value = None
    tokens = cookie_to_cook.split(";")
    for tok in tokens:
        if 'remi_session=' in tok:
            #print("found session id: %s"%str(tok))
            try:
                session_value = int(tok.replace('remi_session=', ''))
            except ValueError:
                pass
    return session_value
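A quick illustration of the expected header shape (the cookie values are invented):

header = 'theme=dark; remi_session=12345; lang=en'
print(parse_session_cookie(header))        # 12345
print(parse_session_cookie('theme=dark'))  # None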
python
def surveys(self):
    """ Returns the list of time steps at which survey measures have been captured,
    or None if the xml document is malformed
    xpath: /scenario/monitoring/survey
    https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#survey-times-time-steps
    """
    survey_time_list = list()

    # Extract surveyTimes from /scenario/monitoring/surveys section
    # Using root element instead of xpath to avoid problems with namespaces
    # (root tag was <scenario> prior to schema 32, and then it was switched to <om:scenario>)
    try:
        for item in self.et.find("surveys").findall("surveyTime"):
            # Converting to float first to allow values like 730.0
            survey_time_list.append(int(float(item.text)))
    except AttributeError:
        return None
    return survey_time_list
java
private Object decodeResult(IoBuffer data) { log.debug("decodeResult - data limit: {}", (data != null ? data.limit() : 0)); processHeaders(data); int count = data.getUnsignedShort(); if (count != 1) { throw new RuntimeException("Expected exactly one result but got " + count); } Input input = new Input(data); String target = input.getString(); // expect "/onResult" log.debug("Target: {}", target); String nullString = input.getString(); // expect "null" log.debug("Null string: {}", nullString); // Read return value return Deserializer.deserialize(input, Object.class); }
java
public float[][] getPhi() { float[][] phi = new float[K][V]; if (SAMPLE_LAG > 0) { for (int k = 0; k < K; k++) { for (int w = 0; w < V; w++) { phi[k][w] = phisum[k][w] / numstats; } } } else { for (int k = 0; k < K; k++) { for (int w = 0; w < V; w++) { phi[k][w] = (word_topic_matrix[w][k] + beta) / (nwsum[k] + V * beta); } } } return phi; }
python
def savestate(self, state): """Save the sampler's state in a state.txt file.""" oldstate = np.get_printoptions() np.set_printoptions(threshold=1e6) try: with open(os.path.join(self._directory, 'state.txt'), 'w') as f: print_(state, file=f) finally: np.set_printoptions(**oldstate)
java
private GroupElement find_i(final String n, final boolean fwd) { if (name_equals(n)) { return this; } GroupElement e = null; Iterator it = elements.iterator(); while (it.hasNext()) { e = (GroupElement) it.next(); if (e.name_equals(n)) { return e; } } if (fwd) { it = elements.iterator(); while (it.hasNext()) { e = ((GroupElement) it.next()).find(n, fwd); if (e != null) { return e; } } } return null; }
java
public synchronized void removeExpired() { final List<String> removeLockList = new ArrayList<String>(); for (LockData lock : getLockList()) { if (!lock.isSessionScoped() && lock.getTimeToDeath() < 0) { removeLockList.add(lock.getNodeIdentifier()); } } Collections.sort(removeLockList); for (String rLock : removeLockList) { removeLock(rLock); } }
python
def gen_gmfs(self):
    """
    Compute the GMFs for the given realizations and yield arrays of
    dtype gmv_dt with records (rlzi, sid, eid, gmv), one per rupture
    """
    self.sig_eps = []
    for computer in self.computers:
        rup = computer.rupture
        sids = computer.sids
        eids_by_rlz = rup.get_eids_by_rlz(self.rlzs_by_gsim)
        data = []
        for gs, rlzs in self.rlzs_by_gsim.items():
            num_events = sum(len(eids_by_rlz[rlzi]) for rlzi in rlzs)
            if num_events == 0:
                continue
            # NB: the trick for performance is to keep the call to
            # compute.compute outside of the loop over the realizations
            # it is better to have few calls producing big arrays
            array, sig, eps = computer.compute(gs, num_events)
            array = array.transpose(1, 0, 2)  # from M, N, E to N, M, E
            for i, miniml in enumerate(self.min_iml):  # gmv < minimum
                arr = array[:, i, :]
                arr[arr < miniml] = 0
            n = 0
            for rlzi in rlzs:
                eids = eids_by_rlz[rlzi]
                e = len(eids)
                if not e:
                    continue
                for ei, eid in enumerate(eids):
                    gmf = array[:, :, n + ei]  # shape (N, M)
                    tot = gmf.sum(axis=0)  # shape (M,)
                    if not tot.sum():
                        continue
                    sigmas = sig[:, n + ei]
                    self.sig_eps.append((eid, sigmas, eps[:, n + ei]))
                    for sid, gmv in zip(sids, gmf):
                        if gmv.sum():
                            data.append((rlzi, sid, eid, gmv))
                n += e
        yield numpy.array(data, self.gmv_dt)
python
def load_data_split(proc_data_dir): """Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data) """ ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin')) ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin')) ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin')) return ds_train, ds_val, ds_test
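A usage sketch, under the assumption that the directory was produced by a matching Dataset.save() step and that Dataset supports len():

# Hypothetical layout: 'data/processed' holds train.bin / val.bin / test.bin.
ds_train, ds_val, ds_test = load_data_split('data/processed')
print(len(ds_train), len(ds_val), len(ds_test))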
java
public static ClassFileVersion ofMinorMajor(int versionNumber) { ClassFileVersion classFileVersion = new ClassFileVersion(versionNumber); if (classFileVersion.getMajorVersion() <= BASE_VERSION) { throw new IllegalArgumentException("Class version " + versionNumber + " is not valid"); } return classFileVersion; }
python
def to_pn(self, sub_letter=None):
    """
    Returns the part-number equivalent. For instance, a '1k' stays '1k',
    but a '1.2k' becomes '1k2'.

    :param sub_letter: optional letter to substitute for the decimal point
        when the value has no scaling suffix (e.g. 'R' turns '4.7' into '4R7')
    :return: the part-number style string
    """
    string = str(self)
    if '.' not in string:
        return string

    # take care of the case of when there is no scaling unit
    if not string[-1].isalpha():
        if sub_letter is not None:
            return string.replace('.', sub_letter)

        return string

    letter = string[-1]
    return string.replace('.', letter)[:-1]
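To exercise the method in isolation, a hypothetical stand-in class binds it to an object whose str() mimics the engineering notation:

class _Num(object):
    """Hypothetical stand-in; the entry above only shows to_pn itself."""
    def __init__(self, text):
        self._text = text
    def __str__(self):
        return self._text
    to_pn = to_pn   # bind the method above for the demo

print(_Num('1k').to_pn())       # 1k   (no decimal point, unchanged)
print(_Num('1.2k').to_pn())     # 1k2  (scale letter replaces the point)
print(_Num('4.7').to_pn('R'))   # 4R7  (sub_letter used when no scale suffix)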
java
private void loadTemplates( Element parent ) { // Load templates List templates = DomUtils.getChildElementsByName( parent, URL_TEMPLATE ); for ( int i = 0; i < templates.size(); i++ ) { Element template = ( Element ) templates.get( i ); String name = getElementText( template, NAME ); if ( name == null ) { _log.error( "Malformed URL template descriptor in " + _configFilePath + ". The url-template name is missing." ); continue; } String value = getElementText( template, VALUE ); if ( value == null ) { _log.error( "Malformed URL template descriptor in " + _configFilePath + ". The url-template value is missing for template " + name ); continue; } if ( _log.isDebugEnabled() ) { _log.debug( "[URLTemplate] " + name + " = " + value ); } URLTemplate urlTemplate = new URLTemplate( value, name ); if ( urlTemplate.verify( _knownTokens, _requiredTokens ) ) { _urlTemplates.addTemplate( name, urlTemplate ); } } }