language | func_code_string |
---|---|
python | def identifier(self):
"""Get the identifier for this node.
Extended keys can be identified by the Hash160 (RIPEMD160 after SHA256)
of the public key's `key`. This corresponds exactly to the data used in
traditional Bitcoin addresses. It is not advised to represent this data
in base58 format though, as it may be interpreted as an address that
way (and wallet software is not required to accept payment to the chain
key itself).
"""
key = self.get_public_key_hex()
return ensure_bytes(hexlify(hash160(unhexlify(ensure_bytes(key))))) |
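A standalone sketch of the Hash160 construction the docstring describes (RIPEMD160 over SHA256), using only `hashlib` rather than this module's own `hash160`/`ensure_bytes` helpers; the sample key below is the well-known compressed secp256k1 generator point, used purely for illustration:

```python
import hashlib
from binascii import hexlify, unhexlify

def hash160_sketch(pubkey_hex):
    # SHA256 first, then RIPEMD160 over the digest.
    sha = hashlib.sha256(unhexlify(pubkey_hex)).digest()
    # 'ripemd160' is available when the underlying OpenSSL build provides it.
    ripe = hashlib.new('ripemd160', sha).digest()
    return hexlify(ripe)

print(hash160_sketch(b'0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'))
```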
python | def add_file(self, kitchen, recipe, message, api_file_key, file_contents):
"""
returns a DKReturnCode: DK_SUCCESS on success or DK_FAIL on failure
'/v2/recipe/create/<string:kitchenname>/<string:recipename>', methods=['PUT']
:param self: DKCloudAPI
:param kitchen: basestring
:param recipe: basestring -- recipe name
:param message: basestring -- commit message
:param api_file_key: basestring -- file name and path, relative to the recipe root
:param file_contents: basestring -- contents of the recipe file to add
:rtype: boolean
"""
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with kitchen parameter')
return rc
if recipe is None or isinstance(recipe, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with recipe parameter')
return rc
if api_file_key is None or isinstance(api_file_key, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with api_file_key parameter')
return rc
if file_contents is None or isinstance(file_contents, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with file_contents parameter')
return rc
pdict = dict()
pdict[self.MESSAGE] = message
pdict[self.FILEPATH] = api_file_key
pdict[self.FILE] = file_contents
url = '%s/v2/recipe/create/%s/%s' % (self.get_url_for_direct_rest_call(), kitchen, recipe)
try:
response = requests.put(url, data=json.dumps(pdict), headers=self._get_common_headers())
rdict = self._get_json(response)
except (RequestException, ValueError, TypeError) as c:
s = "add_file: exception: %s" % str(c)
rc.set(rc.DK_FAIL, s)
return rc
if DKCloudAPI._valid_response(response):
rc.set(rc.DK_SUCCESS, None)
else:
arc = DKAPIReturnCode(rdict, response)
rc.set(rc.DK_FAIL, arc.get_message())
return rc |
java | public final void addAreaListener(@NonNull final AreaListener listener) {
Condition.INSTANCE.ensureNotNull(listener, "The listener may not be null");
this.areaListeners.add(listener);
} |
java | public float getSpanPercent(ILexNameToken name)
{
int hits = 0;
int misses = 0;
ILexLocation span = null;
synchronized (nameSpans)
{
span = nameSpans.get(name);
}
synchronized (allLocations)
{
for (ILexLocation l : allLocations)
{
if (l.getExecutable() && l.within(span))
{
if (l.getHits() > 0)
{
hits++;
} else
{
misses++;
}
}
}
}
int sum = hits + misses;
return sum == 0 ? 0 : (float) (1000 * hits / sum) / 10; // NN.N%
} |
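The final expression relies on Java integer division: `1000 * hits / sum` truncates to whole tenths of a percent before the divide by 10 restores one decimal place. The same trick in Python, for reference:

```python
# Integer division first keeps exactly one decimal place (NN.N%).
hits, total = 7, 13
percent = (1000 * hits // total) / 10  # -> 53.8
```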
java | public BatchReadResult withResponses(BatchReadOperationResponse... responses) {
if (this.responses == null) {
setResponses(new java.util.ArrayList<BatchReadOperationResponse>(responses.length));
}
for (BatchReadOperationResponse ele : responses) {
this.responses.add(ele);
}
return this;
} |
java | public void setHashFunction(HashFunction<byte[]> hashFunction) {
if(hashFunction == null) {
throw new IllegalArgumentException("Invalid hashFunction: " + hashFunction);
}
this._hashFunction = hashFunction;
this._properties.setProperty(PARAM_HASH_FUNCTION_CLASS, hashFunction.getClass().getName());
} |
java | public void run(Collection<Runnable> tasks) {
// Create a latch that each wrapped runnable counts down when it finishes
int numTasks = tasks.size();
CountDownLatch latch = new CountDownLatch(numTasks);
for (Runnable r : tasks) {
if (r == null)
throw new NullPointerException("Cannot run null tasks");
workQueue.offer(new CountingRunnable(r, latch));
}
try {
// Wait until all the tasks have finished
latch.await();
}
catch (InterruptedException ie) {
throw new IllegalStateException("Not all tasks finished", ie);
}
} |
python | def unregister(self, cleanup_mode):
"""Unregisters a machine previously registered with
:py:func:`IVirtualBox.register_machine` and optionally performs additional
cleanup before the machine is unregistered.
This method does not delete any files. It only changes the machine configuration and
the list of registered machines in the VirtualBox object. To delete the files which
belonged to the machine, including the XML file of the machine itself, call
:py:func:`delete_config` , optionally with the array of IMedium objects which was returned
from this method.
How thoroughly this method cleans up the machine configuration before unregistering
the machine depends on the @a cleanupMode argument.
With "UnregisterOnly", the machine will only be unregistered, but no additional
cleanup will be performed. The call will fail if the machine is in "Saved" state
or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ).
It is the responsibility of the caller to delete all such configuration in this mode.
In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API
which it replaces.
With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved"
state or if it has snapshots or media attached. All media attached to the current machine
state or in snapshots will be detached. No medium objects will be returned;
all of the machine's media will remain open.
With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone",
except that all the hard disk medium objects which were detached from the machine will
be returned as an array. This allows for quickly passing them to the :py:func:`delete_config`
API for closing and deletion.
With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except
that all media will be returned in the array, including removable media like DVDs and
floppies. This might be useful if the user wants to inspect in detail which media were
attached to the machine. Be careful when passing the media array to :py:func:`delete_config`
in that case because users will typically want to preserve ISO and RAW image files.
A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the
resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely
deleted with all its saved states and hard disk images, but images for removable
drives (such as ISO and RAW files) will remain on disk.
This API does not verify whether the media files returned in the array are still
attached to other machines (i.e. shared between several machines). If such a shared
image is passed to :py:func:`delete_config` however, closing the image will fail there
and the image will be silently skipped.
This API may, however, move media from this machine's media registry to other media
registries (see :py:class:`IMedium` for details on media registries). For machines
created with VirtualBox 4.0 or later, if media from this machine's media registry
are also attached to another machine (shared attachments), each such medium will be
moved to another machine's registry. This is because without this machine's media
registry, the other machine cannot find its media any more and would become inaccessible.
This API implicitly calls :py:func:`save_settings` to save all current machine settings
before unregistering it. It may also silently call :py:func:`save_settings` on other machines
if media are moved to other machines' media registries.
After successful method invocation, the :py:class:`IMachineRegisteredEvent` event
is fired.
The call will fail if the machine is currently locked (see :py:class:`ISession` ).
If the given machine is inaccessible (see :py:func:`accessible` ), it
will be unregistered and fully uninitialized right afterwards. As a result,
the returned machine object will be unusable and an attempt to call
**any** method will return the "Object not ready" error.
in cleanup_mode of type :class:`CleanupMode`
How to clean up after the machine has been unregistered.
return media of type :class:`IMedium`
List of media detached from the machine, depending on the @a cleanupMode parameter.
raises :class:`VBoxErrorInvalidObjectState`
Machine is currently locked for a session.
"""
if not isinstance(cleanup_mode, CleanupMode):
raise TypeError("cleanup_mode can only be an instance of type CleanupMode")
media = self._call("unregister",
in_p=[cleanup_mode])
media = [IMedium(a) for a in media]
return media |
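A hedged usage sketch of the "typical implementation" the docstring recommends, written against the pyvbox bindings; the machine name is hypothetical, and the enum/method names below are assumptions based on that package rather than confirmed by this snippet:

```python
import virtualbox

vbox = virtualbox.VirtualBox()
machine = vbox.find_machine("my-vm")  # hypothetical machine name
# Detach everything, collecting only hard-disk media for deletion:
media = machine.unregister(
    virtualbox.library.CleanupMode.detach_all_return_hard_disks_only)
# Delete saved states and hard-disk images; ISO/RAW images stay on disk.
progress = machine.delete_config(media)
progress.wait_for_completion(-1)
```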
java | Rule Gracing() {
return FirstOf(String("."),
String("!>!"),
String("!<!"),
String("!+!"),
String("+>+"),
String("+<+"),
SequenceS(String("!"),
OptionalS(AnyOf("^<>_@").label(Position)),
LongGracing(),
String("!")),
//for compatibility
SequenceS(String("+"), LongGracing(), String("+")),
UserdefSymbol()
).label(Gracing);
} |
python | def colorize_log_entry(self, string):
"""
Apply various heuristics to return a colorized version of a string.
If these fail, simply return the string in plaintext.
"""
final_string = string
try:
# First, do stuff in square brackets
inside_squares = re.findall(r'\[([^]]*)\]', string)
for token in inside_squares:
if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan'))
else:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan'))
# Then do quoted strings
quotes = re.findall(r'"[^"]*"', string)
for token in quotes:
final_string = final_string.replace(token, click.style(token, fg="yellow"))
# And UUIDs
for token in final_string.replace('\t', ' ').split(' '):
try:
if token.count('-') == 4 and token.replace('-', '').isalnum():
final_string = final_string.replace(token, click.style(token, fg="magenta"))
except Exception: # pragma: no cover
pass
# And IP addresses
try:
if token.count('.') == 3 and token.replace('.', '').isnumeric():
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And status codes
try:
if token in ['200']:
final_string = final_string.replace(token, click.style(token, fg="green"))
if token in ['400', '401', '403', '404', '405', '500']:
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And Zappa Events
try:
if "Zappa Event:" in final_string:
final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green"))
except Exception: # pragma: no cover
pass
# And dates
for token in final_string.split('\t'):
try:
is_date = parser.parse(token)
final_string = final_string.replace(token, click.style(token, fg="green"))
except Exception: # pragma: no cover
pass
final_string = final_string.replace('\t', ' ').replace(' ', ' ')
if final_string and final_string[0] != ' ':
final_string = ' ' + final_string
return final_string
except Exception as e: # pragma: no cover
return string |
java | protected Coordinate getWorldPosition(MouseEvent<?> event) {
Coordinate world = super.getWorldPosition(event);
if (snappingActive) {
return snapper.snap(world);
}
return world;
} |
java | public WePayOrder queryByTransactionId(String transactionId){
checkNotNullAndEmpty(transactionId, "transactionId");
Map<String, String> queryParams = new TreeMap<>();
put(queryParams, WepayField.TRANSACTION_ID, transactionId);
return doQueryOrder(queryParams);
} |
java | @Inline(value = "VMCommandLine.launchVMWithClassPath(($1), System.getProperty(\"java.class.path\"), ($2))",
imported = {VMCommandLine.class}, statementExpression = true)
public static Process launchVM(String classToLaunch, String... additionalParams) throws IOException {
return launchVMWithClassPath(
classToLaunch,
System.getProperty("java.class.path"), //$NON-NLS-1$
additionalParams);
} |
java | private void put(
K key,
Refresher<? super K,? extends V,? extends E> refresher,
Result<V,E> result
) {
CacheEntry entry = new CacheEntry(key, refresher, result);
map.put(key, entry);
timer.schedule(entry, new Date(System.currentTimeMillis() + refreshInterval), refreshInterval);
} |
java | public static <T> Single<Boolean> put(CacheConfigBean cacheConfigBean, String key, String field, T value) {
return SingleRxXian.call(CacheService.CACHE_SERVICE, "cacheMapPut", new JSONObject() {{
put("cacheConfig", cacheConfigBean);
put("key", key);
put("field", field);
put("value", value);
}}).map(unitResponse -> {
unitResponse.throwExceptionIfNotSuccess();
return unitResponse.succeeded();
});
} |
java | @VisibleForTesting
public static FileSystemContext create(Subject subject, MasterInquireClient masterInquireClient,
AlluxioConfiguration alluxioConf) {
FileSystemContext context = new FileSystemContext(subject, alluxioConf);
context.init(masterInquireClient);
return context;
} |
java | @Override
public UpdateIntegrationResult updateIntegration(UpdateIntegrationRequest request) {
request = beforeClientExecution(request);
return executeUpdateIntegration(request);
} |
java | public Vector2d mulDirection(Matrix3x2dc mat, Vector2d dest) {
double rx = mat.m00() * x + mat.m10() * y;
double ry = mat.m01() * x + mat.m11() * y;
dest.x = rx;
dest.y = ry;
return dest;
} |
python | async def connect_controller(self, controller_name=None):
"""Connect to a controller by name. If the name is empty, it
connects to the current controller.
"""
if not controller_name:
controller_name = self.jujudata.current_controller()
if not controller_name:
raise JujuConnectionError('No current controller')
controller = self.jujudata.controllers()[controller_name]
# TODO change Connection so we can pass all the endpoints
# instead of just the first.
endpoint = controller['api-endpoints'][0]
accounts = self.jujudata.accounts().get(controller_name, {})
await self.connect(
endpoint=endpoint,
uuid=None,
username=accounts.get('user'),
password=accounts.get('password'),
cacert=controller.get('ca-cert'),
bakery_client=self.bakery_client_for_controller(controller_name),
)
self.controller_name = controller_name |
java | @Override
public void update(IPermission perm) throws AuthorizationException {
Connection conn = null;
try {
conn = RDBMServices.getConnection();
String sQuery = getUpdatePermissionSql();
if (log.isDebugEnabled()) log.debug("RDBMPermissionImpl.update(): " + sQuery);
PreparedStatement ps = conn.prepareStatement(sQuery);
try {
primUpdate(perm, ps);
} finally {
ps.close();
}
} catch (Exception ex) {
log.error("Exception updating permission [" + perm + "]", ex);
throw new AuthorizationException("Problem updating Permission " + perm);
} finally {
RDBMServices.releaseConnection(conn);
}
} |
python | def autodiscover_modules():
"""
Goes and imports the permissions submodule of every app in INSTALLED_APPS
to make sure the permission set classes are registered correctly.
"""
import imp
from django.conf import settings
for app in settings.INSTALLED_APPS:
try:
__import__(app)
app_path = sys.modules[app].__path__
except AttributeError:
continue
try:
imp.find_module('permissions', app_path)
except ImportError:
continue
__import__("%s.permissions" % app)
app_path = sys.modules["%s.permissions" % app]
LOADING = False |
python | def _handle_display_data(self, msg):
"""
Reimplemented to handle communications between the figure explorer
and the kernel.
"""
img = None
data = msg['content']['data']
if 'image/svg+xml' in data:
fmt = 'image/svg+xml'
img = data['image/svg+xml']
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
fmt = 'image/png'
img = decodestring(data['image/png'].encode('ascii'))
elif 'image/jpeg' in data and self._jpg_supported:
fmt = 'image/jpeg'
img = decodestring(data['image/jpeg'].encode('ascii'))
if img is not None:
self.sig_new_inline_figure.emit(img, fmt)
if (self.figurebrowser is not None and
self.figurebrowser.mute_inline_plotting):
if not self.sended_render_message:
msg['content']['data']['text/plain'] = ''
self._append_html(
_('<br><hr>'
'\nFigures now render in the Plots pane by default. '
'To make them also appear inline in the Console, '
'uncheck "Mute Inline Plotting" under the Plots '
'pane options menu. \n'
'<hr><br>'), before_prompt=True)
self.sended_render_message = True
else:
msg['content']['data']['text/plain'] = ''
del msg['content']['data'][fmt]
return super(FigureBrowserWidget, self)._handle_display_data(msg) |
python | def get(self, key, default=None):
"""Get value with optional default."""
try:
key = self.get_key(key)
except KeyError:
return default
return super(DatasetDict, self).get(key, default) |
python | def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
if not os.path.exists('/etc/rc.modules'):
open('/etc/rc.modules', 'a')
os.chmod('/etc/rc.modules', 0o111)  # octal execute bits; decimal 111 would set the wrong mode
with open('/etc/rc.modules', 'r+') as modules:
if module not in modules.read():
modules.write('modprobe %s\n' % module) |
python | def list_catalogs(results=30, start=0):
"""
Returns list of all catalogs created on this API key
Args:
Kwargs:
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of catalog objects
Example:
>>> catalog.list_catalogs()
[<catalog - test_artist_catalog>, <catalog - test_song_catalog>, <catalog - my_songs>]
>>>
"""
result = util.callm("%s/%s" % ('catalog', 'list'), {'results': results, 'start': start})
cats = [Catalog(**util.fix(d)) for d in result['response']['catalogs']]
start = result['response']['start']
total = result['response']['total']
return ResultList(cats, start, total) |
python | def _set_system_monitor_mail(self, v, load=False):
"""
Setter method for system_monitor_mail, mapped from YANG variable /system_monitor_mail (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_monitor_mail is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_monitor_mail() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=system_monitor_mail.system_monitor_mail, is_container='container', presence=False, yang_name="system-monitor-mail", rest_name="system-monitor-mail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRU mail setting', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """system_monitor_mail must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=system_monitor_mail.system_monitor_mail, is_container='container', presence=False, yang_name="system-monitor-mail", rest_name="system-monitor-mail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure FRU mail setting', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__system_monitor_mail = t
if hasattr(self, '_set'):
self._set() |
python | def actionAngle_physical_input(method):
"""Decorator to convert inputs to actionAngle functions from physical
to internal coordinates"""
@wraps(method)
def wrapper(*args,**kwargs):
if len(args) < 3: # orbit input
return method(*args,**kwargs)
ro= kwargs.get('ro',None)
if ro is None and hasattr(args[0],'_ro'):
ro= args[0]._ro
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
vo= kwargs.get('vo',None)
if vo is None and hasattr(args[0],'_vo'):
vo= args[0]._vo
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
# Loop through args
newargs= ()
for ii in range(len(args)):
if _APY_LOADED and isinstance(args[ii],units.Quantity):
try:
targ= args[ii].to(units.kpc).value/ro
except units.UnitConversionError:
try:
targ= args[ii].to(units.km/units.s).value/vo
except units.UnitConversionError:
try:
targ= args[ii].to(units.rad).value
except units.UnitConversionError:
raise units.UnitConversionError("Input units not understood")
newargs= newargs+(targ,)
else:
newargs= newargs+(args[ii],)
args= newargs
return method(*args,**kwargs)
return wrapper |
python | def to_digital(d, num):
"""
Base conversion: convert ``d`` from base 10 to the base given by ``num``.
:param d: decimal integer to convert
:param num: target base (2 to 9)
:return: string form of ``d`` in the target base
"""
if not isinstance(num, int) or not 1 < num < 10:
raise ValueError('digital num must be between 2 and 9')
d = int(d)
result = []
x = d % num
d = d - x
result.append(str(x))
while d > 0:
d = d // num
x = d % num
d = d - x
result.append(str(x))
return ''.join(result[::-1]) |
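Two quick hand-checked values for the converter above:

```python
assert to_digital(10, 2) == '1010'   # ten in binary
assert to_digital(255, 8) == '377'   # 255 in octal
```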
java | private int nextNonWhitespace(boolean throwOnEof)
{
/*
* This code uses ugly local variables 'p' and 'l' representing the 'pos'
* and 'limit' fields respectively. Using locals rather than fields saves
* a few field reads for each whitespace character in a pretty-printed
* document, resulting in a 5% speedup. We need to flush 'p' to its field
* before any (potentially indirect) call to fillBuffer() and reread both
* 'p' and 'l' after any (potentially indirect) call to the same method.
*/
char[] buffer = this.buffer;
int p = pos;
int l = limit;
while (true) {
if (p == l) {
pos = p;
if (!fillBuffer(1)) {
break;
}
p = pos;
l = limit;
}
int c = buffer[p++];
if (c == '\n') {
lineNumber++;
lineStart = p;
continue;
} else if (c == ' ' || c == '\r' || c == '\t') {
continue;
}
if (c == '/') {
pos = p;
if (p == l) {
pos--; // push back '/' so it's still in the buffer when this method returns
boolean charsLoaded = fillBuffer(2);
pos++; // consume the '/' again
if (!charsLoaded) {
return c;
}
}
checkLenient();
char peek = buffer[pos];
switch (peek) {
case '*':
// skip a /* c-style comment */
pos++;
if (!skipTo("*/")) {
throw syntaxError("Unterminated comment");
}
p = pos + 2;
l = limit;
continue;
case '/':
// skip a // end-of-line comment
pos++;
skipToEndOfLine();
p = pos;
l = limit;
continue;
default:
return c;
}
} else if (c == '#') {
pos = p;
/*
* Skip a # hash end-of-line comment. The JSON RFC doesn't
* specify this behaviour, but it's required to parse
* existing documents. See http://b/2571423.
*/
checkLenient();
skipToEndOfLine();
p = pos;
l = limit;
} else {
pos = p;
return c;
}
}
if (throwOnEof) {
String mess = "End of input at line " + getLineNumber() + " column " + getColumnNumber();
logger.log(Level.SEVERE, mess);
throw new JsonDeserializationException(mess);
} else {
return -1;
}
} |
python | def convert(libsvm_model, feature_names, target, input_length, probability):
"""Convert a svm model to the protobuf spec.
This currently supports:
* C-SVC
* nu-SVC
* Epsilon-SVR
* nu-SVR
Parameters
----------
libsvm_model: svm_model
Libsvm representation of the model.
feature_names : [str] | str
Names of each of the features.
target: str
Name of the predicted class column.
input_length: int | 'auto'
Length of the input array, when a single array input is used.
probability: str
Name of the class probability column. Only used for C-SVC and nu-SVC.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
"""
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
import svm as libsvm
from ...proto import SVM_pb2
from ...proto import Model_pb2
from ...proto import FeatureTypes_pb2
from ...models import MLModel
svm_type_enum = libsvm_model.param.svm_type
# Create the spec
export_spec = Model_pb2.Model()
export_spec.specificationVersion = SPECIFICATION_VERSION
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
svm = export_spec.supportVectorRegressor
else:
svm = export_spec.supportVectorClassifier
# Set the features names
inferred_length = _infer_min_num_features(libsvm_model)
if isinstance(feature_names, str):
# input will be a single array
if input_length == 'auto':
print("[WARNING] Infering an input length of %d. If this is not correct,"
" use the 'input_length' parameter." % inferred_length)
input_length = inferred_length
elif inferred_length > input_length:
raise ValueError("An input length of %d was given, but the model requires an"
" input of at least %d." % (input_length, inferred_length))
input = export_spec.description.input.add()
input.name = feature_names
input.type.multiArrayType.shape.append(input_length)
input.type.multiArrayType.dataType = Model_pb2.ArrayFeatureType.DOUBLE
else:
# input will be a series of doubles
if inferred_length > len(feature_names):
raise ValueError("%d feature names were given, but the model requires at"
" least %d features." % (len(feature_names), inferred_length))
for cur_input_name in feature_names:
input = export_spec.description.input.add()
input.name = cur_input_name
input.type.doubleType.MergeFromString(b'')
# Set target
output = export_spec.description.output.add()
output.name = target
# Set the interface types
if(svm_type_enum == libsvm.EPSILON_SVR or svm_type_enum == libsvm.NU_SVR):
export_spec.description.predictedFeatureName = target
output.type.doubleType.MergeFromString(b'')
nr_class = 2
elif(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
export_spec.description.predictedFeatureName = target
output.type.int64Type.MergeFromString(b'')
nr_class = len(libsvm_model.get_labels())
for i in range(nr_class):
svm.numberOfSupportVectorsPerClass.append(libsvm_model.nSV[i])
svm.int64ClassLabels.vector.append(libsvm_model.label[i])
if probability and bool(libsvm_model.probA):
output = export_spec.description.output.add()
output.name = probability
output.type.dictionaryType.MergeFromString(b'')
output.type.dictionaryType.int64KeyType.MergeFromString(b'')
export_spec.description.predictedProbabilitiesName = probability
else:
raise ValueError('Only the following SVM types are supported: C_SVC, NU_SVC, EPSILON_SVR, NU_SVR')
if(libsvm_model.param.kernel_type == libsvm.LINEAR):
svm.kernel.linearKernel.MergeFromString(b'') # Hack to set kernel to an empty type
elif(libsvm_model.param.kernel_type == libsvm.RBF):
svm.kernel.rbfKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.POLY):
svm.kernel.polyKernel.degree = libsvm_model.param.degree
svm.kernel.polyKernel.c = libsvm_model.param.coef0
svm.kernel.polyKernel.gamma = libsvm_model.param.gamma
elif(libsvm_model.param.kernel_type == libsvm.SIGMOID):
svm.kernel.sigmoidKernel.c = libsvm_model.param.coef0
svm.kernel.sigmoidKernel.gamma = libsvm_model.param.gamma
else:
raise ValueError('Unsupported kernel. The following kernel are supported: linear, RBF, polynomial and sigmoid.')
# set rho
# also set probA/ProbB only for SVC
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
num_class_pairs = nr_class * (nr_class-1)//2
for i in range(num_class_pairs):
svm.rho.append(libsvm_model.rho[i])
if(bool(libsvm_model.probA) and bool(libsvm_model.probB)):
for i in range(num_class_pairs):
svm.probA.append(libsvm_model.probA[i])
svm.probB.append(libsvm_model.probB[i])
else:
svm.rho = libsvm_model.rho[0]
# set coefficients
if(svm_type_enum == libsvm.C_SVC or svm_type_enum == libsvm.NU_SVC):
for _ in range(nr_class - 1):
svm.coefficients.add()
for i in range(libsvm_model.l):
for j in range(nr_class - 1):
svm.coefficients[j].alpha.append(libsvm_model.sv_coef[j][i])
else:
for i in range(libsvm_model.l):
svm.coefficients.alpha.append(libsvm_model.sv_coef[0][i])
# set support vectors
for i in range(libsvm_model.l):
j = 0
cur_support_vector = svm.sparseSupportVectors.vectors.add()
while libsvm_model.SV[i][j].index != -1:
cur_node = cur_support_vector.nodes.add()
cur_node.index = libsvm_model.SV[i][j].index
cur_node.value = libsvm_model.SV[i][j].value
j += 1
return MLModel(export_spec) |
java | public List<JAXBElement<Object>> get_GenericApplicationPropertyOfTexture() {
if (_GenericApplicationPropertyOfTexture == null) {
_GenericApplicationPropertyOfTexture = new ArrayList<JAXBElement<Object>>();
}
return this._GenericApplicationPropertyOfTexture;
} |
java | public ServiceFuture<List<SourceControlInner>> listSourceControlsNextAsync(final String nextPageLink, final ServiceFuture<List<SourceControlInner>> serviceFuture, final ListOperationCallback<SourceControlInner> serviceCallback) {
return AzureServiceFuture.fromPageResponse(
listSourceControlsNextSinglePageAsync(nextPageLink),
new Func1<String, Observable<ServiceResponse<Page<SourceControlInner>>>>() {
@Override
public Observable<ServiceResponse<Page<SourceControlInner>>> call(String nextPageLink) {
return listSourceControlsNextSinglePageAsync(nextPageLink);
}
},
serviceCallback);
} |
python | def _get_fs(self, key: str, dms: bool = False) -> Union[str, float]:
"""Get float number.
:param key: Key (`lt` or `lg`).
:param dms: DMS format.
:return: Float number
"""
# Default range is a range of longitude.
rng = (-90, 90) if key == 'lt' else (-180, 180)
result = self.random.uniform(*rng, precision=6)
if dms:
return self._dd_to_dms(result, key)
return result |
java | private void checkFirmwareVersion() {
Log.i(TAG, "Checking Firmware version...");
setState(OADState.CHECKING_FW_VERSION);
mGattClient.getDeviceProfile().getFirmwareVersion(new DeviceProfile.VersionCallback() {
@Override
public void onComplete(String version) {
// Check the Bean version against the Bundle version
boolean updateNeeded = needsUpdate(firmwareBundle.version(), version);
if (updateNeeded && oadApproval.isApproved()) {
// Needs update and client has approved, keep the update going
startOfferingImages();
} else if (updateNeeded && !oadApproval.isApproved()) {
// Needs update but client has not approved, ask for approval
watchdog.pause();
oadListener.updateRequired(true);
} else if (!updateNeeded && !oadApproval.isApproved()){
// Does not need update and the client has never approved. This means
// no update is required, and no firmware update ever occurred
finishNoUpdateOccurred();
oadListener.updateRequired(false);
} else if (!updateNeeded && oadApproval.isApproved()) {
// Does not need update, and the client has approved. This means that
// the firmware process actually took place, and completed
finishUpdateOccurred();
} else {
Log.w(TAG, "Unexpected OAD Condition!");
}
}
});
} |
python | def _activity_helper(modifier: str, location=None):
"""Make an activity dictionary.
:param str modifier:
:param Optional[dict] location: An entity from :func:`pybel.dsl.entity`
:rtype: dict
"""
rv = {MODIFIER: modifier}
if location:
rv[LOCATION] = location
return rv |
java | public void set( long x ) {
CAT newcat = new CAT(null,4,x);
// Spin until CAS works
while( !CAS_cat(_cat,newcat) ) {/*empty*/}
} |
python | def _default_client(jws_client, reactor, key, alg):
"""
Make a client if we didn't get one.
"""
if jws_client is None:
pool = HTTPConnectionPool(reactor)
agent = Agent(reactor, pool=pool)
jws_client = JWSClient(HTTPClient(agent=agent), key, alg)
return jws_client |
python | def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray |
java | private static final boolean filterTerminal(Atom[] ca1, Atom[] ca2, int p1b, int p1e, int p2b, int p2e, int fragLen, int minLen)
{
int d1 = (p1b < p2b)?p1b:p2b;
int d2 = (ca1.length - p1e) < (ca2.length - p2e)?(ca1.length - p1e):(ca2.length - p2e);
int d3 = d1 + d2 + fragLen; //maximum alignment length from current AFP
/// DO NOT DO Math.round() this will give different results to FATCAT....
int d4 = (int)(0.3 * minLen);
return d3 < d4;
} |
java | public static void addThreadIdentityService(ThreadIdentityService tis) {
if (tis != null) {
threadIdentityServices.add(tis);
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
Tr.debug(tc, "A ThreadIdentityService implementation was added.", tis.getClass().getName());
}
}
} |
java | public ServiceFuture<CertificateBundle> mergeCertificateAsync(String vaultBaseUrl, String certificateName, List<byte[]> x509Certificates, final ServiceCallback<CertificateBundle> serviceCallback) {
return ServiceFuture.fromResponse(mergeCertificateWithServiceResponseAsync(vaultBaseUrl, certificateName, x509Certificates), serviceCallback);
} |
python | def modify(self, current_modified_line, anchors, file_path, file_lines=None,
index=None):
"""
Replace all AnchorHub tag-using inline links in this line and edit
them to use
:param current_modified_line: string representing the the line at
file_lines[index] _after_ any previous modifications from other
WriterStrategy objects:
:param anchors: Dictionary mapping string file paths to inner
dictionaries. These inner dictionaries map string AnchorHub tags
to string generated anchor
:param file_path: string representing the file_path of the current
file being examined by this WriterStrategy
:param file_lines: List of strings corresponding to lines in a text file
:param index: index of file_lines corresponding to the current line
:return: string. current_modified_line with all inline links that use
AnchorHub tags replaced with their associated generated anchors
"""
changed_line = "" # Will be built up piece by piece as we find links
links = self._get_link_indices(current_modified_line)
# Used to keep track of what we've parsed in current_modified_line
last_index = 0
for link in links:
# These indices are relative to current_modified_line
link_start_index = link[0] # Start index in current_modified_line
link_end_index = link[1] # End index in current_modified_line
link_text = current_modified_line[link_start_index:link_end_index]
# This index is relative to link_text
url_start = self._link_start_regex.search(link_text).end()
url_text = link_text[url_start:len(link_text) - 1].strip()
# This index is relative to url_text
hash_index = url_text.find('#') # index of '#' in url_text
link_path = url_text[:hash_index]
tag = url_text[hash_index + 1:]
if link_path == "":
# Link points to tag in this file
file_key = file_path
else:
file_key = self._get_file_key(file_path, link_path)
if self._file_has_tag_anchor_keypair(anchors, file_key, tag):
# The tag used on this link was specified as an AnchorHub tag
# Add existing text up to (and including) the # mark
changed_line += current_modified_line[last_index:
link_start_index +
url_start + hash_index+1]
# Add the generated anchor, plus a closing parenthesis
changed_line += anchors[file_key][tag] + ')'
else:
# The tag used is a normal anchor tag: don't change it
changed_line += current_modified_line[last_index:link_end_index]
last_index = link_end_index
# Add the end of the line back on
changed_line += current_modified_line[last_index:]
return changed_line |
python | def poll_parser(poll):
"""
Parses a poll object
"""
if __is_deleted(poll):
return deleted_parser(poll)
if poll['type'] not in poll_types:
raise Exception('Not a poll type')
return Poll(
poll['id'],
poll['by'],
__check_key('kids', poll), # poll and pollopt differ this property
__check_key('parts', poll), # poll and pollopt differ this property
poll['score'],
poll['text'],
poll['time'],
poll['title'],
poll['type'],
) |
java | private boolean runMulti(boolean auto, RateLimiter rateLimiter)
{
if (settings.command.targetUncertainty >= 0)
output.println("WARNING: uncertainty mode (err<) results in uneven workload between thread runs, so should be used for high level analysis only");
int prevThreadCount = -1;
int threadCount = settings.rate.minThreads;
List<StressMetrics> results = new ArrayList<>();
List<String> runIds = new ArrayList<>();
do
{
output.println(String.format("Running with %d threadCount", threadCount));
if (settings.command.truncate == SettingsCommand.TruncateWhen.ALWAYS)
settings.command.truncateTables(settings);
StressMetrics result = run(settings.command.getFactory(settings), threadCount, settings.command.count,
settings.command.duration, rateLimiter, settings.command.durationUnits, output);
if (result == null)
return false;
results.add(result);
if (prevThreadCount > 0)
System.out.println(String.format("Improvement over %d threadCount: %.0f%%",
prevThreadCount, 100 * averageImprovement(results, 1)));
runIds.add(threadCount + " threadCount");
prevThreadCount = threadCount;
if (threadCount < 16)
threadCount *= 2;
else
threadCount *= 1.5;
if (!results.isEmpty() && threadCount > settings.rate.maxThreads)
break;
if (settings.command.type.updates)
{
// pause an arbitrary period of time to let the commit log flush, etc. shouldn't make much difference
// as we only increase load, never decrease it
output.println("Sleeping for 15s");
try
{
Thread.sleep(15 * 1000);
} catch (InterruptedException e)
{
return false;
}
}
// run until we have not improved throughput significantly for previous three runs
} while (!auto || (hasAverageImprovement(results, 3, 0) && hasAverageImprovement(results, 5, settings.command.targetUncertainty)));
// summarise all results
StressMetrics.summarise(runIds, results, output, settings.samples.historyCount);
return true;
} |
python | def parse_string_expr(self, string_expr_node):
""" Parse a string node content. """
string_expr_node_value = string_expr_node['value']
string_expr_str = string_expr_node_value[1:-1]
# Handle escaped string literals
if string_expr_node_value[0] == "'":
string_expr_str = string_expr_str.replace("''", "'")
else:
string_expr_str = string_expr_str.replace('\\"', '"')
# NOTE: This is a hack to parse expr1. See :help expr1
raw_ast = self.parse_string('echo ' + string_expr_str)
# We need the left node of ECHO node
parsed_string_expr_nodes = raw_ast['body'][0]['list']
start_pos = string_expr_node['pos']
def adjust_position(node):
pos = node['pos']
# Adjust for the 1-based column index and the length of "echo ".
pos['col'] += start_pos['col'] - 1 - 5
# Adjust for the length of "echo ".
pos['i'] += start_pos['i'] - 5
# Adjust for the 1-based line number.
pos['lnum'] += start_pos['lnum'] - 1
for parsed_string_expr_node in parsed_string_expr_nodes:
traverse(parsed_string_expr_node, on_enter=adjust_position)
return parsed_string_expr_nodes |
java | public static CoreDictionary.Attribute getAttribute(String word)
{
CoreDictionary.Attribute attribute = CoreDictionary.get(word);
if (attribute != null) return attribute;
return CustomDictionary.get(word);
} |
java | public static <E> ArraySet<E> newArraySet(E... elements) {
int capacity = elements.length * 4 / 3 + 1;
ArraySet<E> set = new ArraySet<E>(capacity);
Collections.addAll(set, elements);
return set;
} |
java | public static String locateOrCreateDirectory(String dirName) throws IOException {
String path = locateDirectory(dirName);
if (path == null) {
File file = new File(dirName);
file.mkdirs();
path = file.getCanonicalPath();
}
return path;
} |
java | public static SSLSessionStrategy build(String trustStore,
String trustStorePassword,
String keyStore,
String keyStorePassword,
String[] keyAliases,
String keyPassword,
String[] allowedProtocols,
String[] allowedCiphers,
boolean allowAnyHostname,
boolean trustSelfSigned)
throws NoSuchAlgorithmException, KeyStoreException, CertificateException, IOException,
KeyManagementException, UnrecoverableKeyException {
Args.notNull(allowedProtocols, "Allowed protocols"); //$NON-NLS-1$
Args.notNull(allowedCiphers, "Allowed ciphers"); //$NON-NLS-1$
TrustStrategy trustStrategy = trustSelfSigned ? SELF_SIGNED : null;
HostnameVerifier hostnameVerifier = allowAnyHostname ? ALLOW_ANY :
SSLConnectionSocketFactory.getDefaultHostnameVerifier();
PrivateKeyStrategy privateKeyStrategy = keyAliases == null ? null : new SelectByAlias(keyAliases);
boolean clientAuth = keyStore == null ? false : true;
SSLContextBuilder builder = SSLContexts.custom();
if (trustStore != null) {
loadTrustMaterial(builder,
new File(trustStore),
trustStorePassword.toCharArray(),
trustStrategy);
}
if (keyStore != null) {
char[] ksp = keyStorePassword == null ? null : keyStorePassword.toCharArray();
char[] kp = keyPassword == null ? null : keyPassword.toCharArray();
loadKeyMaterial(builder, new File(keyStore), ksp, kp, privateKeyStrategy);
}
SSLContext sslContext = builder.build();
return new SSLSessionStrategy(hostnameVerifier, new CipherSelectingSSLSocketFactory(
sslContext.getSocketFactory(), allowedCiphers, allowedProtocols, clientAuth));
} |
java | public void endDiff() throws DiffException {
DOMSource source = new DOMSource(doc);
try {
transformer.transform(source, result);
} catch (TransformerException te) {
throw new DiffException(te);
}
} |
python | def from_rotation_vector(rot):
"""Convert input 3-vector in axis-angle representation to unit quaternion
Parameters
----------
rot: (Nx3) float array
Each vector represents the axis of the rotation, with norm
proportional to the angle of the rotation in radians.
Returns
-------
q: array of quaternions
Unit quaternions resulting in rotations corresponding to input
rotations. Output shape is rot.shape[:-1].
"""
rot = np.array(rot, copy=False)
quats = np.zeros(rot.shape[:-1]+(4,))
quats[..., 1:] = rot[...]/2
quats = as_quat_array(quats)
return np.exp(quats) |
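What the quaternion exponential computes, written out in plain numpy for a single rotation (a sketch; the function above depends on the numpy-quaternion package for `as_quat_array` and the quaternion-aware `np.exp`):

```python
import numpy as np

v = np.array([0.0, 0.0, np.pi / 2])  # 90 degrees about the z axis
theta = np.linalg.norm(v)
# exp of the pure quaternion v/2 is (cos(theta/2), sin(theta/2) * v_hat):
q = np.concatenate([[np.cos(theta / 2)], np.sin(theta / 2) * v / theta])
print(q)  # [0.7071 0. 0. 0.7071]
```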
java | private String parse(Matrix matrix) {
String transform = "";
if (matrix != null) {
double dx = matrix.getDx();
double dy = matrix.getDy();
if (matrix.getXx() != 0 && matrix.getYy() != 0 && matrix.getXx() != 1 && matrix.getYy() != 1) {
transform += "scale(" + matrix.getXx() + ", " + matrix.getYy() + ")"; // scale first
// no space between 'scale' and '(' !!!
dx /= matrix.getXx();
dy /= matrix.getYy();
}
transform += " translate(" + (float) dx + ", " + (float) dy + ")";
// no space between 'translate' and '(' !!!
}
return transform;
} |
python | def subcorpus(self, selector):
"""
Generates a new :class:`.Corpus` using the criteria in ``selector``.
Accepts selector arguments just like :meth:`.Corpus.select`\.
.. code-block:: python
>>> corpus = Corpus(papers)
>>> subcorpus = corpus.subcorpus(('date', 1995))
>>> subcorpus
<tethne.classes.corpus.Corpus object at 0x10278ea10>
"""
subcorpus = self.__class__(self[selector],
index_by=self.index_by,
index_fields=self.indices.keys(),
index_features=self.features.keys())
return subcorpus |
python | def _all_params(arr):
"""
Ensures that the argument is a list that either is empty or contains only GPParamSpec's
:param arr: list
:return:
"""
if not isinstance(arr, list):
raise TypeError("non-list value found for parameters")
return all(isinstance(x, GPParamSpec) for x in arr) |
java | @Override
public R visitErroneous(ErroneousTree node, P p) {
return defaultAction(node, p);
} |
python | def close(self):
"""
Disconnect and error-out all requests.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
reactor.callFromThread(self.connector.disconnect)
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
# don't leave in-progress operations hanging
self.connected_event.set() |
python | def __raise_file_system_exception(self, item, directory):
"""
Raises a common fileSystem exception.
:param item: Name of the item generating the exception.
:type item: unicode
:param directory: Name of the target directory.
:type directory: unicode
"""
path = os.path.join(directory, item)
if os.path.isfile(path):
raise foundations.exceptions.FileExistsError(
"{0} | A file with '{1}' name already exists in '{2}' directory!".format(self.__class__.__name__,
item,
directory))
else:
raise foundations.exceptions.DirectoryExistsError(
"{0} | A directory with '{1}' name already exists in '{2}' directory!".format(self.__class__.__name__,
item,
directory)) |
java | protected void subAppend(final LoggingEvent event) {
// The rollover check must precede actual writing. This is the
// only correct behavior for time driven triggers.
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
if (triggeringPolicy.isTriggeringEvent(RollingFileAppender.this, event, getFile(), getFileLength())) {
//
// wrap rollover request in try block since
// rollover may fail in case read access to directory
// is not provided. However appender should still be in good
// condition and the append should still happen.
try {
rollover();
} catch (Exception ex) {
LogLog.warn("Exception during rollover attempt.", ex);
}
}
return null;
}
});
super.subAppend(event);
} |
java | private static DefaultDirectedGraph<ClassExpression,DefaultEdge> getClassGraph (OntologyImpl.UnclassifiedOntologyTBox ontology,
DefaultDirectedGraph<ObjectPropertyExpression,DefaultEdge> objectPropertyGraph,
DefaultDirectedGraph<DataPropertyExpression,DefaultEdge> dataPropertyGraph) {
DefaultDirectedGraph<ClassExpression,DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
for (OClass concept : ontology.classes())
if (!concept.isBottom() && !concept.isTop())
graph.addVertex(concept);
// domains and ranges of roles
for (ObjectPropertyExpression role : objectPropertyGraph.vertexSet())
graph.addVertex(role.getDomain());
// edges between the domains and ranges for sub-properties
for (DefaultEdge edge : objectPropertyGraph.edgeSet()) {
ObjectPropertyExpression child = objectPropertyGraph.getEdgeSource(edge);
ObjectPropertyExpression parent = objectPropertyGraph.getEdgeTarget(edge);
graph.addEdge(child.getDomain(), parent.getDomain());
}
// domains and ranges of roles
for (DataPropertyExpression role : dataPropertyGraph.vertexSet())
for (DataSomeValuesFrom dom : role.getAllDomainRestrictions())
graph.addVertex(dom);
// edges between the domains and ranges for sub-properties
for (DefaultEdge edge : dataPropertyGraph.edgeSet()) {
DataPropertyExpression child = dataPropertyGraph.getEdgeSource(edge);
DataPropertyExpression parent = dataPropertyGraph.getEdgeTarget(edge);
graph.addEdge(child.getDomainRestriction(DatatypeImpl.rdfsLiteral), parent.getDomainRestriction(DatatypeImpl.rdfsLiteral));
}
ClassExpression top = null;
// class inclusions from the ontology
for (BinaryAxiom<ClassExpression> clsIncl : ontology.getSubClassAxioms()) {
if (clsIncl.getSub().isBottom() || clsIncl.getSuper().isTop())
continue;
if (clsIncl.getSuper().isBottom()) {
throw new RuntimeException("BOT cannot occur on the LHS - replaced by DISJ");
}
if (clsIncl.getSub().isTop()) {
top = clsIncl.getSub();
graph.addVertex(top);
}
graph.addEdge(clsIncl.getSub(), clsIncl.getSuper());
}
if (top != null) {
for (ClassExpression c : graph.vertexSet())
graph.addEdge(c, top);
}
return graph;
} |
java | public void detachEdge(SquareEdge edge) {
edge.a.edges[edge.sideA] = null;
edge.b.edges[edge.sideB] = null;
edge.distance = 0;
edgeManager.recycleInstance(edge);
} |
python | def _dispatch(self):
"""Run the dispatch() method on all ingredients."""
for ingredient in self.ingredients:
result = ingredient.dispatch(self.context)
if result is not None:
return result |
python | def main(args=None):
"""Main command-line interface entrypoint.
Runs the given subcommand or argument that were specified. If not given a
``args`` parameter, assumes the arguments are passed on the command-line.
Args:
args (list): list of command-line arguments
Returns:
Zero on success, non-zero otherwise.
"""
if args is None:
args = sys.argv[1:]
parser = create_parser()
args = parser.parse_args(args)
if args.verbose >= 2:
level = logging.DEBUG
elif args.verbose >= 1:
level = logging.INFO
else:
level = logging.WARNING
logging.basicConfig(level=level)
try:
args.command(args)
except pylink.JLinkException as e:
sys.stderr.write('Error: %s%s' % (str(e), os.linesep))
return 1
return 0 |
python | def get_resource_allocation(self):
"""Get the :py:class:`ResourceAllocation` element tance.
Returns:
ResourceAllocation: Resource allocation used to access information about the resource where this PE is running.
.. versionadded:: 1.9
"""
if hasattr(self, 'resourceAllocation'):
return ResourceAllocation(self.rest_client.make_request(self.resourceAllocation), self.rest_client) |
python | def fit(self, train_X, train_Y, val_X=None, val_Y=None, graph=None):
"""Fit the model to the data.
Parameters
----------
train_X : array_like, shape (n_samples, n_features)
Training data.
train_Y : array_like, shape (n_samples, n_classes)
Training labels.
val_X : array_like, shape (N, n_features) optional, (default = None).
Validation data.
val_Y : array_like, shape (N, n_classes) optional, (default = None).
Validation labels.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object.
Returns
-------
"""
if len(train_Y.shape) != 1:
num_classes = train_Y.shape[1]
else:
raise Exception("Please convert the labels with one-hot encoding.")
g = graph if graph is not None else self.tf_graph
with g.as_default():
# Build model
self.build_model(train_X.shape[1], num_classes)
with tf.Session() as self.tf_session:
# Initialize tf stuff
summary_objs = tf_utils.init_tf_ops(self.tf_session)
self.tf_merged_summaries = summary_objs[0]
self.tf_summary_writer = summary_objs[1]
self.tf_saver = summary_objs[2]
# Train model
self._train_model(train_X, train_Y, val_X, val_Y)
# Save model
self.tf_saver.save(self.tf_session, self.model_path) |
python | def get_value_from_schema(schema, definition: dict, key: str,
definition_key: str):
"""Gets a value from a schema and definition.
If the value has references it will recursively attempt to resolve them.
:param ResourceSchema schema: The resource schema.
:param dict definition: The definition dict from the schema.
:param str key: The key to use to get the value from the schema.
:param str definition_key: The name of the definition.
:returns: The value.
:raises TypeSystemError: If the key can't be found in the schema/definition
or we can't resolve the definition.
"""
resolved_definition = definition.copy()
if '$ref' in resolved_definition:
try:
# NOTE: The resolve method recursively resolves references, so
# we don't need to worry about that in this function.
resolved_definition = schema.resolve(definition['$ref'])
except SchemaError as e:
raise TypeSystemError(str(e))
try:
value = resolved_definition[key]
except KeyError:
# Before raising an error, the resolved definition may have an array
# or object inside it that needs to be resolved in order to get
# values. Attempt that here and then fail if we still can't find
# the key we are looking for.
# If the key was missing and this is an array, try to resolve it
# from the items key.
if resolved_definition['type'] == 'array':
return [
get_value_from_schema(schema, resolved_definition['items'], key,
definition_key)
]
# If the key was missing and this is an object, resolve it from it's
# properties.
elif resolved_definition['type'] == 'object':
value = {}
for prop, definition in resolved_definition['properties'].items():
value[prop] = get_value_from_schema(
schema, definition, key, definition_key)
return value
raise TypeSystemError(
'Definition `{}` is missing a {}.'.format(
definition_key, key))
return value |
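A standalone toy illustration of the recursion above, using plain dicts; the real `ResourceSchema` class is not exercised here, and the definition below is invented for the example:

```python
definition = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string', 'example': 'alice'},
        # No 'example' on the array itself, so the array branch
        # recurses into 'items' and wraps the result in a list.
        'tags': {'type': 'array', 'items': {'type': 'string', 'example': 'admin'}},
    },
}
# get_value_from_schema(schema, definition, 'example', 'User')
# would resolve to: {'name': 'alice', 'tags': ['admin']}
```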
python | def encode_to_sha(msg):
"""coerce numeric list inst sha-looking bytearray"""
if isinstance(msg, str):
msg = msg.encode('utf-8')
return (codecs.encode(msg, "hex_codec") + (b'00' * 32))[:64] |
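A quick check of the padding behavior: the hex encoding is right-padded with zero bytes and clipped to the 64-character width of a SHA-256 hex digest:

```python
# 'abc' -> hex b'616263', then zero-padded and clipped to 64 hex chars.
assert encode_to_sha('abc') == b'616263' + b'0' * 58
```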
python | def read_csv_as_integer(csv_name, integer_columns, usecols=None):
"""Returns a DataFrame from a .csv file stored in /data/raw/.
Converts columns specified by 'integer_columns' to integer.
"""
csv_path = os.path.join(DATA_FOLDER, csv_name)
csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols)
for column in integer_columns:
csv = csv[pd.to_numeric(csv[column], errors="coerce").notnull()]
csv[integer_columns] = csv[integer_columns].apply(pd.to_numeric)
return csv |
python | def create_api_key(self, api_id, stage_name):
"""
Create new API key and link it with an api_id and a stage_name
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id'])) |
java | public List<CallbackMetadata> getCallbacks(CallbackType callbackType) {
return callbacks == null ? null : callbacks.get(callbackType);
} |
java | public static int cs_fkeep(Scs A, Scs_ifkeep fkeep, Object other) {
int j, p, nz = 0, n, Ap[], Ai[];
float Ax[];
if (!Scs_util.CS_CSC(A))
return (-1); /* check inputs */
n = A.n;
Ap = A.p;
Ai = A.i;
Ax = A.x;
for (j = 0; j < n; j++) {
p = Ap[j]; /* get current location of col j */
Ap[j] = nz; /* record new location of col j */
for (; p < Ap[j + 1]; p++) {
if (fkeep.fkeep(Ai[p], j, Ax != null ? Ax[p] : 1, other)) {
if (Ax != null)
Ax[nz] = Ax[p]; /* keep A(i,j) */
Ai[nz++] = Ai[p];
}
}
}
Ap[n] = nz; /* finalize A */
Scs_util.cs_sprealloc(A, 0); /* remove extra space from A */
return (nz);
} |
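A hedged scipy equivalent of what `cs_fkeep` does — drop the entries of a CSC matrix that fail a predicate and compact the storage — rebuilt through COO rather than walking column pointers in place as the Java code does:

```python
import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix(np.array([[1.0, 0.0], [-2.0, 3.0]]))
coo = A.tocoo()
mask = coo.data > 0  # the fkeep predicate: keep strictly positive entries
A = csc_matrix((coo.data[mask], (coo.row[mask], coo.col[mask])), shape=A.shape)
print(A.toarray())   # [[1. 0.], [0. 3.]]
```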
python | def API520_F2(k, P1, P2):
r'''Calculates coefficient F2 for subcritical flow for use in API 520
subcritical flow relief valve sizing.
.. math::
F_2 = \sqrt{\left(\frac{k}{k-1}\right)r^\frac{2}{k}
\left[\frac{1-r^\frac{k-1}{k}}{1-r}\right]}
.. math::
r = \frac{P_2}{P_1}
Parameters
----------
k : float
Isentropic coefficient or ideal gas heat capacity ratio [-]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
P2 : float
Built-up backpressure; the increase in pressure during flow at the
outlet of a pressure-relief device after it opens, [Pa]
Returns
-------
F2 : float
Subcritical flow coefficient `F2` [-]
Notes
-----
F2 is completely dimensionless.
Examples
--------
From [1]_ example 2, matches.
>>> API520_F2(1.8, 1E6, 7E5)
0.8600724121105563
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
r = P2/P1
return ( k/(k-1)*r**(2./k) * ((1-r**((k-1.)/k))/(1.-r)) )**0.5 |
python | def point_within_radius(self, points, pt, canvas_radius,
scales=(1.0, 1.0)):
"""Points `points` and point `pt` are in data coordinates.
Return True for points within the circle defined by
a center at point `pt` and within canvas_radius.
"""
scale_x, scale_y = scales
x, y = pt
a_arr, b_arr = np.asarray(points).T
dx = np.fabs(x - a_arr) * scale_x
dy = np.fabs(y - b_arr) * scale_y
new_radius = np.sqrt(dx**2 + dy**2)
res = (new_radius <= canvas_radius)
return res |
java | public int getThisTaskIndex() {
List<Integer> tasks = new ArrayList<>(getComponentTasks(getThisComponentId()));
Collections.sort(tasks);
for (int i = 0; i < tasks.size(); i++) {
if (tasks.get(i) == getThisTaskId()) {
return i;
}
}
throw new RuntimeException("Fatal: could not find this task id in this component");
} |
python | def update_average_model(self, model):
""" Update weights of the average model with new model observation """
for model_param, average_param in zip(model.parameters(), self.average_model.parameters()):
# EWMA average model update
average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha)) |
java | protected List<String> getCandidateConfigurations(AnnotationMetadata metadata,
AnnotationAttributes attributes) {
List<String> configurations = SpringFactoriesLoader.loadFactoryNames(
getSpringFactoriesLoaderFactoryClass(), getBeanClassLoader());
Assert.notEmpty(configurations,
"No auto configuration classes found in META-INF/spring.factories. If you "
+ "are using a custom packaging, make sure that file is correct.");
return configurations;
} |
python | def add_env(self, name, val):
'''Add an environment variable to the docker run invocation
'''
if name in self.env_vars:
raise KeyError(name)
self.env_vars[name] = val |
python | def _get_system():
'''
Get Pure Storage FlashArray configuration
1) From the minion config
pure_tags:
fa:
san_ip: management vip or hostname for the FlashArray
api_token: A valid api token for the FlashArray being managed
2) From environment (PUREFA_IP and PUREFA_API)
3) From the pillar (PUREFA_IP and PUREFA_API)
'''
agent = {'base': USER_AGENT_BASE,
'class': __name__,
'version': VERSION,
'platform': platform.platform()
}
user_agent = '{base} {class}/{version} ({platform})'.format(**agent)
try:
array = __opts__['pure_tags']['fa'].get('san_ip')
api = __opts__['pure_tags']['fa'].get('api_token')
if array and api:
system = purestorage.FlashArray(array, api_token=api, user_agent=user_agent)
except (KeyError, NameError, TypeError):
try:
san_ip = os.environ.get('PUREFA_IP')
api_token = os.environ.get('PUREFA_API')
system = purestorage.FlashArray(san_ip,
api_token=api_token,
user_agent=user_agent)
except (ValueError, KeyError, NameError):
try:
system = purestorage.FlashArray(__pillar__['PUREFA_IP'],
api_token=__pillar__['PUREFA_API'],
user_agent=user_agent)
except (KeyError, NameError):
raise CommandExecutionError('No Pure Storage FlashArray credentials found.')
try:
system.get()
except Exception:
raise CommandExecutionError('Pure Storage FlashArray authentication failed.')
return system |
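The three-stage credential lookup above (minion config, then environment, then pillar) can be sketched without Salt as a chain of optional sources (hypothetical helper, not part of the module):

import os

def first_credentials(opts, pillar):
    """Return (san_ip, api_token) from the first source providing both."""
    fa = opts.get('pure_tags', {}).get('fa', {})
    candidates = [
        (fa.get('san_ip'), fa.get('api_token')),                      # 1) minion config
        (os.environ.get('PUREFA_IP'), os.environ.get('PUREFA_API')),  # 2) environment
        (pillar.get('PUREFA_IP'), pillar.get('PUREFA_API')),          # 3) pillar
    ]
    for ip, token in candidates:
        if ip and token:
            return ip, token
    raise RuntimeError('No Pure Storage FlashArray credentials found.')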
java | public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case ID:
return isSetId();
case CATEGORY:
return isSetCategory();
case LABEL:
return isSetLabel();
case THRESHOLD:
return isSetThreshold();
case RESPONSES:
return isSetResponses();
}
throw new IllegalStateException();
} |
java | public static <T extends Comparable<? super T>> int compare(T c1, T c2, boolean isNullGreater) {
if (c1 == c2) {
return 0;
} else if (c1 == null) {
return isNullGreater ? 1 : -1;
} else if (c2 == null) {
return isNullGreater ? -1 : 1;
}
return c1.compareTo(c2);
} |
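The same null-first/null-last contract, sketched in Python for comparison (not part of any library):

def compare(c1, c2, null_greater=False):
    """None-safe three-way compare mirroring the Java helper above."""
    if c1 is c2:
        return 0
    if c1 is None:
        return 1 if null_greater else -1
    if c2 is None:
        return -1 if null_greater else 1
    return (c1 > c2) - (c1 < c2)

assert compare(None, 5) == -1       # None sorts first by default
assert compare(None, 5, True) == 1  # ...and last when null_greater is set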
python | def sortByTotal(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
"""
return list(sorted(seriesList, key=safeSum, reverse=True)) |
python | def create_task(coro, loop):
# pragma: no cover
"""Compatibility wrapper for the loop.create_task() call introduced in
3.4.2."""
if hasattr(loop, 'create_task'):
return loop.create_task(coro)
return asyncio.Task(coro, loop=loop) |
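A usage sketch for the wrapper above (assuming create_task is in scope; the coroutine is hypothetical):

import asyncio

async def work():
    await asyncio.sleep(0)
    return 42

loop = asyncio.new_event_loop()
task = create_task(work(), loop)      # prefers loop.create_task() on 3.4.2+
print(loop.run_until_complete(task))  # 42
loop.close()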
python | def _parse_comma_list(self):
"""Parse a comma seperated list."""
if self._cur_token['type'] not in self._literals:
raise Exception(
"Parser failed, _parse_comma_list was called on non-literal"
" {} on line {}.".format(
repr(self._cur_token['value']), self._cur_token['line']
)
)
array = []
while self._cur_token['type'] in self._literals and not self._finished:
array.append(self._cur_token['value'])
self._increment()
self._skip_whitespace()
if self._cur_token['type'] is TT.comma:
self._increment()
self._skip_whitespace()
elif (
not self._finished and
self._cur_token['type'] not in (TT.ws, TT.lbreak)
):
raise ParseError('comma or newline', self._cur_token)
return array |
java | private boolean containsCompositeKey(TableInfo tableInfo)
{
return tableInfo.getTableIdType() != null && tableInfo.getTableIdType().isAnnotationPresent(Embeddable.class);
} |
java | @Override
public List<CommerceShipment> findAll() {
return findAll(QueryUtil.ALL_POS, QueryUtil.ALL_POS, null);
} |
java | public PutRecordsResult withRecords(PutRecordsResultEntry... records) {
if (this.records == null) {
setRecords(new com.amazonaws.internal.SdkInternalList<PutRecordsResultEntry>(records.length));
}
for (PutRecordsResultEntry ele : records) {
this.records.add(ele);
}
return this;
} |
java | public static synchronized IAsyncProvider createInstance() throws AsyncException {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
Tr.entry(tc, "createInstance");
}
if (instance == null) {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
Tr.debug(tc, "instance is null. Instantiating new AsyncLibrary ");
}
instance = new AsyncLibrary();
}
if ((aioInitialized == AIO_NOT_INITIALIZED) || (aioInitialized == AIO_SHUTDOWN)) {
initialize();
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
Tr.exit(tc, "createInstance");
}
return instance;
} |
java | public static byte[] streamOut(Object object, boolean compressed) throws IOException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
streamOut(bytes, object, compressed);
bytes.flush();
bytes.close();
return bytes.toByteArray();
} |
python | def action_rename(self, courseid, taskid, path, new_path):
""" Delete a file or a directory """
# normalize
path = path.strip()
new_path = new_path.strip()
if not path.startswith("/"):
path = "/" + path
if not new_path.startswith("/"):
new_path = "/" + new_path
old_path = self.verify_path(courseid, taskid, path)
if old_path is None:
return self.show_tab_file(courseid, taskid, _("Internal error"))
wanted_path = self.verify_path(courseid, taskid, new_path, True)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Invalid new path"))
try:
self.task_factory.get_task_fs(courseid, taskid).move(old_path, wanted_path)
return self.show_tab_file(courseid, taskid)
        except Exception:
            return self.show_tab_file(courseid, taskid, _("An error occurred while moving the files"))
java | @PUT
@Path("/{repoName}/{repoPath:.*}/")
public Response put(@PathParam("repoName") String repoName, @PathParam("repoPath") String repoPath,
@HeaderParam(ExtHttpHeaders.LOCKTOKEN) String lockTokenHeader, @HeaderParam(ExtHttpHeaders.IF) String ifHeader,
@HeaderParam(ExtHttpHeaders.FILE_NODETYPE) String fileNodeTypeHeader,
@HeaderParam(ExtHttpHeaders.CONTENT_NODETYPE) String contentNodeTypeHeader,
@HeaderParam(ExtHttpHeaders.CONTENT_MIXINTYPES) String mixinTypes,
@HeaderParam(ExtHttpHeaders.CONTENT_TYPE) MediaType mediaType,
@HeaderParam(ExtHttpHeaders.USER_AGENT) String userAgent, InputStream inputStream, @Context UriInfo uriInfo)
{
if (log.isDebugEnabled())
{
log.debug("PUT " + repoName + "/" + repoPath);
}
repoPath = normalizePath(repoPath);
MimeTypeRecognizer mimeTypeRecognizer =
new MimeTypeRecognizer(TextUtil.nameOnly(repoPath), mimeTypeResolver, mediaType, webDavServiceInitParams
.isUntrustedUserAgent(userAgent));
try
{
repoName = getRepositoryName(repoName);
List<String> tokens = lockTokens(lockTokenHeader, ifHeader);
Session session = session(repoName, workspaceName(repoPath), tokens);
String fileNodeType =
NodeTypeUtil.getNodeType(fileNodeTypeHeader, webDavServiceInitParams.getDefaultFileNodeType(),
webDavServiceInitParams.getAllowedFileNodeTypes());
String contentNodeType = NodeTypeUtil.getContentNodeType(contentNodeTypeHeader);
NodeTypeManager ntm = session.getWorkspace().getNodeTypeManager();
NodeType nodeType = ntm.getNodeType(contentNodeType);
NodeTypeUtil.checkContentResourceType(nodeType);
if (webDavServiceInitParams.isEnableAutoVersion())
{
return new PutCommand(nullResourceLocks, uriInfo.getBaseUriBuilder().path(getClass()).path(repoName),
mimeTypeRecognizer).put(session, path(repoPath), inputStream, fileNodeType, contentNodeType,
NodeTypeUtil.getMixinTypes(mixinTypes), tokens, webDavServiceInitParams.getAllowedAutoVersionPath());
}
else
{
return new PutCommand(nullResourceLocks, uriInfo.getBaseUriBuilder().path(getClass()).path(repoName),
mimeTypeRecognizer).put(session, path(repoPath), inputStream, fileNodeType, contentNodeType,
NodeTypeUtil.getMixinTypes(mixinTypes), webDavServiceInitParams.getDefaultUpdatePolicyType(),
webDavServiceInitParams.getDefaultAutoVersionType(), tokens);
}
}
catch (NoSuchWorkspaceException exc)
{
log.error("NoSuchWorkspaceException " + exc.getMessage(), exc);
return Response.status(HTTPStatus.CONFLICT).entity(exc.getMessage()).build();
}
catch (NoSuchNodeTypeException exc)
{
log.error("NoSuchNodeTypeException " + exc.getMessage(), exc);
return Response.status(HTTPStatus.BAD_REQUEST).entity(exc.getMessage()).build();
}
catch (Exception exc)
{
log.error(exc.getMessage(), exc);
return Response.serverError().entity(exc.getMessage()).build();
}
} |
java | protected String getDnForUser(String userId) {
LdapUserEntity user = (LdapUserEntity) createUserQuery(org.camunda.bpm.engine.impl.context.Context.getCommandContext())
.userId(userId)
.singleResult();
if(user == null) {
return "";
} else {
return user.getDn();
}
} |
java | public void clearCache()
{
// XXX: see if can remove this, and rely on the invocation cache existing
LruCache<Object,I> invocationCache = _invocationCache;
if (invocationCache != null) {
invocationCache.clear();
}
} |
java | @Override
public void deleteProcessInstancesByProcessDefinition(String processDefinitionId, String deleteReason, boolean cascade) {
List<String> processInstanceIds = executionDataManager.findProcessInstanceIdsByProcessDefinitionId(processDefinitionId);
for (String processInstanceId : processInstanceIds) {
deleteProcessInstance(processInstanceId, deleteReason, cascade);
}
if (cascade) {
getHistoricProcessInstanceEntityManager().deleteHistoricProcessInstanceByProcessDefinitionId(processDefinitionId);
}
} |
python | def eventFilter(self, object, event):
"""
Filters the events for the editors to control how the cursor
flows between them.
:param object | <QtCore.QObject>
event | <QtCore.QEvent>
:return <bool> | consumed
"""
index = self.indexOf(object)
pressed = event.type() == event.KeyPress
released = event.type() == event.KeyRelease
if index == -1 or \
not (pressed or released) or \
self.isEditorHandlingBlocked():
return super(XSerialEdit, self).eventFilter(object, event)
text = nativestring(event.text()).strip()
# handle Ctrl+C (copy)
if event.key() == QtCore.Qt.Key_C and \
event.modifiers() == QtCore.Qt.ControlModifier and \
pressed:
self.copy()
return True
# handle Ctrl+X (cut)
elif event.key() == QtCore.Qt.Key_X and \
event.modifiers() == QtCore.Qt.ControlModifier and \
pressed:
if not self.isReadOnly():
self.cut()
return True
# handle Ctrl+A (select all)
elif event.key() == QtCore.Qt.Key_A and \
event.modifiers() == QtCore.Qt.ControlModifier and \
pressed:
self.selectAll()
return True
# handle Ctrl+V (paste)
elif event.key() == QtCore.Qt.Key_V and \
event.modifiers() == QtCore.Qt.ControlModifier and \
pressed:
if not self.isReadOnly():
self.paste()
return True
# ignore tab movements
elif event.key() in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Backtab):
pass
# delete all selected text
elif event.key() == QtCore.Qt.Key_Backspace:
sel_text = self.selectedText()
if sel_text and not self.isReadOnly():
self.clearSelection()
return True
# ignore modified keys
elif not released:
return super(XSerialEdit, self).eventFilter(object, event)
# move to the previous editor
elif object.cursorPosition() == 0:
if event.key() in (QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Left):
self.goBack()
# move to next editor
elif object.cursorPosition() == object.maxLength():
valid_chars = string.ascii_letters + string.digits
valid_text = text != '' and text in valid_chars
if valid_text or event.key() == QtCore.Qt.Key_Right:
self.goForward()
return super(XSerialEdit, self).eventFilter(object, event) |
java | protected synchronized ClientProtocol createNameNodeProxy(UnixUserGroupInformation ugi
) throws IOException {
if (nnProxy != null) {
return nnProxy;
}
ServletContext context = getServletContext();
InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
if (nnAddr == null) {
throw new IOException("The namenode is not out of safemode yet");
}
Configuration conf = new Configuration(
(Configuration)context.getAttribute("name.conf"));
UnixUserGroupInformation.saveToConf(conf,
UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
nnProxy = DFSClient.createNamenode(nnAddr, conf);
return nnProxy;
} |
java | public final ValueWithPos<String> formatDin5008WithPos(final ValueWithPos<String> pphoneNumber,
final String pcountryCode) {
return valueWithPosDefaults(this.formatDin5008WithPos(
this.parsePhoneNumber(pphoneNumber, pcountryCode), CreatePhoneCountryConstantsClass.create()
.countryMap().get(StringUtils.defaultString(pcountryCode))),
pphoneNumber);
} |
python | def simultaneous_listen(self):
"""
This function is called by passive simultaneous nodes who
        wish to establish themselves as such. It sets up a connection
to the Rendezvous Server to monitor for new hole punching requests.
"""
# Close socket.
if self.server_con is not None:
self.server_con.s.close()
self.server_con = None
# Reset predictions + mappings.
self.mappings = None
self.predictions = None
# Connect to rendezvous server.
parts = self.sequential_connect()
if parts is None:
return 0
con, mappings, predictions = parts
con.blocking = 0
con.timeout = 0
con.s.settimeout(0)
self.server_con = con
self.mappings = mappings
self.predictions = predictions
# Register simultaneous node with server.
msg = "SIMULTANEOUS READY 0 0"
ret = self.server_con.send_line(msg)
if not ret:
return 0
return 1 |
python | def read_bits(self, num):
'''
Read several bits packed into the same field. Will return as a list.
The bit field itself is little-endian, though the order of the
returned array looks big-endian for ease of decomposition.
Reader('\x02').read_bits(2) -> [False,True]
        Reader('\x02').read_bits(8) ->
            [False,True,False,False,False,False,False,False]
first_field, second_field = Reader('\x02').read_bits(2)
Will raise BufferUnderflow if there's not enough bytes in the buffer.
        Will raise ValueError if num < 0 or num > 8
'''
# Perform a faster check on underflow
if self._pos >= self._end_pos:
raise self.BufferUnderflow()
if num < 0 or num >= 9:
raise ValueError("8 bits per field")
field = ord(self._input[self._pos])
result = map(lambda x: field >> x & 1, xrange(num))
self._pos += 1
return result |
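The shift-and-mask decomposition above, worked by hand (the bit field is little-endian, so index 0 is the least-significant bit):

field = 0x02  # the byte '\x02' from the docstring example
bits = [bool(field >> i & 1) for i in range(8)]
print(bits)   # [False, True, False, False, False, False, False, False]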
python | def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
            except Exception:
                # Connection failed; record an empty entry so the lookup
                # below cannot raise a KeyError
                indexes = None
            self.get_cache()[db_name][collection_name] = {'indexes': indexes}
return self.get_cache()[db_name][collection_name] |
python | def _make_tempy_tag(self, tag, attrs, void):
"""Searches in tempy.tags for the correct tag to use, if does not exists uses the TempyFactory to
create a custom tag."""
tempy_tag_cls = getattr(self.tempy_tags, tag.title(), None)
if not tempy_tag_cls:
unknow_maker = [self.unknown_tag_maker, self.unknown_tag_maker.Void][void]
tempy_tag_cls = unknow_maker[tag]
attrs = {Tag._TO_SPECIALS.get(k, k): v or True for k, v in attrs}
tempy_tag = tempy_tag_cls(**attrs)
if not self.current_tag:
self.result.append(tempy_tag)
if not void:
self.current_tag = tempy_tag
else:
if not tempy_tag._void:
self.current_tag(tempy_tag)
self.current_tag = self.current_tag.childs[-1] |
python | def manifold(self, transformer):
"""
Creates the manifold estimator if a string value is passed in,
validates other objects passed in.
"""
if not is_estimator(transformer):
if transformer not in self.ALGORITHMS:
                raise YellowbrickValueError(
                    "could not create manifold for '{}'".format(str(transformer))
)
# Create a new transformer with the specified params
self._name = MANIFOLD_NAMES[transformer]
transformer = clone(self.ALGORITHMS[transformer])
params = {
"n_components": 2,
"n_neighbors": self.n_neighbors,
"random_state": self.random_state,
}
for param in list(params.keys()):
if param not in transformer.get_params():
del params[param]
transformer.set_params(**params)
self._manifold = transformer
if self._name is None:
self._name = self._manifold.__class__.__name__ |
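The parameter-filtering step above (keep only the keyword arguments the chosen transformer actually declares) is a reusable pattern; a standalone sketch with scikit-learn (estimator choices are illustrative):

from sklearn.manifold import TSNE, Isomap

def safe_params(estimator, **candidates):
    """Keep only the params this estimator actually declares."""
    accepted = estimator.get_params()
    return {k: v for k, v in candidates.items() if k in accepted}

tsne = TSNE(**safe_params(TSNE(), n_components=2, n_neighbors=5, random_state=0))
iso = Isomap(**safe_params(Isomap(), n_components=2, n_neighbors=5, random_state=0))
# TSNE silently drops n_neighbors; Isomap drops random_state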