language | func_code_string
---|---|
java | public void curveTo2 (final float x2, final float y2, final float x3, final float y3) throws IOException
{
if (inTextMode)
{
throw new IllegalStateException ("Error: curveTo2 is not allowed within a text block.");
}
writeOperand (x2);
writeOperand (y2);
writeOperand (x3);
writeOperand (y3);
writeOperator ((byte) 'v');
} |
python | def make_request(self, method, *args, **kwargs):
"""Creates a request from a method function call."""
if args and not use_signature:
raise NotImplementedError("Only keyword arguments allowed in Python2")
new_kwargs = {kw: unwrap(value) for kw, value in kwargs.items()}
if use_signature:
new_args = tuple(unwrap(value) for value in args)
bound_args = method.signature.bind(
unwrap(self), *new_args, **new_kwargs).arguments
# if we encounter any Enum arguments, replace them with their value
def translate_enum(arg):
return arg.value if isinstance(arg, Enum) else arg
for k in bound_args:
if isinstance(bound_args[k], str):
continue
if isinstance(bound_args[k], dict):
continue
try:
x = [translate_enum(arg) for arg in bound_args[k]]
bound_args[k] = x
except TypeError:
bound_args[k] = translate_enum(bound_args[k])
# replace `self` with the correct keyword
new_kwargs = {(kw if kw != 'self' else method.field_name): v
for kw, v in bound_args.items()}
# args = tuple(x.value if isinstance(x, Enum) else x for x in args)
else:
new_kwargs[self.field_name] = unwrap(self)
return method.request_type(**new_kwargs) |
python | def make_context(headers: Headers) -> Optional[TraceContext]:
"""Converts available headers to TraceContext, if headers mapping does
not contain zipkin headers, function returns None.
"""
# TODO: add validation for trace_id/span_id/parent_id
    # normalize header names just in case someone passed a regular dict
    # instead of a dict with case-insensitive keys
headers = {k.lower(): v for k, v in headers.items()}
required = (TRACE_ID_HEADER.lower(), SPAN_ID_HEADER.lower())
has_b3 = all(h in headers for h in required)
has_b3_single = SINGLE_HEADER in headers
if not(has_b3_single or has_b3):
return None
if has_b3:
debug = parse_debug_header(headers)
sampled = debug if debug else parse_sampled_header(headers)
context = TraceContext(
trace_id=headers[TRACE_ID_HEADER.lower()],
parent_id=headers.get(PARENT_ID_HEADER.lower()),
span_id=headers[SPAN_ID_HEADER.lower()],
sampled=sampled,
debug=debug,
shared=False,
)
return context
return _parse_single_header(headers) |
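A brief usage sketch of `make_context` (hedged: assumes the constants above hold the standard B3 header names, i.e. TRACE_ID_HEADER == 'X-B3-TraceId' and SPAN_ID_HEADER == 'X-B3-SpanId'):

```python
# Hedged usage sketch -- header names assume the standard B3 convention.
headers = {
    'X-B3-TraceId': '463ac35c9f6413ad48485a3953bb6124',
    'X-B3-SpanId': 'a2fb4a1d1a96d312',
    'X-B3-Sampled': '1',
}
context = make_context(headers)   # -> TraceContext(...)
make_context({})                  # -> None, no B3 headers present
```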
python | def __update_rating(uid, rating):
'''
Update rating.
'''
entry = TabRating.update(
rating=rating
).where(TabRating.uid == uid)
entry.execute() |
python | def ucs_manager_connect(self, ucsm_ip):
"""Connects to a UCS Manager."""
if not self.ucsmsdk:
self.ucsmsdk = self._import_ucsmsdk()
ucsm = CONF.ml2_cisco_ucsm.ucsms.get(ucsm_ip)
if not ucsm or not ucsm.ucsm_username or not ucsm.ucsm_password:
LOG.error('UCS Manager network driver failed to get login '
'credentials for UCSM %s', ucsm_ip)
return None
handle = self.ucsmsdk.handle(ucsm_ip, ucsm.ucsm_username,
ucsm.ucsm_password)
try:
handle.login()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e)
return handle |
python | def query(self, *args, **kwargs):
"""
Reimplemented from base class.
        This method does not add functionality beyond the
        base class's :meth:`~couchbase.bucket.Bucket.query` method (all the
        functionality is encapsulated in the view class anyway). However, it
        does require one additional keyword argument:
:param class itercls: A class used for instantiating the view
object. This should be a subclass of
:class:`~couchbase.asynchronous.view.AsyncViewBase`.
"""
if not issubclass(kwargs.get('itercls', None), AsyncViewBase):
raise ArgumentError.pyexc("itercls must be defined "
"and must be derived from AsyncViewBase")
return super(AsyncBucket, self).query(*args, **kwargs) |
python | def connect(self, force=False):
'''Establish a connection'''
# Don't re-establish existing connections
if not force and self.alive():
return True
self._reset()
# Otherwise, try to connect
with self._socket_lock:
try:
logger.info('Creating socket...')
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._timeout)
logger.info('Connecting to %s, %s', self.host, self.port)
self._socket.connect((self.host, self.port))
# Set our socket's blocking state to whatever ours is
self._socket.setblocking(self._blocking)
# Safely write our magic
self._pending.append(constants.MAGIC_V2)
while self.pending():
self.flush()
# And send our identify command
self.identify(self._identify_options)
while self.pending():
self.flush()
self._reconnnection_counter.success()
# Wait until we've gotten a response to IDENTIFY, try to read
# one. Also, only spend up to the provided timeout waiting to
# establish the connection.
limit = time.time() + self._timeout
responses = self._read(1)
while (not responses) and (time.time() < limit):
responses = self._read(1)
if not responses:
raise ConnectionTimeoutException(
'Read identify response timed out (%ss)' % self._timeout)
self.identified(responses[0])
return True
except:
logger.exception('Failed to connect')
if self._socket:
self._socket.close()
self._reconnnection_counter.failed()
self._reset()
return False |
java | public static Function<Object,Boolean> bool(final String methodName, final Object... optionalParameters) {
return methodForBoolean(methodName, optionalParameters);
} |
java | public void marshall(AssociationOverview associationOverview, ProtocolMarshaller protocolMarshaller) {
if (associationOverview == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(associationOverview.getStatus(), STATUS_BINDING);
protocolMarshaller.marshall(associationOverview.getDetailedStatus(), DETAILEDSTATUS_BINDING);
protocolMarshaller.marshall(associationOverview.getAssociationStatusAggregatedCount(), ASSOCIATIONSTATUSAGGREGATEDCOUNT_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def _id_handler(self, f):
"""
Given a Feature from self.iterator, figure out what the ID should be.
        This uses `self.id_spec` to identify the ID.
"""
# If id_spec is a string, convert to iterable for later
if isinstance(self.id_spec, six.string_types):
id_key = [self.id_spec]
elif hasattr(self.id_spec, '__call__'):
id_key = [self.id_spec]
# If dict, then assume it's a feature -> attribute mapping, e.g.,
# {'gene': 'gene_id'} for GTF
elif isinstance(self.id_spec, dict):
try:
id_key = self.id_spec[f.featuretype]
if isinstance(id_key, six.string_types):
id_key = [id_key]
# Otherwise, use default auto-increment.
except KeyError:
return self._increment_featuretype_autoid(f.featuretype)
# Otherwise assume it's an iterable.
else:
id_key = self.id_spec
# Then try them in order, returning the first one that works:
for k in id_key:
if hasattr(k, '__call__'):
_id = k(f)
if _id:
if _id.startswith('autoincrement:'):
return self._increment_featuretype_autoid(_id[14:])
return _id
else:
# use GFF fields rather than attributes for cases like :seqid:
# or :strand:
if (len(k) > 3) and (k[0] == ':') and (k[-1] == ':'):
# No [0] here -- only attributes key/vals are forced into
# lists, not standard GFF fields.
return getattr(f, k[1:-1])
else:
try:
return f.attributes[k][0]
except (KeyError, IndexError):
pass
# If we get here, then default autoincrement
return self._increment_featuretype_autoid(f.featuretype) |
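For orientation, a hedged summary (not part of the original docs) of the `id_spec` forms the branches above handle:

```python
# Hedged summary of id_spec forms handled by _id_handler:
#   "ID"                    -> return f.attributes["ID"][0]
#   {"gene": "gene_id"}     -> per-featuretype attribute lookup
#   lambda f: ...           -> callable that computes the ID directly
#   ":seqid:"               -> use the plain GFF field f.seqid
# Anything unresolvable falls back to a per-featuretype autoincrement ID.
```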
python | def _parse_return(cls, result):
"""Extract the result, return value and context from a result object
"""
return_value = None
success = result['result']
context = result['context']
if 'return_value' in result:
return_value = result['return_value']
return success, return_value, context |
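A minimal usage sketch (hedged: `cls` is unused by the body, so any placeholder works when calling the undecorated function):

```python
result = {'result': True, 'context': {'step': 3}, 'return_value': 42}
_parse_return(None, result)   # -> (True, 42, {'step': 3})
```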
java | public void marshall(Beard beard, ProtocolMarshaller protocolMarshaller) {
if (beard == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(beard.getValue(), VALUE_BINDING);
protocolMarshaller.marshall(beard.getConfidence(), CONFIDENCE_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def loadAddressbyID(self, id, callback=None, errback=None):
"""
Load an existing address by ID into a high level Address object
:param int id: id of an existing Address
"""
import ns1.ipam
address = ns1.ipam.Address(self.config, id=id)
return address.load(callback=callback, errback=errback) |
java | public static <R> MethodResult<R> success(R result) {
return new MethodResult<>(result, null, false);
} |
java | protected boolean exists(PointcutPatternRule pointcutPatternRule, String transletName,
String beanId, String className, String methodName) {
boolean matched = true;
if (transletName != null && pointcutPatternRule.getTransletNamePattern() != null) {
matched = patternMatches(pointcutPatternRule.getTransletNamePattern(), transletName,
ActivityContext.NAME_SEPARATOR_CHAR);
}
if (matched && beanId != null && pointcutPatternRule.getBeanIdPattern() != null) {
matched = patternMatches(pointcutPatternRule.getBeanIdPattern(), beanId,
ActivityContext.ID_SEPARATOR_CHAR);
}
if (matched && className != null && pointcutPatternRule.getClassNamePattern() != null) {
matched = patternMatches(pointcutPatternRule.getClassNamePattern(), className,
ActivityContext.ID_SEPARATOR_CHAR);
}
if (matched && methodName != null && pointcutPatternRule.getMethodNamePattern() != null) {
matched = patternMatches(pointcutPatternRule.getMethodNamePattern(), methodName);
}
return matched;
} |
java | @Override
public synchronized boolean removeHost(Host host, boolean refresh) {
HostConnectionPool<CL> pool = hosts.remove(host);
if (pool != null) {
topology.removePool(pool);
rebuildPartitions();
monitor.onHostRemoved(host);
pool.shutdown();
return true;
}
else {
return false;
}
} |
python | def create_normal_logq(self,z):
"""
Create logq components for mean-field normal family (the entropy estimate)
"""
means, scale = self.get_means_and_scales()
return ss.norm.logpdf(z,loc=means,scale=scale).sum() |
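A hedged illustration of the underlying scipy call: the log-density of a standard normal, summed over the sampled points `z`:

```python
import scipy.stats as ss
ss.norm.logpdf([0.0, 1.0], loc=0.0, scale=1.0).sum()   # ~ -2.3379
```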
python | def arg_comparitor(name):
"""
    :param name: argument name
    :return: pair containing (name, comparator)
    Given an argument name, munge it and return a proper comparator.
>>> arg_comparitor("a")
a, operator.eq
>>> arg_comparitor("a__in")
a, operator.contains
"""
if name.endswith("__in"):
return name[:-4], contains
elif name.endswith("__ge"):
return name[:-4], ge
elif name.endswith("__gt"):
return name[:-4], gt
elif name.endswith("__le"):
return name[:-4], le
elif name.endswith("__lt"):
return name[:-4], lt
if name.endswith("__eq"):
return name[:-4], eq
if name.endswith("__ne"):
return name[:-4], ne
else:
return name, eq |
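A short usage sketch (assuming `ge`, `eq`, etc. are the functions from the `operator` module, as the docstring suggests):

```python
field, op = arg_comparitor("age__ge")
field                    # 'age'
op(21, 18)               # True, i.e. operator.ge(21, 18)
arg_comparitor("name")   # ('name', operator.eq)
```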
java | public BaseTile getBaseTile (int tx, int ty)
{
BaseTile tile = _base[index(tx, ty)];
if (tile == null && _defset != null) {
tile = (BaseTile)_defset.getTile(
TileUtil.getTileHash(tx, ty) % _defset.getTileCount());
}
return tile;
} |
java | public static void serializeCopyableDataset(State state, CopyableDatasetMetadata copyableDataset) {
state.setProp(SERIALIZED_COPYABLE_DATASET, copyableDataset.serialize());
} |
java | public static boolean validateSignatureForPost(String body, Map<String, String> headers, String url, String apiKey) throws OneTouchException, UnsupportedEncodingException {
HashMap<String, String> params = new HashMap<>();
if (body == null || body.isEmpty())
throw new OneTouchException("'PARAMS' are missing.");
extract("", new JSONObject(body), params);
return validateSignature(params, headers, "POST", url, apiKey);
} |
java | public static int[] executeBatch(Connection conn, Iterable<String> sqls) throws SQLException {
Statement statement = null;
try {
statement = conn.createStatement();
for (String sql : sqls) {
statement.addBatch(sql);
}
return statement.executeBatch();
} finally {
DbUtil.close(statement);
}
} |
java | public Map<String, CmsModuleVersion> getInstalledModules() {
String file = CmsModuleConfiguration.DEFAULT_XML_FILE_NAME;
// /opencms/modules/module[?]
String basePath = new StringBuffer("/").append(CmsConfigurationManager.N_ROOT).append("/").append(
CmsModuleConfiguration.N_MODULES).append("/").append(CmsModuleXmlHandler.N_MODULE).append(
"[?]/").toString();
Map<String, CmsModuleVersion> modules = new HashMap<String, CmsModuleVersion>();
String name = "";
for (int i = 1; name != null; i++) {
if (i > 1) {
String ver = CmsModuleVersion.DEFAULT_VERSION;
try {
ver = getXmlHelper().getValue(
file,
CmsStringUtil.substitute(basePath, "?", "" + (i - 1)) + CmsModuleXmlHandler.N_VERSION);
} catch (@SuppressWarnings("unused") CmsXmlException e) {
// ignore
}
modules.put(name, new CmsModuleVersion(ver));
}
try {
name = getXmlHelper().getValue(
file,
CmsStringUtil.substitute(basePath, "?", "" + i) + CmsModuleXmlHandler.N_NAME);
} catch (@SuppressWarnings("unused") CmsXmlException e) {
// ignore
}
}
return modules;
} |
python | def main():
"""
    What will be executed when running as a stand-alone program.
"""
args = parse_args()
try:
s = pyhsm.base.YHSM(device=args.device, debug=args.debug)
get_entropy(s, args.iterations, args.ratio)
return 0
except pyhsm.exception.YHSM_Error as e:
sys.stderr.write("ERROR: %s" % (e.reason))
return 1 |
python | def _parse_names_set(feature_names):
"""Helping function of `_parse_feature_names` that parses a set of feature names."""
feature_collection = OrderedDict()
for feature_name in feature_names:
if isinstance(feature_name, str):
feature_collection[feature_name] = ...
else:
raise ValueError('Failed to parse {}, expected string'.format(feature_name))
return feature_collection |
java | public static double KullbackLeiblerDivergence(SparseArray x, double[] y) {
if (x.isEmpty()) {
throw new IllegalArgumentException("List x is empty.");
}
Iterator<SparseArray.Entry> iter = x.iterator();
boolean intersection = false;
double kl = 0.0;
while (iter.hasNext()) {
SparseArray.Entry b = iter.next();
int i = b.i;
if (y[i] > 0) {
intersection = true;
kl += b.x * Math.log(b.x / y[i]);
}
}
if (intersection) {
return kl;
} else {
return Double.POSITIVE_INFINITY;
}
} |
python | def generate_synonym(self, input_word):
""" Generate Synonym using a WordNet
synset.
"""
results = []
results.append(input_word)
synset = wordnet.synsets(input_word)
for i in synset:
index = 0
syn = i.name.split('.')
            if syn[index] != input_word:
name = syn[0]
results.append(PataLib().strip_underscore(name))
else:
index = index + 1
results = {'input' : input_word, 'results' : results, 'category' : 'synonym'}
return results |
java | @Override
public final void setReportNAN(Boolean value) {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "setReportNAN", value);
if (value != null) {
getApi().setField(JsApiAccess.REPORTNAN_VALUE, value);
}
else {
getApi().setChoiceField(JsApiAccess.REPORTNAN, JsApiAccess.IS_REPORTNAN_UNSET);
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(this, tc, "setReportNAN");
} |
java | private long extractCommittedSpHandle(ExportRow row, long committedSeqNo) {
long ret = 0;
if (committedSeqNo == ExportDataSource.NULL_COMMITTED_SEQNO) {
return ret;
}
        // Get the row's sequence number (3rd column)
long seqNo = (long) row.values[2];
if (seqNo != committedSeqNo) {
return ret;
}
        // Get the row's sp handle (1st column)
ret = (long) row.values[0];
return ret;
} |
java | protected String cleanDocumentationContent( String content ) {
Pattern REMOVE_WHITESPACE_AND_LINE_FEEDS_PATTERN = Pattern.compile("[\\n\\r\\s]+");
Matcher matcher = REMOVE_WHITESPACE_AND_LINE_FEEDS_PATTERN.matcher(content);
return matcher.replaceAll(" ");
} |
python | def setCurrentPage( self, pageno ):
"""
        Sets the current page for this widget to the given page.
:param pageno | <int>
"""
if ( pageno == self._currentPage ):
return
if ( pageno <= 0 ):
pageno = 1
self._currentPage = pageno
self._prevButton.setEnabled(pageno > 1)
self._nextButton.setEnabled(pageno < self.pageCount())
self._pagesSpinner.blockSignals(True)
self._pagesSpinner.setValue(pageno)
self._pagesSpinner.blockSignals(False)
if ( not self.signalsBlocked() ):
self.currentPageChanged.emit(pageno) |
java | protected void initializeDataExtends(Relation<NumberVector> relation, int dim, double[] min, double[] extend) {
assert (min.length == dim && extend.length == dim);
// if no parameter for min max compute min max values for each dimension
// from dataset
if(minima == null || maxima == null || minima.length == 0 || maxima.length == 0) {
double[][] minmax = RelationUtil.computeMinMax(relation);
final double[] dmin = minmax[0], dmax = minmax[1];
for(int d = 0; d < dim; d++) {
min[d] = dmin[d];
extend[d] = dmax[d] - dmin[d];
}
return;
}
if(minima.length == dim) {
System.arraycopy(minima, 0, min, 0, dim);
}
else if(minima.length == 1) {
Arrays.fill(min, minima[0]);
}
else {
throw new AbortException("Invalid minima specified: expected " + dim + " got minima dimensionality: " + minima.length);
}
if(maxima.length == dim) {
for(int d = 0; d < dim; d++) {
extend[d] = maxima[d] - min[d];
}
return;
}
else if(maxima.length == 1) {
for(int d = 0; d < dim; d++) {
extend[d] = maxima[0] - min[d];
}
return;
}
else {
throw new AbortException("Invalid maxima specified: expected " + dim + " got maxima dimensionality: " + maxima.length);
}
} |
python | def reward_battery(self):
"""
Add a battery level reward
"""
        if 'battery' not in self.mode:
return
mode = self.mode['battery']
        if mode and self.__test_cond(mode):
self.logger.debug('Battery out')
self.player.stats['reward'] += mode['reward']
self.player.game_over = self.player.game_over or mode['terminal'] |
java | @GwtIncompatible("Unnecessary")
private boolean shouldGenerateOutputPerModule(String output) {
return !config.module.isEmpty()
&& output != null && output.contains("%outname%");
} |
java | @RequestMapping(value = "download/{branchId}")
public ResponseEntity<String> download(@PathVariable ID branchId, String path) {
return gitService.download(structureService.getBranch(branchId), path)
.map(ResponseEntity::ok)
.orElseThrow(
() -> new SCMDocumentNotFoundException(path)
);
} |
java | @Override
public EClass getIfcCartesianTransformationOperator3DnonUniform() {
if (ifcCartesianTransformationOperator3DnonUniformEClass == null) {
ifcCartesianTransformationOperator3DnonUniformEClass = (EClass) EPackage.Registry.INSTANCE
.getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(85);
}
return ifcCartesianTransformationOperator3DnonUniformEClass;
} |
java | public static boolean loadLibrary(String shortName, int loadFlags) throws UnsatisfiedLinkError {
sSoSourcesLock.readLock().lock();
try {
if (sSoSources == null) {
// This should never happen during normal operation,
// but if we're running in a non-Android environment,
// fall back to System.loadLibrary.
if ("http://www.android.com/".equals(System.getProperty("java.vendor.url"))) {
// This will throw.
assertInitialized();
} else {
// Not on an Android system. Ask the JVM to load for us.
synchronized (SoLoader.class) {
boolean needsLoad = !sLoadedLibraries.contains(shortName);
if (needsLoad) {
if (sSystemLoadLibraryWrapper != null) {
sSystemLoadLibraryWrapper.loadLibrary(shortName);
} else {
System.loadLibrary(shortName);
}
}
return needsLoad;
}
}
}
} finally {
sSoSourcesLock.readLock().unlock();
}
String mergedLibName = MergedSoMapping.mapLibName(shortName);
String soName = mergedLibName != null ? mergedLibName : shortName;
return loadLibraryBySoName(
System.mapLibraryName(soName),
shortName,
mergedLibName,
loadFlags | SoSource.LOAD_FLAG_ALLOW_SOURCE_CHANGE,
null);
} |
python | def tile(ctx, point, zoom):
"""Print Tile containing POINT.."""
tile = TilePyramid(
ctx.obj['grid'],
tile_size=ctx.obj['tile_size'],
metatiling=ctx.obj['metatiling']
).tile_from_xy(*point, zoom=zoom)
if ctx.obj['output_format'] == 'Tile':
click.echo('%s %s %s' % tile.id)
elif ctx.obj['output_format'] == 'WKT':
click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']))
elif ctx.obj['output_format'] == 'GeoJSON':
click.echo(
geojson.dumps(
geojson.FeatureCollection([
geojson.Feature(
geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']),
properties=dict(
zoom=tile.zoom,
row=tile.row,
col=tile.col
)
)
])
)
) |
python | def track_name_event(self, name):
"""Return the bytes for a track name meta event."""
l = self.int_to_varbyte(len(name))
return '\x00' + META_EVENT + TRACK_NAME + l + name |
python | def add_vrf(self):
""" Add a new VRF to NIPAP and return its data.
"""
v = VRF()
if 'rt' in request.json:
v.rt = validate_string(request.json, 'rt')
if 'name' in request.json:
v.name = validate_string(request.json, 'name')
if 'description' in request.json:
v.description = validate_string(request.json, 'description')
if 'tags' in request.json:
v.tags = request.json['tags']
if 'avps' in request.json:
v.avps = request.json['avps']
try:
v.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(v, cls=NipapJSONEncoder) |
java | public boolean isInverse() {
Boolean value = (Boolean) getStateHelper().eval(PropertyKeys.inverse, false);
return (boolean) value;
} |
java | public String domain_confirmTermination_POST(String domain, String commentary, OvhTerminationReasonEnum reason, String token) throws IOException {
String qPath = "/email/domain/{domain}/confirmTermination";
StringBuilder sb = path(qPath, domain);
HashMap<String, Object>o = new HashMap<String, Object>();
addBody(o, "commentary", commentary);
addBody(o, "reason", reason);
addBody(o, "token", token);
String resp = exec(qPath, "POST", sb.toString(), o);
return convertTo(resp, String.class);
} |
python | def upload_async(data_service_auth_data, config, upload_id,
filename, index, num_chunks_to_send, progress_queue):
"""
Method run in another process called from ParallelChunkProcessor.make_and_start_process.
:param data_service_auth_data: tuple of auth data for rebuilding DataServiceAuth
:param config: dds.Config configuration settings to use during upload
:param upload_id: uuid unique id of the 'upload' we are uploading chunks into
    :param filename: str path to the file whose contents we will be uploading
    :param index: int offset into the file where we will start sending bytes from (must be multiplied by upload_bytes_per_chunk)
:param num_chunks_to_send: int number of chunks of config.upload_bytes_per_chunk size to send.
:param progress_queue: ProgressQueue queue to send notifications of progress or errors
"""
auth = DataServiceAuth(config)
auth.set_auth_data(data_service_auth_data)
data_service = DataServiceApi(auth, config.url)
sender = ChunkSender(data_service, upload_id, filename, config.upload_bytes_per_chunk, index, num_chunks_to_send,
progress_queue)
try:
sender.send()
except:
error_msg = "".join(traceback.format_exception(*sys.exc_info()))
progress_queue.error(error_msg) |
java | public static boolean pushImage(Launcher launcher, final JenkinsBuildInfoLog log, final String imageTag, final String username, final String password, final String host)
throws IOException, InterruptedException {
return launcher.getChannel().call(new MasterToSlaveCallable<Boolean, IOException>() {
public Boolean call() throws IOException {
String message = "Pushing image: " + imageTag;
if (StringUtils.isNotEmpty(host)) {
message += " using docker daemon host: " + host;
}
log.info(message);
DockerUtils.pushImage(imageTag, username, password, host);
return true;
}
});
} |
java | @Override
public <G, H> Choice6<A, B, C, D, G, H> biMap(Function<? super E, ? extends G> lFn,
Function<? super F, ? extends H> rFn) {
return match(Choice6::a, Choice6::b, Choice6::c, Choice6::d, e -> e(lFn.apply(e)), f -> f(rFn.apply(f)));
} |
java | @Nullable
private static DimFilter toExpressionLeafFilter(
final PlannerContext plannerContext,
final RowSignature rowSignature,
final RexNode rexNode
)
{
final DruidExpression druidExpression = toDruidExpression(plannerContext, rowSignature, rexNode);
return druidExpression != null
? new ExpressionDimFilter(druidExpression.getExpression(), plannerContext.getExprMacroTable())
: null;
} |
java | public void marshall(SubResourceSummary subResourceSummary, ProtocolMarshaller protocolMarshaller) {
if (subResourceSummary == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(subResourceSummary.getType(), TYPE_BINDING);
protocolMarshaller.marshall(subResourceSummary.getId(), ID_BINDING);
protocolMarshaller.marshall(subResourceSummary.getAttackVectors(), ATTACKVECTORS_BINDING);
protocolMarshaller.marshall(subResourceSummary.getCounters(), COUNTERS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public static String serializeFromField(final Object containingObject, final String fieldName,
final int indentWidth, final boolean onlySerializePublicFields, final ClassFieldCache classFieldCache) {
final FieldTypeInfo fieldResolvedTypeInfo = classFieldCache
.get(containingObject.getClass()).fieldNameToFieldTypeInfo.get(fieldName);
if (fieldResolvedTypeInfo == null) {
throw new IllegalArgumentException("Class " + containingObject.getClass().getName()
+ " does not have a field named \"" + fieldName + "\"");
}
final Field field = fieldResolvedTypeInfo.field;
if (!JSONUtils.fieldIsSerializable(field, /* onlySerializePublicFields = */ false)) {
throw new IllegalArgumentException("Field " + containingObject.getClass().getName() + "." + fieldName
+ " needs to be accessible, non-transient, and non-final");
}
Object fieldValue;
try {
fieldValue = JSONUtils.getFieldValue(containingObject, field);
} catch (final IllegalAccessException e) {
throw new IllegalArgumentException("Could get value of field " + fieldName, e);
}
return serializeObject(fieldValue, indentWidth, onlySerializePublicFields, classFieldCache);
} |
java | @NonNull public <T> T peek(int index) {
//noinspection unchecked
return (T) history.get(history.size() - index - 1);
} |
python | def replace(self, replacements):
"""
Replace variables with other variables.
:param dict replacements: A dict of variable replacements.
:return: self
"""
for old_var, new_var in replacements.items():
old_var_id = id(old_var)
if old_var_id in self._object_mapping:
# FIXME: we need to check if old_var still exists in the storage
old_so = self._object_mapping[old_var_id] # type: StoredObject
self._store(old_so.start, new_var, old_so.size, overwrite=True)
return self |
java | public static int searchLast(short[] shortArray, short value, int occurrence) {
if(occurrence <= 0 || occurrence > shortArray.length) {
throw new IllegalArgumentException("Occurrence must be greater or equal to 1 and less than "
+ "the array length: " + occurrence);
}
int valuesSeen = 0;
for(int i = shortArray.length-1; i >=0; i--) {
if(shortArray[i] == value) {
valuesSeen++;
if(valuesSeen == occurrence) {
return i;
}
}
}
return -1;
} |
java | private static void addOne2InverseMap(DisconfKey disconfKey, Map<DisconfKey, List<IDisconfUpdate>> inverseMap,
IDisconfUpdate iDisconfUpdate) {
        // Keys in the ignore set should be skipped
if (DisClientConfig.getInstance().getIgnoreDisconfKeySet().contains(disconfKey.getKey())) {
return;
}
List<IDisconfUpdate> serviceList;
if (inverseMap.containsKey(disconfKey)) {
inverseMap.get(disconfKey).add(iDisconfUpdate);
} else {
serviceList = new ArrayList<IDisconfUpdate>();
serviceList.add(iDisconfUpdate);
inverseMap.put(disconfKey, serviceList);
}
} |
python | def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render() |
java | public void notEnabled(double seconds) {
double end = System.currentTimeMillis() + (seconds * 1000);
try {
double timeTook = elementPresent(seconds);
WebDriverWait wait = new WebDriverWait(element.getDriver(), (long) (seconds - timeTook), DEFAULT_POLLING_INTERVAL);
wait.until(ExpectedConditions.not(ExpectedConditions.elementToBeClickable(element.defineByElement())));
timeTook = Math.min((seconds * 1000) - (end - System.currentTimeMillis()), seconds * 1000) / 1000;
checkNotEnabled(seconds, timeTook);
} catch (TimeoutException e) {
checkNotEnabled(seconds, seconds);
}
} |
java | public void addPenalizingValidation(Object key, PenalizingValidation penalizingValidation){
initMapOnce();
penalties.put(key, penalizingValidation);
// update penalized value
if(!penalizingValidation.passed()){
assignedPenalties = true;
double p = penalizingValidation.getPenalty();
penalizedValue += minimizing ? p : -p;
}
} |
python | def defUtilityFuncs(self):
'''
Defines CRRA utility function for this period (and its derivatives),
saving them as attributes of self for other methods to use.
Parameters
----------
none
Returns
-------
none
'''
self.u = lambda c : utility(c,gam=self.CRRA) # utility function
self.uP = lambda c : utilityP(c,gam=self.CRRA) # marginal utility function
self.uPP = lambda c : utilityPP(c,gam=self.CRRA) |
python | def handle_log_entry(self, m):
        '''Handle an incoming log entry.'''
if m.time_utc == 0:
tstring = ''
else:
tstring = time.ctime(m.time_utc)
self.entries[m.id] = m
print("Log %u numLogs %u lastLog %u size %u %s" % (m.id, m.num_logs, m.last_log_num, m.size, tstring)) |
java | @Override
public CompletableFuture<ExecutionInfo> executeAsyncWithStats() {
final StatementWrapper statementWrapper = new NativeStatementWrapper(getOperationType(boundStatement), meta, boundStatement, encodedBoundValues);
final String queryString = statementWrapper.getBoundStatement().preparedStatement().getQueryString();
if (LOGGER.isTraceEnabled()) {
LOGGER.trace(format("Execute native query async with execution info : %s", queryString));
}
CompletableFuture<ResultSet> cfutureRS = rte.execute(statementWrapper);
return cfutureRS
.thenApply(options::resultSetAsyncListener)
.thenApply(x -> statementWrapper.logReturnResults(x, options.computeMaxDisplayedResults(rte.configContext)))
.thenApply(statementWrapper::logTrace)
.thenApply(x -> LWTHelper.triggerLWTListeners(lwtResultListeners, x, queryString))
.thenApply(x -> x.getExecutionInfo());
} |
java | public Object lookup(String name) {
Object result = null;
for (Registry reg: registryList){
result = reg.lookup(name);
if (result != null){
break;
}
}
return result;
} |
python | def get_linkrolls(num='All', destination_slug=None, *args, **kwargs):
"""
Takes an optional number and destination slug and returns a list of LinkRolls.
    Given a number, limit the list to that many items.
Given a destination slug, limit linkrolls to the matching destination.
Usage:
{% get_linkrolls %}
{% get_linkrolls 5 %}
{% get_linkrolls 5 some_slug %}
"""
if destination_slug:
linkrolls = LinkRoll.objects.filter(destination__slug=destination_slug)
else:
linkrolls = LinkRoll.objects.all()
    if num != 'All':
linkrolls = linkrolls[0:num]
return {
'object_list': linkrolls
} |
java | public static void initialize(MapWidget mapWidget, SearchHandler searchResultGrid, boolean modalSearch) {
SearchWidgetRegistry.mapWidget = mapWidget;
searchController = new SearchController(mapWidget, modalSearch);
favouritesController = new FavouritesController();
if (searchResultGrid != null) {
searchController.addSearchHandler(searchResultGrid);
}
} |
python | def sync(self, owner, id, **kwargs):
"""
Sync files
        Update all files within a dataset that have originally been added via URL (e.g. via /datasets endpoints or on data.world). Check out our tutorials for tips on how to add Google Sheets, GitHub and S3 files via URL and how to use webhooks or scripts to keep them always in sync.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.sync(owner, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.sync_with_http_info(owner, id, **kwargs)
else:
(data) = self.sync_with_http_info(owner, id, **kwargs)
return data |
python | def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
set_bitcoin_opts( new_bitcoind_opts )
bitcoin_opts = get_bitcoin_opts()
new_bitcoind = None
try:
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
except KeyError, ke:
log.exception(ke)
log.error("Invalid configuration: %s" % bitcoin_opts)
return None
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None |
python | def delete_source(ident):
'''Delete an harvest source'''
source = get_source(ident)
source.deleted = datetime.now()
source.save()
signals.harvest_source_deleted.send(source)
return source |
python | def new(self, platform_id):
# type: (int) -> None
'''
A method to create a new El Torito Validation Entry.
Parameters:
platform_id - The platform ID to set for this validation entry.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Validation Entry already initialized')
self.platform_id = platform_id
self.id_string = b'\x00' * 24 # FIXME: let the user set this
self.checksum = 0
self.checksum = utils.swab_16bit(self._checksum(self._record()) - 1)
self._initialized = True |
python | def export(cls, folder, particles, datetimes):
"""
Export trackline data to GeoJSON file
"""
normalized_locations = [particle.normalized_locations(datetimes) for particle in particles]
track_coords = []
for x in xrange(0, len(datetimes)):
points = MultiPoint([loc[x].point.coords[0] for loc in normalized_locations])
track_coords.append(points.centroid.coords[0])
ls = LineString(track_coords)
if not os.path.exists(folder):
os.makedirs(folder)
filepath = os.path.join(folder, "trackline.geojson")
f = open(filepath, "wb")
f.write(json.dumps(mapping(ls)))
f.close()
return filepath |
java | public final void mINSERT_REALTIME() throws RecognitionException {
try {
int _type = INSERT_REALTIME;
int _channel = DEFAULT_TOKEN_CHANNEL;
// druidG.g:583:17: ( ( 'INSERT_REALTIME' | 'insert_realtime' ) )
// druidG.g:583:18: ( 'INSERT_REALTIME' | 'insert_realtime' )
{
// druidG.g:583:18: ( 'INSERT_REALTIME' | 'insert_realtime' )
int alt3=2;
int LA3_0 = input.LA(1);
if ( (LA3_0=='I') ) {
alt3=1;
}
else if ( (LA3_0=='i') ) {
alt3=2;
}
else {
NoViableAltException nvae =
new NoViableAltException("", 3, 0, input);
throw nvae;
}
switch (alt3) {
case 1 :
// druidG.g:583:19: 'INSERT_REALTIME'
{
match("INSERT_REALTIME");
}
break;
case 2 :
// druidG.g:583:37: 'insert_realtime'
{
match("insert_realtime");
}
break;
}
}
state.type = _type;
state.channel = _channel;
}
finally {
// do for sure before leaving
}
} |
java | @Override
public List<CPInstance> findByCPDefinitionId(long CPDefinitionId,
int start, int end) {
return findByCPDefinitionId(CPDefinitionId, start, end, null);
} |
java | @RequestMapping(value = "changeLog/fileFilter/{projectId}/create", method = RequestMethod.POST)
public Resource<SCMFileChangeFilter> createChangeLogFileFilter(@PathVariable ID projectId, @RequestBody SCMFileChangeFilter filter) {
securityService.checkProjectFunction(projectId.get(), ProjectConfig.class);
return securityService.asAdmin(() -> {
// Loads the project
Project project = structureService.getProject(projectId);
// Gets the store
SCMFileChangeFilters config = entityDataService.retrieve(
project,
SCMFileChangeFilters.class.getName(),
SCMFileChangeFilters.class
);
if (config == null) config = SCMFileChangeFilters.create();
// Updates the store
config = config.save(filter);
// Saves the store back
entityDataService.store(project, SCMFileChangeFilters.class.getName(), config);
// OK
return getChangeLogFileFilter(projectId, filter.getName());
});
} |
java | public long getDuration(TimeUnit targetUnit) {
if (isRunning()) {
throw new IllegalStateException("The clock is not yet stopped.");
}
return targetUnit.convert(durationNanos, TimeUnit.NANOSECONDS);
} |
python | def fn_mean(self, a, axis=None):
"""
Compute the arithmetic mean of an array, ignoring NaNs.
        :param a: The array.
        :param axis: Axis or axes along which the mean is computed; default flattens the array.
        :return: The arithmetic mean of the array.
"""
return numpy.nanmean(self._to_ndarray(a), axis=axis) |
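A hedged illustration of the wrapped `numpy.nanmean` behavior:

```python
import numpy
numpy.nanmean([1.0, numpy.nan, 3.0])                     # 2.0 -- NaNs are ignored
numpy.nanmean([[1.0, numpy.nan], [3.0, 4.0]], axis=0)    # array([2., 4.])
```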
java | public void saveOldProperty(Map<String,String> oldProperties, String param)
{
oldProperties.put(param, this.getTask().getProperty(param));
} |
java | @SuppressWarnings("unchecked")
public static Expression<String> likeToRegex(Expression<String> expr, boolean matchStartAndEnd) {
// TODO : this should take the escape character into account
if (expr instanceof Constant<?>) {
final String like = expr.toString();
final StringBuilder rv = new StringBuilder(like.length() + 4);
if (matchStartAndEnd && !like.startsWith("%")) {
rv.append('^');
}
for (int i = 0; i < like.length(); i++) {
char ch = like.charAt(i);
if (ch == '.' || ch == '*' || ch == '?') {
rv.append('\\');
} else if (ch == '%') {
rv.append(".*");
continue;
} else if (ch == '_') {
rv.append('.');
continue;
}
rv.append(ch);
}
if (matchStartAndEnd && !like.endsWith("%")) {
rv.append('$');
}
if (!like.equals(rv.toString())) {
return ConstantImpl.create(rv.toString());
}
} else if (expr instanceof Operation<?>) {
Operation<?> o = (Operation<?>) expr;
if (o.getOperator() == Ops.CONCAT) {
Expression<String> lhs = likeToRegex((Expression<String>) o.getArg(0), false);
Expression<String> rhs = likeToRegex((Expression<String>) o.getArg(1), false);
if (lhs != o.getArg(0) || rhs != o.getArg(1)) {
return operation(String.class, Ops.CONCAT, lhs, rhs);
}
}
}
return expr;
} |
java | @Override
protected void onDraw(Canvas canvas) {
super.onDraw(canvas);
int contentWidth = mWidth - paddingLeft - paddingRight;
int contentHeight = mHeight - paddingTop - paddingBottom;
mCirclePaint.setStyle(Paint.Style.FILL);
mCirclePaint.setColor(Color.RED);
// Draw CircularViewObject
mCircle.draw(canvas);
// Draw non-highlighted Markers
if (mMarkerList != null && !mMarkerList.isEmpty()) {
for (final Marker marker : mMarkerList) {
if (!mDrawHighlightedMarkerOnTop || !marker.equals(mHighlightedMarker)) {
marker.draw(canvas);
}
}
}
// Draw highlighted marker
if (mDrawHighlightedMarkerOnTop && mHighlightedMarker != null) {
mHighlightedMarker.draw(canvas);
}
// Draw line
if (mIsAnimating) {
final float radiusFromCenter = mCircle.getRadius() + CIRCLE_TO_MARKER_PADDING + BASE_MARKER_RADIUS;
final float x = (float) Math.cos(Math.toRadians(mHighlightedDegree)) * radiusFromCenter + mCircle.getX();
final float y = (float) Math.sin(Math.toRadians(mHighlightedDegree)) * radiusFromCenter + mCircle.getY();
canvas.drawLine(mCircle.getX(), mCircle.getY(), x, y, mCirclePaint);
}
// Draw the text.
if (!TextUtils.isEmpty(mText)) {
canvas.drawText(mText,
mCircle.getX() - mTextWidth / 2f,
mCircle.getY() - mTextHeight / 2f,
// paddingLeft + (contentWidth - mTextWidth) / 2,
// paddingTop + (contentHeight + mTextHeight) / 2,
mTextPaint);
}
} |
python | def maf_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
num_permutations=10000,
drop_silent=False):
"""Performs null-permutations across all genes and records the results in
a format like a MAF file. This could be useful for examining the null
permutations because the alternative approaches always summarize the results.
With the simulated null-permutations, novel metrics can be applied to create
an empirical null-distribution.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
        Flag for whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
maf_list : list of tuples
list of null mutations with mutation info in a MAF like format
"""
mycontexts = context_counts.index.tolist()
somatic_base, base_context = zip(*[(base, one_context)
for one_context in mycontexts
for base in context_to_mut[one_context]])
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
    tmp_mut_pos = np.hstack([pos_array for base, pos_array in tmp_contxt_pos])
# info about gene
gene_name = gene_seq.bed.gene_name
strand = gene_seq.bed.strand
chrom = gene_seq.bed.chrom
gene_seq.bed.init_genome_coordinates() # map seq pos to genome
# determine result of random positions
maf_list = []
for row in tmp_mut_pos:
# get genome coordinate
pos2genome = np.vectorize(lambda x: gene_seq.bed.seqpos2genome[x]+1)
genome_coord = pos2genome(row)
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# get string describing variant
var_class = cutils.get_variant_classification(tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
tmp_mut_info['Codon Pos'])
# prepare output
for k, mysomatic_base in enumerate(somatic_base):
# format DNA change
ref_nuc = tmp_mut_info['Reference Nuc'][k]
nuc_pos = row[k]
dna_change = 'c.{0}{1}>{2}'.format(ref_nuc, nuc_pos, mysomatic_base)
# format protein change
ref_aa = tmp_mut_info['Reference AA'][k]
somatic_aa = tmp_mut_info['Somatic AA'][k]
codon_pos = tmp_mut_info['Codon Pos'][k]
protein_change = 'p.{0}{1}{2}'.format(ref_aa, codon_pos, somatic_aa)
# reverse complement if on negative strand
if strand == '-':
ref_nuc = utils.rev_comp(ref_nuc)
mysomatic_base = utils.rev_comp(mysomatic_base)
# append results
if drop_silent and var_class[k].decode() == 'Silent': continue
maf_line = [gene_name, strand, chrom, genome_coord[k], genome_coord[k],
ref_nuc, mysomatic_base, base_context[k], dna_change,
protein_change, var_class[k].decode()]
maf_list.append(maf_line)
return maf_list |
python | def recall(y, y_pred):
"""Recall score
recall = true_positives / (true_positives + false_negatives)
Parameters:
-----------
y : vector, shape (n_samples,)
The target labels.
y_pred : vector, shape (n_samples,)
The predicted labels.
Returns:
--------
recall : float
"""
tp = true_positives(y, y_pred)
fn = false_negatives(y, y_pred)
return tp / (tp + fn) |
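A worked example (hedged: assumes the helper functions count matching labels in the usual way):

```python
y      = [1, 1, 1, 0, 0]
y_pred = [1, 0, 1, 0, 1]
# true positives = 2, false negatives = 1
# recall = 2 / (2 + 1) = 0.667
```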
python | def _accumulate(iterable, func=(lambda a,b:a+b)): # this was from the itertools documentation
'Return running totals'
# accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = func(total, element)
yield total |
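Usage, taken directly from the comments in the function:

```python
import operator
list(_accumulate([1, 2, 3, 4, 5]))                  # [1, 3, 6, 10, 15]
list(_accumulate([1, 2, 3, 4, 5], operator.mul))    # [1, 2, 6, 24, 120]
```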
python | def optimize_layout(
head_embedding,
tail_embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
    epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
for n in range(n_epochs):
for i in range(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = -2.0 * a * b * pow(dist_squared, b - 1.0)
grad_coeff /= a * pow(dist_squared, b) + 1.0
else:
grad_coeff = 0.0
for d in range(dim):
grad_d = clip(grad_coeff * (current[d] - other[d]))
current[d] += grad_d * alpha
if move_other:
other[d] += -grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i])
/ epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = 2.0 * gamma * b
grad_coeff /= (0.001 + dist_squared) * (
a * pow(dist_squared, b) + 1
)
elif j == k:
continue
else:
grad_coeff = 0.0
for d in range(dim):
if grad_coeff > 0.0:
grad_d = clip(grad_coeff * (current[d] - other[d]))
else:
grad_d = 4.0
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % int(n_epochs / 10) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding |
java | public static <T> boolean arrayContainsRef(T[] array, T value) {
for (int i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
} |
python | def str2dict(strdict, required_keys=None, optional_keys=None):
"""Convert key1=value1,key2=value2,... string into dictionary.
:param strdict: string in the form of key1=value1,key2=value2
:param required_keys: list of required keys. All keys in this list must be
specified. Otherwise ArgumentTypeError will be raised.
If this parameter is unspecified, no required key check
will be done.
:param optional_keys: list of optional keys.
This parameter is used for valid key check.
                          When at least one of required_keys and optional_keys is given,
a key must be a member of either of required_keys or
optional_keys. Otherwise, ArgumentTypeError will be
raised. When both required_keys and optional_keys are
unspecified, no valid key check will be done.
"""
result = {}
if strdict:
for kv in strdict.split(','):
key, sep, value = kv.partition('=')
if not sep:
msg = _("invalid key-value '%s', expected format: key=value")
raise argparse.ArgumentTypeError(msg % kv)
result[key] = value
valid_keys = set(required_keys or []) | set(optional_keys or [])
if valid_keys:
invalid_keys = [k for k in result if k not in valid_keys]
if invalid_keys:
msg = _("Invalid key(s) '%(invalid_keys)s' specified. "
"Valid key(s): '%(valid_keys)s'.")
raise argparse.ArgumentTypeError(
msg % {'invalid_keys': ', '.join(sorted(invalid_keys)),
'valid_keys': ', '.join(sorted(valid_keys))})
if required_keys:
not_found_keys = [k for k in required_keys if k not in result]
if not_found_keys:
msg = _("Required key(s) '%s' not specified.")
raise argparse.ArgumentTypeError(msg % ', '.join(not_found_keys))
return result |
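A short usage sketch of `str2dict`:

```python
str2dict("key1=value1,key2=value2")
# {'key1': 'value1', 'key2': 'value2'}
str2dict("a=1", required_keys=["a", "b"])
# raises argparse.ArgumentTypeError: Required key(s) 'b' not specified.
```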
java | @Override
List<XMLFilter> getProcessingPipe(final URI fileToParse) {
final List<XMLFilter> pipe = new ArrayList<>();
if (genDebugInfo) {
final DebugFilter debugFilter = new DebugFilter();
debugFilter.setLogger(logger);
debugFilter.setCurrentFile(currentFile);
pipe.add(debugFilter);
}
if (filterUtils != null) {
final ProfilingFilter profilingFilter = new ProfilingFilter();
profilingFilter.setLogger(logger);
profilingFilter.setJob(job);
profilingFilter.setFilterUtils(filterUtils);
profilingFilter.setCurrentFile(fileToParse);
pipe.add(profilingFilter);
}
final ValidationFilter validationFilter = new ValidationFilter();
validationFilter.setLogger(logger);
validationFilter.setValidateMap(validateMap);
validationFilter.setCurrentFile(fileToParse);
validationFilter.setJob(job);
validationFilter.setProcessingMode(processingMode);
pipe.add(validationFilter);
final NormalizeFilter normalizeFilter = new NormalizeFilter();
normalizeFilter.setLogger(logger);
pipe.add(normalizeFilter);
pipe.add(topicFragmentFilter);
pipe.addAll(super.getProcessingPipe(fileToParse));
// linkRewriteFilter.setCurrentFile(currentFile);
// pipe.add(linkRewriteFilter);
ditaWriterFilter.setDefaultValueMap(defaultValueMap);
ditaWriterFilter.setCurrentFile(currentFile);
ditaWriterFilter.setOutputFile(outputFile);
pipe.add(ditaWriterFilter);
return pipe;
} |
python | def verify_sms_code(phone_number, code):
"""
    After obtaining the SMS verification code, check that the code is correct. Raises an exception if verification fails.
    :param phone_number: the phone number being verified
    :param code: the verification code that was received
    :return: True if verification succeeds
"""
params = {
'mobilePhoneNumber': phone_number,
}
leancloud.client.post('/verifySmsCode/{0}'.format(code), params=params)
return True |
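A hedged usage sketch (the leancloud client must be initialized beforehand; the phone number is a made-up placeholder):

```python
verify_sms_code('18612345678', '123456')   # True, or raises on failure
```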
python | def fixup_building_sdist():
''' Check for 'sdist' and ensure we always build BokehJS when packaging
Source distributions do not ship with BokehJS source code, but must ship
with a pre-built BokehJS library. This function modifies ``sys.argv`` as
    necessary so that ``--build-js`` IS present, and ``--install-js`` is NOT.
Returns:
None
'''
if "sdist" in sys.argv:
if "--install-js" in sys.argv:
print("Removing '--install-js' incompatible with 'sdist'")
sys.argv.remove('--install-js')
if "--build-js" not in sys.argv:
print("Adding '--build-js' required for 'sdist'")
sys.argv.append('--build-js') |
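A hedged before/after sketch of the argv rewrite this function performs:

```python
# sys.argv == ['setup.py', 'sdist', '--install-js']
# fixup_building_sdist()
# sys.argv == ['setup.py', 'sdist', '--build-js']
```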
python | def initialize(self):
""" A reimplemented initializer.
This method will add the include objects to the parent of the
include and ensure that they are initialized.
"""
super(Block, self).initialize()
block = self.block
if block: #: This block is setting the content of another block
#: Remove the existing blocks children
if self.mode == 'replace':
#: Clear the blocks children
for c in block.children:
c.destroy()
#: Add this blocks children to the other block
block.insert_children(None, self.children)
        else: #: This block is inserting its children into its parent
self.parent.insert_children(self, self.children) |
java | public void marshall(DeleteFacetRequest deleteFacetRequest, ProtocolMarshaller protocolMarshaller) {
if (deleteFacetRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(deleteFacetRequest.getSchemaArn(), SCHEMAARN_BINDING);
protocolMarshaller.marshall(deleteFacetRequest.getName(), NAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public void setExcludedMembers(java.util.Collection<String> excludedMembers) {
if (excludedMembers == null) {
this.excludedMembers = null;
return;
}
this.excludedMembers = new com.amazonaws.internal.SdkInternalList<String>(excludedMembers);
} |
java | @Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case BpsimPackage.PRIORITY_PARAMETERS__INTERRUPTIBLE:
return interruptible != null;
case BpsimPackage.PRIORITY_PARAMETERS__PRIORITY:
return priority != null;
}
return super.eIsSet(featureID);
} |
python | def crossMatchTo(self, reference, radius=1*u.arcsec, visualize=False):
'''
Cross-match this catalog onto another reference catalog.
If proper motions are included in the reference, then
its coordinates will be propagated to the obstime/epoch
of this current catalog.
Parameters
----------
reference : Constellation
A reference Constellation to which we want to
cross-match the stars in this catalog. Most likely,
you'll want to use Gaia for this (since it has
good astrometry and good proper motions).
radius : float, with astropy units of angle
            How close do objects need to be in the cross-match
for them to be considered a matched pair?
Returns
-------
i_this : array of indices
The elements of this catalog that are matched.
i_ref : array of indices
The elements of the reference catalog, corresponding to
'''
# find the closest match for each of star in this constellation
i_ref, d2d_ref, d3d_ref = self.coordinates.match_to_catalog_sky(reference.atEpoch(self.coordinates.obstime))
# extract only those within the specified radius
ok = d2d_ref < radius
self.speak('found {} matches within {}'.format(np.sum(ok), radius))
# make a plot, if desired
if visualize:
self.speak('p')
plt.hist(d2d_ref.arcsec, range=(0,15))
plt.axvline(radius.arcsec)
plt.xlabel('Separation (arcsec)')
plt.ylabel('Number of Matched Sources')
# return the indices (of this, and of the reference) for the matches
return ok, i_ref[ok] |
python | def check(self, diff):
r"""Check that the new file introduced has a valid name
The module can either be an __init__.py file or must
match ``feature_[a-zA-Z0-9_]+\.\w+``.
"""
filename = pathlib.Path(diff.b_path).parts[-1]
is_valid_feature_module_name = re_test(
FEATURE_MODULE_NAME_REGEX, filename)
is_valid_init_module_name = filename == '__init__.py'
assert is_valid_feature_module_name or is_valid_init_module_name |
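A hedged sketch of the check (assuming FEATURE_MODULE_NAME_REGEX is the pattern quoted in the docstring and `re_test` wraps `re.match`):

```python
import re
FEATURE_MODULE_NAME_REGEX = r'feature_[a-zA-Z0-9_]+\.\w+'
bool(re.match(FEATURE_MODULE_NAME_REGEX, 'feature_age_stats.py'))   # True
bool(re.match(FEATURE_MODULE_NAME_REGEX, 'age_stats.py'))           # False
```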
java | public byte[] set(byte[] key,byte[] value){
Jedis jedis = jedisPool.getResource();
try{
jedis.set(key,value);
if(this.expire != 0){
jedis.expire(key, this.expire);
}
}finally{
jedisPool.returnResource(jedis);
}
return value;
} |
python | def parse(self):
"""Main entrypoint into the parser. It interprets and creates all the
relevant Lutron objects and stuffs them into the appropriate hierarchy."""
import xml.etree.ElementTree as ET
root = ET.fromstring(self._xml_db_str)
# The structure is something like this:
# <Areas>
# <Area ...>
# <DeviceGroups ...>
# <Scenes ...>
# <ShadeGroups ...>
# <Outputs ...>
# <Areas ...>
# <Area ...>
# First area is useless, it's the top-level project area that defines the
# "house". It contains the real nested Areas tree, which is the one we want.
top_area = root.find('Areas').find('Area')
self.project_name = top_area.get('Name')
areas = top_area.find('Areas')
for area_xml in areas.getiterator('Area'):
area = self._parse_area(area_xml)
self.areas.append(area)
return True |
python | def get_vmpolicy_macaddr_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
output = ET.SubElement(get_vmpolicy_macaddr, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
java | private static String getResourceBase() {
String resourceBase = setting.getStr("webRoot");
if (StrUtil.isEmpty(resourceBase)) {
resourceBase = "./src/main/webapp";// 用于Maven测试
File file = new File(resourceBase);
if (false == file.exists() || false == file.isDirectory()) {
resourceBase = ".";
}
}
log.debug("Jetty resource base: [{}]", resourceBase);
        return resourceBase;// current directory, used in deployment
} |
java | public boolean goInto(ElementContext rootContext, String... innerElements) throws IOException, XMLException {
ElementContext parent = rootContext;
for (int i = 0; i < innerElements.length; ++i) {
if (!nextInnerElement(parent, innerElements[i]))
return false;
parent = event.context.getFirst();
}
return true;
} |
python | def add_node(self, node):
"""Add a node and connect it to the center."""
nodes = self.nodes()
if len(nodes) > 1:
first_node = min(nodes, key=attrgetter("creation_time"))
first_node.connect(direction="both", whom=node) |
python | def available_string(self, episode):
"""Return a string of available episodes."""
available = [ep for ep in self if ep > episode]
string = ','.join(str(ep) for ep in available[:self.EPISODES_TO_SHOW])
if len(available) > self.EPISODES_TO_SHOW:
string += '...'
return string |
java | protected void disposeCacheValue(T value)
{
if (value instanceof DisposableCacheValue) {
try {
((DisposableCacheValue) value).dispose();
} catch (Throwable e) {
// We catch Throwable because this method is usually automatically called by an event send by the cache
// implementation and there is no reason to crash the whole cache because of some badly implemented
// dispose() we don't control.
LOGGER.warn("Error when trying to dispose a cache object of cache [{}]",
this.configuration != null ? this.configuration.getConfigurationId() : null, e);
}
}
} |
java | public final void synpred6_InternalSARL_fragment() throws RecognitionException {
// InternalSARL.g:7821:4: ( ( () 'synchronized' '(' ) )
// InternalSARL.g:7821:5: ( () 'synchronized' '(' )
{
// InternalSARL.g:7821:5: ( () 'synchronized' '(' )
// InternalSARL.g:7822:5: () 'synchronized' '('
{
// InternalSARL.g:7822:5: ()
// InternalSARL.g:7823:5:
{
}
match(input,88,FOLLOW_80); if (state.failed) return ;
match(input,49,FOLLOW_2); if (state.failed) return ;
}
}
} |
python | def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have convertible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other)
) |
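A hedged sketch of the failure mode (assuming a TensorShape can be built from a list of dimensions):

```python
a = TensorShape([2, 3])
b = TensorShape([2, 3, 4])
a.assert_same_rank(b)
# ValueError: Shapes (2, 3) and (2, 3, 4) must have the same rank
```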
java | public static <T extends Comparable<T>> RelationalOperator<T> lessThanEqualToOrGreaterThan(T lowerBound, T upperBound) {
return ComposableRelationalOperator.compose(lessThanEqualTo(lowerBound), LogicalOperator.OR, greaterThan(upperBound));
} |