language (string, 2 classes) | func_code_string (string, 63 to 466k chars)
---|---|
python | def get_title(brain_or_object):
"""Get the Title for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Title
:rtype: string
"""
if is_brain(brain_or_object) and base_hasattr(brain_or_object, "Title"):
return brain_or_object.Title
return get_object(brain_or_object).Title() |
java | int splitSegment(int origin_vertex, double[] split_scalars, int split_count) {
int actual_splits = 0;
int next_vertex = getNextVertex(origin_vertex);
if (next_vertex == -1)
throw GeometryException.GeometryInternalError();
int vindex = getVertexIndex(origin_vertex);
int vindex_next = getVertexIndex(next_vertex);
Segment seg = getSegmentFromIndex_(vindex);
double seg_length = seg == null ? m_vertices._getShortestDistance(
vindex, vindex_next) : seg.calculateLength2D();
double told = 0.0;
for (int i = 0; i < split_count; i++) {
double t = split_scalars[i];
if (told < t && t < 1.0) {
double f = t;
if (seg != null) {
f = seg_length > 0 ? seg._calculateSubLength(t)
/ seg_length : 0.0;
}
m_vertices._interpolateTwoVertices(vindex, vindex_next, f,
getHelperPoint_()); // use this call mainly to interpolate the attributes.
// XYs are interpolated incorrectly for curves and are recalculated when the
// segment is cut below.
int inserted_vertex = insertVertex_(
getPathFromVertex(origin_vertex), next_vertex,
getHelperPoint_());
actual_splits++;
if (seg != null) {
Segment subseg = seg.cut(told, t);
int prev_vertex = getPrevVertex(inserted_vertex);
int vindex_prev = getVertexIndex(prev_vertex);
setSegmentToIndex_(vindex_prev, subseg);
setXY(inserted_vertex, subseg.getEndXY()); // fix XY coordinates to be
// parameter based (interpolate_two_vertices_)
if (i == split_count - 1 || split_scalars[i + 1] == 1.0) {
// last chance to set the last split segment here:
Segment subseg_end = seg.cut(t, 1.0);
setSegmentToIndex_(vindex_prev, subseg_end);
}
}
}
}
return actual_splits;
} |
java | @Override
public boolean satisfies(Match match, int... ind)
{
int x = -1;
for (MappedConst mc : con)
{
if (mc.satisfies(match, ind)) x *= -1;
}
return x == 1;
} |
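The sign flip above makes this a parity test: the method returns true exactly when an odd number of the mapped constraints are satisfied (XOR in the two-constraint case). A minimal Python sketch of the same logic, with illustrative stand-in constraints:

```python
# Sketch of the parity logic above (names are illustrative): the result
# is true exactly when an odd number of constraints hold.
def satisfies_odd(constraints, match):
    x = -1
    for constraint in constraints:
        if constraint(match):
            x *= -1
    return x == 1

# With two constraints this behaves like XOR:
positive = lambda m: m > 0
even = lambda m: m % 2 == 0
assert satisfies_odd([positive, even], 3)       # only one holds -> True
assert not satisfies_odd([positive, even], 4)   # both hold -> False
```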
java | public EEnum getIfcSIPrefix() {
if (ifcSIPrefixEEnum == null) {
ifcSIPrefixEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
.getEClassifiers().get(890);
}
return ifcSIPrefixEEnum;
} |
python | def attention_lm_moe_base_memeff():
"""Base model with attention expert."""
hparams = attention_lm_moe_base_long_seq()
hparams.use_sepconv = False
hparams.diet_experts = True
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.0
hparams.memory_efficient_ffn = True
hparams.attention_type = AttentionType.MEMORY_EFFICIENT
hparams.num_heads = 8
hparams.factored_logits = True
return hparams |
python | def _prepare_imports(self, dicts):
""" an override for prepare imports that sorts the imports by parent_id dependencies """
# all pseudo parent ids we've seen
pseudo_ids = set()
# pseudo matches
pseudo_matches = {}
# get prepared imports from parent
prepared = dict(super(OrganizationImporter, self)._prepare_imports(dicts))
# collect parent pseudo_ids
for _, data in prepared.items():
parent_id = data.get('parent_id', None) or ''
if parent_id.startswith('~'):
pseudo_ids.add(parent_id)
# turn pseudo_ids into a tuple of dictionaries
pseudo_ids = [(ppid, get_pseudo_id(ppid)) for ppid in pseudo_ids]
# loop over all data again, finding each pseudo id's true json id
for json_id, data in prepared.items():
# check if this matches one of our ppids
for ppid, spec in pseudo_ids:
match = True
for k, v in spec.items():
if data[k] != v:
match = False
break
if match:
if ppid in pseudo_matches:
raise UnresolvedIdError('multiple matches for pseudo id: ' + ppid)
pseudo_matches[ppid] = json_id
# toposort the nodes so parents are imported first
network = Network()
in_network = set()
import_order = []
for json_id, data in prepared.items():
parent_id = data.get('parent_id', None)
# resolve pseudo_ids to their json id before building the network
if parent_id in pseudo_matches:
parent_id = pseudo_matches[parent_id]
network.add_node(json_id)
if parent_id:
# Right. There's an import dep. We need to add the edge from
# the parent to the current node, so that we import the parent
# before the current node.
network.add_edge(parent_id, json_id)
# resolve the sorted import order
for jid in network.sort():
import_order.append((jid, prepared[jid]))
in_network.add(jid)
# ensure all data made it into network (paranoid check, should never fail)
if in_network != set(prepared.keys()): # pragma: no cover
raise PupaInternalError("import is missing nodes in network set")
return import_order |
java | @Nonnull
public static <ELEMENTTYPE> String getImplodedMappedNonEmpty (@Nonnull final String sSep,
@Nullable final ELEMENTTYPE [] aElements,
@Nonnegative final int nOfs,
@Nonnegative final int nLen,
@Nonnull final Function <? super ELEMENTTYPE, String> aMapper)
{
ValueEnforcer.notNull (sSep, "Separator");
if (aElements != null)
ValueEnforcer.isArrayOfsLen (aElements, nOfs, nLen);
ValueEnforcer.notNull (aMapper, "Mapper");
final StringBuilder aSB = new StringBuilder ();
if (aElements != null)
{
int nElementsAdded = 0;
for (int i = nOfs; i < nOfs + nLen; ++i)
{
final String sElement = aMapper.apply (aElements[i]);
if (hasText (sElement))
{
if (nElementsAdded > 0)
aSB.append (sSep);
nElementsAdded++;
aSB.append (sElement);
}
}
}
return aSB.toString ();
} |
java | public static long cleartextSize(long ciphertextSize, Cryptor cryptor) {
checkArgument(ciphertextSize >= 0, "expected ciphertextSize to be non-negative, but was %s", ciphertextSize);
long cleartextChunkSize = cryptor.fileContentCryptor().cleartextChunkSize();
long ciphertextChunkSize = cryptor.fileContentCryptor().ciphertextChunkSize();
long overheadPerChunk = ciphertextChunkSize - cleartextChunkSize;
long numFullChunks = ciphertextSize / ciphertextChunkSize; // floor by int-truncation
long additionalCiphertextBytes = ciphertextSize % ciphertextChunkSize;
if (additionalCiphertextBytes > 0 && additionalCiphertextBytes <= overheadPerChunk) {
throw new IllegalArgumentException("Method not defined for input value " + ciphertextSize);
}
long additionalCleartextBytes = (additionalCiphertextBytes == 0) ? 0 : additionalCiphertextBytes - overheadPerChunk;
assert additionalCleartextBytes >= 0;
return cleartextChunkSize * numFullChunks + additionalCleartextBytes;
} |
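A worked Python sketch of the chunk arithmetic above; the chunk size and per-chunk overhead are assumed values for illustration, not taken from any particular cryptor:

```python
# Worked example of the chunk arithmetic (sizes assumed for illustration):
# each ciphertext chunk carries a fixed per-chunk overhead.
CLEARTEXT_CHUNK = 32 * 1024
OVERHEAD = 48
CIPHERTEXT_CHUNK = CLEARTEXT_CHUNK + OVERHEAD

def cleartext_size(ciphertext_size):
    full_chunks, extra = divmod(ciphertext_size, CIPHERTEXT_CHUNK)
    # A trailing fragment of 1..OVERHEAD bytes cannot hold any cleartext.
    if 0 < extra <= OVERHEAD:
        raise ValueError("method not defined for input value %d" % ciphertext_size)
    extra_cleartext = extra - OVERHEAD if extra else 0
    return CLEARTEXT_CHUNK * full_chunks + extra_cleartext

assert cleartext_size(0) == 0
assert cleartext_size(CIPHERTEXT_CHUNK) == CLEARTEXT_CHUNK
assert cleartext_size(CIPHERTEXT_CHUNK + OVERHEAD + 10) == CLEARTEXT_CHUNK + 10
```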
java | public String getHelpText() {
@SuppressWarnings("unchecked")
ArrayList<Argument> arguments = (ArrayList<Argument>) this.args.clone();
String ret = programDescription + "\n";
ret += "Usage: " + executableName + " ";
for (Argument arg : this.args) {
if (arg.isParam) {
ret += !arg.isRequiredArg()
? "["
: "";
ret += arg.longOption;
ret += !arg.isRequiredArg()
? "]"
: "";
ret += arg.isMultipleAllowed()
? "..."
: "";
ret += " ";
arguments.remove(arg);
}
}
for (Argument arg : arguments) {
if (arg.isRequiredArg()) {
ret += !arg.isRequiredArg()
? "["
: "";
ret += arg.getLongOption() == null ? "-" + arg.getOption()
: "--" + arg.getLongOption();
ret += !arg.isRequiredArg()
? "]"
: "";
ret += arg.isMultipleAllowed()
? "..."
: "";
ret += " ";
}
}
ret += "[OPTION]";
ret += arguments.size() > 1
? "... "
: " ";
ret += "\n" + usageHint + "\n";
ArrayList<String> col1 = new ArrayList<String>();
ArrayList<String> col2 = new ArrayList<String>();
for (Argument option : arguments) {
String line = option.getOption() != null ? String.format(" %-5s",
"-" + option.option
+ (option.getLongOption() != null ? "," : ""))
: String.format(" %-5s", "");
line += option.getLongOption() != null ? "--"
+ option.longOption
+ (option.getValueHint().length() > 0
&& option.takesValue() ? "="
+ option.getValueHint() : "") : "";
col1.add(line);
col2.add(option.helpText);
}
ret += theColumnator((String[]) col1.toArray(new String[] {}),
(String[]) col2.toArray(new String[] {}), 2, 80);
return ret;
} |
python | def read(self):
"""
Reads a single character from the device.
:returns: character read from the device
:raises: :py:class:`~alarmdecoder.util.CommError`
"""
ret = None
try:
ret = self._device.read_data(1)
except (usb.core.USBError, FtdiError) as err:
raise CommError('Error reading from device: {0}'.format(str(err)), err)
return ret |
java | public static String writeShort(Short value) {
if (value==null) return null;
return Short.toString(value);
} |
java | protected void detectJournalManager() throws IOException {
int failures = 0;
do {
try {
Stat stat = new Stat();
String primaryAddr = zk.getPrimaryAvatarAddress(logicalName,
stat, true, true);
if (primaryAddr == null || primaryAddr.trim().isEmpty()) {
primaryURI = null;
remoteJournalManager = null;
LOG.warn("Failover detected, wait for it to finish...");
failures = 0;
sleep(FAILOVER_RETRY_SLEEP);
continue;
}
primaryURI = addrToURI(primaryAddr);
LOG.info("Read primary URI from zk: " + primaryURI);
if (primaryURI.equals(avatarZeroURI)) {
remoteJournalManager = remoteJournalManagerZero;
} else if (primaryURI.equals(avatarOneURI)) {
remoteJournalManager = remoteJournalManagerOne;
} else {
LOG.warn("Invalid primaryURI: " + primaryURI);
primaryURI = null;
remoteJournalManager = null;
failures = 0;
sleep(FAILOVER_RETRY_SLEEP);
}
} catch (KeeperException kex) {
if (KeeperException.Code.CONNECTIONLOSS == kex.code()
&& failures < AvatarZooKeeperClient.ZK_CONNECTION_RETRIES) {
failures++;
// This means there was a failure connecting to zookeeper
// we should retry since some nodes might be down.
sleep(FAILOVER_RETRY_SLEEP);
continue;
}
throwIOException(kex.getMessage(), kex);
} catch (InterruptedException e) {
throwIOException(e.getMessage(), e);
} catch (URISyntaxException e) {
throwIOException(e.getMessage(), e);
}
} while (remoteJournalManager == null);
} |
python | def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
"""This is a temp hack to write the minimal metadata that Squonk needs.
Will needs to be replaced with something that allows something more complete to be written.
:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
:param valueClasses: A dict that describes the Java class of the value properties (used by Squonk)
:param datasetMetaProps: A dict with metadata properties that describe the datset as a whole.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
:param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
is the name of the field being described, and a key name values wholes values is a map of metadata properties.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
"""
meta = {}
props = {}
# TODO add created property - how to handle date formats?
if datasetMetaProps:
props.update(datasetMetaProps)
if fieldMetaProps:
meta["fieldMetaProps"] = fieldMetaProps
if len(props) > 0:
meta["properties"] = props
if valueClassMappings:
meta["valueClassMappings"] = valueClassMappings
if thinOutput:
meta['type'] = 'org.squonk.types.BasicObject'
else:
meta['type'] = 'org.squonk.types.MoleculeObject'
s = json.dumps(meta)
meta = open(outputBase + '.metadata', 'w')
meta.write(s)
meta.close() |
java | public static PlaceDetailsRequest placeDetails(
GeoApiContext context, String placeId, PlaceAutocompleteRequest.SessionToken sessionToken) {
PlaceDetailsRequest request = new PlaceDetailsRequest(context);
request.placeId(placeId);
request.sessionToken(sessionToken);
return request;
} |
java | public CmsJspDateSeriesBean getParentSeries() {
if ((m_parentSeries == null) && getIsExtractedDate()) {
CmsObject cms = m_value.getCmsObject();
try {
CmsResource res = cms.readResource(m_seriesDefinition.getParentSeriesId());
CmsJspContentAccessBean content = new CmsJspContentAccessBean(cms, m_value.getLocale(), res);
CmsJspContentAccessValueWrapper value = content.getValue().get(m_value.getPath());
return new CmsJspDateSeriesBean(value, m_locale);
} catch (NullPointerException | CmsException e) {
LOG.warn("Parent series with id " + m_seriesDefinition.getParentSeriesId() + " could not be read.", e);
}
}
return null;
} |
java | public static <T extends Vector> double getSimilarity(
SimType similarityType, T a, T b) {
switch (similarityType) {
case COSINE:
return cosineSimilarity(a, b);
case PEARSON_CORRELATION:
return correlation(a, b);
case EUCLIDEAN:
return euclideanSimilarity(a, b);
case SPEARMAN_RANK_CORRELATION:
return spearmanRankCorrelationCoefficient(a, b);
case JACCARD_INDEX:
return jaccardIndex(a, b);
case AVERAGE_COMMON_FEATURE_RANK:
return averageCommonFeatureRank(a, b);
case LIN:
return linSimilarity(a, b);
case KL_DIVERGENCE:
return klDivergence(a, b);
case KENDALLS_TAU:
return kendallsTau(a, b);
case TANIMOTO_COEFFICIENT:
return tanimotoCoefficient(a, b);
}
return 0;
} |
java | private boolean hashesMatch(Dependency dependency1, Dependency dependency2) {
if (dependency1 == null || dependency2 == null || dependency1.getSha1sum() == null || dependency2.getSha1sum() == null) {
return false;
}
return dependency1.getSha1sum().equals(dependency2.getSha1sum());
} |
python | def ElementFactory(href, smcresult=None, raise_exc=None):
"""
Factory returns an object of type Element when only
the href is provided.
:param str href: string href to fetch
:param SMCResult smcresult: optional SMCResult. If provided,
the request fetch will be skipped
:param Exception raise_exc: exception to raise if fetch
failed
"""
if smcresult is None:
smcresult = SMCRequest(href=href).read()
if smcresult.json:
cache = ElementCache(smcresult.json, etag=smcresult.etag)
typeof = lookup_class(cache.type)
instance = typeof(
name=cache.get('name'),
href=href,
type=cache.type)
instance.data = cache
return instance
if raise_exc and smcresult.msg:
raise raise_exc(smcresult.msg) |
java | private void initialize() {
this.setText(Constant.messages.getString("sites.resend.popup"));
this.addActionListener(new java.awt.event.ActionListener() {
@Override
public void actionPerformed(java.awt.event.ActionEvent evt) {
if (treeSite != null) {
SiteNode node = (SiteNode) treeSite.getLastSelectedPathComponent();
ManualRequestEditorDialog dialog = extension.getResendDialog();
HistoryReference ref = node.getHistoryReference();
HttpMessage msg = null;
try {
msg = ref.getHttpMessage().cloneRequest();
dialog.setMessage(msg);
dialog.setVisible(true);
} catch (HttpMalformedHeaderException | DatabaseException e) {
logger.error(e.getMessage(), e);
}
}
}
});
} |
java | public boolean skipPreamble () throws IOException
{
// First delimiter may not be preceded with a CRLF.
System.arraycopy (m_aBoundary, 2, m_aBoundary, 0, m_aBoundary.length - 2);
m_nBoundaryLength = m_aBoundary.length - 2;
try
{
// Discard all data up to the delimiter.
discardBodyData ();
// Read boundary - if succeeded, the stream contains an
// encapsulation.
return readBoundary ();
}
catch (final MultipartMalformedStreamException e)
{
return false;
}
finally
{
// Restore delimiter.
System.arraycopy (m_aBoundary, 0, m_aBoundary, 2, m_aBoundary.length - 2);
m_nBoundaryLength = m_aBoundary.length;
m_aBoundary[0] = CR;
m_aBoundary[1] = LF;
}
} |
java | public boolean recordThrowDescription(
JSTypeExpression type, String description) {
if (currentInfo.documentThrows(type, description)) {
populated = true;
return true;
} else {
return false;
}
} |
python | def checkPortAvailable(ha):
"""Checks whether the given port is available"""
# Not sure why OS would allow binding to one type and not other.
# Checking for port available for TCP and UDP.
sockTypes = (socket.SOCK_DGRAM, socket.SOCK_STREAM)
for typ in sockTypes:
sock = socket.socket(socket.AF_INET, typ)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(ha)
if typ == socket.SOCK_STREAM:
l_onoff = 1
l_linger = 0
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', l_onoff, l_linger))
except OSError as exc:
if exc.errno in [
errno.EADDRINUSE, errno.EADDRNOTAVAIL,
WS_SOCKET_BIND_ERROR_ALREADY_IN_USE,
WS_SOCKET_BIND_ERROR_NOT_AVAILABLE
]:
raise PortNotAvailable(ha)
else:
raise exc
finally:
sock.close() |
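For comparison, a stripped-down, self-contained sketch of the same bind-probe idea (TCP only; the original also probes UDP, sets SO_LINGER, and maps Windows-specific error codes):

```python
import errno
import socket

def port_available(host, port):
    """Return True if a TCP socket can bind to (host, port)."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((host, port))
        except OSError as exc:
            if exc.errno in (errno.EADDRINUSE, errno.EADDRNOTAVAIL):
                return False
            raise
    return True

print(port_available('127.0.0.1', 8080))
```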
python | def scale(cls, *scaling):
"""Create a scaling transform from a scalar or vector.
:param scaling: The scaling factor. A scalar value will
scale in both dimensions equally. A vector scaling
value scales the dimensions independently.
:type scaling: float or sequence
:rtype: Affine
"""
if len(scaling) == 1:
sx = sy = float(scaling[0])
else:
sx, sy = scaling
return tuple.__new__(cls, (sx, 0.0, 0.0, 0.0, sy, 0.0, 0.0, 0.0, 1.0)) |
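Assuming this is the `affine` package's `Affine.scale`, usage would look like the sketch below; the printed points follow from the matrix built above:

```python
from affine import Affine

uniform = Affine.scale(2.0)       # one scalar: scale x and y equally
stretch = Affine.scale(2.0, 0.5)  # two values: scale independently
print(uniform * (3, 4))           # -> (6.0, 8.0)
print(stretch * (3, 4))           # -> (6.0, 2.0)
```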
python | def load_slice(self, state, start, end):
"""
Return the memory objects overlapping with the provided slice.
:param start: the start address
:param end: the end address (non-inclusive)
:returns: tuples of (starting_addr, memory_object)
"""
items = [ ]
if start > self._page_addr + self._page_size or end < self._page_addr:
l.warning("Calling load_slice on the wrong page.")
return items
for addr in range(max(start, self._page_addr), min(end, self._page_addr + self._page_size)):
i = addr - self._page_addr
mo = self._storage[i]
if mo is None:
mo = self._sinkhole
if mo is not None and (not items or items[-1][1] is not mo):
items.append((addr, mo))
return items |
java | Chronology getEffectiveChronology() {
Chronology chrono = currentParsed().chrono;
if (chrono == null) {
chrono = overrideChronology;
if (chrono == null) {
chrono = IsoChronology.INSTANCE;
}
}
return chrono;
} |
python | def get_layer_heights(heights, depth, *args, **kwargs):
"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
*args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
"""
bottom = kwargs.pop('bottom', None)
interpolate = kwargs.pop('interpolate', True)
with_agl = kwargs.pop('with_agl', False)
# Make sure heights and datavars are the same length
for datavar in args:
if len(heights) != len(datavar):
raise ValueError('Height and data variables must have the same length.')
# If we want things in AGL, subtract the minimum height from all height values
if with_agl:
sfc_height = np.min(heights)
heights = heights - sfc_height
# If the bottom is not specified, make it the surface
if bottom is None:
bottom = heights[0]
# Make heights and arguments base units
heights = heights.to_base_units()
bottom = bottom.to_base_units()
# Calculate the top of the layer
top = bottom + depth
ret = [] # returned data variables in layer
# Ensure heights are sorted in ascending order
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
# Mask based on top and bottom
inds = _greater_or_close(heights, bottom) & _less_or_close(heights, top)
heights_interp = heights[inds]
# Interpolate heights at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, top)) * heights.units
if bottom not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, bottom)) * heights.units
ret.append(heights_interp)
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = interpolate_1d(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret |
python | def similar_items(self, key, replaces):
"""
Returns a list of (key, value) tuples for all variants of ``key``
in this DAWG according to ``replaces``.
``replaces`` is an object obtained from
``DAWG.compile_replaces(mapping)`` where mapping is a dict
that maps single-char unicode strings to other single-char
unicode strings.
"""
return self._similar_items("", key, self.dct.ROOT, replaces) |
java | @Override
public AdminDeleteUserAttributesResult adminDeleteUserAttributes(AdminDeleteUserAttributesRequest request) {
request = beforeClientExecution(request);
return executeAdminDeleteUserAttributes(request);
} |
python | def apply_with(self, _, v, ctx):
""" constructor
:param v: things used to construct date
:type v: timestamp in float, datetime.date object, or ISO-8601 in str
"""
self.v = None
if isinstance(v, float):
self.v = datetime.date.fromtimestamp(v)
elif isinstance(v, datetime.date):
self.v = v
elif isinstance(v, six.string_types):
self.v = from_iso8601(v).date()
else:
raise ValueError('Unrecognized type for Date: ' + str(type(v))) |
python | def qurl(parser, token):
"""
Append, remove or replace query string parameters (preserve order)
{% qurl url [param]* [as <var_name>] %}
{% qurl 'reverse_name' [reverse_params] | [param]* [as <var_name>] %}
param:
name=value: replace all values of name by one value
name=None: remove all values of name
name+=value: append a new value for name
name-=value: remove the value of name with the value
Example::
{% qurl '/search?page=1&color=blue&color=green'
order='name' page=None color+='red' color-='green' %}
Output: /search?color=blue&order=name&color=red
{% qurl request.get_full_path order='name' %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
'"{0}" takes at least one argument (url)'.format(bits[0]))
if bits.count('|') > 1:
raise TemplateSyntaxError(
'"{0}" may take only one separator'.format(bits[0]))
if bits.count('|'):
# A url expression was passed, needs reversing
url = _get_url_node(parser, bits[:bits.index('|')])
bits = bits[bits.index('|')+1:]
else:
# A url was passed directly
url = parser.compile_filter(bits[1])
bits = bits[2:]
asvar = None
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
qs = []
if len(bits):
kwarg_re = re.compile(r"(\w+)(\-=|\+=|=|\-\-)(.*)")
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, op, value = match.groups()
qs.append((name, op, parser.compile_filter(value),))
return QURLNode(url, qs, asvar) |
python | def page(self, actor_sid=values.unset, event_type=values.unset,
resource_sid=values.unset, source_ip_address=values.unset,
start_date=values.unset, end_date=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of EventInstance records from the API.
Request is executed immediately
:param unicode actor_sid: Only include Events initiated by this Actor
:param unicode event_type: Only include Events of this EventType
:param unicode resource_sid: Only include Events referring to this resource
:param unicode source_ip_address: Only include Events that originated from this IP address
:param datetime start_date: Only show events on or after this date
:param datetime end_date: Only show events on or before this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of EventInstance
:rtype: twilio.rest.monitor.v1.event.EventPage
"""
params = values.of({
'ActorSid': actor_sid,
'EventType': event_type,
'ResourceSid': resource_sid,
'SourceIpAddress': source_ip_address,
'StartDate': serialize.iso8601_datetime(start_date),
'EndDate': serialize.iso8601_datetime(end_date),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return EventPage(self._version, response, self._solution) |
python | def get_edge_ids_by_node_ids(self, node_a, node_b):
"""Returns a list of edge ids connecting node_a to node_b."""
# Check if the nodes are adjacent
if not self.adjacent(node_a, node_b):
return []
# They're adjacent, so pull the list of edges from node_a and determine which ones point to node_b
node = self.get_node(node_a)
return [edge_id for edge_id in node['edges'] if self.get_edge(edge_id)['vertices'][1] == node_b] |
java | public <T> FluentIterable<T> toFluentIterable(Function<? super Cursor, T> singleRowTransform) {
try {
return Cursors.toFluentIterable(this, singleRowTransform);
} finally {
close();
}
} |
java | public Object jsonObject() {
if (json == null) {
return null;
}
if (cached == null) {
Object tmp = null;
if (json[0] == '{') {
tmp = new LazyJsonObject<String, Object>(json);
} else if (json[0] == '[') {
tmp = new LazyJsonArray<Object>(json);
} else {
try {
// NOTE: This if-else condition is for Jackson 2.5.0
// json variable is byte[] which is from Cursor.getBlob().
// And json byte array is ended with '\0'.
// '\0' causes parsing problem with Jackson 2.5.0 that we upgraded Feb 24, 2015.
// We did not observe this problem with Jackson 1.9.2 that we used before.
if(json.length > 0 && json[json.length - 1] == 0) {
tmp = Manager.getObjectMapper().readValue(json, 0, json.length - 1, Object.class);
}
else {
tmp = Manager.getObjectMapper().readValue(json, Object.class);
}
} catch (Exception e) {
//cached will remain null
Log.w(Database.TAG, "Exception parsing json", e);
}
}
cached = tmp;
}
return cached;
} |
python | def scanmeta(f):
"""Scan file headers for @meta ... @endmeta information and store that into
a dictionary.
"""
if isinstance(f, str):
f = io.open(f, mode='r', encoding='latin-1')
done = False
l = f.readline()
s = None
while l and s is None:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@meta')
if i >= 0:
l = l[i+5:]
i = l.find('@endmeta')
if i >= 0:
s = l[:i]
done = True
else:
s = l
l = f.readline()
if not done and not l:
return { }
while l and not done:
i = l.find('!')
if i >= 0:
l = l[i+1:]
i = l.find('@endmeta')
if i >= 0:
s += ' '+l[:i]
done = True
else:
s += ' '+l
l = f.readline()
s = map(lambda x: x.split(':'), s.split())
d = { }
for x in s:
if len(x) > 2 or len(x) == 0:
raise RuntimeError('Syntax error in meta information.')
elif len(x) == 2:
d[x[0]] = x[1]
else:
d[x[0]] = None
return d |
java | private InputStream getPrivateKeyStream(String privateKey) throws LRException
{
try
{
// If the private key matches the form of a private key string, treat it as such
if (privateKey.matches(pgpRegex))
{
return new ByteArrayInputStream(privateKey.getBytes());
}
// Otherwise, treat it as a file location on the local disk
else
{
return new FileInputStream(new File(privateKey));
}
}
catch (IOException e)
{
throw new LRException(LRException.NO_KEY_STREAM);
}
} |
java | public int isCompliantWithRequestContentType(Request request) {
if (acceptedMediaTypes == null || acceptedMediaTypes.isEmpty() || request == null) {
return 2;
} else {
String content = request.contentMimeType();
if (content == null) {
return 2;
} else {
// For all consume, check whether we accept it
MediaType contentMimeType = MediaType.parse(request.contentMimeType());
for (MediaType type : acceptedMediaTypes) {
if (contentMimeType.is(type)) {
if (type.hasWildcard()) {
return 1;
} else {
return 2;
}
}
}
return 0;
}
}
} |
java | public void downloadBlockChain() {
DownloadProgressTracker listener = new DownloadProgressTracker();
startBlockChainDownload(listener);
try {
listener.await();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} |
python | def _get_repos(url):
"""Gets repos in url
:param url: Url
:return: List of repositories in given url
"""
current_page = 1
there_is_something_left = True
repos_list = []
while there_is_something_left:
api_driver = GithubRawApi(
url,
url_params={"page": current_page},
get_api_content_now=True
) # driver to parse API content
for repo in api_driver.api_content: # list of raw repository
repo_name = repo["name"]
repo_user = repo["owner"]["login"]
repos_list.append(
GithubUserRepository(repo_user, repo_name))
there_is_something_left = bool(api_driver.api_content)
current_page += 1
return repos_list |
java | @Override
public boolean cancel(final String appName, final String id,
final boolean isReplication) {
if (super.cancel(appName, id, isReplication)) {
replicateToPeers(Action.Cancel, appName, id, null, null, isReplication);
synchronized (lock) {
if (this.expectedNumberOfClientsSendingRenews > 0) {
// Since the client wants to cancel it, reduce the number of clients to send renews
this.expectedNumberOfClientsSendingRenews = this.expectedNumberOfClientsSendingRenews - 1;
updateRenewsPerMinThreshold();
}
}
return true;
}
return false;
} |
python | def count_called(self, axis=None):
"""Count called genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count.
"""
b = self.is_called()
return np.sum(b, axis=axis) |
python | def format(self, record):
"""Override default format method."""
if record.levelno == logging.DEBUG:
string = Back.WHITE + Fore.BLACK + ' debug '
elif record.levelno == logging.INFO:
string = Back.BLUE + Fore.WHITE + ' info '
elif record.levelno == logging.WARNING:
string = Back.YELLOW + Fore.BLACK + ' warning '
elif record.levelno == logging.ERROR:
string = Back.RED + Fore.WHITE + ' error '
elif record.levelno == logging.CRITICAL:
string = Back.BLACK + Fore.WHITE + ' critical '
else:
string = ''
return '{none}{string}{none} {super}'.format(
none=Style.RESET_ALL, string=string, super=super().format(record)) |
python | def clear(self):
'''
Method which resets any variables held by this class, so that the parser can be used again
:return: Nothing
'''
self.tags = []
'''the current list of tags which have been opened in the XML file'''
self.chars = {}
'''the chars held by each tag, indexed by their tag name'''
self.attribs = {}
'''the attributes of each tag, indexed by their tag name'''
self.handler = None
''' the method which will handle the current tag, and the data currently in the class '''
self.piece = PieceTree.PieceTree()
'''the class tree top'''
self.isDynamic = False
'''Indicator of whether the current thing being processed is a dynamic'''
self.data["note"] = None
self.data["direction"] = None
self.data["expression"] = None
self.data["degree"] = None
self.data["frame_note"] = None
self.data["staff_id"] = 1
self.data["voice"] = 1
self.data["handleType"] = "" |
python | def setEditable(self, state):
"""
Sets whether or not this label should be editable or not.
:param state | <bool>
"""
self._editable = state
if state and not self._lineEdit:
self.setLineEdit(XLineEdit(self))
elif not state and self._lineEdit:
self._lineEdit.close()
self._lineEdit.setParent(None)
self._lineEdit.deleteLater()
self._lineEdit = None |
python | def correction(sentence, pos):
"Most probable spelling correction for word."
word = sentence[pos]
cands = candidates(word)
if not cands:
cands = candidates(word, False)
if not cands:
return word
cands = sorted(cands, key=lambda w: P(w, sentence, pos), reverse=True)
cands = [c[0] for c in cands]
return cands |
java | @Override
public Path toAbsolutePath() {
// Already absolute?
if (this.isAbsolute()) {
return this;
}
// Else construct a new absolute path and normalize it
final Path absolutePath = new ShrinkWrapPath(ArchivePath.SEPARATOR + this.path, this.fileSystem);
final Path normalized = absolutePath.normalize();
return normalized;
} |
java | public int last(int node) {
while (true) {
final int right = right(node);
if (right == NIL) {
break;
}
node = right;
}
return node;
} |
python | def data(ctx, path):
"""List EDC data for [STUDY] [ENV] [SUBJECT]"""
_rws = partial(rws_call, ctx)
if len(path) == 0:
_rws(ClinicalStudiesRequest(), default_attr='oid')
elif len(path) == 1:
_rws(StudySubjectsRequest(path[0], 'Prod'), default_attr='subjectkey')
elif len(path) == 2:
_rws(StudySubjectsRequest(path[0], path[1]), default_attr='subjectkey')
elif len(path) == 3:
try:
click.echo(get_data(ctx, path[0], path[1], path[2]))
except RWSException as e:
click.echo(str(e))
except requests.exceptions.HTTPError as e:
click.echo(str(e))
else:
click.echo('Too many arguments') |
java | @Override
public void evaluate(DoubleSolution solution) {
int hi=0;
double [] fx = new double[getNumberOfObjectives()] ; // functions
EBEsElementsTopology(solution); // transforms the geometry into mechanical properties
EBEsCalculus(); // matrix stiffness method for spatial (3D) structures
// START OBJETIVES FUNCTION
for(int j=0; j<getNumberOfObjectives(); j++)
{
// total weight
if(OF_[j].equals("W"))
{
// START structure total weight ---------------------
fx[j]=0.0;
for(int ba=0; ba<numberOfElements_; ba++){
int idx =(int)Element_[ba][INDEX_];
fx[j]+=Groups_[idx][AREA]*Element_[ba][L_]*Groups_[idx][SPECIFIC_WEIGHT];
}
solution.setObjective(j, fx[j]);
// END minimizing structure total weight ------------------------
}
// summation of deformations
else if(OF_[j].equals("D"))
{
// START maximize displacement nodes ---------------------------------------------
fx[j] = 0.0;
for(int i=0;i<nodeCheck_.length;i++){
double xn=DisplacementNodes_[numberOfLibertyDegree_ * (int)nodeCheck_[i][0]+aX_][hi];
double yn=DisplacementNodes_[numberOfLibertyDegree_ * (int)nodeCheck_[i][0]+aY_][hi];
double zn=DisplacementNodes_[numberOfLibertyDegree_ * (int)nodeCheck_[i][0]+aZ_][hi];
fx[j]+= Math.sqrt(Math.pow(xn,2.0)+Math.pow(yn,2.0)+Math.pow(zn,2.0));
}
solution.setObjective(j, fx[j]);
// END minimizing sum of displacement in nodes ---------------------------------------------
}
// stress square absolute error
else if(OF_[j].equals("SSAE"))
{
// START strain residual minimun ---------------------------------------------
// strain residualt global
fx[j]=StrainResidualMin_[hi]+StrainResidualMax_[hi];
solution.setObjective(j, fx[j]);
// END strain residual minimun ---------------------------------------------
}
// Efficiency of Nash-Sutcliffe for stress and compress
else if(OF_[j].equals("ENS"))
{
fx[j]=FunctionENS(0);
solution.setObjective(j, fx[j]);
}
else if(OF_[j].equals("MDV"))
{
fx[j]=FunctionsMahalanobis_Distance_With_Variance(0);
solution.setObjective(j, fx[j]);
}
else
{
System.out.println("Error: not considerate START OBJECTIVES FUNCTION ");
}
}
numberOfEval_++;
//if((numberOfEval_ % 1000) == 0) System.out.println(numberOfEval_);
// END OBJETIVES FUNCTION
// maximizing the function objective ------------------------
// fx[1] *= -1.0;
// NOT USED -----------------------------------
/*
double l=0; // total length of all elements
// total deflection of estructure
fx[1]=0;
for(int ba=0; ba<numberOfElements_; ba++){
l+=Element_[ba][L_];
int ni = (int)Element_[ba][i_];
int nj = (int)Element_[ba][j_];
double dxi=DisplacementNodes_[numberOfLibertyDegree_*ni+aX_][hi];
double dyi=DisplacementNodes_[numberOfLibertyDegree_*ni+aY_][hi];
double dzi=DisplacementNodes_[numberOfLibertyDegree_*ni+aZ_][hi];
double dxj=DisplacementNodes_[numberOfLibertyDegree_*nj+aX_][hi];
double dyj=DisplacementNodes_[numberOfLibertyDegree_*nj+aY_][hi];
double dzj=DisplacementNodes_[numberOfLibertyDegree_*nj+aZ_][hi];
// fx[1]+=Math.sqrt(Math.pow((dxi-dxj), 2.0)+Math.pow((dyi-dyj), 2.0)+Math.pow((dzi-dzj), 2.0))/l;
fx[1]+=(-dxi+dxj)/l;
}
*/
// END NOT USED ------------------------------------------------------------------------------
this.evaluateConstraints(solution);
} |
python | def reset_coords(self, names=None, drop=False, inplace=None):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
inplace = _check_inplace(inplace)
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, str):
names = [names]
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
'cannot remove index coordinates with reset_coords: %s'
% bad_coords)
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj |
python | def delete_multiple(self, ids=None, messages=None):
"""Execute an HTTP request to delete messages from queue.
Arguments:
ids -- A list of messages id to be deleted from the queue.
messages -- The response from reserving messages.
"""
url = "queues/%s/messages" % self.name
items = None
if ids is None and messages is None:
raise Exception('Please, specify at least one parameter.')
if ids is not None:
items = [{'id': item} for item in ids]
if messages is not None:
items = [{'id': item['id'], 'reservation_id': item['reservation_id']} for item in
messages['messages']]
data = json.dumps({'ids': items})
result = self.client.delete(url=url, body=data,
headers={'Content-Type': 'application/json'})
return result['body'] |
python | def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines) |
java | @Override
public void applicationStopping(ApplicationInfo appInfo) {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
Tr.debug(this, tc, "application stopping, remove stored cookie name : " + appInfo.getName() + ", cookie name : " + this.cookieNames.get(appInfo.getName()));
setFutureGeneratePluginTask();
this.cookieNames.remove(appInfo.getName());
} |
java | static AttrRequests createFromString(final String str)
throws BOSHException {
if (str == null) {
return null;
} else {
return new AttrRequests(str);
}
} |
java | @Override
public String generateId(Attribute attribute) {
String idPart = generateHashcode(attribute.getEntity().getId() + attribute.getIdentifier());
String namePart = truncateName(cleanName(attribute.getName()));
return namePart + SEPARATOR + idPart;
} |
java | public static ResourceRecordSet<AAAAData> aaaa(String name, Collection<String> addresses) {
return new AAAABuilder().name(name).addAll(addresses).build();
} |
java | public double Interpolation_I_Single_Y_func_Area_(double A)
{
// A (cm2) is the area required to cover the stress
// Y (mm) is the height along the y axis
double Y =0;
// the interpolation range is bounded
if( 5.0 < A && A < 1000.0)
{
Y = 0.000003*Math.pow(A,3) - 0.0063*Math.pow(A,2) + 4.1118*A + 75.414;
}
return Y;
} |
python | def log_likelihood(z, x, P, H, R):
"""
Returns log-likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R
"""
S = np.dot(H, np.dot(P, H.T)) + R
return logpdf(z, np.dot(H, x), S) |
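The same computation with plain NumPy/SciPy, assuming `logpdf` here is a multivariate-normal log-density as in `filterpy.stats`:

```python
import numpy as np
from scipy.stats import multivariate_normal

x = np.array([1.0, 0.5])    # posterior mean
P = np.diag([0.2, 0.1])     # posterior covariance
H = np.array([[1.0, 0.0]])  # measurement function
R = np.array([[0.05]])      # measurement noise
z = np.array([1.1])         # measurement

S = H @ P @ H.T + R         # innovation covariance
print(multivariate_normal.logpdf(z, mean=H @ x, cov=S))
```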
python | def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
'''
Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found.
'''
dmp = apps.get_app_config('django_mako_plus')
# I'm calling find_spec first here because I don't want import_module in
# a try/except -- there are lots of reasons that importing can fail, and I just want to
# know whether the file actually exists. find_spec raises AttributeError if not found.
try:
spec = find_spec(module_name)
except ValueError:
spec = None
if spec is None:
# no view module, so create a view function that directly renders the template
try:
return create_view_for_template(fallback_app, fallback_template)
except TemplateDoesNotExist as e:
raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))
# load the module and function
try:
module = import_module(module_name)
func = getattr(module, function_name)
func.view_type = 'function'
except ImportError as e:
raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
except AttributeError as e:
raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))
# if class-based view, call as_view() to get a view function to it
if inspect.isclass(func) and issubclass(func, View):
func = func.as_view()
func.view_type = 'class'
# if regular view function, check the decorator
elif verify_decorator and not view_function.is_decorated(func):
raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))
# attach a converter to the view function
if dmp.options['PARAMETER_CONVERTER'] is not None:
try:
converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
except ImportError as e:
raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))
# return the function/class
return func |
python | def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int:
"""Return the number of map cells which value is between ``mi`` and ``ma``.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound.
ma (float): The upper bound.
Returns:
int: The count of values which fall between ``mi`` and ``ma``.
.. deprecated:: 8.1
Can be replaced by an equivalent NumPy function such as:
``numpy.count_nonzero((mi <= hm) & (hm < ma))``
"""
return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma)) |
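A quick check of the NumPy equivalent quoted in the docstring (`mi` inclusive, `ma` exclusive):

```python
import numpy as np

hm = np.array([[0.0, 1.5], [2.0, 3.0]])
# 1.5 and 2.0 fall in [1.0, 3.0); 0.0 and 3.0 do not.
print(np.count_nonzero((1.0 <= hm) & (hm < 3.0)))  # -> 2
```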
python | def removeAnnotation(self, specfiles=None):
"""Remove all annotation information from :class:`Fi` elements.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in aux.toList(specfiles):
for item in viewvalues(self.container[specfile]):
item.isMatched = False
item.isAnnotated = False
item.siIds = list()
item.siiIds = list()
item.peptide = None
item.sequence = None
item.bestScore = None |
java | Map<Class<?>, List<InjectionTarget>> getDeclaredInjectionTargets
(List<InjectionBinding<?>> resolvedInjectionBindings)
{
final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
if (isTraceOn && tc.isEntryEnabled())
Tr.entry(tc, "getDeclaredInjectionTargets");
Map<Class<?>, List<InjectionTarget>> declaredTargets =
new HashMap<Class<?>, List<InjectionTarget>>();
// First, collect declared injection targets on a per-class basis.
for (InjectionBinding<?> injectionBinding : resolvedInjectionBindings)
{
if (!injectionBinding.isResolved())
{
if (isTraceOn && tc.isDebugEnabled())
Tr.debug(tc, "skipping unresolved " + injectionBinding);
continue;
}
if (isTraceOn && tc.isDebugEnabled())
Tr.debug(tc, "adding targets for " +
Util.identity(injectionBinding) + '[' + injectionBinding.getDisplayName() + ']');
List<InjectionTarget> injectionTargets = InjectionProcessorContextImpl.getInjectionTargets(injectionBinding);
if (injectionTargets != null)
{
for (InjectionTarget target : injectionTargets)
{
Member member = target.getMember();
Class<?> memberClass = member.getDeclaringClass();
if (isTraceOn && tc.isDebugEnabled())
Tr.debug(tc, "adding " + member);
List<InjectionTarget> classTargets = declaredTargets.get(memberClass);
if (classTargets == null)
{
if (isTraceOn && tc.isDebugEnabled())
Tr.debug(tc, "creating list for " + memberClass + "/" + AccessController.doPrivileged(new GetClassLoaderPrivileged(memberClass)));
classTargets = new ArrayList<InjectionTarget>();
declaredTargets.put(memberClass, classTargets);
}
classTargets.add(target);
}
}
}
if (isTraceOn && tc.isEntryEnabled())
Tr.exit(tc, "getDeclaredInjectionTargets");
return declaredTargets;
} |
python | def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None):
""" Use bitly or similar minifier to shrink all URLs in text files within a folder structure.
Used for the NLPIA manuscript directory for Manning Publishing
bitly API: https://dev.bitly.com/links.html
Args:
path (str): Directory or file path
ext (str): File name extension to filter text files by. default='.asc'
output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs)
FIXME: NotImplementedError! Untested!
"""
access_token = access_token or secrets.bitly.access_token
output_ext = output_ext or ''
url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex
filemetas = []
for filemeta in find_files(filepath, ext=ext):
filemetas += [filemeta]
altered_text = ''
with open(filemeta['path'], 'rt') as fin:
text = fin.read()
end = 0
for match in url_regex.finditer(text):
url = match.group()
start = match.start()
altered_text += text[:start]
resp = requests.get('https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format(
access_token, url), allow_redirects=True, timeout=5)
js = resp.json()
short_url = js['shortUrl']
altered_text += short_url
end = start + len(url)
altered_text += text[end:]
with open(filemeta['path'] + (output_ext or ''), 'wt') as fout:
fout.write(altered_text)
return altered_text |
python | def delete_user_by_email(self, email):
"""
This call will delete a user from the Iterable database.
This call requires a path parameter to be passed in, 'email'
in this case, which is why we're just adding this to the 'call'
argument that goes into the 'api_call' request.
"""
call = "/api/users/"+ str(email)
return self.api_call(call=call, method="DELETE") |
java | public static int computeEasternSundayNumber(final int year)
{
final int i = year % 19;
final int j = year / 100;
final int k = year % 100;
final int l = (19 * i + j - j / 4 - (j - (j + 8) / 25 + 1) / 3 + 15) % 30;
final int m = (32 + 2 * (j % 4) + 2 * (k / 4) - l - k % 4) % 7;
return (l + m - 7 * ((i + 11 * l + 22 * m) / 451) + 114);
} |
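This is the anonymous Gregorian (Gauss/Butcher-style) Easter computation: in the standard formulation the returned number encodes the month as `n / 31` and the day as `n % 31 + 1`. A direct Python port, checked against Easter 2000 (April 23):

```python
def easter_sunday_number(year):
    # Mirrors the integer arithmetic above.
    a, b, c = year % 19, year // 100, year % 100
    h = (19 * a + b - b // 4 - (b - (b + 8) // 25 + 1) // 3 + 15) % 30
    m = (32 + 2 * (b % 4) + 2 * (c // 4) - h - c % 4) % 7
    return h + m - 7 * ((a + 11 * h + 22 * m) // 451) + 114

n = easter_sunday_number(2000)
print(n // 31, n % 31 + 1)  # -> 4 23: Easter 2000 fell on April 23
```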
python | def _acking(self, params=None):
"""
Packet acknowledge and retry loop
:param params: Ignored
:type params: None
:rtype: None
"""
while self._is_running:
try:
t, num_try, (ip, port), packet = self._to_ack.get(
timeout=self._select_timeout
)
except queue.Empty:
# Timed out
continue
diff = t - time.time()
if diff > 0:
time.sleep(diff)
with self._seq_ack_lock:
if packet.header.sequence_number not in self._seq_ack:
# Not waiting for this?
continue
if num_try <= self._retransmit_max_tries:
# Try again
self._send(ip, port, packet.pack(True))
self._to_ack.put(
(
time.time() + self._retransmit_timeout,
num_try + 1,
(ip, port),
packet
)
)
else:
# Failed to ack
with self._seq_ack_lock:
try:
self._seq_ack.remove(packet.header.sequence_number)
except KeyError:
pass
self.warning("Exceeded max tries") |
java | public void setSchemaArns(java.util.Collection<String> schemaArns) {
if (schemaArns == null) {
this.schemaArns = null;
return;
}
this.schemaArns = new java.util.ArrayList<String>(schemaArns);
} |
java | public Constraint<SVar<T>,Set<T>> rewrite(UnionFind<SVar<T>> uf) {
SVar<T> vIn_p = uf.find(vIn);
SVar<T> vDest_p = uf.find(vDest);
return new CtDiffConstraint<T>(vIn_p, ctSet, vDest_p);
} |
python | def execute(self, env, args):
""" Prints task information.
`env`
Runtime ``Environment`` instance.
`args`
Arguments object from arg parser.
"""
task_name = args.task_name
if task_name is None:
if not env.task.active:
raise errors.NoActiveTask
task_name = env.task.name
tasks = env.task.get_list_info(task_name)
if not tasks:
raise errors.TaskNotFound(task_name)
_print_tasks(env, tasks) |
python | def block_splitter(self, sources, weight=get_weight, key=lambda src: 1):
"""
:param sources: a list of sources
:param weight: a weight function (default .weight)
:param key: None or 'src_group_id'
:returns: an iterator over blocks of sources
"""
ct = self.oqparam.concurrent_tasks or 1
maxweight = self.csm.get_maxweight(weight, ct, source.MINWEIGHT)
if not hasattr(self, 'logged'):
if maxweight == source.MINWEIGHT:
logging.info('Using minweight=%d', source.MINWEIGHT)
else:
logging.info('Using maxweight=%d', maxweight)
self.logged = True
return general.block_splitter(sources, maxweight, weight, key) |
java | public String getAnswerPattern() {
if (InputElement_Type.featOkTst && ((InputElement_Type)jcasType).casFeat_answerPattern == null)
jcasType.jcas.throwFeatMissing("answerPattern", "edu.cmu.lti.oaqa.framework.types.InputElement");
return jcasType.ll_cas.ll_getStringValue(addr, ((InputElement_Type)jcasType).casFeatCode_answerPattern);} |
python | def find_trees(self, query_dict=None, exact=False, verbose=False, wrap_response=False, **kwargs):
"""Query on tree properties. See documentation for _OTIWrapper class."""
if self.use_v1:
uri = '{p}/singlePropertySearchForTrees'.format(p=self.query_prefix)
else:
uri = '{p}/find_trees'.format(p=self.query_prefix)
resp = self._do_query(uri,
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.tree_search_term_set,
kwargs=kwargs)
if wrap_response:
return TreeRefList(resp)
return resp |
python | def startup_script(self, startup_script):
"""
Updates the startup script.
:param startup_script: content of the startup script
"""
try:
startup_script_path = os.path.join(self.working_dir, 'startup.vpc')
with open(startup_script_path, "w+", encoding='utf-8') as f:
if startup_script is None:
f.write('')
else:
startup_script = startup_script.replace("%h", self._name)
f.write(startup_script)
except OSError as e:
raise VPCSError('Cannot write the startup script file "{}": {}'.format(startup_script_path, e)) |
java | public ReturnValue invoke(final CommandDefinition cd, final String[] argsAry) {
String pluginName = cd.getPluginName();
try {
String[] commandLine = cd.getCommandLine();
if (acceptParams) {
for (int j = 0; j < commandLine.length; j++) {
for (int i = 0; i < argsAry.length; i++) {
commandLine[j] = commandLine[j].replaceAll("\\$[Aa][Rr][Gg]" + (i + 1) + "\\$", Matcher.quoteReplacement(argsAry[i]));
}
}
}
PluginProxy plugin = (PluginProxy) pluginRepository.getPlugin(pluginName);
if (plugin == null) {
LOG.info(context, "Unable to instantiate plugin named " + pluginName);
//context.getEventBus().post(new LogEvent(this, LogEventType.INFO, "Unable to instantiate plugin named " + pluginName));
//EventsUtil.sendEvent(listenersList, this, LogEvent.INFO, "Unable to instantiate plugin named " + pluginName);
return new ReturnValue(Status.UNKNOWN, "Error instantiating plugin '" + pluginName + "' : bad plugin name?");
}
//plugin.addListeners(listenersList);
InjectionUtils.inject(plugin, context);
//plugin.setContext(context);
return plugin.execute(commandLine);
} catch (Throwable thr) {
LOG.error(context, "Plugin [" + pluginName + "] execution error", thr);
//context.getEventBus().post(new LogEvent(this, LogEventType.ERROR, "Plugin [" + pluginName + "] execution error", thr));
return new ReturnValue(Status.UNKNOWN, "Plugin [" + pluginName + "] execution error: " + thr.getMessage());
}
} |
python | def normalize(X):
""" equivalent to scipy.preprocessing.normalize on sparse matrices
, but lets avoid another depedency just for a small utility function """
X = coo_matrix(X)
X.data = X.data / sqrt(bincount(X.row, X.data ** 2))[X.row]
return X |
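A small demo of the row-wise L2 normalization above: each stored value is divided by the L2 norm of its row.

```python
import numpy as np
from scipy.sparse import coo_matrix

X = coo_matrix(np.array([[3.0, 4.0], [0.0, 2.0]]))
# Divide each stored value by the L2 norm of its row.
X.data = X.data / np.sqrt(np.bincount(X.row, X.data ** 2))[X.row]
print(X.toarray())  # -> [[0.6 0.8]
                    #     [0.  1. ]]
```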
python | def _process_job_and_get_successors(self, job_info):
"""
Process a job, get all successors of this job, and call _handle_successor() to handle each successor.
:param JobInfo job_info: The JobInfo instance
:return: None
"""
job = job_info.job
successors = self._get_successors(job)
all_new_jobs = [ ]
for successor in successors:
new_jobs = self._handle_successor(job, successor, successors)
if new_jobs:
all_new_jobs.extend(new_jobs)
for new_job in new_jobs:
self._insert_job(new_job)
self._post_job_handling(job, all_new_jobs, successors) |
python | def get_instances(self):
# type: () -> List[Tuple[str, str, int]]
"""
Retrieves the list of the currently registered component instances
:return: A list of (name, factory name, state) tuples.
"""
with self.__instances_lock:
return sorted(
(name, stored_instance.factory_name, stored_instance.state)
for name, stored_instance in self.__instances.items()
) |
python | def _query_near(*, session=None, **kwargs):
"""Query marine database with given query string values and keys"""
url_endpoint = 'http://calib.org/marine/index.html'
if session is not None:
resp = session.get(url_endpoint, params=kwargs)
else:
with requests.Session() as s:
# Need to get the index page before query. Otherwise get bad query response that seems legit.
s.get('http://calib.org/marine/index.html')
resp = s.get(url_endpoint, params=kwargs)
return resp |
java | private static NetFlowV5Header parseHeader(ByteBuf bb) {
final int version = bb.readUnsignedShort();
if (version != 5) {
throw new InvalidFlowVersionException(version);
}
final int count = bb.readUnsignedShort();
final long sysUptime = bb.readUnsignedInt();
final long unixSecs = bb.readUnsignedInt();
final long unixNsecs = bb.readUnsignedInt();
final long flowSequence = bb.readUnsignedInt();
final short engineType = bb.readUnsignedByte();
final short engineId = bb.readUnsignedByte();
final short sampling = bb.readShort();
final int samplingMode = (sampling >> 14) & 3;
final int samplingInterval = sampling & 0x3fff;
return NetFlowV5Header.create(
version,
count,
sysUptime,
unixSecs,
unixNsecs,
flowSequence,
engineType,
engineId,
samplingMode,
samplingInterval);
} |
python | def cli(ctx, lsftdi, lsusb, lsserial, info):
"""System tools.\n
Install with `apio install system`"""
exit_code = 0
if lsftdi:
exit_code = System().lsftdi()
elif lsusb:
exit_code = System().lsusb()
elif lsserial:
exit_code = System().lsserial()
elif info:
click.secho('Platform: ', nl=False)
click.secho(get_systype(), fg='yellow')
else:
click.secho(ctx.get_help())
ctx.exit(exit_code) |
java | public static List<Match> searchPlain(Model model, Pattern pattern)
{
List<Match> list = new LinkedList<Match>();
Map<BioPAXElement, List<Match>> map = search(model, pattern);
for (List<Match> matches : map.values())
{
list.addAll(matches);
}
return list;
} |
python | def repeal_target(self):
"""The resolution this resolution has repealed, or is attempting
to repeal.
Returns
-------
:class:`ApiQuery` of :class:`Resolution`
Raises
------
TypeError:
If the resolution doesn't repeal anything.
"""
if not self.category == 'Repeal':
raise TypeError("This resolution doesn't repeal anything")
return wa.resolution(int(self.option) + 1) |
java | @Override
public InputStream getInputStream( BinaryKey key ) throws BinaryStoreException {
Connection connection = newConnection();
try {
InputStream inputStream = database.readContent(key, connection);
if (inputStream == null) {
// if we didn't find anything, the connection should've been closed already
throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(key, database.getTableName()));
}
// the connection & statement will be left open until the stream is closed !
return inputStream;
} catch (SQLException e) {
throw new BinaryStoreException(e);
}
} |
python | def pop_key(self, arg, key, *args, **kwargs):
"""Delete a previously defined key for the `add_argument`
"""
return self.unfinished_arguments[arg].pop(key, *args, **kwargs) |
java | public java.util.List<CancelStepsInfo> getCancelStepsInfoList() {
if (cancelStepsInfoList == null) {
cancelStepsInfoList = new com.amazonaws.internal.SdkInternalList<CancelStepsInfo>();
}
return cancelStepsInfoList;
} |
java | public final void removePseudoDestination(SIBUuid12 destinationUuid)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "removePseudoDestination", destinationUuid);
destinationIndex.removePseudoUuid(destinationUuid);
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "removePseudoDestination");
} |
java | public static String getLibInfo() {
String info = libInfo.get();
if (info == null) {
info = String.format(Locale.ENGLISH, LIB_INFO, C4.getVersion());
libInfo.compareAndSet(null, info);
}
return info;
} |
python | def resolve_dict_keywords(keywords):
"""Replace dictionary content with html.
:param keywords: The keywords.
:type keywords: dict
:return: New keywords with updated content.
:rtype: dict
"""
for keyword in ['value_map', 'inasafe_fields', 'inasafe_default_values']:
value = keywords.get(keyword)
if value:
value = value.get('content')
value = KeywordIO._dict_to_row(value).to_html()
keywords[keyword]['content'] = value
value_maps = keywords.get('value_maps')
thresholds = keywords.get('thresholds')
if value_maps:
value_maps = value_maps.get('content')
value_maps = KeywordIO._value_maps_row(value_maps).to_html()
keywords['value_maps']['content'] = value_maps
if thresholds:
thresholds = thresholds.get('content')
thresholds = KeywordIO._threshold_to_row(thresholds).to_html()
keywords['thresholds']['content'] = thresholds
return keywords |
java | public S getRepresentative(Block<S, L> block) {
return block.getStates().choose().getOriginalState();
} |
java | private int find(int key, int hash) {
int index = index(hash);
final int maxTry = capacity;
for (int i = 0; i < maxTry; i++) {
Object slot = values[index];
if (slot == null) return -1;
if (slot != GUARD) return keys[index] == key ? index : -1;
index = nextIndex(index);
}
return -1;
} |
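The same open-addressing probe, sketched in Python: `GUARD` is a tombstone for a deleted slot that the probe must step over, while an empty (`None`) slot ends the search. Names are illustrative:

```python
GUARD = object()  # tombstone for a deleted slot

def find(keys, values, key, index, capacity):
    for _ in range(capacity):
        slot = values[index]
        if slot is None:
            return -1  # empty slot: the key was never stored
        if slot is not GUARD:
            # first live slot on the probe path: hit or definite miss
            return index if keys[index] == key else -1
        index = (index + 1) % capacity  # step past the tombstone
    return -1
```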
python | def bake(self):
"""Find absolute times for all keys.
Absolute time is stored in the KeyFrame dictionary as the variable
__abs_time__.
"""
self.unbake()
for key in self.dct:
self.get_absolute_time(key)
self.is_baked = True |
java | private Optional<String> validateProctimeAttribute(Optional<String> proctimeAttribute) {
return proctimeAttribute.map((attribute) -> {
// validate that field exists and is of correct type
Optional<TypeInformation<?>> tpe = schema.getFieldType(attribute);
if (!tpe.isPresent()) {
throw new ValidationException("Processing time attribute '" + attribute + "' is not present in TableSchema.");
} else if (tpe.get() != Types.SQL_TIMESTAMP()) {
throw new ValidationException("Processing time attribute '" + attribute + "' is not of type SQL_TIMESTAMP.");
}
return attribute;
});
} |
java | public Object invoke(InvocationContext ctx, VisitableCommand command) {
return asyncInterceptorChain.invoke(ctx, command);
} |
python | def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
# type: (dynamodb_types.BINARY_ATTRIBUTE, dynamodb_types.ITEM, DelegatedKey, CryptoConfig) -> None
"""Verify the item signature.
:param dict signature_attribute: Item signature DynamoDB attribute value
:param dict encrypted_item: Encrypted DynamoDB item
:param DelegatedKey verification_key: DelegatedKey to use to calculate the signature
:param CryptoConfig crypto_config: Cryptographic configuration
"""
signature = signature_attribute[Tag.BINARY.dynamodb_tag]
verification_key.verify(
algorithm=verification_key.algorithm,
signature=signature,
data=_string_to_sign(
item=encrypted_item,
table_name=crypto_config.encryption_context.table_name,
attribute_actions=crypto_config.attribute_actions,
),
) |
java | public Paint getComboBoxButtonBorderPaint(Shape s, CommonControlState type) {
TwoColors colors = getCommonBorderColors(type);
return createVerticalGradient(s, colors);
} |
java | public static boolean isLess(BigDecimal bigNum1, BigDecimal bigNum2) {
Assert.notNull(bigNum1);
Assert.notNull(bigNum2);
return bigNum1.compareTo(bigNum2) < 0;
} |
python | def _format_property_values(self, previous, current):
"""
Format WMI Object's RAW data based on the previous sample.
Do not override the original WMI Object !
"""
formatted_wmi_object = CaseInsensitiveDict()
for property_name, property_raw_value in iteritems(current):
counter_type = self._property_counter_types.get(property_name)
property_formatted_value = property_raw_value
if counter_type:
calculator = self._get_property_calculator(counter_type)
property_formatted_value = calculator(previous, current, property_name)
formatted_wmi_object[property_name] = property_formatted_value
return formatted_wmi_object |
python | def read_hdf5_segmentlist(h5f, path=None, gpstype=LIGOTimeGPS, **kwargs):
"""Read a `SegmentList` object from an HDF5 file or group.
"""
# find dataset
dataset = io_hdf5.find_dataset(h5f, path=path)
segtable = Table.read(dataset, format='hdf5', **kwargs)
out = SegmentList()
for row in segtable:
start = LIGOTimeGPS(int(row['start_time']), int(row['start_time_ns']))
end = LIGOTimeGPS(int(row['end_time']), int(row['end_time_ns']))
if gpstype is LIGOTimeGPS:
out.append(Segment(start, end))
else:
out.append(Segment(gpstype(start), gpstype(end)))
return out |
python | def get_one(self, cls=None, **kwargs):
"""Returns a one case."""
case = cls() if cls else self._CasesClass()
for attr, value in kwargs.iteritems():
setattr(case, attr, value)
return case |
python | def mute_modmail_author(self, _unmute=False):
"""Mute the sender of this modmail message.
:param _unmute: Unmute the user instead. Please use
:meth:`unmute_modmail_author` instead of setting this directly.
"""
path = 'unmute_sender' if _unmute else 'mute_sender'
return self.reddit_session.request_json(
self.reddit_session.config[path], data={'id': self.fullname}) |