language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
---|---|
python
|
def run(self, templ, *args):
    """
    A simple utility to run SQL queries.

    :param templ: a query or query template
    :param args: the arguments (or the empty tuple)
    :returns: the DB API 2 cursor used to run the query
    """
    cursor = self._conn.cursor()
    # Expand the template into a concrete SQL string before executing,
    # so the debug output shows exactly what is sent to the database.
    sql = cursor.mogrify(templ, args)
    if self.debug:
        print(sql)
    cursor.execute(sql)
    return cursor
|
java
|
/**
 * Returns the inbound connection information tracked for the current
 * thread by the {@code ThreadManager} singleton.
 *
 * @return the inbound connection info map; nullability depends on
 *         ThreadManager.getInboundConnectionInfo (not visible here)
 */
public Map<String, Object> getInboundConnectionInfo() {
    // Standard entry/exit tracing around the delegated lookup.
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        Tr.entry(tc, "getInboundConnectionInfo");
    Map<String, Object> inboundConnectionInfo = ThreadManager.getInstance().getInboundConnectionInfo();
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        Tr.exit(tc, "getInboundConnectionInfo", inboundConnectionInfo);
    return inboundConnectionInfo;
}
|
java
|
/**
 * Logs a debug-level message with an optional exception to this logger's
 * debug log, falling back to the global debug log when none is configured.
 *
 * @param message   the message to log
 * @param exception the associated exception; passed through unchanged
 */
public void logDebug(String message, Exception exception) {
    // Skip entirely unless debug logging is enabled locally or globally.
    if (!(logDebug || globalLog.logDebug))
        return;
    // Prefer the instance's own debug log; otherwise use the global one.
    if (debugLog != null)
        log(debugLog, "DEBUG", owner, message, exception);
    else
        log(globalLog.debugLog, "DEBUG", owner, message, exception);
}
|
python
|
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
    """
    Yield the tokens of each line in ``path``, up to an optional line limit.

    :param path: Path to a file containing sentences.
    :param limit: Maximum number of lines to read, or None for no limit.
    :return: Iterator over lists of words.
    """
    with smart_open(path) as indata:
        for line_number, line in enumerate(indata):
            # Stop once the requested number of lines has been yielded.
            if limit is not None and line_number == limit:
                return
            yield list(get_tokens(line))
|
java
|
/**
 * Returns whether the mouse pointer currently lies inside the given
 * component's on-screen bounds.
 */
static public boolean isMouseWithinComponent(Component component) {
    // Translate the component's bounds into screen coordinates before
    // testing the pointer location against them.
    Rectangle screenBounds = component.getBounds();
    screenBounds.setLocation(component.getLocationOnScreen());
    Point pointerLocation = MouseInfo.getPointerInfo().getLocation();
    return screenBounds.contains(pointerLocation);
}
|
java
|
/**
 * Loads a compressed texture, serving it from the texture cache when
 * possible, otherwise starting an asynchronous load.
 *
 * @param gvrContext   the GVR context (validated non-null)
 * @param textureCache optional cache; may be null to skip caching
 * @param callback     receives the loaded image (validated non-null)
 * @param resource     the resource to load (validated non-null)
 * @param quality      requested quality; NOTE(review): unused in this body —
 *                     the async loader is always invoked with LOWEST_PRIORITY
 * @throws IllegalArgumentException if the callback parameters are invalid
 */
public static void loadCompressedTexture(final GVRContext gvrContext,
        final ResourceCache<GVRImage> textureCache,
        final CompressedTextureCallback callback,
        final GVRAndroidResource resource, final int quality)
        throws IllegalArgumentException {
    validateCallbackParameters(gvrContext, callback, resource);
    final GVRImage cached = textureCache == null ? null : textureCache
            .get(resource);
    if (cached != null) {
        // Cache hit: deliver the cached image to the callback on the GL thread.
        Log.v("ASSET", "Texture: %s loaded from cache", cached.getFileName());
        gvrContext.runOnGlThread(new Runnable() {
            @Override
            public void run() {
                callback.loaded(cached, resource);
            }
        });
    }
    else
    {
        // Cache miss: wrap the callback (presumably so a successful load
        // populates the cache — confirm against ResourceCache.wrapCallback),
        // then load asynchronously.
        CompressedTextureCallback actualCallback = textureCache == null ? callback
                : ResourceCache.wrapCallback(textureCache, callback);
        AsyncCompressedTexture.loadTexture(gvrContext,
                CancelableCallbackWrapper.wrap(GVRCompressedImage.class, actualCallback),
                resource, GVRContext.LOWEST_PRIORITY);
    }
}
|
python
|
def render_template_string(self, plain, rich = None, **context):
    '''Render the body of the message from a string.

    If ``rich`` isn't provided then the message will not have a rich body.

    :param plain: template string for the plain-text body.
    :param rich: optional template string for the rich body.
    :param context: variables passed through to the template renderer.
    '''
    # Note: this method shadows the module-level render_template_string()
    # it delegates to (the import is not visible in this view).
    self.plain = render_template_string(plain, **context)
    if rich is not None:
        self.rich = render_template_string(rich, **context)
|
python
|
def taxonomy_from_annotated_tree(self, dendropy_tree):
    '''Given an annotated dendropy tree, return the taxonomy of each tip.

    Parameters
    ----------
    dendropy_tree: dendropy.Tree
        Decorated tree to extract taxonomy from

    Returns
    -------
    dictionary of tip name to array of taxonomic affiliations

    Raises
    ------
    Exception
        if two tips share the same (underscore-normalised) name
    '''
    tip_to_taxonomy = {}
    for tip in dendropy_tree.leaf_node_iter():
        tax = []
        n = tip.parent_node
        # Walk from the tip towards the root, prepending each node's
        # taxonomy so ranks end up root-first.  The `n.parent_node` check
        # skips the root node's label.
        while n:
            node_taxonomy = self.taxonomy_from_node_name(n.label)
            if node_taxonomy and n.parent_node:
                tax = [node_taxonomy]+tax
            n = n.parent_node
        # Dendropy renders underscores in taxon labels as spaces; undo that.
        tip_name = tip.taxon.label.replace(' ','_')
        if tip_name in tip_to_taxonomy:
            raise Exception("Found duplicate tip name '%s'" % tip_name)
        if len(tax)==0:
            tip_to_taxonomy[tip_name] = []
        else:
            # Join then re-split so multi-rank labels (e.g. 'p__A; c__B')
            # become individual, stripped rank strings.
            tip_to_taxonomy[tip_name] = [t.strip() for t in '; '.join(tax).split(';')]
    return tip_to_taxonomy
|
java
|
/**
 * Collects every property whose key starts with {@code prefix} and is a
 * direct child of it (no further '.' after the prefix), or any matching key
 * at all when the prefix ends with "@".
 *
 * @param prefix the key prefix to match
 * @return a map of matching keys to their property values
 */
public static Map<String, String> getSiblings(final String prefix) {
    final Map<String, String> siblings = new HashMap<String, String>();
    final Enumeration<Object> keys = properties.keys();
    while (keys.hasMoreElements()) {
        final String key = (String) keys.nextElement();
        if (!key.startsWith(prefix)) {
            continue;
        }
        if (prefix.endsWith("@") || key.indexOf('.', prefix.length()) == -1) {
            siblings.put(key, (String) properties.get(key));
        }
    }
    return siblings;
}
|
python
|
def get_topic(self):
    """Return the topic referenced by the URL kwargs, caching the lookup."""
    topic_pk = self.kwargs.get(self.topic_pk_url_kwarg, None)
    if not topic_pk:
        return None
    try:
        # Reuse the cached topic if a previous call already fetched it.
        return self._topic
    except AttributeError:
        self._topic = get_object_or_404(Topic, pk=topic_pk)
        return self._topic
|
java
|
/**
 * Serializes this request onto the XDR stream: superclass fields first,
 * then the cookie, cookie verifier, and count fields.
 * NOTE(review): field names suggest an NFS READDIR-style request — confirm.
 *
 * @param xdr the XDR encoding stream to write into
 */
public void marshalling(Xdr xdr) {
    super.marshalling(xdr);
    xdr.putLong(_cookie);
    xdr.putLong(_cookieverf);
    xdr.putUnsignedInt(_count);
}
|
java
|
/**
 * Marshalls the given {@code CustomDomainConfigType} (currently only its
 * certificate ARN) using the supplied protocol marshaller.
 *
 * @param customDomainConfigType the value to marshall; must not be null
 * @param protocolMarshaller     the marshaller that receives the bindings
 * @throws SdkClientException if the input is null or marshalling fails
 */
public void marshall(CustomDomainConfigType customDomainConfigType, ProtocolMarshaller protocolMarshaller) {
    if (customDomainConfigType == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(customDomainConfigType.getCertificateArn(), CERTIFICATEARN_BINDING);
    } catch (Exception e) {
        // Wrap any failure in the SDK's client exception type.
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
|
python
|
def tagdict(self):
    """return a dict converted from this string interpreted as a tag-string

    .. code-block:: py

        >>> from pprint import pprint
        >>> dict_ = IrcString('aaa=bbb;ccc;example.com/ddd=eee').tagdict
        >>> pprint({str(k): str(v) for k, v in dict_.items()})
        {'aaa': 'bbb', 'ccc': 'None', 'example.com/ddd': 'eee'}
    """
    cached = getattr(self, '_tagdict', None)
    if cached is not None:
        return cached
    # Parse lazily; an unparseable tag-string yields an empty dict.
    try:
        self._tagdict = tags.decode(self)
    except ValueError:
        self._tagdict = {}
    return self._tagdict
|
java
|
/**
 * Renders the time elapsed since {@code target} as a human-readable
 * relative phrase ("N months ago", "N days ago", ...), falling back to the
 * absolute text form for future dates or intervals over roughly a year.
 *
 * @param target the reference date compared against now (via sinceMillis)
 * @return a localized relative-time string
 */
public String interval(Date target) {
    double unit = 1000.0D;           // millis per second
    // Presumably DAY/HOUR/MINUTE are millisecond constants, making these
    // seconds-per-unit values — confirm against their declarations.
    double dayUnit = DAY / unit;
    double hourUnit = HOUR / unit;
    double minUnit = MINUTE / unit;
    double interval = sinceMillis(target) / unit;  // elapsed seconds
    IntervalDesc desc = theIntervalDesc.get();
    if (interval >= 0.0D) {
        // Over ~a year (12 * 30 days): show the absolute date instead.
        if (interval / (12 * 30 * dayUnit) > 1.0D) {
            return asText(target);
        }
        if (interval / (30 * dayUnit) > 1.0D) {
            return String.format("%s%s", (int) (interval / (30 * dayUnit)), desc.getMonthAgo());
        }
        // NOTE(review): this branch always prints the literal "7" rather
        // than a computed count, so anything between one week and one month
        // renders as "7<day-ago>" — looks like a bug; confirm intent.
        if (interval / (7 * dayUnit) > 1.0D) {
            return String.format("7%s", desc.getDayAgo());
        }
        if ((interval / (7 * dayUnit) <= 1.0D) && (interval / dayUnit >= 1.0D)) {
            return String.format("%s%s", (int) (interval / dayUnit), desc.getDayAgo());
        }
        if ((interval / dayUnit < 1.0D) && (interval / hourUnit >= 1.0D)) {
            return String.format("%s%s", (int) (interval / hourUnit), desc.getHourAgo());
        }
        if ((interval < hourUnit) && (interval >= minUnit)) {
            return String.format("%s%s", (int) (interval / minUnit), desc.getMinuteAgo());
        }
        return desc.getJustNow();
    }
    // Negative interval (target in the future): absolute text form.
    return asText(target);
}
|
python
|
def kana_romaji_lt(romaji, *kana):
    '''
    Build a lookup table with kana characters as keys and their romaji
    equivalents as values.

    Each kana set is aligned index-by-index with ``romaji``; later sets
    overwrite earlier entries on collision.  Multiple kana character sets
    can be passed as rest arguments.
    '''
    lookup = {}
    for kana_set in kana:
        for idx, ro in enumerate(romaji):
            lookup[kana_set[idx]] = ro
    return lookup
|
python
|
def schema_complete():
    """Schema for data in CollectorStage.

    Validates a dict with a non-empty 'stage' string, a 'status' of
    'started'/'succeeded'/'failed', and an optional non-empty 'events'
    list (default []) whose items satisfy
    CollectorStage.schema_event_items().
    """
    return Schema({
        'stage': And(str, len),
        'status': And(str, lambda s: s in ['started', 'succeeded', 'failed']),
        Optional('events', default=[]): And(len, [CollectorStage.schema_event_items()])
    })
|
python
|
def wrap_udf(hdfs_file, inputs, output, so_symbol, name=None):
    """
    Create a callable scalar UDF object.  Must be created in Impala to be
    used.

    Parameters
    ----------
    hdfs_file : str
        Path to the .so file that contains the relevant UDF
    inputs : list of strings or sig.TypeSignature
        Input types to the UDF
    output : string
        Ibis data type
    so_symbol : string
        C++ function name for the relevant UDF
    name : string, optional
        Used internally to track the function

    Returns
    -------
    container : UDF object
    """
    return ImpalaUDF(inputs, output, so_symbol, name=name, lib_path=hdfs_file)
|
java
|
/**
 * Discovers every implementation of {@code service} via the JDK
 * {@link ServiceLoader} mechanism and registers each provider instance.
 *
 * @param <T>     the service interface type
 * @param service the service interface whose providers should be loaded
 */
public static <T> void register(final Class<T> service) {
    for (T each : ServiceLoader.load(service)) {
        registerServiceClass(service, each);
    }
}
|
python
|
def delete(self, path, data=None, headers=None, params=None):
    """
    Delete the resource at the given path.

    :rtype: dict
    :return: Empty dictionary to have consistent interface.
        Some of Atlassian REST resources don't return any content.
    """
    # Delegate to the generic request machinery with the DELETE verb.
    self.request(
        'DELETE',
        path=path,
        data=data,
        headers=headers,
        params=params,
    )
|
python
|
def build(self, builder):
    """
    Build XML by appending a SiteRef element to the builder.
    """
    attributes = {"LocationOID": self.oid}
    # Mixins may contribute additional attributes before the element is
    # emitted.
    self.mixin()
    self.mixin_params(attributes)
    builder.start("SiteRef", attributes)
    builder.end("SiteRef")
|
python
|
def fetch_album_name(self):
    """
    Look up this track's album title on last.fm and store it in
    ``self.album``; log a warning when the lookup fails.
    """
    response = get_lastfm('track.getInfo', artist=self.artist,
                          track=self.title)
    if not response:
        logger.warning('Could not fetch album name for %s', self)
        return
    try:
        self.album = response['track']['album']['title']
    except Exception:
        # The response may lack the album structure entirely.
        logger.warning('Could not fetch album name for %s', self)
    else:
        logger.debug('Found album %s from lastfm', self.album)
|
java
|
/**
 * Returns the "errors" map field (generated protobuf-style accessor;
 * delegates to the internal map field holder).
 */
public java.util.Map<java.lang.String, alluxio.grpc.InconsistentProperties> getErrorsMap() {
    return internalGetErrors().getMap();
}
|
python
|
def get_library_instance(self, library_path, library_name):
    """Generate a Library instance from within libraries dictionary tree."""
    if not self.is_library_in_libraries(library_path, library_name):
        logger.warning("Library manager will not create a library instance which is not in the "
                       "mounted libraries.")
        return None
    # Imported lazily to avoid a circular import at module load time.
    from rafcon.core.states.library_state import LibraryState
    return LibraryState(library_path, library_name, "0.1")
|
python
|
def render(self, ctx=None):
    '''
    Render the container and truncate the result to the configured
    maximum size.

    :param ctx: rendering context in which the method was called
    :rtype: `Bits`
    :return: rendered value of the container, truncated to at most
        ``self._max_size``
    '''
    super(Trunc, self).render(ctx)
    # Keep the full rendering as the current value, but expose only the
    # truncated prefix as the rendered result.
    self._current_value = self._current_rendered
    self._current_rendered = self._current_rendered[:self._max_size]
    return self._current_rendered
|
python
|
def create_new_file(help_string=NO_HELP, default=NO_DEFAULT, suffixes=None):
    # type: (str, Union[str, NO_DEFAULT_TYPE], Union[List[str], None]) -> str
    """
    Create a new-file parameter descriptor.

    :param help_string: help text shown for the parameter.
    :param default: default filename, or NO_DEFAULT for none.
    :param suffixes: optional list of allowed filename suffixes.
    :return: a ParamFilename configured with type_name "new_file".
    """
    # noinspection PyTypeChecker
    return ParamFilename(
        help_string=help_string,
        default=default,
        type_name="new_file",
        suffixes=suffixes,
    )
|
python
|
def pop(self, key=-1):
    """Remove and return the item at ``key`` (default: last), like list.pop()."""
    # Fetch before deleting so an invalid key raises without mutating.
    item = self._values[key]
    del self[key]
    return item
|
python
|
def ReadClientLastPings(self,
                        min_last_ping=None,
                        max_last_ping=None,
                        fleetspeak_enabled=None,
                        cursor=None):
    """Reads client ids and last-ping timestamps for clients in the database.

    Args:
        min_last_ping: Optional inclusive lower bound on the last-ping time.
        max_last_ping: Optional inclusive upper bound; clients that never
            pinged (NULL last_ping) are included as well.
        fleetspeak_enabled: If True, only fleetspeak-enabled clients; if
            False, only clients where the flag is NULL/FALSE; if None, no
            filtering on the flag.
        cursor: Database cursor used to execute the query.

    Returns:
        A dict mapping client id strings to last-ping RDFDatetime values
        (None for clients that never pinged).
    """
    query = "SELECT client_id, UNIX_TIMESTAMP(last_ping) FROM clients "
    query_values = []
    where_filters = []
    if min_last_ping is not None:
        where_filters.append("last_ping >= FROM_UNIXTIME(%s)")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(min_last_ping))
    if max_last_ping is not None:
        where_filters.append(
            "(last_ping IS NULL OR last_ping <= FROM_UNIXTIME(%s))")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(max_last_ping))
    if fleetspeak_enabled is not None:
        if fleetspeak_enabled:
            where_filters.append("fleetspeak_enabled IS TRUE")
        else:
            where_filters.append(
                "(fleetspeak_enabled IS NULL OR fleetspeak_enabled IS FALSE)")
    if where_filters:
        # Join with explicit spaces: the previous `"AND ".join(...)` relied
        # on each filter's trailing whitespace and produced fragments like
        # "...))AND fleetspeak..." with no separator.
        query += "WHERE " + " AND ".join(where_filters)
    cursor.execute(query, query_values)
    last_pings = {}
    for int_client_id, last_ping in cursor.fetchall():
        client_id = db_utils.IntToClientID(int_client_id)
        last_pings[client_id] = mysql_utils.TimestampToRDFDatetime(last_ping)
    return last_pings
|
python
|
def _build_command(self, cmd_1, cmd_2,
                   select=False, select_command=None):
    """
    Constructs the complete V6 command.

    :param cmd_1: Light command 1.
    :param cmd_2: Light command 2.
    :param select: whether a select step precedes the command itself.
    :param select_command: the command used for that select step, if any.
    :return: The complete command.
    """
    # Remote style and group number come from the instance configuration.
    return CommandV6(cmd_1, cmd_2, self._remote_style, self._group_number,
                     select, select_command)
|
java
|
/**
 * Adds the given parameters to the underlying request.
 *
 * @param parameters name/value pairs to append to the request
 * @return this request, for chaining
 */
public DatabaseConnectionRequest<T, U> addParameters(Map<String, Object> parameters) {
    request.addParameters(parameters);
    return this;
}
|
python
|
def evpn_next_hop_unchanged(self, **kwargs):
    """Configure next hop unchanged for an EVPN neighbor.

    You probably don't want this method. You probably want to configure
    an EVPN neighbor using `BGP.neighbor`. That will configure next-hop
    unchanged automatically.

    Args:
        ip_addr (str): IP Address of BGP neighbor.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric. Defaults to '1'.
        delete (bool): Deletes the neighbor if `delete` is ``True``.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        None

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.203', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...     rbridge_id='225')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...     remote_as='65535', rbridge_id='225')
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10')
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10', get=True)
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10', delete=True)
    """
    callback = kwargs.pop('callback', self._callback)
    args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'),
                evpn_neighbor_ipv4_address=kwargs.pop('ip_addr'))
    # The generated binding's name encodes the full config path; fetched
    # via getattr because the name is too long for a direct attribute.
    next_hop_unchanged = getattr(self._rbridge,
                                 'rbridge_id_router_router_bgp_address_'
                                 'family_l2vpn_evpn_neighbor_evpn_'
                                 'neighbor_ipv4_next_hop_unchanged')
    config = next_hop_unchanged(**args)
    if kwargs.pop('delete', False):
        # Mark the element for deletion rather than creation.
        config.find('.//*next-hop-unchanged').set('operation', 'delete')
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    return callback(config)
|
java
|
/**
 * Serializes {@code node} to the given output stream with an indent of 4;
 * the false flag's meaning is defined by the delegated write overload.
 *
 * NOTE(review): the OutputStreamWriter uses the platform default charset;
 * confirm whether an explicit encoding (e.g. UTF-8) is intended here.
 */
public static void write(Node node, OutputStream outputStream)
{
    OutputStreamWriter writer = new OutputStreamWriter(outputStream);
    write(node, writer, 4, false);
}
|
java
|
/**
 * Decodes a Base64 string (gunzipping if the options say so, per the
 * delegated decode(...)) and deserializes the result into a Java object.
 *
 * @param encodedObject the Base64 representation of a serialized object
 * @param options       decode options passed through to decode(...)
 * @param loader        optional class loader used to resolve classes during
 *                      deserialization; when null, default resolution is used
 * @return the deserialized object
 * @throws IOException            if decoding or stream handling fails
 * @throws ClassNotFoundException if a class in the stream cannot be found
 */
public static Object decodeToObject(String encodedObject, int options, final ClassLoader loader)
        throws IOException, ClassNotFoundException {
    // Decode and gunzip if necessary
    byte[] objBytes = decode(encodedObject, options);
    ByteArrayInputStream bais = null;
    ObjectInputStream ois = null;
    Object obj = null;
    try {
        bais = new ByteArrayInputStream(objBytes);
        // If no custom class loader is provided, use Java's builtin OIS.
        if (loader == null) {
            ois = new ObjectInputStream(bais);
        } else {
            // Else make a customized object input stream that uses the provided class loader.
            ois = new ObjectInputStream(bais) {
                @Override
                public Class<?> resolveClass(ObjectStreamClass streamClass)
                        throws IOException, ClassNotFoundException {
                    Class c = Class.forName(streamClass.getName(), false, loader);
                    if (c == null) {
                        // Fall back to default resolution when the custom
                        // loader does not know the class.
                        return super.resolveClass(streamClass);
                    } else {
                        return c; // Class loader knows of this class.
                    }
                }
            };
        }
        obj = ois.readObject();
    } catch (IOException e) {
        throw e; // Catch and throw in order to execute finally{}
    } catch (ClassNotFoundException e) {
        throw e; // Catch and throw in order to execute finally{}
    } finally {
        // Best-effort close of both streams; close failures are ignored.
        try {
            if (bais != null) {
                bais.close();
            }
        } catch (Exception e) {
        }
        try {
            if (ois != null) {
                ois.close();
            }
        } catch (Exception e) {
        }
    }
    return obj;
}
|
java
|
/**
 * Creates a store writer for the given file with the supplied
 * configuration; delegates to {@code StoreImpl.createWriter}.
 *
 * @param file   the target store file
 * @param config the store configuration
 * @return a new StoreWriter for {@code file}
 */
public static StoreWriter createWriter(File file, Configuration config) {
    return StoreImpl.createWriter(file, config);
}
|
python
|
def create_prefix_dir(nb_file, fmt):
    """Ensure the directory for ``nb_file`` exists when ``fmt`` has a prefix."""
    if 'prefix' not in fmt:
        return
    nb_dir = os.path.dirname(nb_file) + os.path.sep
    if os.path.isdir(nb_dir):
        return
    logging.log(logging.WARNING, "[jupytext] creating missing directory %s", nb_dir)
    os.makedirs(nb_dir)
|
python
|
def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
    r"""
    Calculates a multi-dimensional sum filter.

    Parameters
    ----------
    input : array-like
        input array to filter
    size : scalar or tuple, optional
        See footprint, below
    footprint : array, optional
        Either `size` or `footprint` must be defined. `size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``size=(n,m)`` is equivalent
        to ``footprint=np.ones((n,m))``. We adjust `size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `size` is 2, then the actual size used is
        (2,2,2).
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    sum_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    Convenience implementation employing convolve.

    See Also
    --------
    scipy.ndimage.filters.convolve : Convolve an image with a kernel.
    """
    footprint = __make_footprint(input, size, footprint)
    # Reverse the footprint along every axis: convolution flips the kernel,
    # so this makes convolve behave like correlation with the footprint.
    # The index must be a tuple — a list of slices as a multidimensional
    # index was deprecated in NumPy 1.15 and is an error in modern NumPy.
    slicer = tuple(slice(None, None, -1) for _ in range(footprint.ndim))
    return convolve(input, footprint[slicer], output, mode, cval, origin)
|
java
|
/**
 * Creates an analyzed Lucene field on FieldNames.FULLTEXT for the value.
 *
 * @param value       the text to index
 * @param store       whether the raw value is stored in the index
 * @param withOffsets whether term vectors with offsets are recorded
 * @return the configured fulltext Field
 */
protected Field createFulltextField(String value, boolean store, boolean withOffsets)
{
    return new Field(FieldNames.FULLTEXT, value, store ? Field.Store.YES : Field.Store.NO, Field.Index.ANALYZED,
            withOffsets ? Field.TermVector.WITH_OFFSETS : Field.TermVector.NO);
}
|
java
|
/**
 * Returns the first type name (in sorted order) that lives under the given
 * namespace prefix, or null when none matches.
 */
public String findTypeWithMatchingNamespace(String prefix) {
    final String namespacePrefix = prefix + ".";
    // Sorted iteration keeps error reporting deterministic (required by
    // integration tests).
    for (String candidate : getAllSortedTypeNames()) {
        if (candidate.startsWith(namespacePrefix)) {
            return candidate;
        }
    }
    return null;
}
|
java
|
/**
 * Reads the multi-valued parameter {@code name} and converts each value to
 * an enum constant of {@code type}.
 *
 * @param name         the parameter name to look up
 * @param type         the enum class to convert values into
 * @param defaultValue returned when the parameter is absent or no value
 *                     converts successfully
 * @return an EnumSet of the converted values, or {@code defaultValue}
 */
public <T extends Enum<T>> EnumSet<T> getParametersAsEnums(String name, Class<T> type, EnumSet<T> defaultValue) {
    String[] enumNames = getParameters(name);
    if (enumNames == null) {
        return defaultValue;
    } else {
        Collection<T> enums = new ArrayList<>();
        for (String enumName : enumNames) {
            // Blank entries are normalized to null; null conversion results
            // are skipped rather than added.
            T enumValue = stringToEnum(blankToNull(enumName), type);
            if (enumValue != null) {
                enums.add(enumValue);
            }
        }
        return enums.isEmpty() ? defaultValue : EnumSet.copyOf(enums);
    }
}
|
java
|
/**
 * Retrieves the payout with the given id, using no extra parameters and
 * the default request options.
 *
 * @param payout the payout identifier
 * @return the retrieved Payout
 * @throws StripeException if the API call fails
 */
public static Payout retrieve(String payout) throws StripeException {
    return retrieve(payout, (Map<String, Object>) null, (RequestOptions) null);
}
|
python
|
def _warn_if_string(iterable):
    """Helper for the response objects to check if the iterable returned
    to the WSGI server is not a string.
    """
    if not isinstance(iterable, string_types):
        return
    # Imported lazily; warnings are only needed on this misuse path.
    from warnings import warn
    warn(Warning('response iterable was set to a string. This appears '
                 'to work but means that the server will send the '
                 'data to the client char, by char. This is almost '
                 'never intended behavior, use response.data to assign '
                 'strings to the response object.'), stacklevel=2)
|
python
|
def type_assert_iter(
        iterable,
        cls,
        cast_from=None,
        cast_to=None,
        dynamic=None,
        objcls=None,
        choices=None,
        ctor=None,
        allow_none=False,
):
    """ Checks that every object in @iterable is an instance of @cls

    Will also unmarshal JSON objects to Python objects if items in
    @iterable are an instance of dict

    Args:
        iterable:   Any iterable to check. Note that it would not
                    make sense to pass a generator to this function
        cls:        type, The class type to assert each member
                    of @iterable is
        cast_from:  type-or-tuple-of-types, If @obj is an instance
                    of this type(s), cast it to @cast_to
        cast_to:    type, The type to cast @obj to if it's an instance
                    of @cast_from, or None to cast to @cls.
                    If you need more than type(x), use a lambda or
                    factory function.
        dynamic:    @cls, A dynamic default value if @iterable is None,
                    and @dynamic is not None.
        objcls:     None-or-type, a type to assert @iterable is,
                    ie: list, set, etc...
        choices:    iterable-or-None, If not None, each object in
                    @iterable must be in @choices
        ctor:       None-or-static-method: Use this method as the
                    constructor instead of __init__
        allow_none: bool, True to allow @iterable to be None,
                    otherwise False
    Returns:
        @iterable, note that @iterable will be recreated, which
        may be a performance concern if @iterable has many items
    Raises:
        TypeError: if @obj is not an instance of @cls
    """
    if (
            allow_none
            and
            iterable is None
    ):
        return iterable
    # Substitute the dynamic default *before* any validation: previously a
    # None @iterable with @dynamic set reached _check_dstruct and the
    # choices loop first and crashed before the default was ever applied.
    if (
            iterable is None
            and
            dynamic is not None
    ):
        iterable = dynamic
    _check_dstruct(iterable, objcls)
    if choices is not None:
        for obj in iterable:
            _check_choices(obj, choices)
    # Rebuild the container with each member checked (and possibly cast).
    t = type(iterable)
    return t(
        _check(
            obj,
            cls,
            False,
            cast_from,
            cast_to,
            ctor=ctor,
        ) for obj in iterable
    )
|
java
|
/**
 * Generates the bytecode that loads a field onto the stack (a GETFIELD
 * against a constant field reference built from name and type).
 *
 * @param fieldName name of the field to load
 * @param type      declared type of the field
 */
public void loadField(String fieldName,
                      TypeDesc type) {
    getfield(0, Opcode.GETFIELD, constantField(fieldName, type), type);
}
|
java
|
/**
 * Reads all entries of the zip file into an array sorted with
 * ZIP_ENTRY_DATA_COMPARATOR, giving callers a deterministic ordering.
 *
 * @param zipFile the open zip file to enumerate
 * @return the sorted array of entry data
 */
@Trivial
public static ZipEntryData[] collectZipEntries(ZipFile zipFile) {
    final List<ZipEntryData> entriesList = new ArrayList<ZipEntryData>();
    final Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
    while ( zipEntries.hasMoreElements() ) {
        entriesList.add( createZipEntryData( zipEntries.nextElement() ) );
    }
    ZipEntryData[] entryData = entriesList.toArray( new ZipEntryData[ entriesList.size() ] );
    Arrays.sort(entryData, ZIP_ENTRY_DATA_COMPARATOR);
    return entryData;
}
|
java
|
/**
 * Binds this drawer builder to an Activity: captures the activity's
 * content view as the root container and creates a LinearLayoutManager
 * for the drawer's list.
 *
 * @param activity the hosting activity; must not be null
 * @return this builder, for chaining
 */
public DrawerBuilder withActivity(@NonNull Activity activity) {
    this.mRootView = (ViewGroup) activity.findViewById(android.R.id.content);
    this.mActivity = activity;
    this.mLayoutManager = new LinearLayoutManager(mActivity);
    return this;
}
|
python
|
def gpio_properties(self):
    """Returns the properties of the user-controllable GPIOs.

    Provided the device supports user-controllable GPIOs, they will be
    returned by this method.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        A list of ``JLinkGPIODescriptor`` instances totalling the number of
        requested properties.

    Raises:
        JLinkException: on error.
    """
    # First call with null buffer/zero count: the DLL reports how many
    # GPIO descriptors are available (negative values are errors).
    res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)
    if res < 0:
        raise errors.JLinkException(res)
    num_props = res
    # Second call fills a ctypes array with that many descriptors.
    buf = (structs.JLinkGPIODescriptor * num_props)()
    res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)
    if res < 0:
        raise errors.JLinkException(res)
    return list(buf)
|
python
|
def makeIdentifiers(blocks, target="sentenceContainsTarget(+SID,+WID).",
                    treeDepth="3", nodeSize="3", numOfClauses="8"):
    """
    Make unique identifiers for components of the block and write to files.

    :param blocks: Blocks of sentences (likely the output of
                   ``textprocessing.getBlocks``).
    :type blocks: list
    :param target: Target to write to the background file (another option
                   might be ``blockContainsTarget(+BID,+SID).``).
    :type target: str.
    :param treeDepth: Depth of the tree.
    :type treeDepth: str.
    :param nodeSize: Maximum size of each node in the tree.
    :type nodeSize: str.
    :param numOfClauses: Number of clauses in total.
    :type numOfClauses: str.

    .. note:: This is a function that writes *facts*; presently there is no
              way to distinguish between these and positive/negative
              examples.

    Example:

    .. code-block:: python

        from rnlp.textprocessing import getSentences
        from rnlp.textprocessing import getBlocks
        from rnlp.parse import makeIdentifiers

        example = "Hello there. How are you? I am fine."
        sentences = getSentences(example)
        # ['Hello there', 'How are you', 'I am fine']
        blocks = getBlocks(sentences, 2)
        # with 1: [['Hello there'], ['How are you'], ['I am fine']]
        # with 2: [['Hello there', 'How are you'], ['I am fine']]
        # with 3: [['Hello there', 'How are you', 'I am fine']]
        makeIdentifiers(blocks)
    """
    blockID, sentenceID, wordID = 1, 0, 0

    print("Creating background file...")
    _writeBk(target=target, treeDepth=treeDepth,
             nodeSize=nodeSize, numOfClauses=numOfClauses)

    print("Creating identifiers from the blocks...")

    for block in tqdm(blocks):
        _writeBlock(block, blockID)

        sentenceID = 1
        nSentences = len(block)
        # Sentences in the first third of a block are "early", the last
        # third "late", and the middle third "midway".
        beginning = nSentences/float(3)
        ending = (2*nSentences)/float(3)

        for sentence in block:
            if sentenceID < nSentences:
                # mode: nextSentenceInBlock(blockID, sentenceID, sentenceID).
                ps = "nextSentenceInBlock(" + str(blockID) + "," + \
                     str(blockID) + "_" + str(sentenceID) + "," + \
                     str(blockID) + "_" + str(sentenceID+1) + ")."
                _writeFact(ps)
            if sentenceID < beginning:
                # mode: earlySentenceInBlock(blockID, sentenceID).
                ps = "earlySentenceInBlock(" + str(blockID) + "," + \
                     str(blockID) + "_" + str(sentenceID) + ")."
                _writeFact(ps)
            elif sentenceID > ending:
                # mode: lateSentenceInBlock(blockID, sentenceID).
                ps = "lateSentenceInBlock(" + str(blockID) + "," + \
                     str(blockID) + "_" + str(sentenceID) + ")."
                _writeFact(ps)
            else:
                # mode: midWaySentenceInBlock(blockID, sentenceID).
                # Bug fix: this branch previously emitted
                # "earlySentenceInBlock" facts, contradicting the mode
                # above and the parallel midWayWordInSentence handling.
                ps = "midWaySentenceInBlock(" + str(blockID) + "," + \
                     str(blockID) + "_" + str(sentenceID) + ")."
                _writeFact(ps)
            # mode: sentenceInBlock(sentenceID, blockID).
            ps = "sentenceInBlock(" + str(blockID) + "_" + str(sentenceID) + \
                 "," + str(blockID) + ")."
            _writeFact(ps)
            _writeSentenceInBlock(sentence, blockID, sentenceID)

            wordID = 1
            tokens = nltk.word_tokenize(sentence)
            nWords = len(tokens)
            # Same early/late/midway thirds, at the word level.
            wBeginning = nWords/float(3)
            wEnding = (2*nWords)/float(3)

            for word in tokens:
                # mode: wordString(wordID, #str).
                ps = "wordString(" + str(blockID) + "_" + str(sentenceID) + \
                     "_" + str(wordID) + "," + "'" + str(word) + "')."
                _writeFact(ps)
                # mode: partOfSpeechTag(wordID, #POS).
                POS = nltk.pos_tag([word])[0][1]
                ps = "partOfSpeech(" + str(blockID) + "_" + str(sentenceID) + \
                     "_" + str(wordID) + "," + '"' + str(POS) + '").'
                _writeFact(ps)
                # mode: nextWordInSentence(sentenceID, wordID, wordID).
                if wordID < nWords:
                    ps = "nextWordInSentence(" + str(blockID) + "_" + \
                         str(sentenceID) + "," + str(blockID) + "_" + \
                         str(sentenceID) + "_" + str(wordID) + "," + \
                         str(blockID) + "_" + str(sentenceID) + "_" + \
                         str(wordID+1) + ")."
                    _writeFact(ps)
                if wordID < wBeginning:
                    # mode: earlyWordInSentence(sentenceID, wordID).
                    ps = "earlyWordInSentence(" + str(blockID) + "_" + \
                         str(sentenceID) + "," + str(blockID) + "_" + \
                         str(sentenceID) + "_" + str(wordID) + ")."
                    _writeFact(ps)
                elif wordID > wEnding:
                    # mode: lateWordInSentence(sentenceID, wordID).
                    ps = "lateWordInSentence(" + str(blockID) + "_" + \
                         str(sentenceID) + "," + str(blockID) + "_" + \
                         str(sentenceID) + "_" + str(wordID) + ")."
                    _writeFact(ps)
                else:
                    # mode: midWayWordInSentence(sentenceID, wordID).
                    ps = "midWayWordInSentence(" + str(blockID) + "_" + \
                         str(sentenceID) + "," + str(blockID) + "_" + \
                         str(sentenceID) + "_" + str(wordID) + ")."
                    _writeFact(ps)
                # mode: wordInSentence(wordID, sentenceID).
                ps = "wordInSentence(" + str(blockID) + "_" + \
                     str(sentenceID) + "_" + str(wordID) + "," + \
                     str(blockID) + "_" + str(sentenceID) + ")."
                _writeFact(ps)
                _writeWordFromSentenceInBlock(word, blockID,
                                              sentenceID, wordID)
                wordID += 1
            sentenceID += 1
        blockID += 1
|
python
|
def overlaps(self, other):
    """
    Return True if ``other`` overlaps this rectangle.
    """
    # Two rectangles overlap iff their horizontal and vertical extents
    # both intersect (strict inequalities: touching edges don't count).
    horizontal = self.right > other.left and self.left < other.right
    vertical = self.top < other.bottom and self.bottom > other.top
    return horizontal and vertical
|
python
|
def partitioned_probability(self, direction, partition):
    """Compute the probability of the mechanism over the purview in
    the partition.
    """
    return self.state_probability(
        direction,
        self.partitioned_repertoire(direction, partition),
        partition.purview,
    )
|
java
|
/**
 * ANTLR-generated lexer rule for token T__63: matches the literal "sec"
 * and emits it on the default token channel.
 *
 * @throws RecognitionException if the input does not match
 */
public final void mT__63() throws RecognitionException {
    try {
        int _type = T__63;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // BELScript.g:50:7: ( 'sec' )
        // BELScript.g:50:9: 'sec'
        {
            match("sec");
        }
        state.type = _type;
        state.channel = _channel;
    }
    finally {
    }
}
|
java
|
/**
 * Returns an Iterable over the '/'-separated segments of {@code node}.
 * Each iterator() call creates a fresh Scanner (Scanner implements
 * Iterator&lt;String&gt;), so the Iterable can be traversed repeatedly.
 * NOTE(review): the Scanner is never closed — harmless for a String
 * source, but worth confirming.
 */
public static Iterable<String> iterate(final String node) {
    return new Iterable<String>() {
        @Override
        public Iterator<String> iterator() {
            return new Scanner(node).useDelimiter("/");
        }
    };
}
|
python
|
def _path_with_dir_fd(self, path, fct, dir_fd):
    """Return the path considering dir_fd. Raise on invalid parameters.

    :param path: the (possibly relative) path passed to the os function.
    :param fct: the fake os function being emulated; its name is used both
        for error messages and to look up the real counterpart.
    :param dir_fd: the directory file descriptor, or None.
    """
    if dir_fd is not None:
        # dir_fd support was added to the os module in Python 3.3.
        if sys.version_info < (3, 3):
            raise TypeError("%s() got an unexpected keyword "
                            "argument 'dir_fd'" % fct.__name__)
        # check if fd is supported for the built-in real function
        real_fct = getattr(os, fct.__name__)
        if real_fct not in self.supports_dir_fd:
            raise NotImplementedError(
                'dir_fd unavailable on this platform')
        if isinstance(path, int):
            # A descriptor-valued path plus dir_fd makes no sense.
            raise ValueError("%s: Can't specify dir_fd without "
                             "matching path" % fct.__name__)
        if not self.path.isabs(path):
            # Resolve the relative path against the directory behind dir_fd.
            return self.path.join(
                self.filesystem.get_open_file(
                    dir_fd).get_object().path, path)
    return path
|
java
|
/**
 * Adds the given Log as a delegate: messages sent to this target set are
 * proxied to {@code log}.
 *
 * @param log the logger to delegate to
 * @return the registered proxy target (so callers can later remove it)
 */
public LogTarget addDelegate(Log log)
{
    LogProxyTarget logProxyTarget = new LogProxyTarget(log);
    this.addTarget(logProxyTarget);
    return logProxyTarget;
}
|
java
|
/**
 * Returns all classes found in the heap dump, or an empty list when
 * either the heap dump segment or the class dump segment is missing.
 */
public List<JavaClass> getAllClasses() {
    if (heapDumpSegment == null) {
        return Collections.emptyList();
    }
    ClassDumpSegment classDumpBounds = getClassDumpSegment();
    if (classDumpBounds == null) {
        return Collections.emptyList();
    }
    return classDumpBounds.createClassCollection();
}
|
python
|
def list_archive(archive, verbosity=1, program=None, interactive=True):
    """List the contents of the given archive.

    :param archive: path to the archive file; must already exist.
    :param verbosity: defaults to 1 so the listing output is visible;
        negative values suppress the informational log line.
    :param program: optional explicit program to use for listing.
    :param interactive: whether the handler may prompt the user.
    :return: result of _handle_archive for the 'list' command.
    """
    # Set default verbosity to 1 since the listing output should be visible.
    util.check_existing_filename(archive)
    if verbosity >= 0:
        util.log_info("Listing %s ..." % archive)
    return _handle_archive(archive, 'list', verbosity=verbosity,
                           interactive=interactive, program=program)
|
python
|
def _prune_previous_versions(self, symbol, keep_mins=120, keep_version=None, new_version_shas=None):
    """
    Prune versions, not pointed at by snapshots, which are at least keep_mins
    old. Prune will never remove all versions.

    :param symbol: symbol whose old versions should be pruned.
    :param keep_mins: minimum age in minutes before a version is prunable.
    :param keep_version: a version id to exclude from pruning, if any.
    :param new_version_shas: SHAs referenced by a version currently being
        written; chunks with these SHAs are never deleted.
    """
    new_version_shas = new_version_shas if new_version_shas else []
    prunable_ids_to_shas = self._find_prunable_version_ids(symbol, keep_mins)
    prunable_ids = list(prunable_ids_to_shas.keys())
    if keep_version is not None:
        # The caller asked to preserve this version even if prunable.
        try:
            prunable_ids.remove(keep_version)
        except ValueError:
            pass
    if not prunable_ids:
        return
    # Never prune versions that other (base) versions still build upon.
    base_version_ids = self._find_base_version_ids(symbol, prunable_ids)
    version_ids = list(set(prunable_ids) - set(base_version_ids))
    if not version_ids:
        return
    # Delete the version documents
    mongo_retry(self._versions.delete_many)({'_id': {'$in': version_ids}})
    # Narrow the sha map to only the versions actually deleted.
    prunable_ids_to_shas = {k: prunable_ids_to_shas[k] for k in version_ids}
    # The new version has not been written yet, so make sure that any SHAs pointed by it are preserved
    shas_to_delete = [sha for v in prunable_ids_to_shas.values() for sha in v[0] if sha not in new_version_shas]
    # Cleanup any chunks
    mongo_retry(cleanup)(self._arctic_lib, symbol, version_ids, self._versions,
                         shas_to_delete=shas_to_delete,
                         pointers_cfgs=[v[1] for v in prunable_ids_to_shas.values()])
|
java
|
/**
 * Strips the enclosing square brackets from a bracketed IPv6 host literal
 * such as "[::1]"; any other host string is returned unchanged.
 */
@VisibleForTesting
static String canonicalizeHost(String host) {
    boolean bracketed = host.startsWith("[") && host.endsWith("]");
    return bracketed ? host.substring(1, host.length() - 1) : host;
}
|
python
|
def get_all(rc_file='~/.odoorpcrc'):
    """Return every session configuration stored in `rc_file`.

    :param rc_file: path to the configuration file (``~`` is expanded)
    :return: mapping of session name to its connection settings, e.g.::

        {'foo': {'database': 'db_name',
                 'host': 'localhost',
                 'passwd': 'password',
                 'port': 8069,
                 'protocol': 'jsonrpc',
                 'timeout': 120,
                 'type': 'ODOO',
                 'user': 'admin'},
         ...}
    """
    conf = ConfigParser()
    conf.read([os.path.expanduser(rc_file)])
    # Each INI section is one saved session; numeric fields are coerced.
    return {
        name: {
            'type': conf.get(name, 'type'),
            'host': conf.get(name, 'host'),
            'protocol': conf.get(name, 'protocol'),
            'port': conf.getint(name, 'port'),
            'timeout': conf.getfloat(name, 'timeout'),
            'user': conf.get(name, 'user'),
            'passwd': conf.get(name, 'passwd'),
            'database': conf.get(name, 'database'),
        }
        for name in conf.sections()
    }
|
python
|
def set_time(time):
    '''
    Set the current system time. Must be in 24 hour format.

    :param str time: The time to set in 24 hour format. The value must be
        double quoted, ie: '"17:46"'
    :return: True if successful, False if not
    :rtype: bool
    :raises: SaltInvocationError on invalid time format
    :raises: CommandExecutionError on failure

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_time '"17:34"'
    '''
    # Parse (and thereby validate) the double-quoted value before shelling out.
    parsed = datetime.strptime(time, _get_date_time_format(time))
    return salt.utils.mac_utils.execute_return_success(
        'systemsetup -settime {0}'.format(parsed.strftime('%H:%M:%S')))
|
python
|
def object_permission_set(self):
    '''All users have view permissions. Admin users, and users with
    org:admin can create, update, and delete any user. Any user can update
    or delete themselves. Only admins can create or modify other admin
    users.'''
    # Permission DSL: the request is allowed when any one Or() branch matches.
    return Or(
        # Read-only (safe) HTTP methods are open to everyone.
        AllowOnlySafeHttpMethod,
        # Superusers / admins may do anything.
        AllowAdmin,
        And(
            # org:admin holders may manage non-superuser accounts only...
            AllowPermission('org:admin'),
            ObjAttrTrue(lambda _, u: not u.is_superuser),
            # ...and may not grant admin via the request payload.
            ObjAttrTrue(
                lambda r, _: r.data.get('admin') is not True)
        ),
        And(
            # Users may modify themselves, but cannot self-promote to admin.
            AllowModify,
            ObjAttrTrue(
                lambda req, user: user == req.user),
            ObjAttrTrue(
                lambda r, _: r.data.get('admin') is not True)
        ),
    )
|
java
|
// Returns the authority string for this endpoint, computing it lazily on
// first use and caching it in the `authority` field.
public String authority() {
    String authority = this.authority;
    if (authority != null) {
        return authority;
    }
    if (isGroup()) {
        // Endpoint groups are rendered symbolically rather than as host:port.
        authority = "group:" + groupName;
    } else if (port != 0) {
        if (hostType == HostType.IPv6_ONLY) {
            // An IPv6 literal must be bracketed when followed by a port.
            authority = '[' + host() + "]:" + port;
        } else {
            authority = host() + ':' + port;
        }
    } else if (hostType == HostType.IPv6_ONLY) {
        authority = '[' + host() + ']';
    } else {
        authority = host();
    }
    // Cache and return in one expression.
    return this.authority = authority;
}
|
java
|
// Log a message at WARNING level through the shared doLog() helper.
// NOTE(review): the `messageType` parameter is accepted but never forwarded
// to doLog() — confirm whether this omission is intentional.
@Processor
public Object logWarning(
        @Optional @FriendlyName("Log Message") String message,
        @Optional String integrationScenario,
        @Optional String messageType,
        @Optional String contractId,
        @Optional String correlationId,
        @Optional @FriendlyName("Extra Info") Map<String, String> extraInfo) {
    return doLog(LogLevelType.WARNING, message, integrationScenario, contractId, correlationId, extraInfo);
}
|
java
|
// Deprecated pass-through: stores a named model value on the current
// invocation. Kept for backward compatibility only.
@Deprecated
@Override
public void addModel(String name, Object value) {
    getInvocation().addModel(name, value);
}
|
java
|
// Returns up to `cnt` vocabulary words most similar to `target`, mapped to
// their cosine-similarity scores. Selection keeps a running set of the best
// `cnt` candidates by repeatedly replacing the current minimum.
public Map<String, Float> findSynonyms(String target, int cnt) {
    float[] vec = transform(target);
    if ((vec == null) || (cnt == 0))
        return Collections.emptyMap();
    int[] synonyms = new int[cnt];
    float[] scores = new float[cnt];
    int min = 0;
    // Seed the candidate set with the first `cnt` vocabulary entries and
    // track the index of the weakest one.
    for (int i = 0; i < cnt; i++) {
        synonyms[i] = i;
        scores[i] = cosineSimilarity(vec, i * vec.length, _output._vecs);
        if (scores[i] < scores[min])
            min = i;
    }
    final int vocabSize = _output._vocab.size();
    for (int i = cnt; i < vocabSize; i++) {
        float score = cosineSimilarity(vec, i * vec.length, _output._vecs);
        // Skip entries no better than the current minimum, and near-perfect
        // matches (>= 0.999999) — presumably the query word itself; confirm.
        if ((score <= scores[min]) || (score >= 0.999999))
            continue;
        synonyms[min] = i;
        scores[min] = score;
        // find a new min
        min = 0;
        for (int j = 1; j < cnt; j++)
            if (scores[j] < scores[min])
                min = j;
    }
    Map<String, Float> result = new HashMap<>(cnt);
    for (int i = 0; i < cnt; i++)
        result.put(_output._words[synonyms[i]].toString(), scores[i]);
    return result;
}
|
java
|
// Writes the 32-bit `value` into `buffer` starting at `offset` in
// little-endian byte order (least-significant byte first).
public static void writeInt32LE(final int value, final byte[] buffer, int offset)
{
    for (int shift = 0; shift <= 24; shift += 8) {
        buffer[offset++] = (byte) (value >>> shift);
    }
}
|
python
|
def parse(cls, fptr, offset, length):
    """Parse a UUID box from an open file.

    Parameters
    ----------
    fptr : file
        Open file object, positioned inside the box.
    offset : int
        Start position of the box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    UUIDBox
        Instance of the current UUID box.
    """
    # Everything between the current file position and the end of the box
    # belongs to this box: 16 bytes of UUID followed by the payload.
    remaining = offset + length - fptr.tell()
    payload = fptr.read(remaining)
    return cls(UUID(bytes=payload[:16]), payload[16:],
               length=length, offset=offset)
|
java
|
// Asynchronously fetches all job details from the dispatcher and projects
// each one down to a lightweight JobStatusMessage (id, name, status, start time).
public CompletableFuture<Collection<JobStatusMessage>> listJobs() {
    return runDispatcherCommand(dispatcherGateway ->
        dispatcherGateway
            .requestMultipleJobDetails(rpcTimeout)
            .thenApply(jobs ->
                jobs.getJobs().stream()
                    .map(details -> new JobStatusMessage(details.getJobId(), details.getJobName(), details.getStatus(), details.getStartTime()))
                    .collect(Collectors.toList())));
}
|
python
|
def fw_update(self, data, fw_name=None):
    """Top level FW update entry point.

    Logs the payload and delegates to the device-specific updater.
    Note the argument order swap: the helper takes ``fw_name`` first.

    :param data: opaque payload forwarded to ``_fw_update``
    :param fw_name: optional firmware name, passed through unchanged
    """
    LOG.debug("FW Update %s", data)
    self._fw_update(fw_name, data)
|
python
|
async def delete(self):
    """
    Delete task (in any state) permanently.

    Returns `True` if the task is deleted, i.e. its final state is DONE.
    """
    # The queue backend returns the task tuple in its final state;
    # refresh this object from it before checking the outcome.
    the_tuple = await self.queue.delete(self.tube, self.task_id)
    self.update_from_tuple(the_tuple)
    return bool(self.state == DONE)
|
java
|
// Parses `longStr` into a long, throwing IllegalArgumentException with a
// descriptive message when the input is not a valid long.
public static long toLong(@Nullable final String longStr) {
    // Validate first so bad input fails with a clear message instead of a
    // NumberFormatException from the parser.
    if (!isValidLong(longStr)) {
        throw new IllegalArgumentException(longStr + ExceptionValues.EXCEPTION_DELIMITER + ExceptionValues.INVALID_LONG_VALUE);
    }
    // Strip surrounding whitespace before parsing.
    final String stripedLong = StringUtils.strip(longStr);
    return NumberUtils.createLong(stripedLong);
}
|
python
|
def get_input(prompt, default=None, choices=None, option_value=None):
    """
    Prompt the user for a value.

    If `option_value` is not None it is returned immediately and no prompt
    is shown. Otherwise the prompt repeats until an acceptable answer is
    given: an empty answer returns `default` (when one is provided), and
    when `choices` is non-empty only answers contained in it are accepted.
    """
    if option_value is not None:
        return option_value
    allowed = choices or []
    while True:
        answer = input(prompt + ' ').strip()
        if not answer and default is not None:
            return default
        if not allowed or answer in allowed:
            return answer
|
python
|
def replace(self, html):
    """Perform replacements on given HTML fragment.

    Repeatedly applies ``self.pattern`` one match at a time against the
    plain text of the fragment, rewriting the corresponding span of
    ``self.html`` when ``self._is_replacement_allowed`` permits it.
    """
    self.html = html
    text = html.text()
    # End offsets of previous matches within the original text; their
    # running sum maps positions in the progressively-trimmed `text`
    # back to absolute positions in `self.html`.
    positions = []
    def perform_replacement(match):
        offset = sum(positions)
        start, stop = match.start() + offset, match.end() + offset
        s = self.html[start:stop]
        if self._is_replacement_allowed(s):
            repl = match.expand(self.replacement)
            self.html[start:stop] = repl
        else:
            repl = match.group() # no replacement takes place
        positions.append(match.end())
        return repl
    while True:
        if positions:
            # Resume scanning right after the previous match.
            text = text[positions[-1]:]
        text, n = self.pattern.subn(perform_replacement, text, count=1)
        if not n: # all is already replaced
            break
|
python
|
async def vcx_messages_update_status(msg_json: str):
    """
    Update the status of messages from the specified connection.

    :param msg_json: JSON string identifying the messages to update
    :return: result of the underlying ``vcx_messages_update_status`` call
    """
    logger = logging.getLogger(__name__)
    # The ctypes callback must outlive the native call, so it is created once
    # and cached as an attribute on this function.
    if not hasattr(vcx_messages_update_status, "cb"):
        logger.debug("vcx_messages_update_status: Creating callback")
        vcx_messages_update_status.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32))
    c_msg_json = c_char_p(msg_json.encode('utf-8'))
    # Target status is hard-coded to "MS-106" — presumably "reviewed";
    # TODO confirm against the VCX message-state documentation.
    c_status = c_char_p("MS-106".encode('utf-8'))
    result = await do_call('vcx_messages_update_status',
                           c_status,
                           c_msg_json,
                           vcx_messages_update_status.cb)
    logger.debug("vcx_messages_update_status completed")
    return result
|
java
|
// Converts a (possibly null) ReportType configuration element into a Report,
// copying its type, primary column and properties. A null input yields a
// Report built from defaults.
private Report getReport(ReportType reportType) {
    String type = null;
    String primaryColumn = null;
    Properties properties = new Properties();
    if (reportType != null) {
        type = reportType.getType();
        primaryColumn = reportType.getPrimaryColumn();
        // Copy every configured name/value pair into the Properties bag.
        for (PropertyType propertyType : reportType.getProperty()) {
            properties.setProperty(propertyType.getName(), propertyType.getValue());
        }
    }
    Report.ReportBuilder reportBuilder = Report.builder().primaryColumn(primaryColumn).properties(properties);
    if (type != null) {
        reportBuilder.selectedTypes(Report.selectTypes(type));
    }
    return reportBuilder.build();
}
|
python
|
def to_dictionary(self):
    """Serialize this object into a plain dictionary.

    Useful if you have to serialize an array of objects into JSON;
    calling :meth:`to_json` on each element and dumping the list would
    produce an array of strings instead of an array of objects.

    Attributes listed in ``self.properties`` that are missing or ``None``
    are skipped. The ``t`` attribute is rendered via ``isoformat()``
    (it is assumed to be a datetime — confirm with callers).
    """
    j = {}
    for p in self.properties:
        try:
            v = getattr(self, p)
        except AttributeError:
            # Property not set on this instance; skip it.
            continue
        if v is not None:
            # Reuse the value fetched above instead of the original's
            # redundant second/third getattr() calls.
            j[p] = v.isoformat() if p == 't' else v
    return j
|
python
|
def traverse_one(self, attribute, source, target, visitor):
    """
    Traverse a single source/target data proxy pair, recursing into
    relationship attributes whose cascade settings allow the detected
    relation operation (ADD / REMOVE / UPDATE), then visit the pair.

    :param attribute: relationship attribute through which this pair was
        reached, or None at the traversal root
    :param source: source data proxy
    :type source: instance of `DataTraversalProxy` or None
    :param target: target data proxy
    :type target: instance of `DataTraversalProxy` or None
    :param visitor: visitor invoked for every traversed pair
    """
    if __debug__:
        self.__log_traverse_one(self.__trv_path, attribute, source, target)
    # Either proxy serves for interrogating traversal metadata.
    prx = source or target
    # ADD when only source is given, REMOVE when only target, else UPDATE
    # (presumably — confirm against RELATION_OPERATIONS.check).
    rel_op = RELATION_OPERATIONS.check(source, target)
    if prx.do_traverse() \
       and (rel_op == prx.relation_operation or attribute is None):
        for attr in prx.get_relationship_attributes():
            # Check cascade settings.
            if not bool(attr.cascade & rel_op):
                continue
            if not source is None:
                try:
                    attr_source = source.get_attribute_proxy(attr)
                except AttributeError:
                    # If the source does not have the attribute set, we
                    # do nothing (as opposed to when the value is None).
                    continue
            else:
                attr_source = None
            if not target is None:
                attr_target = target.get_attribute_proxy(attr)
            else:
                attr_target = None
            # Relation operation for this attribute pair, independent of
            # the operation for the parent pair.
            attr_rel_op = RELATION_OPERATIONS.check(attr_source,
                                                    attr_target)
            if attr_rel_op == RELATION_OPERATIONS.ADD:
                if rel_op == RELATION_OPERATIONS.ADD:
                    parent = source
                else:
                    parent = target
            elif attr_rel_op == RELATION_OPERATIONS.REMOVE:
                parent = target
            else: # UPDATE
                parent = target
            card = get_attribute_cardinality(attr)
            if card == CARDINALITY_CONSTANTS.ONE:
                if attr_source is None and attr_target is None:
                    # If both source and target have None values, there is
                    # nothing to do.
                    continue
                if attr_rel_op == RELATION_OPERATIONS.ADD:
                    # if not attr_source.get_id() is None:
                    #     # We only ADD new items.
                    #     continue
                    src_items = [attr_source]
                    tgt_items = None
                elif attr_rel_op == RELATION_OPERATIONS.REMOVE:
                    src_items = None
                    tgt_items = [attr_target]
                else: # UPDATE
                    src_items = [attr_source]
                    tgt_items = [attr_target]
                    src_id = attr_source.get_id()
                    tgt_id = attr_target.get_id()
                    if src_id != tgt_id:
                        if not src_id is None:
                            # If the source ID is None, this is a replace
                            # operation (ADD source, REMOVE target).
                            src_target = attr_target.get_matching(src_id)
                            if not src_target is None:
                                tgt_items.append(src_target)
            else:
                # Many-valued attribute: pass the proxies through unchanged.
                src_items = attr_source
                tgt_items = attr_target
            self.__trv_path.push(parent, (source, target), attr, rel_op)
            self.traverse_many(attr, src_items, tgt_items, visitor)
            self.__trv_path.pop() # path.pop()
    visitor.visit(self.__trv_path, attribute, source, target)
|
java
|
// Deprecated pass-through: opens an input stream for the given S3 object,
// delegating to readUtils. The tempFileSupplier provides scratch storage.
@Deprecated
public InputStream getInputStream(String bucketName, String key, Supplier<File> tempFileSupplier) throws AmazonServiceException, AmazonClientException, InterruptedException, IOException{
    return readUtils.getInputStream(bucketName, key, tempFileSupplier);
}
|
java
|
// Registers a WeChat-pay API config under its appId. The very first config
// registered also becomes the default. Returns the previous config mapped to
// this appId (null on first insert) — the Map.put contract.
public static WxPayApiConfig putApiConfig(WxPayApiConfig wxPayApiConfig) {
    if (CFG_MAP.size() == 0) {
        CFG_MAP.put(DEFAULT_CFG_KEY, wxPayApiConfig);
    }
    return CFG_MAP.put(wxPayApiConfig.getAppId(), wxPayApiConfig);
}
|
python
|
def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):
    """
    Return a list of results for a task group.
    :param str group_id: the group id
    :param bool failures: set to True to include failures
    :param int wait: milliseconds to keep polling; a negative value polls
        forever, 0 checks once (returns None on timeout)
    :param int count: Block until there are this many results in the group
    :param bool cached: run this against the cache backend
    :return: list or results
    """
    if cached:
        return result_group_cached(group_id, failures, wait, count)
    start = time()
    if count:
        # First wait for the group to reach the requested size. The chained
        # comparison `... >= wait >= 0` is False for negative wait, so a
        # negative wait never times out here.
        while True:
            if count_group(group_id) == count or wait and (time() - start) * 1000 >= wait >= 0:
                break
            sleep(0.01)
    # Then poll for the results themselves until found or timed out
    # (implicitly returns None on timeout).
    while True:
        r = Task.get_result_group(group_id, failures)
        if r:
            return r
        if (time() - start) * 1000 >= wait >= 0:
            break
        sleep(0.01)
|
java
|
// Produces a provider-local copy of any JMS message. Native AbstractMessage
// instances are copied directly; foreign implementations are converted type
// by type, falling back to a plain Message duplicate. The AbstractMessage
// check must stay first so native messages take the fast path.
public static AbstractMessage duplicate( Message srcMessage ) throws JMSException
{
    if (srcMessage instanceof AbstractMessage) {
        return ((AbstractMessage) srcMessage).copy();
    }
    if (srcMessage instanceof TextMessage) {
        return duplicateTextMessage((TextMessage) srcMessage);
    }
    if (srcMessage instanceof ObjectMessage) {
        return duplicateObjectMessage((ObjectMessage) srcMessage);
    }
    if (srcMessage instanceof BytesMessage) {
        return duplicateBytesMessage((BytesMessage) srcMessage);
    }
    if (srcMessage instanceof MapMessage) {
        return duplicateMapMessage((MapMessage) srcMessage);
    }
    if (srcMessage instanceof StreamMessage) {
        return duplicateStreamMessage((StreamMessage) srcMessage);
    }
    return duplicateMessage(srcMessage);
}
|
java
|
// Thrift-style accessor: returns whether the given field is currently set.
// Null fields are rejected; unknown enum values indicate a generator/runtime
// mismatch and raise IllegalStateException.
public boolean isSet(_Fields field) {
    if (field == null) {
        throw new IllegalArgumentException();
    }
    switch (field) {
    case MSG:
        return is_set_msg();
    }
    throw new IllegalStateException();
}
|
java
|
// Computes the connected components of `graph`: repeatedly picks an
// unvisited node and collects everything reachable from it via depth-first
// search; each sweep yields one component.
public static <T> Set<Set<Node<T>>> compute(final Graph<T> graph) {
    final Set<Node<T>> remaining = new LinkedHashSet<>(graph.nodes());
    final Set<Set<Node<T>>> components = new LinkedHashSet<>();
    while (!remaining.isEmpty()) {
        final Set<Node<T>> component = new LinkedHashSet<>();
        deepFirstSearch(remaining.iterator().next(), component, remaining);
        components.add(component);
    }
    return components;
}
|
python
|
def dropna(self, **kwargs):
    """Returns a new QueryCompiler with null values dropped along given axis.

    Recognized kwargs (pandas-like):
        axis: 0/1 or a list of both — axes to drop along
        subset: labels on the opposite axis to consider when counting NAs
        thresh: drop labels whose NA count exceeds this value
        how: "any" or "all" (method name looked up on the isna() result)

    Return:
        a new DataManager
    """
    axis = kwargs.get("axis", 0)
    subset = kwargs.get("subset", None)
    thresh = kwargs.get("thresh", None)
    how = kwargs.get("how", "any")
    # We need to subset the axis that we care about with `subset`. This
    # will be used to determine the number of values that are NA.
    if subset is not None:
        if not axis:
            compute_na = self.getitem_column_array(subset)
        else:
            compute_na = self.getitem_row_array(self.index.get_indexer_for(subset))
    else:
        compute_na = self
    if not isinstance(axis, list):
        axis = [axis]
    # We are building this dictionary first to determine which columns
    # and rows to drop. This way we do not drop some columns before we
    # know which rows need to be dropped.
    # Note the `ax ^ 1` flip: NAs are counted along the opposite axis of
    # the one being dropped.
    if thresh is not None:
        # Count the number of NA values and specify which are higher than
        # thresh.
        drop_values = {
            ax ^ 1: compute_na.isna().sum(axis=ax ^ 1).to_pandas().squeeze()
            > thresh
            for ax in axis
        }
    else:
        drop_values = {
            ax
            ^ 1: getattr(compute_na.isna(), how)(axis=ax ^ 1).to_pandas().squeeze()
            for ax in axis
        }
    if 0 not in drop_values:
        drop_values[0] = None
    if 1 not in drop_values:
        drop_values[1] = None
        # Boolean masks are turned into concrete label lists for drop().
        rm_from_index = (
            [obj for obj in compute_na.index[drop_values[1]]]
            if drop_values[1] is not None
            else None
        )
        rm_from_columns = (
            [obj for obj in compute_na.columns[drop_values[0]]]
            if drop_values[0] is not None
            else None
        )
    else:
        rm_from_index = (
            compute_na.index[drop_values[1]] if drop_values[1] is not None else None
        )
        rm_from_columns = (
            compute_na.columns[drop_values[0]]
            if drop_values[0] is not None
            else None
        )
    return self.drop(index=rm_from_index, columns=rm_from_columns)
|
python
|
def padDigitalData(self, dig_data, n):
    """Pad ``dig_data`` with copies of its last element so that the
    returned array's length is a multiple of ``n``.

    :param dig_data: 1-D array of uint32 samples
    :param n: block size the output length must be divisible by
    :return: ``dig_data`` itself when already aligned, otherwise a new
        padded uint32 array
    """
    n = int(n)
    remainder = len(dig_data) % n
    if remainder == 0:
        # Already a multiple of n; nothing to do.
        return dig_data
    padding = np.full(n - remainder, dig_data[-1], dtype="uint32")
    return np.concatenate((dig_data, padding))
|
python
|
def reload_input_system(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
reload = ET.Element("reload")
config = reload
input = ET.SubElement(reload, "input")
system = ET.SubElement(input, "system")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
java
|
// One-time dialog setup: title, content pane, sizing and the user-agent
// combo-box entries.
private void initialize() {
    this.setTitle(Constant.messages.getString("filter.changeua.title"));
    this.setContentPane(getJPanel());
    // Explicit setSize only when the window-manager UI handling option is 0;
    // otherwise the preferred size below is relied upon.
    if (Model.getSingleton().getOptionsParam().getViewParam().getWmUiHandlingOption() == 0) {
        this.setSize(375, 173);
    }
    this.setPreferredSize(new Dimension(375, 173));
    // Populate the combo box with every known user-agent name.
    for (int i=0; i<FilterChangeUserAgent.userAgentName.length; i++) {
        cmbUserAgent.addItem(FilterChangeUserAgent.userAgentName[i]);
    }
    this.pack();
}
|
python
|
def excludeSNPs(inPrefix, outPrefix, exclusionFileName):
    """Exclude a list of markers from a binary Plink file set.

    :param inPrefix: the prefix of the input file.
    :param outPrefix: the prefix of the output file.
    :param exclusionFileName: the name of the file containing the markers
        to be excluded.
    :type inPrefix: str
    :type outPrefix: str
    :type exclusionFileName: str

    Runs Plink to drop the markers listed in ``exclusionFileName`` from
    ``inPrefix`` and writes the result under ``outPrefix``.
    """
    command = [
        "plink", "--noweb",
        "--bfile", inPrefix,
        "--exclude", exclusionFileName,
        "--make-bed",
        "--out", outPrefix,
    ]
    runCommand(command)
|
java
|
// Tears down every messaging engine created by this component. Failures in
// one engine are logged (with FFDC) and deliberately not rethrown so they
// cannot prevent the remaining engines from being destroyed.
@Override
public void destroy() throws Exception {
    String thisMethodName = CLASS_NAME + ".destroy()";
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.entry(tc, thisMethodName, "");
    }
    // Destroy the ME's we created
    Enumeration meEnum = _messagingEngines.elements();
    // Destroy each ME on this server. Any exceptions are caught and
    // deliberately not
    // rethrown as errors in one ME must not affect any others that might
    // exist.
    while (meEnum.hasMoreElements()) {
        Object o = meEnum.nextElement();
        Object c = ((MessagingEngine) o).getRuntime();
        if (c instanceof BaseMessagingEngineImpl) {
            try {
                ((BaseMessagingEngineImpl) c).destroy();
                // NOTE(review): mbeanServiceReg.unregister() runs once per
                // engine on what appears to be a single shared registration —
                // confirm a second iteration cannot throw on an already
                // unregistered service.
                mbeanServiceReg.unregister();
            } catch (Exception e) {
                FFDCFilter.processException(e, thisMethodName,
                                            "1:910:1.108", this);
                SibTr.exception(tc, e);
                SibTr.error(tc, "INTERNAL_ERROR_SIAS0003", e);
            }
        }
    }
    _messagingEngines = null;
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        SibTr.exit(tc, thisMethodName);
    }
}
|
python
|
def _update_mtime(self):
    """Refresh the cached modification time of the file being edited.

    Stops the watch timer when the file has vanished or when there is no
    file path to watch at all.
    """
    try:
        self._mtime = os.path.getmtime(self.editor.file.path)
    except OSError:
        # The file path does not exist (anymore); reset and stop watching.
        self._mtime = 0
        self._timer.stop()
    except (TypeError, AttributeError):
        # The file path is None; this happens if setPlainText was used
        # instead of openFile. This is perfectly fine — there is simply
        # nothing to watch.
        try:
            self._timer.stop()
        except AttributeError:
            pass
|
python
|
def __check_port(self, port):
    """Check whether `port` is available on the local host.

    Returns True when the port can be bound (i.e. is free), False when the
    bind attempt fails.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind((_host(), port))
    except socket.error:
        return False
    else:
        return True
    finally:
        # Always release the probe socket, whatever the outcome.
        probe.close()
|
java
|
// Concatenates every string emitted by `src` into a single string, emitted
// once the source completes.
// NOTE(review): the StringBuilder seed passed to reduce() is created once
// and shared across subscriptions of the returned Observable — confirm the
// observable is only subscribed once, or state will accumulate.
public static Observable<String> stringConcat(Observable<String> src) {
    return toString(src.reduce(new StringBuilder(), new Func2<StringBuilder, String, StringBuilder>() {
        @Override
        public StringBuilder call(StringBuilder a, String b) {
            return a.append(b);
        }
    }));
}
|
python
|
def _GenerateChunk(self, length):
"""Generates data for a single chunk."""
while 1:
to_read = min(length, self.RECV_BLOCK_SIZE)
if to_read == 0:
return
data = self.rfile.read(to_read)
if not data:
return
yield data
length -= len(data)
|
java
|
// Shuts this service down exactly once: the lifecycle guard makes repeated
// calls no-ops. An OnShutdownMessage is queued so in-flight work drains
// before the worker is stopped.
@Override
public void shutdown(ShutdownModeAmp mode)
{
    // Only the first caller transitions to stopping; others return.
    if (! _lifecycle.toStopping()) {
        return;
    }
    _lifecycle.toDestroy();
    OnShutdownMessage shutdownMessage = new OnShutdownMessage(this, mode, isSingle());
    _queue.offer(shutdownMessage);
    // _queue.close();
    /*
    for (Actor<?> actorProcessor : _queueActors) {
      actorProcessor.close();
    }
    */
    // Wake every consumer, then give the shutdown message a bounded wait.
    _queue.wakeAllAndWait();
    shutdownMessage.waitFor(1, TimeUnit.SECONDS);
    super.shutdown(mode);
    // Flush any outbox work in this service's context before stopping the
    // worker, restoring the previous context afterwards.
    try (OutboxAmp outbox = OutboxAmp.currentOrCreate(manager())) {
        Object ctx = outbox.getAndSetContext(this);
        try {
            outbox.flush();
            if (! isSingle()) {
                _worker.shutdown(mode);
            }
        } finally {
            outbox.getAndSetContext(ctx);
        }
    }
    // XXX: _worker.shutdown(ShutdownModeAmp.IMMEDIATE);
}
|
java
|
// Determines whether a (possibly generic) source type description is
// assignable to a destination one. `genericD`/`genericS` are type strings
// such as "java.util.List<java.lang.String>"; `classD`/`classS` are the raw
// classes, consulted only on the outermost comparison (`isFirst`);
// `isAddAllFunction`/`isPutAllFunction` select collection- and map-specific
// handling of the generic arguments.
// NOTE(review): any parse or reflection failure is swallowed by the
// catch-all below and reported as "not assignable".
private static boolean isAssignableFrom(String genericD,String genericS,Class<?> classD,Class<?> classS,boolean isFirst, boolean isAddAllFunction,boolean isPutAllFunction){
    try{
        int dStartBracket = genericD.indexOf("<");
        int sStartBracket = genericS.indexOf("<");
        int dEndBracket = genericD.lastIndexOf(">");
        int sEndBracket = genericS.lastIndexOf(">");
        // if there aren't generics
        if(dStartBracket==-1 && sStartBracket==-1 && dEndBracket==-1 && sEndBracket==-1)
            if(isFirst)
                return functionsAreAllowed(isAddAllFunction, isPutAllFunction, classD, classS);
            else{
                // Wildcards are treated as java.lang.Object.
                genericD = "?".equals(genericD)?"java.lang.Object":genericD;
                genericS = "?".equals(genericS)?"java.lang.Object":genericS;
                return isAssignableFrom(Class.forName(genericD),Class.forName(genericS));
            }
        // destination class
        String dBeforeBracket = "";
        // source class
        String sBeforeBracket = "";
        // destination class defined in the generic
        String dAfterBracket = "";
        // source class defined in the generic
        String sAfterBracket = "";
        // if generics exists
        if(dStartBracket!=-1 && dEndBracket!=-1){
            // destination class
            dBeforeBracket = genericD.substring(0, dStartBracket).trim();
            // destination class defined in the generic
            dAfterBracket = genericD.substring(dStartBracket+1,dEndBracket);
        }
        // if generics exists
        if(sStartBracket!=-1 && sEndBracket!=-1){
            // source class
            sBeforeBracket = genericS.substring(0, sStartBracket).trim();
            // source class defined in the generic
            sAfterBracket = genericS.substring(sStartBracket+1,sEndBracket);
        }
        // When only one side declares generics, the other side's raw string
        // stands in for its class name.
        if(isEmpty(dBeforeBracket) && !isEmpty(sBeforeBracket))
            dBeforeBracket = genericD;
        if(!isEmpty(dBeforeBracket) && isEmpty(sBeforeBracket))
            sBeforeBracket = genericS;
        boolean isAssignableFrom = false;
        if(!isEmpty(dBeforeBracket) && !isEmpty(sBeforeBracket))
            isAssignableFrom = isFirst?functionsAreAllowed(isAddAllFunction, isPutAllFunction, classD, classS):
                isAssignableFrom(Class.forName(dBeforeBracket),Class.forName(sBeforeBracket));
        if(!isEmpty(dAfterBracket) && !isEmpty(sAfterBracket)){
            // Recurse into the generic arguments: one argument for
            // collections, a key/value pair for maps.
            if(isAddAllFunction)
                return isAssignableFrom && isAssignableFrom(dAfterBracket, sAfterBracket, null, null, false, false, false);
            if(isPutAllFunction){
                int dSplitIndex = pairSplitIndex(dAfterBracket);
                String dKey = dAfterBracket.substring(0, dSplitIndex).trim();
                String dValue = dAfterBracket.substring(dSplitIndex+1).trim();
                int sSplitIndex = pairSplitIndex(sAfterBracket);
                String sKey = sAfterBracket.substring(0, sSplitIndex).trim();
                String sValue = sAfterBracket.substring(sSplitIndex+1).trim();
                return isAssignableFrom
                    && isAssignableFrom(dKey, sKey, null, null, false, false, false)
                    && isAssignableFrom(dValue, sValue, null, null, false, false, false);
            }
            // Otherwise the generic arguments must match textually.
            return isAssignableFrom && dAfterBracket.equals(sAfterBracket);
        }
        return isAssignableFrom;
    }catch (Exception e) { return false; }
}
|
python
|
def successors(self, node, exclude_compressed=True):
    """
    Returns the list of successors of a given node

    Parameters
    ----------
    node : str
        The target node
    exclude_compressed : boolean
        If true, nodes flagged as 'compressed' are filtered out

    Returns
    -------
    list
        List of successors nodes
    """
    result = super(Graph, self).successors(node)
    if not exclude_compressed:
        return result
    # Keep only successors whose node attributes do not mark them compressed.
    return [s for s in result if not self.node[s].get('compressed', False)]
|
java
|
// Reads an attribute value from a StAX start element, returning null when
// the attribute is absent instead of throwing.
protected String getOptionalAttributeValue(StartElement start,
                                           QName attributeName) {
    final Attribute attribute = start.getAttributeByName(attributeName);
    return attribute == null ? null : attribute.getValue();
}
|
java
|
// JDBC updater: binds `inputStream` (with an explicit length) to the given
// 1-based column of the current updatable row. A null stream binds SQL NULL
// with BLOB type.
public void updateAsciiStream(int columnIndex, InputStream inputStream, long length)
    throws SQLException {
    checkUpdatable(columnIndex);
    if (inputStream == null) {
        parameterHolders[columnIndex - 1] = new NullParameter(ColumnType.BLOB);
        return;
    }
    // Stream is wrapped, not consumed here; escaping mode is carried along.
    parameterHolders[columnIndex - 1] = new StreamParameter(inputStream, length,
                                                            noBackslashEscapes);
}
|
java
|
// Builds a cache key from the table name, the query text and the stringified
// parameter list ("null" is appended when no parameters are given).
public String getKey(String tableName, String query, Object[] params) {
    final String paramsPart = (params == null) ? null : Arrays.asList(params).toString();
    return tableName + query + paramsPart;
}
|
java
|
// Fetches every job of the given GitLab project, following pagination with
// the maximum page size.
public List<GitlabJob> getProjectJobs(Integer projectId) {
    String tailUrl = GitlabProject.URL + "/" + sanitizeProjectId(projectId) + GitlabJob.URL + PARAM_MAX_ITEMS_PER_PAGE;
    return retrieve().getAll(tailUrl, GitlabJob[].class);
}
|
java
|
// Sample: copies `table` to dataset.tableName and waits up to 3 minutes for
// completion. The [START]/[END] markers delimit the published doc snippet.
// Returns the originally submitted Job regardless of the wait outcome.
public Job copyTableId(String dataset, String tableName) throws BigQueryException {
    // [START bigquery_copy_table]
    TableId destinationId = TableId.of(dataset, tableName);
    JobOption options = JobOption.fields(JobField.STATUS, JobField.USER_EMAIL);
    Job job = table.copy(destinationId, options);
    // Wait for the job to complete.
    try {
        Job completedJob =
            job.waitFor(
                RetryOption.initialRetryDelay(Duration.ofSeconds(1)),
                RetryOption.totalTimeout(Duration.ofMinutes(3)));
        if (completedJob != null && completedJob.getStatus().getError() == null) {
            // Job completed successfully.
        } else {
            // Handle error case.
        }
    } catch (InterruptedException e) {
        // Handle interrupted wait
    }
    // [END bigquery_copy_table]
    return job;
}
|
java
|
// Formats a millisecond duration as "H:MM:SS.mmm". The %2s/%3s width
// specifiers rely on zeroPad() having already zero-padded each component
// (plain %s padding would use spaces).
public static String milliSecToString(final long mS) {
    final long rem_mS = (long)(mS % 1000.0);
    final long rem_sec = (long)((mS / 1000.0) % 60.0);
    final long rem_min = (long)((mS / 60000.0) % 60.0);
    final long hr = (long)(mS / 3600000.0);
    final String mSstr = zeroPad(Long.toString(rem_mS), 3);
    final String secStr = zeroPad(Long.toString(rem_sec), 2);
    final String minStr = zeroPad(Long.toString(rem_min), 2);
    return String.format("%d:%2s:%2s.%3s", hr, minStr, secStr, mSstr);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.