language (stringclasses, 2 values) | func_code_string (stringlengths, 63 to 466k)
---|---
python | def format_traceback(extracted_tb,
exc_type,
exc_value,
cwd='',
term=None,
function_color=12,
dim_color=8,
editor='vi',
template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
"""Return an iterable of formatted Unicode traceback frames.
Also include a pseudo-frame at the end representing the exception itself.
Format things more compactly than the stock formatter, and make every
frame an editor shortcut.
"""
def format_shortcut(editor,
path,
line_number,
function=None):
"""Return a pretty-printed editor shortcut."""
return template.format(editor=editor,
line_number=line_number or 0,
path=path,
function=function or u'',
hash_if_function=u' # ' if function else u'',
function_format=term.color(function_color),
# Underline is also nice and doesn't make us
# worry about appearance on different background
# colors.
normal=term.normal,
dim_format=term.color(dim_color) + term.bold,
line_number_max_width=line_number_max_width,
term=term)
template += '\n' # Newlines are awkward to express on the command line.
extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
if not term:
term = Terminal()
if extracted_tb:
# Shorten file paths:
for i, (file, line_number, function, text) in enumerate(extracted_tb):
extracted_tb[i] = human_path(src(file), cwd), line_number, function, text
line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))
# Stack frames:
for i, (path, line_number, function, text) in enumerate(extracted_tb):
text = (text and text.strip()) or u''
yield (format_shortcut(editor, path, line_number, function) +
(u' %s\n' % text))
# Exception:
if exc_type is SyntaxError:
# Format a SyntaxError to look like our other traceback lines.
# SyntaxErrors have a format different from other errors and include a
# file path which looks out of place in our newly highlit, editor-
# shortcutted world.
if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
else:
# The logcapture plugin may format exceptions as strings,
# stripping them of the full filename and lineno
exc_lines = []
formatted_exception = format_exception_only(SyntaxError, exc_value)
formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
else:
exc_lines = []
formatted_exception = format_exception_only(exc_type, exc_value)
exc_lines.extend([_decode(f) for f in formatted_exception])
yield u''.join(exc_lines) |
java | public void setTopicIds(String v) {
if (Token_Type.featOkTst && ((Token_Type)jcasType).casFeat_topicIds == null)
jcasType.jcas.throwFeatMissing("topicIds", "de.julielab.jules.types.Token");
jcasType.ll_cas.ll_setStringValue(addr, ((Token_Type)jcasType).casFeatCode_topicIds, v);} |
java | public com.google.api.ads.admanager.axis.v201811.TimeZoneType getTimeZoneType() {
return timeZoneType;
} |
java | @VisibleForTesting
void performLazySeek(long bytesToRead) throws IOException {
throwIfNotOpen();
// Return quickly if there is no pending seek operation, i.e. position didn't change.
if (currentPosition == contentChannelPosition && contentChannel != null) {
return;
}
logger.atFine().log(
"Performing lazySeek from %s to %s position with %s bytesToRead for '%s'",
contentChannelPosition, currentPosition, bytesToRead, resourceIdString);
// used to auto-detect random access
long oldPosition = contentChannelPosition;
long seekDistance = currentPosition - contentChannelPosition;
if (contentChannel != null
&& seekDistance > 0
// Always skip in place gzip-encoded files, because they do not support range reads.
&& (gzipEncoded || seekDistance <= readOptions.getInplaceSeekLimit())
&& currentPosition < contentChannelEnd) {
logger.atFine().log(
"Seeking forward %s bytes (inplaceSeekLimit: %s) in-place to position %s for '%s'",
seekDistance, readOptions.getInplaceSeekLimit(), currentPosition, resourceIdString);
skipInPlace(seekDistance);
} else {
closeContentChannel();
}
if (contentChannel == null) {
if (isRandomAccessPattern(oldPosition)) {
setRandomAccess();
}
openContentChannel(bytesToRead);
}
} |
python | def gen_undef():
"""Return an UNDEF instruction.
"""
empty_reg = ReilEmptyOperand()
return ReilBuilder.build(ReilMnemonic.UNDEF, empty_reg, empty_reg, empty_reg) |
java | protected void registerAddOperation(final ManagementResourceRegistration registration, final AbstractAddStepHandler handler,
OperationEntry.Flag... flags) {
registration.registerOperationHandler(getOperationDefinition(ModelDescriptionConstants.ADD,
new DefaultResourceAddDescriptionProvider(registration, descriptionResolver, orderedChild), OperationEntry.EntryType.PUBLIC, flags)
, handler);
} |
python | def parse_inventory(inventory_output=None):
"""Parse the inventory text and return udi dict."""
udi = {
"name": "",
"description": "",
"pid": "",
"vid": "",
"sn": ""
}
if inventory_output is None:
return udi
# find the record with chassis text in name or descr
capture_next = False
chassis_udi_text = None
for line in inventory_output.split('\n'):
lc_line = line.lower()
        if ('chassis' in lc_line or 'switch system' in lc_line or 'rack' in lc_line) and 'name' in lc_line and 'descr' in lc_line:
capture_next = True
chassis_udi_text = line
continue
if capture_next:
inventory_output = chassis_udi_text + "\n" + line
break
match = re.search(r"(?i)NAME: (?P<name>.*?),? (?i)DESCR", inventory_output, re.MULTILINE)
if match:
udi['name'] = match.group('name').strip('" ,')
match = re.search(r"(?i)DESCR: (?P<description>.*)", inventory_output, re.MULTILINE)
if match:
udi['description'] = match.group('description').strip('" ')
match = re.search(r"(?i)PID: (?P<pid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['pid'] = match.group('pid')
match = re.search(r"(?i)VID: (?P<vid>.*?),? ", inventory_output, re.MULTILINE)
if match:
udi['vid'] = match.group('vid')
match = re.search(r"(?i)SN: (?P<sn>.*)", inventory_output, re.MULTILINE)
if match:
udi['sn'] = match.group('sn').strip()
return udi |
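A minimal usage sketch for `parse_inventory` above, with a hypothetical IOS-style `show inventory` snippet (all field values are invented for illustration):

    >>> output = ('NAME: "Rack 0", DESCR: "Cisco ASR 9001 Chassis"\n'
    ...           'PID: ASR-9001, VID: V01, SN: FOC1234ABCD')
    >>> udi = parse_inventory(output)
    >>> udi['name'], udi['pid'], udi['sn']
    ('Rack 0', 'ASR-9001', 'FOC1234ABCD')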
python | def do_POST(self):
"""Handle POST requests
When the user is redirected, this handler will respond with a website
which will send a post request with the url fragment as parameters.
This will get the parameters and store the original redirection
url and fragments in :data:`LoginServer.tokenurl`.
:returns: None
:rtype: None
:raises: None
"""
log.debug('POST')
self._set_headers()
# convert the parameters back to the original fragment
# because we need to send the original uri to set_token
# url fragments will not show up in self.path though.
# thats why we make the hassle to send it as a post request.
# Note: oauth does not allow for http connections
# but twitch does, so we fake it
ruri = constants.REDIRECT_URI.replace('http://', 'https://')
self.server.set_token(ruri + self.path.replace('?', '#')) |
python | def yield_json(stream):
"""Uses array and object delimiter counts for balancing.
"""
buff = u""
arr_count = 0
obj_count = 0
while True:
buff += read_chunk(stream)
# If we finish parsing all objs or arrays, yield a finished JSON
# entity.
if buff.endswith('{'):
obj_count += 1
if buff.endswith('['):
arr_count += 1
if buff.endswith(']'):
arr_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item
if buff.endswith('}'):
obj_count -= 1
if obj_count == arr_count == 0:
json_item = copy(buff)
buff = u""
yield json_item |
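A rough driving sketch for `yield_json`, assuming the module's `read_chunk` helper and its `from copy import copy` import; here `read_chunk` is stubbed to return one character per call so every delimiter trips the `endswith` checks:

    import io

    def read_chunk(stream):
        return stream.read(1)  # toy stand-in for the real chunked reader

    stream = io.StringIO('{"a": 1}{"b": [2, 3]}')
    gen = yield_json(stream)
    print(next(gen))  # {"a": 1}
    print(next(gen))  # {"b": [2, 3]}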
java | private static void doTaskHooksTranslation(Config heronConfig) {
List<String> hooks = heronConfig.getAutoTaskHooks();
if (hooks != null && !hooks.isEmpty()) {
heronConfig.put(backtype.storm.Config.STORMCOMPAT_TOPOLOGY_AUTO_TASK_HOOKS, hooks);
List<String> translationHooks = new LinkedList<>();
translationHooks.add(ITaskHookDelegate.class.getName());
heronConfig.setAutoTaskHooks(translationHooks);
}
} |
python | def md5(self):
"""
Get an MD5 reflecting everything in the DataStore.
Returns
----------
md5: str, MD5 in hexadecimal
"""
hasher = hashlib.md5()
for key in sorted(self.data.keys()):
hasher.update(self.data[key].md5().encode('utf-8'))
md5 = hasher.hexdigest()
return md5 |
java | public Node nextAvailableNode() {
if (!isInit) {
init();
}
currentNodeIndex++;
if (currentNodeIndex >= allNodes.length) {
currentNodeIndex = 0;
}
return currentNode();
} |
python | def push(self, local: _PATH = 'LICENSE', remote: _PATH = '/sdcard/LICENSE') -> None:
'''Copy local files/directories to device.'''
if not os.path.exists(local):
raise FileNotFoundError(f'Local {local!r} does not exist.')
self._execute('-s', self.device_sn, 'push', local, remote) |
python | def section_names(self, ordkey="wall_time"):
"""
Return the names of sections ordered by ordkey.
For the time being, the values are taken from the first timer.
"""
section_names = []
# FIXME this is not trivial
for idx, timer in enumerate(self.timers()):
if idx == 0:
section_names = [s.name for s in timer.order_sections(ordkey)]
#check = section_names
#else:
# new_set = set( [s.name for s in timer.order_sections(ordkey)])
# section_names.intersection_update(new_set)
# check = check.union(new_set)
#if check != section_names:
# print("sections", section_names)
# print("check",check)
return section_names |
java | final static void enable() {
if (lifeCycle.getStatus().equals(RunStatus.READY) || lifeCycle.getStatus().equals(RunStatus.STOPPED)) {
init();
} else if (lifeCycle.getStatus().equals(RunStatus.DISABLED)) {
lifeCycle.setStatus(RunStatus.RUNNING);
}
} |
python | def close(self):
"""
Close the fits file and set relevant metadata to None
"""
if hasattr(self, '_FITS'):
if self._FITS is not None:
self._FITS.close()
self._FITS = None
self._filename = None
self.mode = None
self.charmode = None
self.intmode = None
self.hdu_list = None
self.hdu_map = None |
java | public static CPOptionValue fetchByCPOptionId_Last(long CPOptionId,
OrderByComparator<CPOptionValue> orderByComparator) {
return getPersistence()
.fetchByCPOptionId_Last(CPOptionId, orderByComparator);
} |
java | public static int cusparseSdense2csr(
cusparseHandle handle,
int m,
int n,
cusparseMatDescr descrA,
Pointer A,
int lda,
Pointer nnzPerRow,
Pointer csrSortedValA,
Pointer csrSortedRowPtrA,
Pointer csrSortedColIndA)
{
return checkResult(cusparseSdense2csrNative(handle, m, n, descrA, A, lda, nnzPerRow, csrSortedValA, csrSortedRowPtrA, csrSortedColIndA));
} |
python | def assign_fonts(counts,maxsize,minsize,exclude_words):
'''Defines the font size of a word in the cloud.
Counts is a list of tuples in the form (word,count)'''
valid_counts = []
if exclude_words:
for i in counts:
if i[1] != 1:
valid_counts.append(i)
else:
valid_counts = counts
    # use lists, not map(): under Python 3 max() would exhaust the map iterator,
    # leaving font_sizes empty
    frequencies = [x[1] for x in valid_counts]
    words = [x[0] for x in valid_counts]
    maxcount = max(frequencies)
    font_sizes = [fontsize(x, maxsize, minsize, maxcount) for x in frequencies]
size_dict = dict(zip(words, font_sizes))
return size_dict |
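A quick sketch of `assign_fonts` with a toy `fontsize` helper (the real one is assumed to live elsewhere in the module; this stand-in interpolates linearly):

    def fontsize(count, maxsize, minsize, maxcount):
        return minsize + (maxsize - minsize) * count / maxcount

    counts = [('cloud', 4), ('word', 2), ('rare', 1)]
    print(assign_fonts(counts, maxsize=40, minsize=10, exclude_words=True))
    # {'cloud': 40.0, 'word': 25.0}  -- 'rare' dropped, its count is 1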
python | def main(argv=None):
"""ben-nett entry point"""
arguments = cli_common(__doc__, argv=argv)
benet = BeNet(arguments['CAMPAIGN_FILE'])
benet.run()
if argv is not None:
return benet |
java | static public void assertNull(Object object) {
String message = "Expected: <null> but was: " + String.valueOf(object);
assertNull(message, object);
} |
python | def with_body(self, body):
# @todo take encoding into account
"""Sets the request body to the provided value and returns the request itself.
Keyword arguments:
body -- A UTF-8 string or bytes-like object which represents the request body.
"""
    try:
        self.body = body.encode('utf-8')
    except AttributeError:  # already bytes-like, no .encode()
        try:
            self.body = bytes(body)
        except (TypeError, ValueError):
            raise ValueError("Request body must be a string or bytes-like object.")
hasher = hashlib.sha256()
hasher.update(self.body)
digest = base64.b64encode(hasher.digest()).decode('utf-8')
self.with_header("X-Authorization-Content-Sha256", digest)
return self |
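The digest header computed above is the usual SHA-256-then-base64 construction; a standalone check using only the stdlib:

    import base64, hashlib

    digest = base64.b64encode(hashlib.sha256(b'hello').digest()).decode('utf-8')
    print(digest)  # LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ=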
java | public static <N, E> List<List<N>> collectSCCs(Graph<N, E> graph) {
return SCCs.collectSCCs(graph);
} |
java | public static PhasedBackoffWaitStrategy withSleep(
long spinTimeout,
long yieldTimeout,
TimeUnit units)
{
return new PhasedBackoffWaitStrategy(
spinTimeout, yieldTimeout,
units, new SleepingWaitStrategy(0));
} |
java | @BetaApi(
"The surface for long-running operations is not stable yet and may change in the future.")
public final OperationFuture<Empty, WorkflowMetadata> instantiateWorkflowTemplateAsync(
String name, Map<String, String> parameters) {
InstantiateWorkflowTemplateRequest request =
InstantiateWorkflowTemplateRequest.newBuilder()
.setName(name)
.putAllParameters(parameters)
.build();
return instantiateWorkflowTemplateAsync(request);
} |
python | def strings_to_categoricals(self, df: Optional[pd.DataFrame] = None):
"""Transform string annotations to categoricals.
Only affects string annotations that lead to less categories than the
total number of observations.
Params
------
df
        If ``df`` is ``None``, modifies both :attr:`.obs` and :attr:`.var`,
otherwise modifies ``df`` inplace.
Notes
-----
Turns the view of an :class:`~anndata.AnnData` into an actual
:class:`~anndata.AnnData`.
"""
dont_modify = False # only necessary for backed views
if df is None:
dfs = [self.obs, self.var]
if self.isview:
if not self.isbacked:
self._init_as_actual(self.copy())
else:
dont_modify = True
else:
dfs = [df]
for df in dfs:
string_cols = [
key for key in df.columns
if is_string_dtype(df[key])
and not is_categorical(df[key])
]
for key in string_cols:
# make sure we only have strings (could be that there are
# np.nans (float), -666, '-666', for instance)
c = df[key].astype('U')
# make a categorical
c = pd.Categorical(c, categories=natsorted(np.unique(c)))
if len(c.categories) < len(c):
if dont_modify:
raise RuntimeError(
'Please call `.strings_to_categoricals()` on full AnnData, not on this view. '
'You might encounter this error message while copying or writing to disk.'
)
df[key] = c
logger.info('... storing {!r} as categorical'.format(key)) |
java | public static void execute()
throws EFapsException
{
RunLevel.RUNLEVEL.executeMethods();
final List<String> allInitializer = RunLevel.RUNLEVEL.getAllInitializers();
for (final AbstractCache<?> cache : AbstractCache.getCaches()) {
        final String initializer = cache.getInitializer();
        if (!allInitializer.contains(initializer)) {
cache.clear();
}
}
for (final IRunLevelListener listener : Listener.get().<IRunLevelListener>invoke(IRunLevelListener.class)) {
listener.onExecute(RunLevel.RUNLEVEL.name);
}
} |
python | def get_ancestors(self):
"""
:returns: A queryset containing the current node object's ancestors,
starting by the root node and descending to the parent.
"""
if self.is_root():
return get_result_class(self.__class__).objects.none()
paths = [
self.path[0:pos]
for pos in range(0, len(self.path), self.steplen)[1:]
]
return get_result_class(self.__class__).objects.filter(
path__in=paths).order_by('depth') |
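The slicing above is the materialized-path trick: every fixed-width prefix of a node's path identifies an ancestor. A self-contained illustration with a 4-character step (values invented; the real model stores these in `path`):

    >>> path, steplen = '000100020003', 4
    >>> [path[0:pos] for pos in range(0, len(path), steplen)[1:]]
    ['0001', '00010002']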
java | public static <R> FuncN<Observable<R>> asyncFunc(final FuncN<? extends R> func) {
return toAsync(func);
} |
python | def list_tables(self, dataset):
"""Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
"""
request = self.client.tables().list(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
maxResults=1000)
response = request.execute()
while response is not None:
for t in response.get('tables', []):
yield t['tableReference']['tableId']
request = self.client.tables().list_next(request, response)
if request is None:
break
response = request.execute() |
java | @Override
@SuppressWarnings("unchecked")
public K deleteMax() {
K result;
switch (size) {
case 0:
throw new NoSuchElementException();
case 1:
result = array[1];
array[1] = null;
size--;
break;
case 2:
result = array[2];
array[2] = null;
size--;
break;
default:
if (comparator == null) {
if (((Comparable<? super K>) array[3]).compareTo(array[2]) > 0) {
result = array[3];
array[3] = array[size];
array[size] = null;
size--;
if (size >= 3) {
fixdownMax(3);
}
} else {
result = array[2];
array[2] = array[size];
array[size] = null;
size--;
fixdownMax(2);
}
} else {
if (comparator.compare(array[3], array[2]) > 0) {
result = array[3];
array[3] = array[size];
array[size] = null;
size--;
if (size >= 3) {
fixdownMaxWithComparator(3);
}
} else {
result = array[2];
array[2] = array[size];
array[size] = null;
size--;
fixdownMaxWithComparator(2);
}
}
break;
}
if (Constants.NOT_BENCHMARK) {
if (2 * minCapacity < array.length - 1 && 4 * size < array.length - 1) {
ensureCapacity((array.length - 1) / 2);
}
}
return result;
} |
java | public void writeLongUTF(String str) throws IOException {
int length = str.length();
writeCompressedInt(length);
for(int position = 0; position<length; position+=20480) {
int blockLength = length - position;
if(blockLength>20480) blockLength = 20480;
String block = str.substring(position, position+blockLength);
writeUTF(block);
}
} |
python | def on_sanji_message(self, client, userdata, msg):
"""This function will recevie all message from mqtt
client
the client instance for this callback
userdata
the private user data as set in Client() or userdata_set()
    msg
an instance of MQTTMessage. This is a class with members topic,
payload, qos, retain.
"""
try:
message = Message(msg.payload)
except (TypeError, ValueError) as e:
_logger.error(e, exc_info=True)
return
if message.type() == MessageType.UNKNOWN:
_logger.debug("Got an UNKNOWN message, don't dispatch")
return
if message.type() == MessageType.RESPONSE:
self.res_queue.put(message)
if message.type() == MessageType.REQUEST or \
message.type() == MessageType.DIRECT or \
message.type() == MessageType.HOOK or \
message.type() == MessageType.EVENT:
self.req_queue.put(message) |
python | def char2hex(a: str):
"""Convert a hex character to its integer value.
'0' becomes 0, '9' becomes 9
'A' becomes 10, 'F' becomes 15
'a' becomes 10, 'f' becomes 15
Returns -1 on error.
"""
if "0" <= a <= "9":
return ord(a) - 48
elif "A" <= a <= "F":
return ord(a) - 55
elif "a" <= a <= "f": # a-f
return ord(a) - 87
return -1 |
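Spot checks for `char2hex` (ASCII offsets: '0' is 48, 'A' is 65, 'a' is 97):

    >>> char2hex('7'), char2hex('C'), char2hex('c'), char2hex('g')
    (7, 12, 12, -1)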
java | protected String getCurrentInterval(HttpServletRequest req, boolean saveToSession){
String intervalParameter = req.getParameter(PARAM_INTERVAL);
String interval = intervalParameter;
if (interval==null){
interval = (String)req.getSession().getAttribute(BEAN_INTERVAL);
if (interval == null)
interval = DEFAULT_INTERVAL;
}
if (intervalParameter!=null && saveToSession)
req.getSession().setAttribute(BEAN_INTERVAL, interval);
return interval;
} |
python | def trace(args):
"""
%prog trace unitig{version}.{partID}.{unitigID}
Call `grep` to get the erroneous fragment placement.
"""
p = OptionParser(trace.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
s, = args
version, partID, unitigID = get_ID(s)
flist = glob("../5-consensus/*_{0:03d}.err".format(int(partID)))
assert len(flist) == 1
fp = open(flist[0])
instate = False
for row in fp:
if working in row and unitigID in row:
rows = []
instate = True
if instate:
rows.append(row)
if failed in row:
instate = False
if len(rows) > 20:
ignore_line = "... ({0} lines skipped)\n".format(len(rows) - 20)
rows = rows[:10] + [ignore_line] + rows[-10:]
print("".join(rows), file=sys.stderr) |
python | def decode_fetch_response(cls, data):
"""
Decode bytes to a FetchResponse
Arguments:
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _ in range(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for j in range(num_partitions):
((partition, error, highwater_mark_offset), cur) = \
relative_unpack('>ihq', data, cur)
(message_set, cur) = read_int_string(data, cur)
yield FetchResponse(
topic, partition, error,
highwater_mark_offset,
KafkaProtocol._decode_message_set_iter(message_set)) |
python | def age(self, minimum: int = 16, maximum: int = 66) -> int:
"""Get a random integer value.
:param maximum: Maximum value of age.
:param minimum: Minimum value of age.
:return: Random integer.
:Example:
23.
"""
age = self.random.randint(minimum, maximum)
self._store['age'] = age
return age |
python | def _add_event_source(awsclient, evt_source, lambda_arn):
"""
Given an event_source dictionary, create the object and add the event source.
"""
event_source_obj = _get_event_source_obj(awsclient, evt_source)
    # zappa removes and re-adds the event source; here we update it
    # in place if it already exists, and add it otherwise:
if event_source_obj.exists(lambda_arn):
event_source_obj.update(lambda_arn)
else:
event_source_obj.add(lambda_arn) |
java | public boolean addCellLink(GridCell<P> cell) {
if (this.cells.add(cell)) {
return isReferenceCell(cell);
}
return false;
} |
java | @Override
public Object parse(String source) throws Exception {
DocumentBuilder builder = builderFactory.newDocumentBuilder();
InputSource is = new InputSource(new StringReader(source));
Node node = builder.parse(is).getFirstChild();
LinkedHashMap<String, Object> map = new LinkedHashMap<>();
return copyChildren("xml", node, map);
} |
java | public <Result> Result submitRequest(TDApiRequest apiRequest, Optional<String> apiKeyCache, TDHttpRequestHandler<Result> handler)
throws TDClientException
{
RequestContext requestContext = new RequestContext(config, apiRequest, apiKeyCache);
try {
return submitRequest(requestContext, handler);
}
catch (InterruptedException e) {
logger.warn("API request interrupted", e);
throw new TDClientInterruptedException(e);
}
catch (TDClientException e) {
throw e;
}
catch (Exception e) {
throw new TDClientException(INVALID_JSON_RESPONSE, e);
}
} |
python | def get_transcript(self, gene_pk, refseq_id):
"Get a transcript from the cache or add a new record."
if not refseq_id:
return
transcript_pk = self.transcripts.get(refseq_id)
if transcript_pk:
return transcript_pk
gene = Gene(pk=gene_pk)
transcript = Transcript(refseq_id=refseq_id, gene=gene)
try:
transcript.save()
except IntegrityError:
transcript = Transcript.objects.get(refseq_id=refseq_id, gene=gene)
self.transcripts[refseq_id] = transcript.pk
return transcript.pk |
python | def stop_stack(awsclient, stack_name, use_suspend=False):
"""Stop an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
"""
exit_code = 0
# check for DisableStop
#disable_stop = conf.get('deployment', {}).get('DisableStop', False)
#if disable_stop:
# log.warn('\'DisableStop\' is set - nothing to do!')
#else:
if not stack_exists(awsclient, stack_name):
log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
else:
client_cfn = awsclient.get_client('cloudformation')
client_autoscaling = awsclient.get_client('autoscaling')
client_rds = awsclient.get_client('rds')
client_ec2 = awsclient.get_client('ec2')
resources = all_pages(
client_cfn.list_stack_resources,
{ 'StackName': stack_name },
lambda r: r['StackResourceSummaries']
)
autoscaling_groups = [
r for r in resources
if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
]
# lookup all types of scaling processes
# [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
# AlarmNotification, ScheduledActions, AddToLoadBalancer]
response = client_autoscaling.describe_scaling_process_types()
scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
for asg in autoscaling_groups:
# find instances in autoscaling group
ec2_instances = all_pages(
client_autoscaling.describe_auto_scaling_instances,
{},
lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
)
if use_suspend:
# alternative implementation to speed up start
# only problem is that instances must survive stop & start
# suspend all autoscaling processes
log.info('Suspending all autoscaling processes for \'%s\'',
asg['LogicalResourceId'])
response = client_autoscaling.suspend_processes(
AutoScalingGroupName=asg['PhysicalResourceId'],
ScalingProcesses=scaling_process_types
)
_stop_ec2_instances(awsclient, ec2_instances)
else:
# resize autoscaling group (min, max = 0)
log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0',
asg['LogicalResourceId'])
response = client_autoscaling.update_auto_scaling_group(
AutoScalingGroupName=asg['PhysicalResourceId'],
MinSize=0,
MaxSize=0
)
if ec2_instances:
running_instances = all_pages(
client_ec2.describe_instance_status,
{
'InstanceIds': ec2_instances,
'Filters': [{
'Name': 'instance-state-name',
'Values': ['pending', 'running']
}]
},
lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
)
if running_instances:
# wait for instances to terminate
waiter_inst_terminated = client_ec2.get_waiter('instance_terminated')
waiter_inst_terminated.wait(InstanceIds=running_instances)
# setting ECS desiredCount to zero
services = [
r for r in resources
if r['ResourceType'] == 'AWS::ECS::Service'
]
if services:
template, parameters = _get_template_parameters(awsclient, stack_name)
_stop_ecs_services(awsclient, services, template, parameters)
# stopping ec2 instances
instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::EC2::Instance'
]
_stop_ec2_instances(awsclient, instances)
# stopping db instances
db_instances = [
r['PhysicalResourceId'] for r in resources
if r['ResourceType'] == 'AWS::RDS::DBInstance'
]
running_db_instances = _filter_db_instances_by_status(
awsclient, db_instances, ['available']
)
for db in running_db_instances:
log.info('Stopping RDS instance \'%s\'', db)
client_rds.stop_db_instance(DBInstanceIdentifier=db)
return exit_code |
python | def _selectView( self ):
"""
Matches the view selection to the trees selection.
"""
scene = self.uiGanttVIEW.scene()
scene.blockSignals(True)
scene.clearSelection()
for item in self.uiGanttTREE.selectedItems():
item.viewItem().setSelected(True)
scene.blockSignals(False)
    curr_item = self.uiGanttTREE.currentItem()
    vitem = curr_item.viewItem() if curr_item else None
if vitem:
self.uiGanttVIEW.centerOn(vitem) |
java | public static <T extends MethodDescription> ElementMatcher.Junction<T> takesArgument(int index, ElementMatcher<? super TypeDescription> matcher) {
return takesGenericArgument(index, erasure(matcher));
} |
python | def ro(self):
""" Return read-only copy of this object
:return: WHTTPHeaders
"""
ro_headers = WHTTPHeaders()
names = self.headers()
for name in names:
ro_headers.add_headers(name, *self.get_headers(name))
ro_headers.__cookies = self.__set_cookies.ro()
ro_headers.__ro_flag = True
return ro_headers |
python | def space_labels(document):
"""Ensure space around bold compound labels."""
for label in document.xpath('.//bold'):
# TODO: Make this more permissive to match chemical_label in parser
        if not label.text or not re.match(r'^\(L?\d\d?[a-z]?\):?$', label.text, re.I):
continue
parent = label.getparent()
previous = label.getprevious()
if previous is None:
text = parent.text or ''
if not text.endswith(' '):
parent.text = text + ' '
else:
text = previous.tail or ''
if not text.endswith(' '):
previous.tail = text + ' '
text = label.tail or ''
if not text.endswith(' '):
label.tail = text + ' '
return document |
java | public <T,E extends TypedEdge<T>> void write(
Multigraph<T,E> g, File f,
Indexer<String> vertexLabels)
throws IOException {
write(g, f, null, false, vertexLabels, true);
} |
python | def get_directories(self, request):
"""Return directories
"""
queryset = self.folder.media_folder_children.all().order_by(*config.MEDIA_FOLDERS_ORDER_BY.split(","))
paginator = Paginator(queryset, self.objects_per_page)
page = request.GET.get('page', None)
try:
object_list = paginator.page(page)
except PageNotAnInteger:
if page == "all":
object_list = queryset
else:
object_list = paginator.page(1)
except EmptyPage:
object_list = paginator.page(paginator.num_pages)
return object_list |
java | @SuppressWarnings("rawtypes")
static OutputType getStreamingType(Class<? extends CommandOutput> commandOutputClass) {
ClassTypeInformation<? extends CommandOutput> classTypeInformation = ClassTypeInformation.from(commandOutputClass);
TypeInformation<?> superTypeInformation = classTypeInformation.getSuperTypeInformation(StreamingOutput.class);
if (superTypeInformation == null) {
return null;
}
List<TypeInformation<?>> typeArguments = superTypeInformation.getTypeArguments();
return new OutputType(commandOutputClass, typeArguments.get(0), true) {
@Override
public ResolvableType withCodec(RedisCodec<?, ?> codec) {
TypeInformation<?> typeInformation = ClassTypeInformation.from(codec.getClass());
ResolvableType resolvableType = ResolvableType.forType(commandOutputClass, new CodecVariableTypeResolver(
typeInformation));
while (resolvableType != ResolvableType.NONE) {
ResolvableType[] interfaces = resolvableType.getInterfaces();
for (ResolvableType resolvableInterface : interfaces) {
if (resolvableInterface.getRawClass().equals(StreamingOutput.class)) {
return resolvableInterface.getGeneric(0);
}
}
resolvableType = resolvableType.getSuperType();
}
throw new IllegalStateException();
}
};
} |
java | private void parseAllowRetries(Map<Object, Object> props) {
Object value = props.get(HttpConfigConstants.PROPNAME_ALLOW_RETRIES);
if (null != value) {
this.bAllowRetries = convertBoolean(value);
if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
Tr.event(tc, "Config: allow retries is " + allowsRetries());
}
}
} |
java | private static double getSideLengthFromCircumscribedRadius(double radius, int n) {
if (n == 2) {
return radius;
}
return radius * (2 * Math.sin(Math.PI/n));
} |
python | def assert_valid_execution_arguments(
schema: GraphQLSchema,
document: DocumentNode,
raw_variable_values: Dict[str, Any] = None,
) -> None:
"""Check that the arguments are acceptable.
Essential assertions before executing to provide developer feedback for improper use
of the GraphQL library.
"""
if not document:
raise TypeError("Must provide document")
# If the schema used for execution is invalid, throw an error.
assert_valid_schema(schema)
# Variables, if provided, must be a dictionary.
if not (raw_variable_values is None or isinstance(raw_variable_values, dict)):
raise TypeError(
"Variables must be provided as a dictionary where each property is a"
" variable value. Perhaps look to see if an unparsed JSON string was"
" provided."
) |
java | public static void saveMapFileSequences(String path, JavaRDD<List<List<Writable>>> rdd, int interval,
Integer maxOutputFiles) {
Configuration c = new Configuration();
c.set(MAP_FILE_INDEX_INTERVAL_KEY, String.valueOf(interval));
saveMapFileSequences(path, rdd, c, maxOutputFiles);
} |
python | def run(self, synchronous=True, **kwargs):
"""Helper to run existing job template
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
        'data' supports the following fields:
required:
job_template_id/feature,
targeting_type,
search_query/bookmark_id,
inputs
optional:
description_format,
            concurrency_control,
scheduling,
ssh,
recurrence,
execution_timeout_interval
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
kwargs = kwargs.copy() # shadow the passed-in kwargs
kwargs.update(self._server_config.get_client_kwargs())
if 'data' in kwargs:
if 'job_template_id' not in kwargs['data'] and 'feature' not in kwargs['data']:
raise KeyError('Provide either job_template_id or feature value')
if 'search_query' not in kwargs['data'] and 'bookmark_id' not in kwargs['data']:
raise KeyError('Provide either search_query or bookmark_id value')
for param_name in ['targeting_type', 'inputs']:
if param_name not in kwargs['data']:
raise KeyError('Provide {} value'.format(param_name))
kwargs['data'] = {u'job_invocation': kwargs['data']}
response = client.post(self.path('base'), **kwargs)
response.raise_for_status()
if synchronous is True:
return ForemanTask(
server_config=self._server_config, id=response.json()['task']['id']).poll()
return response.json() |
java | public void marshall(DetachThingPrincipalRequest detachThingPrincipalRequest, ProtocolMarshaller protocolMarshaller) {
if (detachThingPrincipalRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(detachThingPrincipalRequest.getThingName(), THINGNAME_BINDING);
protocolMarshaller.marshall(detachThingPrincipalRequest.getPrincipal(), PRINCIPAL_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def _station_info(station_code):
"""filename based meta data for a station code."""
    with open(env.SRC_PATH + '/eere.csv') as url_file:
        for line in csv.DictReader(url_file):
            if line['station_code'] == station_code:
                return line
raise KeyError('Station not found') |
python | def lt(self, v, limit=None, offset=None):
"""Returns the list of the members of the set that have scores
less than v.
"""
if limit is not None and offset is None:
offset = 0
return self.zrangebyscore(self._min_score, "(%f" % v,
start=offset, num=limit) |
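The `"(%f" % v` above renders Redis ZRANGEBYSCORE syntax, where a leading `(` marks an exclusive bound, so members scoring exactly `v` are excluded. For example, a hypothetical `zset.lt(3.0, limit=10)` translates to `zrangebyscore(min_score, "(3.000000", start=0, num=10)`.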
python | def mkdir(self, mdir, parents=False):
"""Make a directory.
Note that this will not error out if the directory already exists
(that is how the PutDirectory Manta API behaves).
@param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
@param parents {bool} Optional. Default false. Like 'mkdir -p', this
will create parent dirs as necessary.
"""
assert mdir.startswith('/'), "%s: invalid manta path" % mdir
parts = mdir.split('/')
assert len(parts) > 3, "%s: cannot create top-level dirs" % mdir
if not parents:
self.put_directory(mdir)
else:
# Find the first non-existant dir: binary search. Because
# PutDirectory doesn't error on 'mkdir .../already-exists' we
# don't have a way to detect a miss on `start`. So basically we
# keep doing the binary search until we hit and close the `start`
# to `end` gap.
# Example:
# - mdir: /trent/stor/builds/a/b/c (need to mk a/b/c)
# parts: ['', 'trent', 'stor', 'builds', 'a', 'b', 'c']
# start: 4
# end: 8
# - idx: 6
# d: /trent/stor/builds/a/b (put_directory fails)
# end: 6
# - idx: 5
# d: /trent/stor/builds/a (put_directory succeeds)
# start: 5
# (break out of loop)
# - for i in range(6, 8):
# i=6 -> d: /trent/stor/builds/a/b
# i=7 -> d: /trent/stor/builds/a/b/c
end = len(parts) + 1
start = 3 # Index of the first possible dir to create.
while start < end - 1:
idx = int((end - start) // 2 + start)
d = '/'.join(parts[:idx])
try:
self.put_directory(d)
except errors.MantaAPIError:
_, ex, _ = sys.exc_info()
if ex.code == 'DirectoryDoesNotExist':
end = idx
else:
raise
else:
start = idx
# Now need to create from (end-1, len(parts)].
for i in range(end, len(parts) + 1):
d = '/'.join(parts[:i])
self.put_directory(d) |
python | def assert_almost_equal(
first, second, msg_fmt="{msg}", places=None, delta=None
):
"""Fail if first and second are not equal after rounding.
By default, the difference between first and second is rounded to
7 decimal places. This can be configured with the places argument.
Alternatively, delta can be used to specify the maximum allowed
difference between first and second.
If first and second can not be rounded or both places and delta are
supplied, a TypeError is raised.
>>> assert_almost_equal(5, 5.00000001)
>>> assert_almost_equal(5, 5.001)
Traceback (most recent call last):
...
AssertionError: 5 != 5.001 within 7 places
>>> assert_almost_equal(5, 5.001, places=2)
>>> assert_almost_equal(5, 5.001, delta=0.1)
The following msg_fmt arguments are supported:
* msg - the default error message
* first - the first argument
* second - the second argument
* places - number of places to compare or None
* delta - delta or None
"""
if delta is not None and places is not None:
raise TypeError("'places' and 'delta' are mutually exclusive")
if delta is not None:
if delta <= 0:
raise ValueError("delta must be larger than 0")
diff = abs(second - first)
success = diff < delta
detail_msg = "with delta={}".format(delta)
else:
if places is None:
places = 7
success = not round(second - first, places)
detail_msg = "within {} places".format(places)
if not success:
msg = "{!r} != {!r} {}".format(first, second, detail_msg)
fail(
msg_fmt.format(
msg=msg, first=first, second=second, places=places, delta=delta
)
) |
python | def get_properties(self, obj):
"""Fill out properties field."""
properties = {}
for field_name, field in sorted(obj.fields.items()):
schema = self._get_schema_for_field(obj, field)
properties[field.name] = schema
return properties |
python | def bounding_box(positions):
'''Computes the bounding box for a list of 3-dimensional points.
'''
(x0, y0, z0) = (x1, y1, z1) = positions[0]
for x, y, z in positions:
x0 = min(x0, x)
y0 = min(y0, y)
z0 = min(z0, z)
x1 = max(x1, x)
y1 = max(y1, y)
z1 = max(z1, z)
return (x0, y0, z0), (x1, y1, z1) |
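A quick check of `bounding_box`, runnable as-is:

    >>> bounding_box([(0, 1, 2), (-1, 5, 0), (3, 2, 2)])
    ((-1, 1, 0), (3, 5, 2))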
python | def _infer_record_outputs(inputs, unlist, file_vs, std_vs, parallel, to_include=None,
exclude=None):
"""Infer the outputs of a record from the original inputs
"""
fields = []
unlist = set([_get_string_vid(x) for x in unlist])
input_vids = set([_get_string_vid(v) for v in _handle_special_inputs(inputs, file_vs)])
to_include = set([_get_string_vid(x) for x in to_include]) if to_include else None
to_exclude = tuple(set([_get_string_vid(x) for x in exclude])) if exclude else None
added = set([])
for raw_v in std_vs + [v for v in file_vs if get_base_id(v["id"]) in input_vids]:
# unpack record inside this record and un-nested inputs to avoid double nested
cur_record = is_cwl_record(raw_v)
if cur_record:
# unlist = unlist | set([field["name"] for field in cur_record["fields"]])
nested_vs = [{"id": field["name"], "type": field["type"]} for field in cur_record["fields"]]
else:
nested_vs = [raw_v]
for orig_v in nested_vs:
if (get_base_id(orig_v["id"]) not in added
and (not to_include or get_base_id(orig_v["id"]) in to_include)):
if to_exclude is None or not get_base_id(orig_v["id"]).startswith(to_exclude):
cur_v = {}
cur_v["name"] = get_base_id(orig_v["id"])
cur_v["type"] = orig_v["type"]
if cur_v["name"] in unlist:
cur_v = _flatten_nested_input(cur_v)
fields.append(_add_secondary_to_rec_field(orig_v, cur_v))
added.add(get_base_id(orig_v["id"]))
return fields |
python | def last_update_time(self) -> float:
"""The last time at which the report was modified."""
stdout = self.stdout_interceptor
stderr = self.stderr_interceptor
return max([
self._last_update_time,
stdout.last_write_time if stdout else 0,
stderr.last_write_time if stderr else 0,
]) |
java | @Nonnull
public CSSRect setBottom (@Nonnull @Nonempty final String sBottom)
{
ValueEnforcer.notEmpty (sBottom, "Bottom");
m_sBottom = sBottom;
return this;
} |
java | public static SObject of(String key, byte[] buf, Map<String, String> attrs) {
SObject sobj = of(key, $.requireNotNull(buf));
sobj.setAttributes(attrs);
return sobj;
} |
python | def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
"""
Create a new Subnet
:type vpc_id: str
:param vpc_id: The ID of the VPC where you want to create the subnet.
:type cidr_block: str
:param cidr_block: The CIDR block you want the subnet to cover.
:type availability_zone: str
:param availability_zone: The AZ you want the subnet in
:rtype: The newly created Subnet
:return: A :class:`boto.vpc.customergateway.Subnet` object
"""
params = {'VpcId' : vpc_id,
'CidrBlock' : cidr_block}
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('CreateSubnet', params, Subnet) |
java | public void clearExpired() {
Iterator keys = cacheLineTable.keySet().iterator();
while (keys.hasNext()) {
Object key = keys.next();
if (hasExpired(key)) {
removeObject(key);
}
}
} |
python | def _get_index(self):
"""
Get the anchor's index.
This must return an ``int``.
Subclasses may override this method.
"""
glyph = self.glyph
if glyph is None:
return None
return glyph.anchors.index(self) |
python | def __updateStack(self, key):
"""
Update the input stack in non-hotkey mode, and determine if anything
further is needed.
@return: True if further action is needed
"""
#if self.lastMenu is not None:
# if not ConfigManager.SETTINGS[MENU_TAKES_FOCUS]:
# self.app.hide_menu()
#
# self.lastMenu = None
if key == Key.ENTER:
# Special case - map Enter to \n
key = '\n'
if key == Key.TAB:
# Special case - map Tab to \t
key = '\t'
if key == Key.BACKSPACE:
if ConfigManager.SETTINGS[UNDO_USING_BACKSPACE] and self.phraseRunner.can_undo():
self.phraseRunner.undo_expansion()
else:
# handle backspace by dropping the last saved character
try:
self.inputStack.pop()
except IndexError:
# in case self.inputStack is empty
pass
return False
elif len(key) > 1:
# non-simple key
self.inputStack.clear()
self.phraseRunner.clear_last()
return False
else:
# Key is a character
self.phraseRunner.clear_last()
# if len(self.inputStack) == MAX_STACK_LENGTH, front items will removed for appending new items.
self.inputStack.append(key)
return True |
java | public void setNameconflict(String nameconflict) throws ApplicationException {
this.nameconflict = FileUtil.toNameConflict(nameconflict, NAMECONFLICT_UNDEFINED | NAMECONFLICT_ERROR | NAMECONFLICT_OVERWRITE, NAMECONFLICT_DEFAULT);
} |
java | private String getStringResource(String path) {
InputStream in = getClass().getResourceAsStream(path);
StringBuilder index = new StringBuilder();
char[] buffer = new char[1024];
int read = 0;
try(InputStreamReader reader = new InputStreamReader(in)){
while((read = reader.read(buffer)) != -1) {
index.append(buffer, 0, read);
}
} catch (IOException e) {
//just return what we've got
return index.toString();
}
return index.toString();
} |
java | public String toQueryString() {
String result = null;
List<String> parameters = new ArrayList<>();
for (Map.Entry<String, String> entry : entrySet()) {
// We don't encode the key because it could legitimately contain
// things like underscores, e.g. "_escaped_fragment_" would become:
// "%5Fescaped%5Ffragment%5F"
String key = entry.getKey();
String value;
try {
value = URLEncoder.encode(entry.getValue(), StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new IllegalArgumentException("Error encoding URL", e);
}
// Jetty class *appears* to need Java 8
//String value = UrlEncoded.encodeString(entry.getValue());
parameters.add(key + "=" + value);
}
if (parameters.size() > 0) {
result = StringUtils.join(parameters, '&');
}
return result;
} |
python | def set_ipv4_routing(self, vrf_name, default=False, disable=False):
""" Configures ipv4 routing for the vrf
Args:
vrf_name (str): The VRF name to configure
default (bool): Configures ipv4 routing for the vrf value to
default if this value is true
disable (bool): Negates the ipv4 routing for the vrf if set to true
Returns:
True if the operation was successful otherwise False
"""
cmd = 'ip routing vrf %s' % vrf_name
if default:
cmd = 'default %s' % cmd
elif disable:
cmd = 'no %s' % cmd
cmd = make_iterable(cmd)
return self.configure(cmd) |
python | def calcUminUmax(self,**kwargs):
"""
NAME:
calcUminUmax
PURPOSE:
calculate the u 'apocenter' and 'pericenter'
INPUT:
OUTPUT:
(umin,umax)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
"""
if hasattr(self,'_uminumax'): #pragma: no cover
return self._uminumax
E, L= self._E, self._Lz
if nu.fabs(self._pux) < 10.**-7.: #We are at umin or umax
eps= 10.**-8.
peps= _JRStaeckelIntegrandSquared(self._ux+eps,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot)
meps= _JRStaeckelIntegrandSquared(self._ux-eps,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot)
if peps < 0. and meps > 0.: #we are at umax
umax= self._ux
rstart,prevr= _uminUmaxFindStart(self._ux,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot)
if rstart == 0.: umin= 0.
else:
try:
umin= optimize.brentq(_JRStaeckelIntegrandSquared,
rstart,self._ux-eps,
(E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
maxiter=200)
except RuntimeError: #pragma: no cover
raise UnboundError("Orbit seems to be unbound")
elif peps > 0. and meps < 0.: #we are at umin
umin= self._ux
rend,prevr= _uminUmaxFindStart(self._ux,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot,
umax=True)
umax= optimize.brentq(_JRStaeckelIntegrandSquared,
self._ux+eps,rend,
(E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
maxiter=200)
else: #circular orbit
umin= self._ux
umax= self._ux
else:
rstart,prevr= _uminUmaxFindStart(self._ux,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot)
if rstart == 0.: umin= 0.
else:
if nu.fabs(prevr-self._ux) < 10.**-2.: rup= self._ux
else: rup= prevr
try:
umin= optimize.brentq(_JRStaeckelIntegrandSquared,
rstart,rup,
(E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
maxiter=200)
except RuntimeError: #pragma: no cover
raise UnboundError("Orbit seems to be unbound")
rend,prevr= _uminUmaxFindStart(self._ux,
E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot,
umax=True)
umax= optimize.brentq(_JRStaeckelIntegrandSquared,
prevr,rend,
(E,L,self._I3U,self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
maxiter=200)
self._uminumax= (umin,umax)
return self._uminumax |
java | @Override
public WorkspaceStorageConnection openConnection(boolean readOnly) throws RepositoryException
{
try
{
if (this.containerConfig.dbStructureType.isMultiDatabase())
{
return new PostgreMultiDbJDBCConnection(getJdbcConnection(readOnly), readOnly, containerConfig);
}
return new PostgreSingleDbJDBCConnection(getJdbcConnection(readOnly), readOnly, containerConfig);
}
catch (SQLException e)
{
throw new RepositoryException(e);
}
} |
java | private void setOtherAttachments(
RROtherProjectInfo12Document.RROtherProjectInfo12 rrOtherProjectInfo) {
OtherAttachments otherAttachments = OtherAttachments.Factory
.newInstance();
otherAttachments.setOtherAttachmentArray(getAttachedFileDataTypes());
rrOtherProjectInfo.setOtherAttachments(otherAttachments);
} |
java | static <T extends SAMLObject> MessageContext<T> toSamlObject(AggregatedHttpMessage msg, String name) {
final SamlParameters parameters = new SamlParameters(msg);
final byte[] decoded;
try {
decoded = Base64.getMimeDecoder().decode(parameters.getFirstValue(name));
} catch (IllegalArgumentException e) {
throw new SamlException("failed to decode a base64 string of the parameter: " + name, e);
}
@SuppressWarnings("unchecked")
final T message = (T) deserialize(decoded);
final MessageContext<T> messageContext = new MessageContext<>();
messageContext.setMessage(message);
final String relayState = parameters.getFirstValueOrNull(RELAY_STATE);
if (relayState != null) {
final SAMLBindingContext context = messageContext.getSubcontext(SAMLBindingContext.class, true);
assert context != null;
context.setRelayState(relayState);
}
return messageContext;
} |
java | private @Nonnull LoadBalancerHealthCheck toLoadBalancerHealthCheck(JSONObject ob) throws JSONException, InternalException, CloudException {
LoadBalancerHealthCheck.HCProtocol protocol = fromOSProtocol(ob.getString("type"));
int count = ob.getInt("max_retries");
int port = -1;
String path = ob.optString("url_path", null);
    JSONArray pools = ob.optJSONArray("pools");
    if( pools == null ) {
        pools = new JSONArray();  // guard: optJSONArray returns null when the key is absent
    }
    for( int i=0; i<pools.length(); i++ ) {
String lbId = pools.getJSONObject(i).getString("pool_id");
LoadBalancer lb = getLoadBalancer(lbId);
if( lb != null ) {
LbListener[] listeners = lb.getListeners();
if( listeners != null && listeners.length > 0 ) {
port = listeners[0].getPrivatePort();
break;
}
}
}
String id = ob.getString("id");
int timeout = ob.getInt("timeout");
int interval = ob.getInt("delay");
LoadBalancerHealthCheck lbhc = LoadBalancerHealthCheck.getInstance(id, protocol, port, path, interval, timeout, count, count);
for( int i=0; i<pools.length(); i++ ) {
lbhc.addProviderLoadBalancerId(pools.getJSONObject(i).getString("pool_id"));
}
return lbhc;
} |
java | protected CompletableFuture<AppendResponse> handleAppend(final AppendRequest request) {
CompletableFuture<AppendResponse> future = new CompletableFuture<>();
// Check that the term of the given request matches the local term or update the term.
if (!checkTerm(request, future)) {
return future;
}
// Check that the previous index/term matches the local log's last entry.
if (!checkPreviousEntry(request, future)) {
return future;
}
// Append the entries to the log.
appendEntries(request, future);
return future;
} |
python | def files_comments_delete(self, *, file: str, id: str, **kwargs) -> SlackResponse:
"""Deletes an existing comment on a file.
Args:
file (str): The file id. e.g. 'F1234467890'
id (str): The file comment id. e.g. 'Fc1234567890'
"""
kwargs.update({"file": file, "id": id})
return self.api_call("files.comments.delete", json=kwargs) |
python | def _run_queries(self, queries, *args, **kwargs):
"""run the queries
queries -- list -- the queries to run
return -- string -- the results of the query?
"""
# write out all the commands to a temp file and then have psql run that file
f = self._get_file()
for q in queries:
f.write("{};\n".format(q))
f.close()
psql_args = self._get_args('psql', '-X', '-f {}'.format(f.name))
return self._run_cmd(' '.join(psql_args), *args, **kwargs) |
python | def preprocess(
self, nb: "NotebookNode", resources: dict
) -> Tuple["NotebookNode", dict]:
"""Remove any raw cells from the Notebook.
By default, exclude raw cells from the output. Change this by including
global_content_filter->include_raw = True in the resources dictionary.
This preprocessor is necessary because the NotebookExporter doesn't
include the exclude_raw config."""
if not resources.get("global_content_filter", {}).get("include_raw", False):
keep_cells = []
for cell in nb.cells:
if cell.cell_type != "raw":
keep_cells.append(cell)
nb.cells = keep_cells
return nb, resources |
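A small driving sketch using nbformat (the `preprocessor` instance is hypothetical; only `preprocess(nb, resources)` is exercised):

    import nbformat.v4 as nbf

    nb = nbf.new_notebook()
    nb.cells = [nbf.new_code_cell('print(1)'), nbf.new_raw_cell('raw text')]
    nb, _ = preprocessor.preprocess(nb, resources={})
    print([c.cell_type for c in nb.cells])  # ['code'] -- raw cell dropped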
java | public InputStream getResourceAsStream(String name, BeanContextChild bcc) throws IllegalArgumentException {
// bcc must be a child of this context
if (!contains(bcc)) {
throw new IllegalArgumentException("Child is not a member of this context");
}
ClassLoader cl = bcc.getClass().getClassLoader();
InputStream is;
if (cl != null && (is = cl.getResourceAsStream(name)) != null) {
return is;
}
return ClassLoader.getSystemResourceAsStream(name);
} |
java | public CouchDbConsentDecision findFirstConsentDecision(final String principal, final String service) {
val view = createQuery("by_consent_decision").key(ComplexKey.of(principal, service)).limit(1).includeDocs(true);
return db.queryView(view, CouchDbConsentDecision.class).stream().findFirst().orElse(null);
} |
java | public void saveCertificateChain(OutputStream out) throws IOException, CertificateEncodingException {
CertificateIOUtil.writeCertificate(out, this.certChain[0]);
for (int i = 1; i < this.certChain.length; i++) {
// skip the self-signed certificates
if (this.certChain[i].getSubjectDN().equals(certChain[i].getIssuerDN())) {
continue;
}
CertificateIOUtil.writeCertificate(out, this.certChain[i]);
}
out.flush();
} |
python | def _f_ops(op1, op2, swap=True):
""" Receives a list with two strings (operands).
If none of them contains integers, returns None.
Otherwise, returns a t-uple with (op[0], op[1]),
where op[1] is the integer one (the list is swapped)
unless swap is False (e.g. sub and div used this
because they're not commutative).
The integer operand is always converted to int type.
"""
if is_float(op1):
if swap:
return op2, float(op1)
else:
return float(op1), op2
if is_float(op2):
return op1, float(op2)
return None |
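Spot checks for `_f_ops`, assuming the module's `is_float` accepts numeric strings:

    >>> _f_ops('2.5', 't0')   # first operand numeric, so the pair is swapped
    ('t0', 2.5)
    >>> _f_ops('2.5', 't0', swap=False)
    (2.5, 't0')
    >>> _f_ops('t0', '3')
    ('t0', 3.0)
    >>> _f_ops('t0', 't1') is None
    True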
python | def bucket_update(self, name, current, bucket_password=None, replicas=None,
ram_quota=None, flush_enabled=None):
"""
Update an existing bucket's settings.
:param string name: The name of the bucket to update
:param dict current: Current state of the bucket.
This can be retrieve from :meth:`bucket_info`
:param str bucket_password: Change the bucket's password
:param int replicas: The number of replicas for the bucket
:param int ram_quota: The memory available to the bucket
on each node.
:param bool flush_enabled: Whether the flush API should be allowed
from normal clients
:return: A :class:`~.HttpResult` object
:raise: :exc:`~.HTTPError` if the request could not be
completed
.. note::
The default value for all options in this method is
``None``. If a value is set to something else, it will
modify the setting.
Change the bucket password::
adm.bucket_update('a_bucket', adm.bucket_info('a_bucket'),
bucket_password='n3wpassw0rd')
Enable the flush API::
adm.bucket_update('a_bucket', adm.bucket_info('a_bucket'),
flush_enabled=True)
"""
params = {}
current = current.value
# Merge params
params['authType'] = current['authType']
if 'saslPassword' in current:
params['saslPassword'] = current['saslPassword']
if bucket_password is not None:
params['authType'] = 'sasl'
params['saslPassword'] = bucket_password
params['replicaNumber'] = (
replicas if replicas is not None else current['replicaNumber'])
if ram_quota:
params['ramQuotaMB'] = ram_quota
else:
params['ramQuotaMB'] = current['quota']['ram'] / 1024 / 1024
if flush_enabled is not None:
params['flushEnabled'] = int(flush_enabled)
params['proxyPort'] = current['proxyPort']
return self.http_request(path='/pools/default/buckets/' + name,
method='POST',
content_type='application/x-www-form-urlencoded',
content=self._mk_formstr(params)) |
java | public Map getStringDecodedMap(final Map encodedMap, final Map defaults) {
if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
SibTr.entry(this, TRACE, "getStringDecodedMap", new Object[] {encodedMap,defaults});
}
final Map<String,Object> decoded = new HashMap<String,Object>();
// Preload with the defaults - if the property exists in the input
// it will override this default
decoded.putAll(defaults);
// Look at each property in turn.
final Iterator keyList = encodedMap.keySet().iterator();
while (keyList.hasNext()) {
// These variables will point to the info to be placed
// in the map.
String propName = null;
Object propVal = null;
// Get the coded version of the name. This will start with one
// of the prefix values. The codedName must have been non-null.
String encodedKey = (String) keyList.next();
String encodedVal = (String) encodedMap.get(encodedKey);
// Extract the prefix.
String prefix = null;
int sepIndex = encodedKey.indexOf(PREFIX_SEPARATOR);
if (sepIndex == -1) {
// The separator was not found - this is really bad, and
// suggests
// that the encoding step was flawed.
if (TraceComponent.isAnyTracingEnabled() && TRACE.isDebugEnabled())
SibTr.debug(TRACE, "Ignoring malformed encoded name: "
+ encodedKey);
continue;
} else {
// Extract the prefix and clean version of the name.
prefix = encodedKey.substring(0, sepIndex);
propName = encodedKey.substring(sepIndex
+ PREFIX_SEPARATOR.length());
}//if
// Catch any number conversion errors that arise while converting
// the
// string to an object.
try {
// Decode the prefix to recreate the data type.
if (PREFIX_NULL.equals(prefix)) {
// The value was null.
propVal = null;
} else if (PREFIX_STRING.equals(prefix)) {
propVal = encodedVal;
// Because the value was not prefixed with PREFIX_NULL, we
// know that
// if this val is null, it was meant to be an empty
// string...
if (propVal == null) propVal = "";
} else if (PREFIX_BOOLEAN.equals(prefix)) {
propVal = Boolean.valueOf(encodedVal);
} else if (PREFIX_INT.equals(prefix)) {
propVal = Integer.valueOf(encodedVal);
} else if (PREFIX_BYTE.equals(prefix)) {
propVal = Byte.valueOf(encodedVal);
} else if (PREFIX_SHORT.equals(prefix)) {
propVal = Short.valueOf(encodedVal);
} else if (PREFIX_FLOAT.equals(prefix)) {
propVal = Float.valueOf(encodedVal);
} else if (PREFIX_DOUBLE.equals(prefix)) {
propVal = Double.valueOf(encodedVal);
} else if (PREFIX_LONG.equals(prefix)) {
propVal = Long.valueOf(encodedVal);
} else if (PREFIX_ROUTING_PATH.equals(prefix)) {
// encodedVal = array represented as one long string.
// This uses the Java 1.4 regex method on a string to split
// it into
// an array, with the individual strings being separated by
// the string passed in.
String[] array = encodedVal
.split(JmsraConstants.PATH_ELEMENT_SEPARATOR);
// propVal = what we want to return (a string array wrapper
// containing the string[])
String bigDestName = (String) encodedMap
.get(PREFIX_STRING + PREFIX_SEPARATOR
+ JmsInternalConstants.DEST_NAME);
propVal = StringArrayWrapper.create(array, bigDestName);
} else {
// Did not match any of the known prefixes
if (TraceComponent.isAnyTracingEnabled() && TRACE.isDebugEnabled()) {
SibTr
.debug(TRACE, "Ignoring unknown prefix: "
+ prefix);
}
continue;
}// (if)switch on prefix type.
// We have successfully decoded the property, so now add it to
// the
// temporary map of properties.
decoded.put(propName, propVal);
if (TraceComponent.isAnyTracingEnabled() && TRACE.isDebugEnabled()) {
SibTr.debug(TRACE, "retrieved: " + propName + " = "
+ propVal);
}
} catch (final Exception exception) {
FFDCFilter
.processException(
exception,
"com.ibm.ws.sib.api.jmsra.impl.JmsJcaReferenceUtilsImpl.getStringDecodedMap",
FFDC_PROBE_1, this);
// Catch any NumberFormatException or similar thing that arises
// from the attempt to convert the string to another data type.
if (TraceComponent.isAnyTracingEnabled() && TRACE.isDebugEnabled()) {
SibTr.debug(TRACE, "Error decoding string to object. ",
exception);
}
continue;
}//try
}//while
if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
SibTr.exit(this, TRACE, "getStringDecodedMap", decoded);
}
return decoded;
} |
java | public void hubHeartbeat(UpdateServerHeartbeat updateServer,
UpdateRackHeartbeat updateRack,
UpdatePodSystem updatePod,
long sourceTime)
{
RackHeartbeat rack = getCluster().findRack(updateRack.getId());
if (rack == null) {
rack = getRack();
}
updateRack(updateRack);
updateServerStart(updateServer);
// XXX: _podService.onUpdateFromPeer(updatePod);
// rack.update();
updateTargetServers();
PodHeartbeatService podHeartbeat = getPodHeartbeat();
if (podHeartbeat != null && updatePod != null) {
podHeartbeat.updatePodSystem(updatePod);
}
_joinState = _joinState.onHubHeartbeat(this);
// updateHubHeartbeat();
updateHeartbeats();
} |
java | public boolean columnContainsKey(String key) {
String resolvedKey = resolveColumnKey(key);
return columnMap.containsKey(resolvedKey)
|| parent != null && parent.columnContainsKey(resolvedKey);
} |
python | def ylabel(self, labels):
"""
    Determine y-axis label
Parameters
----------
labels : dict
Labels as specified by the user through the ``labs`` or
``ylab`` calls.
Returns
-------
out : str
y-axis label
"""
if self.panel_scales_y[0].name is not None:
return self.panel_scales_y[0].name
else:
return labels.get('y', '') |
python | def cli(ctx):
"""PyHardLinkBackup"""
click.secho("\nPyHardLinkBackup v%s\n" % PyHardLinkBackup.__version__, bg="blue", fg="white", bold=True) |
java | public String build() {
String url = buffer.toString();
if (url.length() > 2083) {
LOG.warning("URL " + url + " is longer than 2083 chars (" + buffer.length()
+ "). It may not work properly in old IE versions.");
}
return url;
} |
python | def detect_events(self, max_attempts=3):
"""Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s
"""
# Attempt to retrieve current state from KindleAPI
for _ in xrange(max_attempts):
try:
with KindleCloudReaderAPI\
.get_instance(self.uname, self.pword) as kcr:
self.books = kcr.get_library_metadata()
self.progress = kcr.get_library_progress()
except KindleAPIError:
continue
else:
break
else:
return None
# Calculate diffs from new progress
progress_map = {book.asin: self.progress[book.asin].locs[1]
for book in self.books}
new_events = self._snapshot.calc_update_events(progress_map)
update_event = UpdateEvent(datetime.now().replace(microsecond=0))
new_events.append(update_event)
self._event_buf.extend(new_events)
return new_events |
java | public static String detectFormat(String strTime) {
    // Try candidate formats from most to least specific; return the first match.
    String format;
    if ((format = tryAndGetFormat(strTime, dateTimeWithSubSecAndTZFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeAndTZFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeWithSubSecFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateHourMinOnlyFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateHourOnlyFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeWithSubSecAndTZWithSpaceFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeAndTZWithSpaceFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeWithSubSecWithSpaceFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateTimeWithSpaceFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateHourMinWithSpaceOnlyFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateHourWithSpaceOnlyFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, dateOnlyFormat)) != null) return format;
    if ((format = tryAndGetFormat(strTime, yearMonthOnlyFormat)) != null) return format;
    return tryAndGetFormat(strTime, yearOnlyFormat);
} |
java | public void setMetricsToAnnotate(List<String> metricsToAnnotate) {
this.metricsToAnnotate.clear();
if (metricsToAnnotate != null && !metricsToAnnotate.isEmpty()) {
for (String metric : metricsToAnnotate) {
requireArgument(getMetricToAnnotate(metric) != null, "Metrics to annotate should be of the form 'scope:metric[{[tagk=tagv]+}]");
this.metricsToAnnotate.add(metric);
}
}
} |
java | @Override
public GetActiveNamesResult getActiveNames(GetActiveNamesRequest request) {
request = beforeClientExecution(request);
return executeGetActiveNames(request);
} |
java | public static final <T> Function<T,List<T>> intoSingletonListOf(final Type<T> type) {
return new IntoSingletonList<T>();
} |