language | func_code_string |
---|---|
java | private static void sendMessageBatchOperationMd5Check(SendMessageBatchRequest sendMessageBatchRequest,
SendMessageBatchResult sendMessageBatchResult) {
Map<String, SendMessageBatchRequestEntry> idToRequestEntryMap = new HashMap<String, SendMessageBatchRequestEntry>();
if (sendMessageBatchRequest.getEntries() != null) {
for (SendMessageBatchRequestEntry entry : sendMessageBatchRequest.getEntries()) {
idToRequestEntryMap.put(entry.getId(), entry);
}
}
if (sendMessageBatchResult.getSuccessful() != null) {
for (SendMessageBatchResultEntry entry : sendMessageBatchResult.getSuccessful()) {
String messageBody = idToRequestEntryMap.get(entry.getId()).getMessageBody();
String bodyMd5Returned = entry.getMD5OfMessageBody();
String clientSideBodyMd5 = calculateMessageBodyMd5(messageBody);
if (!clientSideBodyMd5.equals(bodyMd5Returned)) {
throw new AmazonClientException(String.format(MD5_MISMATCH_ERROR_MESSAGE_WITH_ID, MESSAGE_BODY,
entry.getId(), clientSideBodyMd5, bodyMd5Returned));
}
Map<String, MessageAttributeValue> messageAttr = idToRequestEntryMap.get(entry.getId())
.getMessageAttributes();
if (messageAttr != null && !messageAttr.isEmpty()) {
String attrMd5Returned = entry.getMD5OfMessageAttributes();
String clientSideAttrMd5 = calculateMessageAttributesMd5(messageAttr);
if (!clientSideAttrMd5.equals(attrMd5Returned)) {
throw new AmazonClientException(String.format(MD5_MISMATCH_ERROR_MESSAGE_WITH_ID,
MESSAGE_ATTRIBUTES, entry.getId(), clientSideAttrMd5, attrMd5Returned));
}
}
}
}
} |
java | public List<String> getUsernameAttributes() throws GuacamoleException {
return environment.getProperty(
LDAPGuacamoleProperties.LDAP_USERNAME_ATTRIBUTE,
Collections.singletonList("uid")
);
} |
python | def get(company='', company_uri=''):
"""Performs a HTTP GET for a glassdoor page and returns json"""
if not company and not company_uri:
raise Exception("glassdoor.gd.get(company='', company_uri=''): "\
" company or company_uri required")
payload = {}
if not company_uri:
payload.update({'clickSource': 'searchBtn',
'sc.keyword': company
})
uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
else:
uri = '%s%s' % (GLASSDOOR_API, company_uri)
r = requests.get(uri, params=payload)
soup = BeautifulSoup(r.content, 'html.parser')  # explicit parser keeps bs4 from guessing
results = parse(soup)
return results |
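A minimal usage sketch for get() above; the company_uri value is hypothetical, and GLASSDOOR_API, REVIEWS_URL and parse() are assumed to be defined at module level as the function implies:

results = get(company='Acme Corp')                   # search by company name
results = get(company_uri='/Reviews/acme-reviews')   # hypothetical URI path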
java | public Archetype parse(String adl) {
try {
return parse(new StringReader(adl));
} catch (IOException e) {
// StringReader should never throw an IOException
throw new AssertionError(e);
}
} |
java | public static boolean check(String token, String signature, String timestamp, String nonce) {
// Guard against long-ciphertext attacks (reject oversized inputs)
if (signature == null
|| signature.length() > 128
|| timestamp == null
|| timestamp.length() > 128
|| nonce == null
|| nonce.length() > 128) {
log.warnf("bad check : signature=%s,timestamp=%s,nonce=%s",
signature,
timestamp,
nonce);
return false;
}
ArrayList<String> tmp = new ArrayList<String>();
tmp.add(token);
tmp.add(timestamp);
tmp.add(nonce);
Collections.sort(tmp);
String key = Lang.concat("", tmp).toString();
return Lang.sha1(key).equalsIgnoreCase(signature);
} |
python | def is_cf_trajectory(nc, variable):
'''
Returns true if the variable is a CF trajectory feature type
:param netCDF4.Dataset nc: An open netCDF dataset
:param str variable: name of the variable to check
'''
# x(i, o), y(i, o), z(i, o), t(i, o)
# X(i, o)
dims = nc.variables[variable].dimensions
cmatrix = coordinate_dimension_matrix(nc)
for req in ('x', 'y', 't'):
if req not in cmatrix:
return False
if len(cmatrix['x']) != 2:
return False
if cmatrix['x'] != cmatrix['y']:
return False
if cmatrix['x'] != cmatrix['t']:
return False
if 'z' in cmatrix and cmatrix['x'] != cmatrix['z']:
return False
if dims == cmatrix['x']:
return True
return False |
python | def load_multiformat_time_series():
"""Loading time series data from a zip file in the repo"""
data = get_example_data('multiformat_time_series.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s')
pdf.to_sql(
'multiformat_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': Date,
'ds2': DateTime,
'epoch_s': BigInteger,
'epoch_ms': BigInteger,
'string0': String(100),
'string1': String(100),
'string2': String(100),
'string3': String(100),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [multiformat_time_series] reference')
obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first()
if not obj:
obj = TBL(table_name='multiformat_time_series')
obj.main_dttm_col = 'ds'
obj.database = utils.get_or_create_main_db()
dttm_and_expr_dict = {
'ds': [None, None],
'ds2': [None, None],
'epoch_s': ['epoch_s', None],
'epoch_ms': ['epoch_ms', None],
'string2': ['%Y%m%d-%H%M%S', None],
'string1': ['%Y-%m-%d^%H:%M:%S', None],
'string0': ['%Y-%m-%d %H:%M:%S.%f', None],
'string3': ['%Y/%m/%d%H:%M:%S.%f', None],
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.database_expr = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print('Creating Heatmap charts')
for i, col in enumerate(tbl.columns):
slice_data = {
'metrics': ['count'],
'granularity_sqla': col.column_name,
'row_limit': config.get('ROW_LIMIT'),
'since': '2015',
'until': '2016',
'where': '',
'viz_type': 'cal_heatmap',
'domain_granularity': 'month',
'subdomain_granularity': 'day',
}
slc = Slice(
slice_name=f'Calendar Heatmap multiformat {i}',
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.add('Calendar Heatmap multiformat 0') |
java | @GwtIncompatible("Class.getDeclaredFields")
public ToStringBuilder addDeclaredFields() {
Field[] fields = instance.getClass().getDeclaredFields();
for(Field field : fields) {
addField(field);
}
return this;
} |
java | private List<Object> create(
final Object rowObj,
final ExecutionContext ec,
final Object previousRow) {
if (rowObj instanceof ExcelFixtureRowHandler) {
final ExcelFixtureRowHandler rowHandler = (ExcelFixtureRowHandler) rowObj;
return rowHandler.handleRow(ec, this, previousRow);
} else {
repositoryService.persist(rowObj);
ec.addResult(this, rowObj);
return Collections.singletonList(rowObj);
}
} |
python | def get_order(self, order_id, **params):
"""https://developers.coinbase.com/api/v2#show-an-order"""
response = self._get('v2', 'orders', order_id, params=params)
return self._make_api_object(response, Order) |
java | public static void changeAlpha(InputStream srcInput, OutputStream destOutput,
byte alpha) throws IOException {
changeAlpha(srcInput, destOutput, alpha, null);
} |
python | def get_context_data(self, **kwargs):
"""
Return a dictionary containing all of the values needed to render the
menu instance to a template, including values that might be used by
the 'sub_menu' tag to render any additional levels.
"""
ctx_vals = self._contextual_vals
opt_vals = self._option_vals
data = self.create_dict_from_parent_context()
data.update(ctx_vals._asdict())
data.update({
'apply_active_classes': opt_vals.apply_active_classes,
'allow_repeating_parents': opt_vals.allow_repeating_parents,
'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
'max_levels': self.max_levels,
'use_specific': self.use_specific,
'menu_instance': self,
self.menu_instance_context_name: self,
# Repeat some vals with backwards-compatible keys
'section_root': data['current_section_root_page'],
'current_ancestor_ids': data['current_page_ancestor_ids'],
})
if not ctx_vals.original_menu_instance and ctx_vals.current_level == 1:
data['original_menu_instance'] = self
if 'menu_items' not in kwargs:
data['menu_items'] = self.get_menu_items_for_rendering()
data.update(kwargs)
return data |
java | protected final <T> void putAdvancedConfig(AdvancedConfig.Key<T> key, T value) {
advancedConfig.put(key, value);
} |
java | @Override
protected void addGenericHeaders(final UIContext uic, final WComponent ui) {
// Note: This effectively prevents caching of anything served up from a WServlet.
// We are ok for WContent and thrown ContentEscapes, as addGenericHeaders will not be called
if (getBackingRequest() instanceof SubSessionHttpServletRequestWrapper) {
getBackingResponse().setHeader("Cache-Control", CacheType.NO_CACHE.getSettings());
getBackingResponse().setHeader("Pragma", "no-cache");
getBackingResponse().setHeader("Expires", "-1");
}
// This is to prevent clickjacking. It can also be set to "DENY" to prevent embedding in frames at all or
// "ALLOW-FROM uri" to allow embedding in a frame within a particular site.
// The default will allow WComponents applications in a frame on the same origin.
getBackingResponse().setHeader("X-Frame-Options", "SAMEORIGIN");
} |
java | @Override
public CPDefinitionOptionValueRel fetchCPDefinitionOptionValueRelByUuidAndGroupId(
String uuid, long groupId) {
return cpDefinitionOptionValueRelPersistence.fetchByUUID_G(uuid, groupId);
} |
python | def is_holiday(now=None, holidays="/etc/acct/holidays"):
"""is_holiday({now}, {holidays="/etc/acct/holidays"}"""
now = _Time(now)
# Now, parse holiday file.
if not os.path.exists(holidays):
raise Exception("There is no holidays file: %s" % holidays)
f = open(holidays, "r")
# First, read all leading comments.
line = f.readline()
while line[0] == '*': line = f.readline()
# We just got the year line.
(year, primestart, primeend) = str.split(line)
# If not the right year, we have no idea for certain. Skip.
if not year == now.year: return 0
# Now the dates. Check each against now.
while line != '':
# Of course, ignore comments.
if line[0] == '*':
line = f.readline()
continue
try:
# Format: "1/1 New Years Day"
(month, day) = str.split(str.split(line)[0], "/")
# The _Time class has leading-zero padded day numbers.
if len(day) == 1: day = '0' + day
# Get month number from index map (compensate for zero indexing).
month = MONTH_MAP[int(month) - 1]
# Check the date.
#print month, now.month, day, now.day
if month == now.month and day == now.day:
return 1
line = f.readline()
except Exception:
# Skip malformed lines.
line = f.readline()
continue
# If no match found, we must not be in a holiday.
return 0 |
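A hedged sketch of the holidays file that is_holiday() parses, reconstructed from the parsing logic above; the prime-time columns on the year line are illustrative:

# * Leading comment lines start with an asterisk.
# 2024 0800 1700           <- year line: year, prime start, prime end
# 1/1   New Years Day      <- date lines: month/day, then a label
# 12/25 Christmas
if is_holiday():            # returns 1 on a listed date, else 0
    print("today is a holiday")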
python | def get_or_create_with_validation(cls, *args, **kwargs):
"""
Factory method that gets or creates-and-validates the model object before it is saved.
Similar to the get_or_create method on Models, it returns a tuple of (object, created),
where created is a boolean specifying whether an object was created.
"""
try:
return cls.objects.get(*args, **kwargs), False
except cls.DoesNotExist:
return cls.create_with_validation(*args, **kwargs), True |
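A usage sketch with a hypothetical Django model Tag that mixes in this classmethod and defines the matching create_with_validation():

tag, created = Tag.get_or_create_with_validation(name="python")  # Tag is hypothetical
if created:
    print("validated and saved a new Tag")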
java | @Override
public UntagResourcesResult untagResources(UntagResourcesRequest request) {
request = beforeClientExecution(request);
return executeUntagResources(request);
} |
python | def _validate_and_parse(self, batch_object):
"""
Performs validation on the batch object to make sure it is in the proper format.
Parameters:
* batch_object: The data provided to a POST. The expected format is the following:
{
"username": "username",
"course_key": "course-key",
"blocks": {
"block_key1": 0.0,
"block_key2": 1.0,
"block_key3": 1.0,
}
}
Return Value:
* tuple: (User, CourseKey, List of tuples (UsageKey, completion_float)
Raises:
django.core.exceptions.ValidationError:
If any aspect of validation fails a ValidationError is raised.
ObjectDoesNotExist:
If a database object cannot be found an ObjectDoesNotExist is raised.
"""
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
raise ValidationError(
_("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
)
for key in self.REQUIRED_KEYS:
if key not in batch_object:
raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
completion = float(blocks[block_key])
block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs |
python | def query_helper(request, namespace, docid, configuration=None):
"""Does the actual query, called by query() or pub_query(), not directly"""
flatargs = {
'customslicesize': request.POST.get('customslicesize',settings.CONFIGURATIONS[configuration].get('customslicesize','50')), #for pagination of search results
}
#stupid compatibility stuff
if sys.version < '3':
if hasattr(request, 'body'):
data = json.loads(unicode(request.body,'utf-8')) #pylint: disable=undefined-variable
else: #older django
data = json.loads(unicode(request.raw_post_data,'utf-8')) #pylint: disable=undefined-variable
else:
if hasattr(request, 'body'):
data = json.loads(str(request.body,'utf-8'))
else: #older django
data = json.loads(str(request.raw_post_data,'utf-8'))
if not data['queries']:
return HttpResponseForbidden("No queries to run")
for query in data['queries']:
#get document selector and check it doesn't violate the namespace
docselector, query = getdocumentselector(query)
if not docselector:
return HttpResponseForbidden("Query does not start with a valid document selector (USE keyword)!")
elif docselector[0] != namespace:
return HttpResponseForbidden("Query would affect a different namespace than your current one, forbidden!")
if query != "GET" and query[:4] != "CQL " and query[:4] != "META":
#parse query on this end to catch syntax errors prior to sending, should be fast enough anyway
#first resolve variables to dummies (real ones will be handled server-side) as it won't be valid FQL otherwise
query = query.replace("$FOLIADOCSERVE_PROCESSOR", "PROCESSOR name \"foliadocserve\"")
query = query.replace("$FLAT_PROCESSOR", "PROCESSOR name \"FLAT\" version \"" + VERSION + "\" host \"" + request.get_host() + "\" src \"" + request.build_absolute_uri("/") + "\"") #also another instance in comm.py
try:
query = fql.Query(query)
except fql.SyntaxError as e:
return HttpResponseForbidden("FQL Syntax Error: " + str(e))
needwritepermission = query.declarations or query.action and query.action.action != "SELECT"
else:
needwritepermission = False
if configuration != "pub":
if needwritepermission and not flat.users.models.haswritepermission(request.user.username, namespace, request):
return HttpResponseForbidden("Permission denied, no write access")
query = "\n".join(data['queries']) #throw all queries on a big pile to transmit
try:
d = flat.comm.query(request, query,**flatargs)
except Exception as e:
if sys.version < '3':
errmsg = docserveerror(e)['fatalerror_text']
return HttpResponseForbidden("FoLiA Document Server error: ".encode('utf-8') + errmsg.encode('utf-8'))
else:
return HttpResponseForbidden("FoLiA Document Server error: " + docserveerror(e)['fatalerror_text'])
return HttpResponse(json.dumps(d).encode('utf-8'), content_type='application/json') |
python | def get_listing_view(self, request, queryset, opts=None):
"""
Instantiates and returns the view class that will generate the
actual context for this plugin.
``queryset`` can be an actual QuerySet or any iterable.
"""
view = self.get_view(request, self.view_class, opts)
view.queryset = queryset
return view |
java | private static void parseStyle(SvgElementBase obj, String style)
{
TextScanner scan = new TextScanner(style.replaceAll("/\\*.*?\\*/", "")); // regex strips block comments
while (true)
{
String propertyName = scan.nextToken(':');
scan.skipWhitespace();
if (!scan.consume(':'))
break; // Syntax error. Stop processing CSS rules.
scan.skipWhitespace();
String propertyValue = scan.nextTokenWithWhitespace(';');
if (propertyValue == null)
break; // Syntax error
scan.skipWhitespace();
if (scan.empty() || scan.consume(';'))
{
if (obj.style == null)
obj.style = new Style();
processStyleProperty(obj.style, propertyName, propertyValue);
scan.skipWhitespace();
}
}
} |
python | def convert_rtc(cls, timestamp):
"""Convert a number of seconds since 1/1/2000 to UTC time."""
if timestamp & (1 << 31):
timestamp &= ~(1 << 31)
delta = datetime.timedelta(seconds=timestamp)
return cls._Y2KReference + delta |
java | static String rewriteIPv4MappedNotation(String string)
{
if (!string.contains("."))
{
return string;
}
else
{
int lastColon = string.lastIndexOf(":");
String firstPart = string.substring(0, lastColon + 1);
String mappedIPv4Part = string.substring(lastColon + 1);
if (mappedIPv4Part.contains("."))
{
String[] dotSplits = DOT_DELIM.split(mappedIPv4Part);
if (dotSplits.length != 4)
throw new IllegalArgumentException(String.format("can not parse [%s]", string));
StringBuilder rewrittenString = new StringBuilder();
rewrittenString.append(firstPart);
int byteZero = Integer.parseInt(dotSplits[0]);
int byteOne = Integer.parseInt(dotSplits[1]);
int byteTwo = Integer.parseInt(dotSplits[2]);
int byteThree = Integer.parseInt(dotSplits[3]);
rewrittenString.append(String.format("%02x", byteZero));
rewrittenString.append(String.format("%02x", byteOne));
rewrittenString.append(":");
rewrittenString.append(String.format("%02x", byteTwo));
rewrittenString.append(String.format("%02x", byteThree));
return rewrittenString.toString();
}
else
{
throw new IllegalArgumentException(String.format("can not parse [%s]", string));
}
}
} |
java | protected String generateRelationshipText(final RelationshipType relationshipType, boolean shortSyntax, final String spacer) {
final StringBuilder retValue;
final List<Relationship> relationships;
// Create the relationship heading
if (relationshipType == RelationshipType.REFER_TO) {
if (shortSyntax) {
retValue = new StringBuilder(" [R: ");
} else {
retValue = new StringBuilder("\n" + spacer + "[Refer-to:");
}
relationships = getRelatedRelationships();
} else if (relationshipType == RelationshipType.PREREQUISITE) {
if (shortSyntax) {
retValue = new StringBuilder(" [P: ");
} else {
retValue = new StringBuilder("\n" + spacer + "[Prerequisite:");
}
relationships = getPrerequisiteRelationships();
} else if (relationshipType == RelationshipType.LINKLIST) {
if (shortSyntax) {
retValue = new StringBuilder(" [L: ");
} else {
retValue = new StringBuilder("\n" + spacer + "[Link-List:");
}
relationships = getLinkListRelationships();
} else {
throw new IllegalArgumentException("Unable to create a text based formation for the " + relationshipType.toString() + " " +
"relationship type.");
}
// Create the list of relationships
if (shortSyntax) {
final List<String> relatedIds = new ArrayList<String>();
for (final Relationship related : relationships) {
relatedIds.add(related.getSecondaryRelationshipId());
}
retValue.append(StringUtilities.buildString(relatedIds.toArray(new String[relatedIds.size()]), ", "));
} else {
boolean first = true;
for (final Relationship related : relationships) {
if (first) {
retValue.append("\n");
first = false;
} else {
retValue.append(",\n");
}
retValue.append(spacer);
retValue.append(SPACER);
if (related.getRelationshipTitle() != null && !related.getRelationshipTitle().trim().isEmpty()) {
retValue.append(ContentSpecUtilities.escapeRelationshipTitle(related.getRelationshipTitle())).append(" ");
}
retValue.append("[");
retValue.append(related.getSecondaryRelationshipId());
retValue.append("]");
}
}
retValue.append("]");
return retValue.toString();
} |
java | public void setFirstObservedAt(java.util.Collection<DateFilter> firstObservedAt) {
if (firstObservedAt == null) {
this.firstObservedAt = null;
return;
}
this.firstObservedAt = new java.util.ArrayList<DateFilter>(firstObservedAt);
} |
java | public static void handleError(HttpResponse response) throws ParseException, IOException {
log.debug("{}", response.getStatusLine().toString());
HttpEntity entity = response.getEntity();
if (entity != null) {
log.debug("{}", EntityUtils.toString(entity));
}
} |
java | @Override public DirectedGraph<T> copy(Set<Integer> vertices) {
Graph<T> g = super.copy(vertices);
return new DirectedGraphAdaptor<T>(g);
} |
python | def duration(self):
"""
Return a timedelta for this task.
Measure the time between this task's start and end time, or "now"
if the task has not yet finished.
:returns: timedelta object, or None if the task has not even started.
"""
if not self.started:
return None
start = self.started
end = self.completed
if not end:
end = datetime.utcnow()
return end - start |
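Usage sketch; task is any object exposing the started/completed datetime attributes read above (call duration() as a method, or as task.duration if the source decorates it as a property):

elapsed = task.duration()   # task is a hypothetical object with .started/.completed
if elapsed is None:
    print("task has not started yet")
else:
    print("elapsed: %s" % elapsed)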
python | def pformat(self, prefix=()):
'''
Makes a pretty ASCII format of the data, suitable for
displaying in a console or saving to a text file.
Returns a list of lines.
'''
nan = float("nan")
def sformat(segment, stat):
FMT = "n={0}, mean={1}, p50/95={2}/{3}, max={4}"
line_segs = [segment]
for s in [stat]:
p = s.get_percentiles()
p50, p95 = p.get(0.50, nan), p.get(0.95, nan)
line_segs.append(FMT.format(s.n, s.mean, p50, p95, s.max))
return '{0}: {1}'.format(*line_segs)
lines = []
for path in sorted(self.path_stats.keys()):
lines.append('=====================')
for seg, stat in zip(path, self.path_stats[path]):
lines.append(sformat(seg, stat))
return lines |
python | def set_rules(self, rules):
"""
Sets the rules to be run or ignored for the audit.
Args:
rules: a dictionary of the format `{"ignore": [], "apply": []}`.
See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits
Passing `{"apply": []}` or `{}` means to check for all available rules.
Passing `{"apply": None}` means that no audit should be done for this page.
Passing `{"ignore": []}` means to run all otherwise enabled rules.
Any rules in the "ignore" list will be ignored even if they were also
specified in the "apply".
Examples:
To check only `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"apply": ['badAriaAttributeValue']
})
To check all rules except `badAriaAttributeValue`::
page.a11y_audit.config.set_rules({
"ignore": ['badAriaAttributeValue'],
})
"""
self.rules_to_ignore = rules.get("ignore", [])
self.rules_to_run = rules.get("apply", []) |
python | def babel_compile(target):
"""
Babel, Compiles all translations
"""
click.echo(click.style("Starting Compile target:{0}".format(target), fg="green"))
os.popen("pybabel compile -f -d {0}".format(target)) |
python | def fit(self, pixel_flux, data_placeholder, var_list, session, feed_dict={}):
"""
Parameters
----------
pixel_flux : ndarray
The TPF-like pixel flux time series. The first dimension
must represent time, and the remaining two dimensions
must represent the spatial dimensions.
data_placeholder : tf.placeholder
A placeholder which will be used to pass the n-th time stamp
to `self.optimizer.minimize`.
var_list : list
The list of parameters (as tensors) to optimize for.
session : instance of tf.Session
feed_dict : dict
Dictionary of additional arguments used to feed the loss function.
"""
opt_params = []
cadences = range(pixel_flux.shape[0])
for n in tqdm.tqdm(cadences):
feed_dict[data_placeholder] = pixel_flux[n]
self.optimizer.minimize(session=session, feed_dict=feed_dict)
opt_params.append([session.run(var) for var in var_list])
return opt_params |
java | public static void init(final Map<String, String> _values)
{
if (AppConfigHandler.HANDLER == null) {
AppConfigHandler.HANDLER = new AppConfigHandler(_values);
}
} |
java | public static String convertGlobToRegex(String pattern) {
pattern = pattern.replaceAll("[\\\\\\.\\(\\)\\+\\|\\^\\$]", "\\\\$0");
pattern = pattern.replaceAll("\\[\\\\\\^", "[^");
pattern = Ruby.String.of(pattern).gsub("\\{[^\\}]+\\}", m -> {
m = m.replaceAll(",", "|");
m = "(" + m.substring(1, m.lastIndexOf('}')) + ")";
return m;
}).toS();
pattern = pattern.replaceAll("\\?", ".{1}");
pattern = pattern.replaceAll("\\*\\*/", "(.+/)?");
pattern = pattern.replaceAll("\\*", "[^/]*");
return pattern;
} |
java | public static FDBigInteger valueOfPow52(int p5, int p2) {
if (p5 != 0) {
if (p2 == 0) {
return big5pow(p5);
} else if (p5 < SMALL_5_POW.length) {
int pow5 = SMALL_5_POW[p5];
int wordcount = p2 >> 5;
int bitcount = p2 & 0x1f;
if (bitcount == 0) {
return new FDBigInteger(new int[]{pow5}, wordcount);
} else {
return new FDBigInteger(new int[]{
pow5 << bitcount,
pow5 >>> (32 - bitcount)
}, wordcount);
}
} else {
return big5pow(p5).leftShift(p2);
}
} else {
return valueOfPow2(p2);
}
} |
java | public void println(int i) throws IOException
{
if (this._listener != null && !checkIfCalledFromWLonError()) {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
Tr.debug(tc, "non blocking println int , WriteListener enabled: " + this._listener);
this.println_NonBlocking(Integer.toString(i));
}
else {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
Tr.debug(tc, "println int");
super.println(i);
}
} |
python | def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents) |
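Usage sketch: contents is a sequence of lines without terminators, always written as UTF-8 with POSIX newlines:

write_file("MANIFEST.in", [          # file name is illustrative
    "include README.rst",
    "recursive-include docs *.rst",
])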
java | public OvhNetwork project_serviceName_network_private_POST(String serviceName, String name, String[] regions, Long vlanId) throws IOException {
String qPath = "/cloud/project/{serviceName}/network/private";
StringBuilder sb = path(qPath, serviceName);
HashMap<String, Object>o = new HashMap<String, Object>();
addBody(o, "name", name);
addBody(o, "regions", regions);
addBody(o, "vlanId", vlanId);
String resp = exec(qPath, "POST", sb.toString(), o);
return convertTo(resp, OvhNetwork.class);
} |
java | protected static String ellipsis(String message, int maxLength)
{
if(message == null) {
return "null";
}
return message.length() < maxLength ? message : message.substring(0, maxLength - ELLIPSIS.length()) + ELLIPSIS;
} |
java | public static <U> U valueOrElse(Versioned<U> versioned, U defaultValue) {
return versioned == null ? defaultValue : versioned.value();
} |
java | public void transfromHeadline()
{
if (this.hlDepth > 0)
{
return;
}
int level = 0;
final Line line = this.lines;
if (line.isEmpty)
{
return;
}
int start = line.leading;
while (start < line.value.length() && line.value.charAt(start) == '#')
{
level++;
start++;
}
while (start < line.value.length() && line.value.charAt(start) == ' ')
{
start++;
}
if (start >= line.value.length())
{
line.setEmpty();
}
else
{
int end = line.value.length() - line.trailing - 1;
while (line.value.charAt(end) == '#')
{
end--;
}
while (line.value.charAt(end) == ' ')
{
end--;
}
line.value = line.value.substring(start, end + 1);
line.leading = line.trailing = 0;
}
this.hlDepth = Math.min(level, 6);
} |
python | def RandomStandardNormal(shape, dtype, seed):
"""
Standard (mu=0, sigma=1) gaussian op.
"""
if seed:
np.random.seed(seed)
return np.random.normal(size=reduce(mul, shape)).reshape(shape).astype(dtype_map[dtype]), |
python | def get_console_size():
"""Return console size as tuple = (width, height).
Returns (None,None) in non-interactive session.
"""
display_width = options.display.width
# deprecated.
display_height = options.display.max_rows
# Consider
# interactive shell terminal, can detect term size
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
# size non-interactive script, should disregard term size
# in addition
# width,height have default values, but setting to 'None' signals
# should use Auto-Detection, But only in interactive shell-terminal.
# Simple. yeah.
if in_interactive_session():
if in_ipython_frontend():
# sane defaults for interactive non-shell terminal
# match default for width,height in config_init
try:
from pandas.core.config import get_default_val
terminal_width = get_default_val('display.width')
terminal_height = get_default_val('display.max_rows')
except ImportError:
terminal_width, terminal_height = None, None
else:
# pure terminal
terminal_width, terminal_height = get_terminal_size()
else:
terminal_width, terminal_height = None, None
# Note if the User sets width/Height to None (auto-detection)
# and we're in a script (non-inter), this will return (None,None)
# caller needs to deal.
return (display_width or terminal_width, display_height or terminal_height) |
java | public static String fixLength(final String text, final int charsNum, final char paddingChar)
{
return fixLength(text, charsNum, paddingChar, false);
} |
java | protected void openVideo(boolean reopen, String... filePaths) {
synchronized (lockStartingProcess) {
if( startingProcess ) {
System.out.println("Ignoring video request. Detected spamming");
return;
}
startingProcess = true;
}
synchronized (inputStreams) {
if (inputStreams.size() != filePaths.length)
throw new IllegalArgumentException("Input streams not equal to "+filePaths.length+". Override openVideo()");
}
stopAllInputProcessing();
streamPaused = false;
boolean failed = false;
for( int which = 0; which < filePaths.length; which++ ) {
CacheSequenceStream cache = inputStreams.get(which);
SimpleImageSequence sequence = media.openVideo(filePaths[which], cache.getImageType());
if( sequence == null ) {
failed = true;
System.out.println("Can't find file. "+filePaths[which]);
break;
}
configureVideo(which,sequence);
synchronized (inputStreams) {
cache.reset();
cache.setSequence(sequence);
}
}
if (!failed) {
setInputName(new File(filePaths[0]).getName());
synchronized (inputStreams) {
inputMethod = InputMethod.VIDEO;
streamPeriod = 33; // default to 33 FPS for a video
if( threadProcess != null )
throw new RuntimeException("There was still an active stream thread!");
threadProcess = new SynchronizedStreamsThread();
}
if( !reopen ) {
for (int i = 0; i < inputStreams.size(); i++) {
CacheSequenceStream stream = inputStreams.get(i);
handleInputChange(i, inputMethod, stream.getWidth(), stream.getHeight());
}
}
threadPool.execute(threadProcess);
} else {
synchronized (inputStreams) {
inputMethod = InputMethod.NONE;
inputFilePath = null;
}
synchronized (lockStartingProcess) {
startingProcess = false;
}
showRejectDiaglog("Can't open file");
}
} |
python | def get_unknown_opttrans_attr(path):
"""Utility method that gives a `dict` of unknown and unsupported optional
transitive path attributes of `path`.
Returns dict: <key> - attribute type code, <value> - unknown path-attr.
"""
path_attrs = path.pathattr_map
unknown_opt_tran_attrs = {}
for _, attr in path_attrs.items():
if (isinstance(attr, BGPPathAttributeUnknown) and
attr.flags & (BGP_ATTR_FLAG_OPTIONAL |
BGP_ATTR_FLAG_TRANSITIVE)) or \
isinstance(attr, BGPPathAttributeAs4Path) or \
isinstance(attr, BGPPathAttributeAs4Aggregator):
unknown_opt_tran_attrs[attr.type] = attr
return unknown_opt_tran_attrs |
python | def set_end_point_uri(self) -> bool:
"""
Extracts the route from the accessed URL and sets it to __end_point_uri
:rtype: bool
"""
expected_parts = self.__route.split("/")
actual_parts = self.__uri.split("/")
i = 0
for part in expected_parts:
if part != actual_parts[i]:
return False
i = i + 1
uri_prefix = len(self.__route)
self.__end_point_uri = self.__uri[uri_prefix:]
return True |
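A standalone sketch of the prefix check performed above, with hypothetical route and URI values:

route, uri = "api/v1", "api/v1/users/42"     # hypothetical values
expected_parts = route.split("/")            # ["api", "v1"]
actual_parts = uri.split("/")
if all(e == a for e, a in zip(expected_parts, actual_parts)):
    end_point_uri = uri[len(route):]         # -> "/users/42"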
java | @Override
public void execute(ExecutorService executor) {
File f = new File(_locAdmin.resolveString(_location));
if (f.isAbsolute()) {
resolve(_location, _filesToMonitor);
} else {
resolve(AppManagerConstants.SERVER_APPS_DIR + _location, _filesToMonitor);
resolve(AppManagerConstants.SHARED_APPS_DIR + _location, _filesToMonitor);
}
_mon.setProperty(FileMonitor.MONITOR_DIRECTORIES, _filesToMonitor);
_mon.setProperty(FileMonitor.MONITOR_FILES, _filesToMonitor);
if (_trigger == UpdateTrigger.DISABLED) {
findFile(true);
} else {
_mon.register(_ctx, FileMonitor.class, this);
if (_container.get() == null) {
if (_file.get() == null) {
if (!FrameworkState.isStopping()) {
// Don't issue this message if the server is stopping
AppMessageHelper.get(_handler.get()).warning("APPLICATION_NOT_FOUND", _name, _location);
}
}
}
}
} |
python | def DeleteUser(self, user_link, options=None):
"""Deletes a user.
:param str user_link:
The link to the user entity.
:param dict options:
The request options for the request.
:return:
The deleted user.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(user_link)
user_id = base.GetResourceIdOrFullNameFromLink(user_link)
return self.DeleteResource(path,
'users',
user_id,
None,
options) |
python | def rank(self):
""" Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
"""
rank = ctypes.c_int()
check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
return rank.value |
java | private int readFlags(byte flags) {
// Verify the reserved bits at 7 and 6 are 0
int reserved7 = (flags >> 7) & 1;
int reserved6 = (flags >> 6) & 1;
if (reserved7 != 0 || reserved6 != 0) {
throw new GeoPackageException(
"Unexpected GeoPackage Geometry flags. Flag bit 7 and 6 should both be 0, 7="
+ reserved7 + ", 6=" + reserved6);
}
// Get the binary type from bit 5, 0 for standard and 1 for extended
int binaryType = (flags >> 5) & 1;
extended = binaryType == 1;
// Get the empty geometry flag from bit 4, 0 for non-empty and 1 for
// empty
int emptyValue = (flags >> 4) & 1;
empty = emptyValue == 1;
// Get the envelope contents indicator code (3-bit unsigned integer from
// bits 3, 2, and 1)
int envelopeIndicator = (flags >> 1) & 7;
if (envelopeIndicator > 4) {
throw new GeoPackageException(
"Unexpected GeoPackage Geometry flags. Envelope contents indicator must be between 0 and 4. Actual: "
+ envelopeIndicator);
}
// Get the byte order from bit 0, 0 for Big Endian and 1 for Little
// Endian
int byteOrderValue = flags & 1;
byteOrder = byteOrderValue == 0 ? ByteOrder.BIG_ENDIAN
: ByteOrder.LITTLE_ENDIAN;
return envelopeIndicator;
} |
java | private void clearEdit() {
m_fileTable.setEditable(false);
if (m_editItemId != null) {
updateItem(m_editItemId, false);
}
m_editItemId = null;
m_editProperty = null;
m_editHandler = null;
updateSorting();
} |
java | public static Timer getNamedTimer(String timerName, int todoFlags,
long threadId) {
Timer key = new Timer(timerName, todoFlags, threadId);
registeredTimers.putIfAbsent(key, key);
return registeredTimers.get(key);
} |
java | protected String formatDDLStatement(String sql) {
String result = removeComments(sql);
String[] parts = getTokens(sql, 0);
if (parts.length > 2 && parts[0].equalsIgnoreCase("create")
&& parts[1].equalsIgnoreCase("table")) {
String sqlWithSingleSpaces = String.join(" ", parts);
int primaryKeyIndex = sqlWithSingleSpaces.toUpperCase().indexOf(", PRIMARY KEY (");
if (primaryKeyIndex > -1) {
int endPrimaryKeyIndex = sqlWithSingleSpaces.indexOf(')', primaryKeyIndex);
String primaryKeySpec =
sqlWithSingleSpaces.substring(primaryKeyIndex + 2, endPrimaryKeyIndex + 1);
sqlWithSingleSpaces = sqlWithSingleSpaces.replace(", " + primaryKeySpec, "");
sqlWithSingleSpaces = sqlWithSingleSpaces + " " + primaryKeySpec;
result = sqlWithSingleSpaces.replaceAll("\\s+\\)", ")");
}
}
return result;
} |
java | public static Schema superSetOf(Schema schema, Field... newFields) {
return superSetOf("superSetSchema" + (COUNTER++), schema, newFields);
} |
java | public void initializeSubContainers(CmsContainerPageElementPanel containerElement) {
int containerCount = m_targetContainers.size();
m_targetContainers.putAll(m_containerpageUtil.consumeContainers(m_containers, containerElement.getElement()));
updateContainerLevelInfo();
if (m_targetContainers.size() > containerCount) {
// in case new containers have been added, the gallery data needs to be updated
scheduleGalleryUpdate();
}
} |
java | @Nullable
@Size(2)
public int[] getValidDateFields() {
if (!mIsDateValid) {
return null;
}
final int[] monthYearPair = new int[2];
final String rawNumericInput = getText().toString().replaceAll("/", "");
final String[] dateFields = DateUtils.separateDateStringParts(rawNumericInput);
try {
monthYearPair[0] = Integer.parseInt(dateFields[0]);
final int twoDigitYear = Integer.parseInt(dateFields[1]);
final int fourDigitYear = DateUtils.convertTwoDigitYearToFour(twoDigitYear);
monthYearPair[1] = fourDigitYear;
} catch (NumberFormatException numEx) {
// Given that the date should already be valid when getting to this method, we should
// not hit this exception. Returning null to indicate an error if we do.
return null;
}
return monthYearPair;
} |
python | def bck_chunk(self):
"""
Returns the chunk backward from this chunk in the list of free chunks.
"""
raise NotImplementedError("%s not implemented for %s" % (self.bck_chunk.__func__.__name__,
self.__class__.__name__)) |
java | private static synchronized PrepPipeline createOrFindPreparer(DatabasePreparer preparer, Iterable<Consumer<Builder>> customizers) throws IOException, SQLException
{
final ClusterKey key = new ClusterKey(preparer, customizers);
PrepPipeline result = CLUSTERS.get(key);
if (result != null) {
return result;
}
final Builder builder = EmbeddedPostgres.builder();
customizers.forEach(c -> c.accept(builder));
final EmbeddedPostgres pg = builder.start();
preparer.prepare(pg.getTemplateDatabase());
result = new PrepPipeline(pg).start();
CLUSTERS.put(key, result);
return result;
} |
python | def write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Writes down back reference files, which include a thumbnail list
of examples using a certain module"""
if gallery_conf['backreferences_dir'] is None:
return
example_file = os.path.join(target_dir, fname)
backrefs = scan_used_functions(example_file, gallery_conf)
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
seen = backref in seen_backrefs
with codecs.open(include_path, 'a' if seen else 'w',
encoding='utf-8') as ex_file:
if not seen:
heading = '\n\nExamples using ``%s``' % backref
ex_file.write(heading + '\n')
ex_file.write('^' * len(heading) + '\n')
ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
fname, snippet, is_backref=True))
seen_backrefs.add(backref) |
python | def _republish_processing_error(self, error):
"""Republish the original message that was received because a
:exc:`~rejected.consumer.ProcessingException` was raised.
This is for internal use and should not be extended or used directly.
Add a header that keeps track of how many times this has happened
for this message.
:param str error: The string value for the exception
"""
self.logger.debug('Republishing due to ProcessingException')
properties = dict(self._message.properties) or {}
if 'headers' not in properties or not properties['headers']:
properties['headers'] = {}
if error:
properties['headers']['X-Processing-Exception'] = error
if _PROCESSING_EXCEPTIONS not in properties['headers']:
properties['headers'][_PROCESSING_EXCEPTIONS] = 1
else:
try:
properties['headers'][_PROCESSING_EXCEPTIONS] += 1
except TypeError:
properties['headers'][_PROCESSING_EXCEPTIONS] = 1
self._message.channel.basic_publish(
self._error_exchange, self._message.routing_key,
self._message.body, pika.BasicProperties(**properties)) |
python | def deploy(self, job_name, command='', blocksize=1):
"""Deploy the template to a resource group."""
instances = []
self.client.resource_groups.create_or_update(
self.resource_group,
{
'location': self.location,
}
)
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'template.json')
with open(template_path, 'r') as template_file_fd:
template = json.load(template_file_fd)
parameters = {
'sshKeyData': self.pub_ssh_key,
'vmName': 'azure-deployment-sample-vm',
'dnsLabelPrefix': self.dns_label_prefix
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
for i in range(blocksize):
deployment_async_operation = self.client.deployments.create_or_update(
self.resource_group,
'azure-sample',
deployment_properties
)
instances.append(deployment_async_operation.wait())
return instances |
java | private Where addWhereClauseForRowRange(String keyAlias, Select select, RowRange<?> rowRange) {
Where where = null;
boolean keyIsPresent = false;
boolean tokenIsPresent = false;
if (rowRange.getStartKey() != null || rowRange.getEndKey() != null) {
keyIsPresent = true;
}
if (rowRange.getStartToken() != null || rowRange.getEndToken() != null) {
tokenIsPresent = true;
}
if (keyIsPresent && tokenIsPresent) {
throw new RuntimeException("Cannot provide both token and keys for range query");
}
if (keyIsPresent) {
if (rowRange.getStartKey() != null && rowRange.getEndKey() != null) {
where = select.where(gte(keyAlias, BIND_MARKER))
.and(lte(keyAlias, BIND_MARKER));
} else if (rowRange.getStartKey() != null) {
where = select.where(gte(keyAlias, BIND_MARKER));
} else if (rowRange.getEndKey() != null) {
where = select.where(lte(keyAlias, BIND_MARKER));
}
} else if (tokenIsPresent) {
String tokenOfKey ="token(" + keyAlias + ")";
if (rowRange.getStartToken() != null && rowRange.getEndToken() != null) {
where = select.where(gte(tokenOfKey, BIND_MARKER))
.and(lte(tokenOfKey, BIND_MARKER));
} else if (rowRange.getStartToken() != null) {
where = select.where(gte(tokenOfKey, BIND_MARKER));
} else if (rowRange.getEndToken() != null) {
where = select.where(lte(tokenOfKey, BIND_MARKER));
}
} else {
where = select.where();
}
if (rowRange.getCount() > 0) {
// TODO: fix this
//where.limit(rowRange.getCount());
}
return where;
} |
java | private boolean isTruncationNeeded(PerJVMInfo lInfo,
Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails,
LogName logName) {
boolean truncationNeeded = false;
LogFileDetail logFileDetail = null;
for (Task task : lInfo.allAttempts) {
long taskRetainSize =
(task.isMapTask() ? mapRetainSize : reduceRetainSize);
Map<LogName, LogFileDetail> allLogsFileDetails =
taskLogFileDetails.get(task);
logFileDetail = allLogsFileDetails.get(logName);
if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION
&& logFileDetail.length > taskRetainSize) {
truncationNeeded = true;
break;
}
}
return truncationNeeded;
} |
python | def to_string_short(self):
"""
see also :meth:`to_string`
:return: a shorter, abbreviated string representation of the parameter
"""
if hasattr(self, 'constrained_by') and len(self.constrained_by) > 0:
return "* {:>30}: {}".format(self.uniquetwig_trunc, self.get_quantity() if hasattr(self, 'quantity') else self.get_value())
else:
return "{:>32}: {}".format(self.uniquetwig_trunc, self.get_quantity() if hasattr(self, 'quantity') else self.get_value()) |
python | def linear_interpolate(tensor1, tensor2, coeffs):
"""Linearly interpolate between two tensors at coeff.
Args:
tensor1: 4-D Tensor, shape=(NHWC)
tensor2: 4-D Tensor, shape=(NHWC)
coeffs: list of floats.
Returns:
interp_latents: 5-D Tensor, with interp_latents[i] representing
interpolations at coeffs[i].
shape=(len(coeffs), NHWC)
"""
interp_tensors = []
for coeff in coeffs:
interp_tensor = tensor1 + coeff * (tensor2 - tensor1)
interp_tensors.append(interp_tensor)
return tf.concat(interp_tensors, axis=0) |
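Usage sketch, assuming TensorFlow is available; two single-image NHWC batches are interpolated at three coefficients:

import tensorflow as tf

z0 = tf.zeros([1, 4, 4, 3])
z1 = tf.ones([1, 4, 4, 3])
interp = linear_interpolate(z0, z1, [0.0, 0.5, 1.0])
# interp has shape (3, 4, 4, 3); interp[1] is the midpoint image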
java | protected String getQueryModifier() {
String queryModifier = parseOptionalStringValue(m_configObject, JSON_KEY_QUERY_MODIFIER);
return (null == queryModifier) && (null != m_baseConfig)
? m_baseConfig.getGeneralConfig().getQueryModifier()
: queryModifier;
} |
python | def build_getters_support_matrix(app):
"""Build the getters support matrix."""
status = subprocess.call("./test.sh", stdout=sys.stdout, stderr=sys.stderr)
if status != 0:
print("Something bad happened when processing the test reports.")
sys.exit(-1)
drivers = set()
matrix = {
m: defaultdict(dict)
for m in dir(NetworkDriver)
if not (m.startswith("_") or m in EXCLUDE_METHODS)
}
regex_name = re.compile(r"(?P<driver>\w+)\/.*::test_(?P<getter>\w+)")
filename = "./support/tests/report.json"
with open(filename, "r") as f:
data = json.loads(f.read())
for test in data["report"]["tests"]:
match = regex_name.search(test["name"])
if match:
driver = match.group("driver")
drivers.add(driver)
method = match.group("getter")
else:
continue
if method in EXCLUDE_IN_REPORT:
continue
result = test["outcome"]
if method in METHOD_ALIASES.keys():
method = METHOD_ALIASES[method]
intermediate_result = matrix[method].get(driver, None)
matrix[method][driver] = _merge_results(result, intermediate_result)
sorted_methods = sorted(matrix.keys())
drivers = sorted(drivers)
env = Environment(loader=FileSystemLoader("."))
template_file = env.get_template("matrix.j2")
rendered_template = template_file.render(
matrix=matrix, drivers=drivers, sorted_methods=sorted_methods
)
with open("support/matrix.rst", "w") as f:
f.write(rendered_template) |
python | def nvlist_to_dict(nvlist):
'''Convert a CORBA namevalue list into a dictionary.'''
result = {}
for item in nvlist:
result[item.name] = item.value.value()
return result |
python | def read_resampled(self):
"""Return a block of audio data resampled to 16000hz, blocking if necessary."""
return self.resample(data=self.buffer_queue.get(),
input_rate=self.input_rate) |
python | def add(self, layer, verbosity = 0, position = None):
"""
Adds a layer. Layer verbosity is optional (default 0).
"""
layer._verbosity = verbosity
layer._maxRandom = self._maxRandom
layer.minTarget = 0.0
layer.maxTarget = 1.0
layer.minActivation = 0.0
layer.maxActivation = 1.0
if position is None:
self.layers.append(layer)
else:
self.layers.insert(position, layer)
self.layersByName[layer.name] = layer |
java | public String asString() throws TransformerException {
Properties outputProperties = new Properties();
outputProperties.put(javax.xml.transform.OutputKeys.OMIT_XML_DECLARATION, "yes");
return asString(outputProperties);
} |
python | def setup_lookup_table( self, hamiltonian='nearest-neighbour' ):
"""
Create a jump-probability look-up table corresponding to the appropriate Hamiltonian.
Args:
hamiltonian (Str, optional): String specifying the simulation Hamiltonian.
valid values are 'nearest-neighbour' (default) and 'coordination_number'.
Returns:
None
"""
expected_hamiltonian_values = [ 'nearest-neighbour', 'coordination_number' ]
if hamiltonian not in expected_hamiltonian_values:
raise ValueError("hamiltonian must be one of {}".format(expected_hamiltonian_values))
self.lattice.jump_lookup_table = lookup_table.LookupTable( self.lattice, hamiltonian ) |
python | def multiply(self, number):
"""Return a Vector as the product of the vector and a real number."""
return self.from_list([x * number for x in self.to_list()]) |
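Usage sketch for a hypothetical Vector class exposing the to_list()/from_list() pair the method assumes:

v = Vector.from_list([1.0, 2.0, 3.0])   # Vector is hypothetical
doubled = v.multiply(2)                 # components become [2.0, 4.0, 6.0]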
java | public Hashtable[] getMultipleParams(String name)
{
List parts = (List)_partMap.getValues(name);
if (parts==null)
return null;
Hashtable[] params = new Hashtable[parts.size()];
for (int i=0; i<params.length; i++) {
params[i] = ((Part)parts.get(i))._headers;
}
return params;
} |
python | def _create_mel_filter_bank(self):
"""
Create the Mel filter bank,
and store it in ``self.filters``.
Note that it is a function of the audio sample rate,
so it cannot be created in the class initializer,
but only later in :func:`aeneas.mfcc.MFCC.compute_from_data`.
"""
self.filters = numpy.zeros((1 + (self.fft_order // 2), self.filter_bank_size), 'd')
dfreq = float(self.sample_rate) / self.fft_order
nyquist_frequency = self.sample_rate / 2
if self.upper_frequency > nyquist_frequency:
self.log_exc(u"Upper frequency %f exceeds Nyquist frequency %f" % (self.upper_frequency, nyquist_frequency), None, True, ValueError)
melmax = MFCC._hz2mel(self.upper_frequency)
melmin = MFCC._hz2mel(self.lower_frequency)
dmelbw = (melmax - melmin) / (self.filter_bank_size + 1)
filt_edge = MFCC._mel2hz(melmin + dmelbw * numpy.arange(self.filter_bank_size + 2, dtype='d'))
# TODO can this code be written more numpy-style?
# (the performance loss is negligible, it is just ugly to see)
for whichfilt in range(0, self.filter_bank_size):
# int() casts to native int instead of working with numpy.float64
leftfr = int(round(filt_edge[whichfilt] / dfreq))
centerfr = int(round(filt_edge[whichfilt + 1] / dfreq))
rightfr = int(round(filt_edge[whichfilt + 2] / dfreq))
fwidth = (rightfr - leftfr) * dfreq
height = 2.0 / fwidth
if centerfr != leftfr:
leftslope = height / (centerfr - leftfr)
else:
leftslope = 0
freq = leftfr + 1
while freq < centerfr:
self.filters[freq, whichfilt] = (freq - leftfr) * leftslope
freq = freq + 1
# the next if should always be true!
if freq == centerfr:
self.filters[freq, whichfilt] = height
freq = freq + 1
if centerfr != rightfr:
rightslope = height / (centerfr - rightfr)
while freq < rightfr:
self.filters[freq, whichfilt] = (freq - rightfr) * rightslope
freq = freq + 1 |
java | @SuppressWarnings("deprecation")
private static String getPhysicalPath(ArtifactEntry artifactEntry) {
String physicalPath = artifactEntry.getPhysicalPath();
if ( physicalPath != null ) {
return physicalPath;
}
String entryPath = artifactEntry.getPath();
String rootPath = artifactEntry.getRoot().getPhysicalPath();
if ( rootPath != null ) {
return rootPath + "!" + entryPath;
}
while ( (artifactEntry = artifactEntry.getRoot().getEntryInEnclosingContainer()) != null ) {
String nextPhysicalPath = artifactEntry.getPhysicalPath();
if ( nextPhysicalPath != null ) {
return nextPhysicalPath + "!" + entryPath;
}
entryPath = artifactEntry.getPath() + "!" + entryPath;
}
return entryPath;
} |
python | def set_high_water_mark(socket, config):
""" Set a high water mark on the zmq socket. Do so in a way that is
cross-compatible with zeromq2 and zeromq3.
"""
if config['high_water_mark']:
if hasattr(zmq, 'HWM'):
# zeromq2
socket.setsockopt(zmq.HWM, config['high_water_mark'])
else:
# zeromq3
socket.setsockopt(zmq.SNDHWM, config['high_water_mark'])
socket.setsockopt(zmq.RCVHWM, config['high_water_mark']) |
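Usage sketch, assuming pyzmq is installed; config mirrors the dict shape the helper reads:

import zmq

socket = zmq.Context().socket(zmq.PUB)
set_high_water_mark(socket, {'high_water_mark': 1000})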
python | def create_session_entity_type(
self,
parent,
session_entity_type,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a session entity type.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.SessionEntityTypesClient()
>>>
>>> parent = client.session_path('[PROJECT]', '[SESSION]')
>>>
>>> # TODO: Initialize ``session_entity_type``:
>>> session_entity_type = {}
>>>
>>> response = client.create_session_entity_type(parent, session_entity_type)
Args:
parent (str): Required. The session to create a session entity type for.
Format: ``projects/<Project ID>/agent/sessions/<Session ID>``.
session_entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.SessionEntityType]): Required. The session entity type to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.SessionEntityType`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.SessionEntityType` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_session_entity_type' not in self._inner_api_calls:
self._inner_api_calls[
'create_session_entity_type'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_session_entity_type,
default_retry=self._method_configs[
'CreateSessionEntityType'].retry,
default_timeout=self._method_configs[
'CreateSessionEntityType'].timeout,
client_info=self._client_info,
)
request = session_entity_type_pb2.CreateSessionEntityTypeRequest(
parent=parent,
session_entity_type=session_entity_type,
)
return self._inner_api_calls['create_session_entity_type'](
request, retry=retry, timeout=timeout, metadata=metadata) |
java | private boolean removeKeyForAllLanguages(String key) {
try {
if (hasDescriptor()) {
lockDescriptor();
}
loadAllRemainingLocalizations();
lockAllLocalizations(key);
} catch (CmsException | IOException e) {
LOG.warn("Not able lock all localications for bundle.", e);
return false;
}
if (!hasDescriptor()) {
for (Entry<Locale, SortedProperties> entry : m_localizations.entrySet()) {
SortedProperties localization = entry.getValue();
if (localization.containsKey(key)) {
localization.remove(key);
m_changedTranslations.add(entry.getKey());
}
}
}
return true;
} |
java | public static XIncludeContext newContext(final XIncludeContext contextToCopy)
{
final XIncludeContext newContext = new XIncludeContext(contextToCopy.configuration);
newContext.currentBaseURI = contextToCopy.currentBaseURI;
newContext.basesURIDeque.addAll(contextToCopy.basesURIDeque);
newContext.language = contextToCopy.language;
newContext.xincludeDeque.addAll(contextToCopy.xincludeDeque);
newContext.docType = DocType.copy(contextToCopy.docType);
return newContext;
} |
python | def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) |
python | def list_users():
"""
List users.
"""
users = user_manager.all()
if users:
print_table(
['ID', 'Email', 'Active', 'Confirmed At'],
[(user.id,
user.email,
'True' if user.active else 'False',
user.confirmed_at.strftime('%Y-%m-%d %H:%M%z')
if user.confirmed_at else 'None',
) for user in users])
else:
click.echo('No users found.') |
python | def loaddata(settings_module,
fixtures,
bin_env=None,
database=None,
pythonpath=None,
env=None):
'''
Load fixture data
Fixtures:
comma separated list of fixtures to load
CLI Example:
.. code-block:: bash
salt '*' django.loaddata <settings_module> <comma delimited list of fixtures>
'''
args = []
kwargs = {}
if database:
kwargs['database'] = database
cmd = '{0} {1}'.format('loaddata', ' '.join(fixtures.split(',')))
return command(settings_module,
cmd,
bin_env,
pythonpath,
env,
*args, **kwargs) |
java | public static void head(String url, HttpConsumer<HttpExchange> endpoint, MediaTypes... mediaTypes) {
addResource(Methods.HEAD, url, endpoint, mediaTypes);
} |
python | def standard_deviation(x):
"""
Return a numpy array of column standard deviation
Parameters
----------
x : ndarray
A numpy array instance
Returns
-------
ndarray
A 1 x n numpy array instance of column standard deviation
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> np.testing.assert_array_almost_equal(
... standard_deviation(a),
... [0.816496, 0.816496, 0.816496])
>>> a = np.array([1, 2, 3])
>>> np.testing.assert_array_almost_equal(
... standard_deviation(a),
... 0.816496)
"""
if x.ndim > 1 and len(x[0]) > 1:
return np.std(x, axis=1)
return np.std(x) |
java | public void marshall(GlobalSecondaryIndex globalSecondaryIndex, ProtocolMarshaller protocolMarshaller) {
if (globalSecondaryIndex == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(globalSecondaryIndex.getIndexName(), INDEXNAME_BINDING);
protocolMarshaller.marshall(globalSecondaryIndex.getKeySchema(), KEYSCHEMA_BINDING);
protocolMarshaller.marshall(globalSecondaryIndex.getProjection(), PROJECTION_BINDING);
protocolMarshaller.marshall(globalSecondaryIndex.getProvisionedThroughput(), PROVISIONEDTHROUGHPUT_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public V get(K key) throws E {
clean();
ValueRef<K, V> valueRef = mValues.get(key);
V value;
if (valueRef == null || (value = valueRef.get()) == null) {
try {
value = create(key);
} catch (Exception e) {
// Workaround compiler bug.
org.cojen.util.ThrowUnchecked.fire(e);
return null;
}
valueRef = new ValueRef<K, V>(value, mValueRefQueue, key);
while (true) {
ValueRef<K, V> existingRef = mValues.putIfAbsent(key, valueRef);
if (existingRef == null) {
// Newly created value is now the official value.
break;
}
V existing = existingRef.get();
if (existing != null) {
// Someone else just created value before us. Use that
// instead and chuck the new value object.
value = existing;
valueRef.clear();
break;
}
// Reference just got cleared. Try again. Explicitly remove it
// to prevent an infinite loop. Note that the two argument
// remove method is called to ensure that what is being removed
// is not a new value.
                mValues.remove(existingRef.mKey, existingRef);
}
}
return value;
} |
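The same create-on-miss, race-tolerant pattern can be sketched in Python with weakref.WeakValueDictionary; this is a simplified analog of the cache above, not the Cojen implementation:

import threading
import weakref

class WeakValueCache:
    """Create-on-miss cache that holds its values weakly (simplified sketch)."""

    def __init__(self, factory):
        self._factory = factory                       # like the abstract create(key)
        self._values = weakref.WeakValueDictionary()  # entries vanish once unreferenced
        self._lock = threading.Lock()

    def get(self, key):
        value = self._values.get(key)
        if value is None:
            created = self._factory(key)  # values must be weak-referenceable objects
            with self._lock:
                # Re-check under the lock: another thread may have won the race,
                # mirroring the putIfAbsent loop in the Java version.
                value = self._values.get(key)
                if value is None:
                    self._values[key] = created
                    value = created
        return value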
java | public String next() {
if (fifo.isEmpty()) {
throw new RuntimeException("Fifo is empty");
}
String cmd = (String) fifo.get(0);
fifo.remove(0);
return cmd;
} |
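List-backed get(0)/remove(0) shifts every remaining element, so next() above is O(n) per call. In Python, the idiomatic equivalent of this contract uses collections.deque for O(1) front removal (sketch):

from collections import deque

class CommandQueue:
    def __init__(self, commands=()):
        self._fifo = deque(commands)

    def next(self):
        if not self._fifo:
            raise RuntimeError("Fifo is empty")
        return self._fifo.popleft()  # O(1), unlike list.pop(0)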
python | def _B(self, R):
"""Return numpy array from B1 up to and including Bn. (eqn. 6)"""
HNn_R = self._HNn / R
return HNn_R / self._sin_alpha * (0.4 * HNn_R / self._sin_alpha + 1) |
python | def set_result(self, result):
"""
Sets the result of the Future.
:param result: Result of the Future.
"""
if result is None:
self._result = NONE_RESULT
else:
self._result = result
self._event.set()
self._invoke_callbacks() |
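The NONE_RESULT sentinel lets the Future distinguish "completed with a None result" from "no result yet". A minimal sketch of both sides of that contract, with names borrowed from the snippet above (the retrieval side is an assumption about how _result is read back):

import threading

NONE_RESULT = object()  # sentinel meaning "completed, and the result was None"

class MiniFuture:
    def __init__(self):
        self._event = threading.Event()
        self._result = None  # unset until _event is set

    def set_result(self, result):
        self._result = NONE_RESULT if result is None else result
        self._event.set()

    def result(self):
        self._event.wait()
        return None if self._result is NONE_RESULT else self._result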
java | @XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "_Datum", substitutionHeadNamespace = "http://www.opengis.net/gml", substitutionHeadName = "Definition")
public JAXBElement<AbstractDatumType> create_Datum(AbstractDatumType value) {
return new JAXBElement<AbstractDatumType>(__Datum_QNAME, AbstractDatumType.class, null, value);
} |
python | def check_lat(self, dataset):
'''
float lat(time) ;//....................................... Depending on the precision used for the variable, the data type could be int or double instead of float.
lat:long_name = "" ; //...................................... RECOMMENDED - Provide a descriptive, long name for this variable.
lat:standard_name = "latitude" ; //.......................... REQUIRED - Do not change.
lat:units = "degrees_north" ; //............................. REQUIRED - CF recommends degrees_north, but at least must use UDUNITS.
lat:axis = "Y" ; //.......................................... REQUIRED - Do not change.
lat:valid_min = 0.0f ; //.................................... RECOMMENDED - Replace with correct value.
lat:valid_max = 0.0f ; //.................................... RECOMMENDED - Replace with correct value.
lat:_FillValue = 0.0f;//..................................... REQUIRED if there could be missing values in the data.
lat:ancillary_variables = "" ; //............................ RECOMMENDED - List other variables providing information about this variable.
lat:comment = "" ; //........................................ RECOMMENDED - Add useful, additional information here.
'''
results = []
lat = util.get_lat_variable(dataset)
if not lat:
return Result(BaseCheck.HIGH, False, 'latitude', ['a variable for latitude doesn\'t exist'])
lat_var = dataset.variables[lat]
test_ctx = TestCtx(BaseCheck.HIGH, 'Required attributes for variable {}'.format(lat))
test_ctx.assert_true(getattr(lat_var, 'standard_name', '') == 'latitude', 'standard_name attribute must be latitude')
units = getattr(lat_var, 'units', '')
test_ctx.assert_true(units and units_convertible(units, 'degrees_north'), 'units are valid UDUNITS for latitude')
test_ctx.assert_true(getattr(lat_var, 'axis', '') == 'Y', '{} axis attribute must be Y'.format(lat))
results.append(test_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for variable {}'.format(lat))
test_ctx.assert_true(getattr(lat_var, 'long_name', '') != '', 'long_name attribute should exist and not be empty')
self._check_min_max_range(lat_var, test_ctx)
        if hasattr(lat_var, 'comment'):
            test_ctx.assert_true(getattr(lat_var, 'comment', '') != '', 'comment attribute should not be empty if specified')
test_ctx.assert_true(units == 'degrees_north', '{} should have units degrees_north'.format(lat))
results.append(test_ctx.to_result())
return results |
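For reference, a latitude variable shaped to satisfy the required checks above, written with the netCDF4 Python API (in-memory file; attribute values are illustrative):

import numpy as np
from netCDF4 import Dataset

nc = Dataset('inmem.nc', 'w', diskless=True)
nc.createDimension('time', 3)
lat = nc.createVariable('lat', 'f4', ('time',))
lat.standard_name = 'latitude'      # required, must not change
lat.units = 'degrees_north'         # required, UDUNITS-convertible
lat.axis = 'Y'                      # required, must not change
lat.long_name = 'latitude of the measurement'  # recommended
lat[:] = np.array([31.5, 31.6, 31.7], dtype='f4')
nc.close()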
java | protected Supplier<Set<Location>> overrideLocationSupplier(Injector injector,
Supplier<Set<Location>> originalSupplier) {
return originalSupplier;
} |
java | @Override
@Deprecated
public void forEachRemaining(java.util.function.Consumer<? super Pair<A, B>> action) {
super.forEachRemaining(action);
} |
java | private static long computeSerialVersionUID(Class<?> cl, Field[] fields) {
/*
* First we should try to fetch the static slot 'static final long
* serialVersionUID'. If it is defined, return it. If not defined, we
* really need to compute SUID using SHAOutputStream
*/
for (int i = 0; i < fields.length; i++) {
final Field field = fields[i];
if (field.getType() == long.class) {
int modifiers = field.getModifiers();
if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) {
if (UID_FIELD_NAME.equals(field.getName())) {
/*
* We need to be able to see it even if we have no
* visibility. That is why we set accessible first (new
* API in reflect 1.2)
*/
field.setAccessible(true);
try {
// Static field, parameter is ignored
return field.getLong(null);
} catch (IllegalAccessException iae) {
throw new RuntimeException("Error fetching SUID: " + iae);
}
}
}
}
}
Digest digest = getDigest();
ByteArrayOutputStream sha = new ByteArrayOutputStream();
try {
DataOutputStream output = new DataOutputStream(sha);
output.writeUTF(cl.getName());
int classModifiers = CLASS_MODIFIERS_MASK & cl.getModifiers();
/*
* Workaround for 1F9LOQO. Arrays are ABSTRACT in JDK, but that is
* not in the specification. Since we want to be compatible for
* X-loading, we have to pretend we have the same shape
*/
boolean isArray = cl.isArray();
if (isArray) {
classModifiers |= Modifier.ABSTRACT;
}
// Required for JDK UID compatibility
if (cl.isInterface() && !Modifier.isPublic(classModifiers)) {
classModifiers &= ~Modifier.ABSTRACT;
}
output.writeInt(classModifiers);
/*
* In JDK1.2 arrays implement Cloneable and Serializable but not in
* JDK 1.1.7. So, JDK 1.2 "pretends" arrays have no interfaces when
* computing SHA-1 to be compatible.
*/
if (!isArray) {
// Interface information
Class<?>[] interfaces = cl.getInterfaces();
if (interfaces.length > 1) {
// Only attempt to sort if really needed (saves object
// creation, etc)
Comparator<Class<?>> interfaceComparator = new Comparator<Class<?>>() {
public int compare(Class<?> itf1, Class<?> itf2) {
return itf1.getName().compareTo(itf2.getName());
}
};
Arrays.sort(interfaces, interfaceComparator);
}
// Dump them
for (int i = 0; i < interfaces.length; i++) {
output.writeUTF(interfaces[i].getName());
}
}
// Field information
if (fields.length > 1) {
// Only attempt to sort if really needed (saves object creation,
// etc)
Comparator<Field> fieldComparator = new Comparator<Field>() {
public int compare(Field field1, Field field2) {
return field1.getName().compareTo(field2.getName());
}
};
Arrays.sort(fields, fieldComparator);
}
// Dump them
for (int i = 0; i < fields.length; i++) {
Field field = fields[i];
int modifiers = field.getModifiers() & FIELD_MODIFIERS_MASK;
boolean skip = Modifier.isPrivate(modifiers) &&
(Modifier.isTransient(modifiers) || Modifier.isStatic(modifiers));
if (!skip) {
// write name, modifier & "descriptor" of all but private
// static and private transient
output.writeUTF(field.getName());
output.writeInt(modifiers);
output.writeUTF(descriptorForFieldSignature(getFieldSignature(field)));
}
}
/*
* Normally constructors come before methods (because <init> <
* anyMethodName). However, <clinit> is an exception. Besides,
* reflect will not let us get to it.
*/
if (hasClinit(cl)) {
// write name, modifier & "descriptor"
output.writeUTF(CLINIT_NAME);
output.writeInt(CLINIT_MODIFIERS);
output.writeUTF(CLINIT_SIGNATURE);
}
// Constructor information
Constructor<?>[] constructors = cl.getDeclaredConstructors();
if (constructors.length > 1) {
// Only attempt to sort if really needed (saves object creation,
// etc)
Comparator<Constructor<?>> constructorComparator = new Comparator<Constructor<?>>() {
public int compare(Constructor<?> ctr1, Constructor<?> ctr2) {
// All constructors have same name, so we sort based on
// signature
return (getConstructorSignature(ctr1)
.compareTo(getConstructorSignature(ctr2)));
}
};
Arrays.sort(constructors, constructorComparator);
}
// Dump them
for (int i = 0; i < constructors.length; i++) {
Constructor<?> constructor = constructors[i];
int modifiers = constructor.getModifiers()
& METHOD_MODIFIERS_MASK;
boolean isPrivate = Modifier.isPrivate(modifiers);
if (!isPrivate) {
/*
* write name, modifier & "descriptor" of all but private
* ones
*
* constructor.getName() returns the constructor name as
* typed, not the VM name
*/
output.writeUTF("<init>");
output.writeInt(modifiers);
output.writeUTF(descriptorForSignature(
getConstructorSignature(constructor)).replace('/',
'.'));
}
}
// Method information
Method[] methods = cl.getDeclaredMethods();
if (methods.length > 1) {
Comparator<Method> methodComparator = new Comparator<Method>() {
public int compare(Method m1, Method m2) {
int result = m1.getName().compareTo(m2.getName());
if (result == 0) {
// same name, signature will tell which one comes
// first
return getMethodSignature(m1).compareTo(
getMethodSignature(m2));
}
return result;
}
};
Arrays.sort(methods, methodComparator);
}
// Dump them
for (int i = 0; i < methods.length; i++) {
Method method = methods[i];
int modifiers = method.getModifiers() & METHOD_MODIFIERS_MASK;
boolean isPrivate = Modifier.isPrivate(modifiers);
if (!isPrivate) {
// write name, modifier & "descriptor" of all but private
// ones
output.writeUTF(method.getName());
output.writeInt(modifiers);
output.writeUTF(descriptorForSignature(
getMethodSignature(method)).replace('/', '.'));
}
}
} catch (IOException e) {
throw new RuntimeException(e + " computing SHA-1/SUID");
}
// now compute the UID based on the SHA
byte[] hash = digest.digest(sha.toByteArray());
return Memory.peekLong(hash, 0, ByteOrder.LITTLE_ENDIAN);
} |
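The last line folds the 20-byte SHA-1 into a long by reading its first 8 bytes little-endian, matching the serialVersionUID recipe in the serialization spec. That final step in isolation (Python sketch; the input bytes are a placeholder, not a real class description):

import hashlib

digest = hashlib.sha1(b'placeholder class description').digest()
suid = int.from_bytes(digest[:8], byteorder='little', signed=True)
print(suid)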
python | def floor(start, resolution):
"""Floor a datetime by a resolution.
>>> now = datetime(2012, 7, 6, 20, 33, 16, 573225)
>>> floor(now, STEP_1_HOUR)
datetime.datetime(2012, 7, 6, 20, 0)
"""
if resolution == STEP_10_SEC:
return datetime(start.year, start.month, start.day, start.hour,
start.minute, start.second - (start.second % 10))
elif resolution == STEP_1_MIN:
return datetime(start.year, start.month, start.day, start.hour,
start.minute)
elif resolution == STEP_5_MIN:
return datetime(start.year, start.month, start.day, start.hour,
start.minute - (start.minute % 5))
elif resolution == STEP_1_HOUR:
return datetime(start.year, start.month, start.day, start.hour)
elif resolution == STEP_1_DAY:
return datetime(start.year, start.month, start.day)
raise ValueError("{resolution} is not a valid resolution. Valid choices "
"are {choices}".format(resolution=resolution,
choices=STEP_CHOICES)) |
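Worked values for two of the other resolutions, assuming the STEP_* constants defined alongside this module:

from datetime import datetime

now = datetime(2012, 7, 6, 20, 33, 16, 573225)
# floor(now, STEP_5_MIN)  -> datetime(2012, 7, 6, 20, 30)      (33 - 33 % 5 = 30)
# floor(now, STEP_10_SEC) -> datetime(2012, 7, 6, 20, 33, 10)  (16 - 16 % 10 = 10)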
python | def _permission_trees(permissions):
"""Get the cached permission tree, or build a new one if necessary."""
treecache = PermissionTreeCache()
cached = treecache.get()
if not cached:
tree = PermissionTreeBuilder()
for permission in permissions:
tree.insert(permission)
result = tree.serialize()
treecache.set(result)
return result
return cached |
python | def _preprocess_params(cls, kwargs):
"""Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
"""
# kwargs.pop('csrf_token', None)
        # Iterate over a snapshot so keys can be deleted from kwargs in the loop.
        for attr, val in list(kwargs.items()):
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
# Making an assumption that there is no good usecase
# for setting an empty string. This will help prevent
# cases where empty string is sent because of client
# not clearing form fields to null
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(
**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs |
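The list(...) snapshot in the loop above matters because deleting keys while iterating a dict raises RuntimeError in Python 3. A minimal demonstration:

d = {'id': 1, 'name': ''}
for k, v in list(d.items()):  # snapshot: safe to mutate d inside the loop
    if k == 'id':
        del d[k]
print(d)  # {'name': ''}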