repo_name (stringlengths 4-116) | path (stringlengths 4-379) | size (stringlengths 1-7) | content (stringlengths 3-1.05M) | license (stringclasses 15 values)
---|---|---|---|---|
rowanmiller/Demo-MVADec14 | CompletedSource/CycleSales/src/CycleSales/Migrations/201412150243215_BikeTable.Designer.cs | 1478 | using CycleSales.Models;
using Microsoft.Data.Entity;
using Microsoft.Data.Entity.Metadata;
using Microsoft.Data.Entity.Migrations.Infrastructure;
using System;
namespace CycleSales.Migrations
{
[ContextType(typeof(CycleSales.Models.CycleSalesContext))]
public partial class BikeTable : IMigrationMetadata
{
string IMigrationMetadata.MigrationId
{
get
{
return "201412150243215_BikeTable";
}
}
string IMigrationMetadata.ProductVersion
{
get
{
return "7.0.0-beta2-11771";
}
}
IModel IMigrationMetadata.TargetModel
{
get
{
var builder = new BasicModelBuilder();
builder.Entity("CycleSales.Models.Bike", b =>
{
b.Property<int>("BikeId")
.GenerateValueOnAdd();
b.Property<string>("Description");
b.Property<string>("ImageUrl");
b.Property<string>("ModelNo");
b.Property<string>("Name");
b.Property<decimal>("Retail");
b.Key("BikeId");
});
return builder.Model;
}
}
}
} | apache-2.0 |
SpiGetOrg/spiget.org | public/index.php | 78 | <?php
include_once("../internal/Smarty.php");
$smarty->display("index.tpl");
| apache-2.0 |
vespa-engine/vespa | node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java | 7921 | // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.component.Vtag;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import org.junit.Test;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
* @author mpolden
*/
public class LoadBalancerExpirerTest {
private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
@Test
public void expire_inactive() {
LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
Duration.ofDays(1),
tester.loadBalancerService(),
new TestMetric());
Supplier<Map<LoadBalancerId, LoadBalancer>> loadBalancers = () -> tester.nodeRepository().database().readLoadBalancers((ignored) -> true);
// Deploy two applications with a total of three load balancers
ClusterSpec.Id cluster1 = ClusterSpec.Id.from("qrs");
ClusterSpec.Id cluster2 = ClusterSpec.Id.from("qrs2");
ApplicationId app1 = ProvisioningTester.applicationId();
ApplicationId app2 = ProvisioningTester.applicationId();
LoadBalancerId lb1 = new LoadBalancerId(app1, cluster1);
LoadBalancerId lb2 = new LoadBalancerId(app2, cluster1);
LoadBalancerId lb3 = new LoadBalancerId(app2, cluster2);
deployApplication(app1, cluster1);
deployApplication(app2, cluster1, cluster2);
assertEquals(3, loadBalancers.get().size());
// Removing one application deactivates the load balancers for that application
tester.remove(app1);
assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state());
assertNotSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb2).state());
// Expirer defers removal while nodes are still allocated to application
tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, getClass().getSimpleName());
expirer.maintain();
assertEquals(Set.of(), tester.loadBalancerService().instances().get(lb1).reals());
assertEquals(Set.of(), loadBalancers.get().get(lb1).instance().get().reals());
// Expirer defers removal of load balancer until expiration time passes
expirer.maintain();
assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state());
assertTrue("Inactive load balancer not removed", tester.loadBalancerService().instances().containsKey(lb1));
// Expirer removes load balancers once expiration time passes
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertFalse("Inactive load balancer removed", tester.loadBalancerService().instances().containsKey(lb1));
// Active load balancer is left alone
assertSame(LoadBalancer.State.active, loadBalancers.get().get(lb2).state());
assertTrue("Active load balancer is not removed", tester.loadBalancerService().instances().containsKey(lb2));
// A single cluster is removed
deployApplication(app2, cluster1);
expirer.maintain();
assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb3).state());
// Expirer defers removal while nodes are still allocated to cluster
expirer.maintain();
assertEquals(2, tester.loadBalancerService().instances().size());
removeNodesOf(app2, cluster2);
// Expirer removes load balancer for removed cluster
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertFalse("Inactive load balancer removed", tester.loadBalancerService().instances().containsKey(lb3));
}
@Test
public void expire_reserved() {
LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
Duration.ofDays(1),
tester.loadBalancerService(),
new TestMetric());
Supplier<Map<LoadBalancerId, LoadBalancer>> loadBalancers = () -> tester.nodeRepository().database().readLoadBalancers((ignored) -> true);
// Prepare application
ClusterSpec.Id cluster = ClusterSpec.Id.from("qrs");
ApplicationId app = ProvisioningTester.applicationId();
LoadBalancerId lb = new LoadBalancerId(app, cluster);
deployApplication(app, false, cluster);
// Provisions load balancer in reserved
assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
// Expirer does nothing
expirer.maintain();
assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
// Application never activates and nodes are dirtied and readied. Expirer moves load balancer to inactive after timeout
removeNodesOf(app, cluster);
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
// Expirer does nothing as inactive expiration time has not yet passed
expirer.maintain();
assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
// Expirer removes inactive load balancer
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertFalse("Inactive load balancer removed", loadBalancers.get().containsKey(lb));
}
private void removeNodesOf(ApplicationId application, ClusterSpec.Id cluster) {
var nodes = tester.nodeRepository().nodes().list()
.owner(application)
.cluster(cluster)
.asList();
nodes = tester.nodeRepository().nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
tester.nodeRepository().nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
}
private void deployApplication(ApplicationId application, ClusterSpec.Id... clusters) {
deployApplication(application, true, clusters);
}
private void deployApplication(ApplicationId application, boolean activate, ClusterSpec.Id... clusters) {
tester.makeReadyNodes(10, new NodeResources(1, 4, 10, 0.3));
List<HostSpec> hosts = new ArrayList<>();
for (var cluster : clusters) {
hosts.addAll(tester.prepare(application, ClusterSpec.request(ClusterSpec.Type.container, cluster).vespaVersion(Vtag.currentVersion).build(),
2, 1,
new NodeResources(1, 4, 10, 0.3)));
}
if (activate) {
tester.activate(application, hosts);
}
}
}
| apache-2.0 |
alexhilton/miscellaneous | cc-work/basics/oop-in-cpp/chap05/window.cc | 7522 | /*
* window.cc
*
* Implements a hierarchy of window classes:
* Window, FrameWindow, MenuWindow, Dialog.
*/
#include <string>
#include <iostream>
using namespace std;
#define SCREEN 24
// abstract class Window
class Window {
private:
int x0, y0; // left-top co-ordinate
int length; // length of window
int width; // width of window
string caption; // caption or title of the window
public:
Window() {
x0 = 0;
y0 = 0;
length = 40;
width = 12;
caption = "Window";
}
Window( const int &x, const int &y, const int &l, const int &w,
const string &c ) {
x0 = x;
y0 = y;
length = l;
width = w;
caption = c;
}
void setPosition( const int &x, const int &y ) {
x0 = x;
y0 = y;
}
int getPositionX() const {
return x0;
}
int getPositionY() const {
return y0;
}
void setLength( const int &l ) {
length = l;
}
int getLength() const {
return length;
}
void setWidth( const int &w ) {
width = w;
}
int getWidth() const {
return width;
}
void setCaption( const string &c ) {
caption = c;
}
string getCaption() const {
return caption;
}
virtual ~Window() {}
virtual void create( const string &c ) {
caption = c;
}
virtual void show() const {
int i;
for ( i = 0; i < x0; i++ ) {
cout << endl;
}
printHeader();
printBody();
printTail();
}
virtual void move( const int &nx, const int &ny ) {
setPosition( nx, ny );
show();
}
virtual void hide() {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
virtual void destroy() {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
protected:
void printHeader() const {
int i;
cout << "+";
for ( i = 0; i < length-2; i++ ) {
cout << "-";
}
cout << "+" << endl;
cout << "| " << caption;
int tmp = length - caption.length() - 3;
for ( i = 0; i < tmp; i++ ) {
cout << " ";
}
cout << "|" << endl;
for ( i = 0; i < length; i++ ) {
cout << "-";
}
cout << endl;
}
void printBody() const {
int i;
int j;
for ( i = 0; i < width-2; i++ ) {
cout << "|";
for ( j = 0; j < length-2; j++ ) {
cout << " ";
}
cout << "|" << endl;
}
}
void printTail() const {
int i;
cout << "+";
for ( i = 0; i < length-2; i++ ) {
cout << "-";
}
cout << "+" << endl;
}
};
// derive class FrameWindow from Window
class FrameWindow : public Window {
private:
int status;
string translate() const {
switch ( status ) {
case 0:
return "0 status";
case 1:
return "1 status";
case 2:
return "2 status";
default:
return "";
}
}
void printStatus() const {
int i;
for ( i = 0; i < getLength(); i++ ) {
cout << "-";
}
cout << endl;
cout << "| status: " << translate();
int tmp;
tmp = getLength()-2*translate().length()-3;
for ( i = 0; i < tmp; i++ ) {
cout << " ";
}
cout << "|" << endl;
}
public:
void setStatus( const int &s ) {
status = s;
}
int getStatus() const {
return status;
}
virtual void create( const string &c ) {
setCaption( c );
}
FrameWindow() : Window() {
status = 0;
}
FrameWindow( const int &x, const int &y, const int &l, const int &w,
const string &c, const int &s )
: Window( x, y, l, w, c ) {
status = s;
}
virtual void show() const {
int i;
for ( i = 0; i < getPositionX(); i++ ) {
cout << endl;
}
printHeader();
printBody();
printStatus();
printTail();
}
virtual void move( const int &nx, const int &ny ) {
setPosition( nx, ny );
show();
}
virtual void hide() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
virtual void destroy() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
};
// derive another class Dialog from Window
class Dialog : public Window {
private:
string button1;
string button2;
void printButton() const {
int i;
cout << "|";
int tmp = getLength()-2 - (button1.length() + button2.length() + 6);
for ( i = 0; i < tmp; i++ ) {
cout << " ";
}
cout << button1 << " " << button2 << " " << "|" << endl;
}
public:
Dialog() : Window() {
button1 = "OK";
button2 = "Cancel";
}
Dialog( const int &x, const int &y, const int &l, const int &w,
const string &c, const string &b1, const string &b2 )
: Window( x, y, l, w, c ) {
button1 = b1;
button2 = b2;
}
virtual void create( const string &c ) {
setCaption( c );
}
virtual void show() const {
int i;
for ( i = 0; i < getPositionX(); i++ ) {
cout << endl;
}
printHeader();
printBody();
printButton();
printTail();
}
virtual void move( const int &nx, const int &ny ) {
setPosition( nx, ny );
show();
}
virtual void hide() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
virtual void destroy() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
};
// derive another class MenuWindow from Window
class MenuWindow : public Window {
private:
string *menu;
int size;
void printMenu() const {
int i;
int tmp = getLength() - 3 - (size - 1);
for ( i = 0; i < size; i++ ) {
tmp -= menu[ i ].length();
}
cout << "|";
for ( i = 0; i < size; i++ ) {
cout << " " << menu[ i ];
}
for ( i = 0; i < tmp; i++ ) {
cout << " ";
}
cout << "|" << endl;
for ( i = 0; i < getLength(); i++ ) {
cout << "-";
}
cout << endl;
}
public:
MenuWindow() : Window() {
size = 6;
menu = new string[ size ];
menu[ 0 ] = "File";
menu[ 1 ] = "Edit";
menu[ 2 ] = "View";
menu[ 3 ] = "Tools";
menu[ 4 ] = "Window";
menu[ 5 ] = "Help";
}
MenuWindow( const int &x, const int &y, const int &l, const int &w,
const string &c, const int &s, const string m[] )
: Window( x, y, l, w, c ) {
menu = new string[ size = s ];
int i;
for ( i = 0; i < size; i++ ) {
menu[ i ] = m[ i ];
}
}
~MenuWindow() {
delete [] menu;
}
void setSize( const int &s ) {
size = s;
}
int getSize() const {
return size;
}
void setMenu( const string m[] ) {
int i;
for ( i = 0; i < size; i++ ) {
menu[ i ] = m[ i ];
}
}
virtual void create( const string &c ) {
setCaption( c );
}
virtual void show() const {
int i;
for ( i = 0; i < getPositionX(); i++ ) {
cout << endl;
}
printHeader();
printMenu();
printBody();
printTail();
}
virtual void hide() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
virtual void move( const int &nx, const int &ny ) {
setPosition( nx, ny );
show();
}
virtual void destroy() const {
int i;
for ( i = 0; i < SCREEN; i++ ) {
cout << endl;
}
}
};
int main() {
char ch;
Window one;
one.show();
cout << "input 'n' to continue...";
cin >> ch;
one.destroy();
FrameWindow two;
two.show();
cout << "input 'n' to continue...";
cin >> ch;
two.destroy();
Dialog three;
three.show();
cout << "input 'n' to continue...";
cin >> ch;
three.destroy();
MenuWindow four;
four.show();
cout << "input 'n' to continue...";
cin >> ch;
return 0;
}
| apache-2.0 |
Xarthisius/girder | girder/utility/acl_mixin.py | 8166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import itertools
import six
from ..models.model_base import Model
from ..exceptions import AccessException
from ..constants import AccessType
class AccessControlMixin(object):
"""
This mixin is intended to be used for resources which aren't access
controlled by default, but resolve their access controls through other
resources. As such, the overridden methods retain the same parameters and
only alter functionality related to access control resolution.
resourceColl corresponds to the resource collection that needs to be used
for resolution, for example the Item model has a resourceColl of folder.
resourceParent corresponds to the field in which the parent resource
belongs, so for an item it would be the folderId.
"""
resourceColl = None
resourceParent = None
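# Illustrative sketch (not part of the original file): following the Item example in the
# docstring above, a subclass would point these attributes at its parent resource, e.g.
#
#     class Item(AccessControlMixin, Model):
#         resourceColl = 'folder'      # collection used to resolve access
#         resourceParent = 'folderId'  # field referencing the parent document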
def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
force=False, fields=None, exc=False):
"""
Calls Model.load on the current item, and then attempts to load the
resourceParent which the user must have access to in order to load this
model.
Takes the same parameters as
:py:func:`girder.models.model_base.AccessControlledModel.load`.
"""
loadFields = fields
if not force:
extraFields = {'attachedToId', 'attachedToType'}
if self.resourceParent:
extraFields.add(self.resourceParent)
loadFields = self._supplementFields(fields, extraFields)
doc = Model.load(self, id=id, objectId=objectId, fields=loadFields, exc=exc)
if not force and doc is not None:
if doc.get(self.resourceParent):
loadType = self.resourceColl
loadId = doc[self.resourceParent]
else:
loadType = doc.get('attachedToType')
loadId = doc.get('attachedToId')
if isinstance(loadType, six.string_types):
self.model(loadType).load(loadId, level=level, user=user, exc=exc)
elif isinstance(loadType, list) and len(loadType) == 2:
self.model(*loadType).load(loadId, level=level, user=user, exc=exc)
else:
raise Exception('Invalid model type: %s' % str(loadType))
self._removeSupplementalFields(doc, fields)
return doc
def hasAccess(self, resource, user=None, level=AccessType.READ):
"""
Determines if a user has access to a resource based on their access to
the resourceParent.
Takes the same parameters as
:py:func:`girder.models.model_base.AccessControlledModel.hasAccess`.
"""
resource = self.model(self.resourceColl) \
.load(resource[self.resourceParent], force=True)
return self.model(self.resourceColl).hasAccess(
resource, user=user, level=level)
def hasAccessFlags(self, doc, user=None, flags=None):
"""
See the documentation of AccessControlledModel.hasAccessFlags, which this wraps.
"""
if not flags:
return True
resource = self.model(self.resourceColl).load(doc[self.resourceParent], force=True)
return self.model(self.resourceColl).hasAccessFlags(resource, user, flags)
def requireAccess(self, doc, user=None, level=AccessType.READ):
"""
This wrapper just provides a standard way of throwing an
access denied exception if the access check fails.
"""
if not self.hasAccess(doc, user, level):
if level == AccessType.READ:
perm = 'Read'
elif level == AccessType.WRITE:
perm = 'Write'
elif level in (AccessType.ADMIN, AccessType.SITE_ADMIN):
perm = 'Admin'
else:
perm = 'Unknown level'
if user:
userid = str(user.get('_id', ''))
else:
userid = None
raise AccessException("%s access denied for %s %s (user %s)." %
(perm, self.name, doc.get('_id', 'unknown'),
userid))
def requireAccessFlags(self, doc, user=None, flags=None):
"""
See the documentation of AccessControlledModel.requireAccessFlags, which this wraps.
"""
if not flags:
return
resource = self.model(self.resourceColl).load(doc[self.resourceParent], force=True)
return self.model(self.resourceColl).requireAccessFlags(resource, user, flags)
def filterResultsByPermission(self, cursor, user, level, limit=0, offset=0,
removeKeys=(), flags=None):
"""
Yields filtered results from the cursor based on the access control
existing for the resourceParent.
Takes the same parameters as
:py:func:`girder.models.model_base.AccessControlledModel.filterResultsByPermission`.
"""
# Cache mapping resourceIds -> access granted (bool)
resourceAccessCache = {}
def hasAccess(_result):
resourceId = _result[self.resourceParent]
# return cached check if it exists
if resourceId in resourceAccessCache:
return resourceAccessCache[resourceId]
# if the resourceId is not cached, check for permission "level"
# and set the cache
resource = self.model(self.resourceColl).load(resourceId, force=True)
val = self.model(self.resourceColl).hasAccess(
resource, user=user, level=level)
if flags:
val = val and self.model(self.resourceColl).hasAccessFlags(
resource, user=user, flags=flags)
resourceAccessCache[resourceId] = val
return val
endIndex = offset + limit if limit else None
filteredCursor = six.moves.filter(hasAccess, cursor)
for result in itertools.islice(filteredCursor, offset, endIndex):
for key in removeKeys:
if key in result:
del result[key]
yield result
def textSearch(self, query, user=None, filters=None, limit=0, offset=0,
sort=None, fields=None, level=AccessType.READ):
filters = filters or {}
cursor = Model.textSearch(
self, query=query, filters=filters, sort=sort, fields=fields)
return self.filterResultsByPermission(
cursor, user=user, level=level, limit=limit, offset=offset)
def prefixSearch(self, query, user=None, filters=None, limit=0, offset=0,
sort=None, fields=None, level=AccessType.READ):
"""
Custom override of Model.prefixSearch to also force permission-based
filtering. The parameters are the same as Model.prefixSearch.
:param user: The user to apply permission filtering for.
:type user: dict or None
:param level: The access level to require.
:type level: girder.constants.AccessType
"""
filters = filters or {}
cursor = Model.prefixSearch(
self, query=query, filters=filters, sort=sort, fields=fields)
return self.filterResultsByPermission(
cursor, user=user, level=level, limit=limit, offset=offset)
| apache-2.0 |
Mariovc/kinballwc2015 | app/src/main/java/com/mvc/kinballwc/broadcast/PeriodBroadcastReceiver.java | 1964 | package com.mvc.kinballwc.broadcast;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.widget.TextView;
import com.mvc.kinballwc.application.App;
import com.mvc.kinballwc.ui.activity.MatchActivity;
import com.mvc.kinballwc.ui.fragment.PeriodFragment;
import java.util.Random;
/**
* Author: Mario Velasco Casquero
* Date: 15/08/2015
* Email: [email protected]
*/
public class PeriodBroadcastReceiver extends BroadcastReceiver {
public static final String PERIOD_INTENT_ACTION = "com.mvc.kinballwc.PERIOD_UPDATE";
public static final String FIELD_PERIOD_ID = "periodId";
public static final String FIELD_TEAM_POSITION = "teamPos";
public static final String FIELD_SCORE = "score";
public static final String FIELD_UPDATE = "update";
public static final String FIELD_ADD = "add";
public static final String FIELD_REMOVE = "remove";
public static final String FIELD_REFRESH = "refresh";
public static final String FIELD_ACTION = "action";
private MatchActivity mMatchActivity;
public PeriodBroadcastReceiver(MatchActivity matchActivity) {
this.mMatchActivity = matchActivity;
}
@Override
public void onReceive(Context context, Intent intent) {
String action = intent.getStringExtra(FIELD_ACTION);
String periodId = intent.getStringExtra(FIELD_PERIOD_ID);
if (action.equals(FIELD_UPDATE)) {
int teamPos = intent.getIntExtra(FIELD_TEAM_POSITION, 0);
int score = intent.getIntExtra(FIELD_SCORE, 0);
mMatchActivity.onUpdatePeriod(periodId, teamPos, score);
} else if (action.equals(FIELD_ADD)) {
mMatchActivity.onAddPeriod(periodId);
} else if (action.equals(FIELD_REMOVE)) {
mMatchActivity.onRemovePeriod(periodId);
} else if (action.equals(FIELD_REFRESH)) {
mMatchActivity.onRefresh();
}
}
}
| apache-2.0 |
liming/hellosleep-site | src/app.js | 1459 | /**
* Hellosleep
*/
// Load .env for development environments
(process.env.NODE_ENV !== 'production') && require('dotenv').load();
const keystone = require('keystone');
keystone.init({
'name': '睡吧',
'brand': '睡吧',
'less': 'public',
'static': 'public',
'views': 'templates/views',
'view engine': 'jade',
'port': process.env.PORT || 3000,
'session': true,
'auth': true,
'user model': 'User',
'cookie secret': '--- insomnia ---',
'mongo': process.env.MONGO_URI || 'mongodb://localhost/hellosleep',
'auto update': true,
// 'session store': 'mongo',
// editor configuration
'wysiwyg additional buttons': 'styleselect',
'basedir': __dirname,
// using Mailgun as the mail service; visit the Mailgun dashboard for API
// information: https://mailgun.com/app/dashboard
'email transport': 'mailgun',
'emails': 'templates/emails'
});
keystone.import('models');
keystone.set('routes', require('./routes'));
keystone.set('locals', {
ga: {
property: process.env.GA_SITE_PROPERTY,
domain: process.env.GA_SITE_DOMAIN
},
env: process.env.NODE_ENV || "development",
host: (function() {
return (keystone.get('host') || 'http://localhost:') + (keystone.get('port') || '3000');
})()
});
// setup emails
if (process.env.MAILGUN_API_KEY) {
keystone.set('mailgun api key', process.env.MAILGUN_API_KEY);
keystone.set('mailgun domain', process.env.MAILGUN_DOMAIN);
}
keystone.start();
| apache-2.0 |
miswenwen/My_bird_work | Bird_work/我的项目/Music/src/com/android/music/TouchInterceptor.java | 19240 | /*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.music;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.res.Resources;
import android.graphics.Bitmap;
import android.graphics.PixelFormat;
import android.graphics.Rect;
import android.graphics.drawable.Drawable;
import android.graphics.drawable.LevelListDrawable;
import android.util.AttributeSet;
import android.util.Log;
import android.view.GestureDetector;
import android.view.Gravity;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewConfiguration;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.view.GestureDetector.SimpleOnGestureListener;
import android.widget.AdapterView;
import android.widget.ImageView;
import android.widget.ListView;
public class TouchInterceptor extends ListView {
private ImageView mDragView;
private WindowManager mWindowManager;
private WindowManager.LayoutParams mWindowParams;
/**
* At which position is the item currently being dragged. Note that this
* takes in to account header items.
*/
private int mDragPos;
/**
* At which position was the item being dragged originally
*/
private int mSrcDragPos;
private int mDragPointX; // at what x offset inside the item did the user grab it
private int mDragPointY; // at what y offset inside the item did the user grab it
private int mXOffset; // the difference between screen coordinates and coordinates in this view
private int mYOffset; // the difference between screen coordinates and coordinates in this view
private DragListener mDragListener;
private DropListener mDropListener;
private RemoveListener mRemoveListener;
private int mUpperBound;
private int mLowerBound;
private int mHeight;
private GestureDetector mGestureDetector;
private static final int FLING = 0;
private static final int SLIDE = 1;
private static final int TRASH = 2;
private int mRemoveMode = -1;
private Rect mTempRect = new Rect();
private Bitmap mDragBitmap;
private final int mTouchSlop;
private int mItemHeightNormal;
private int mItemHeightExpanded;
private int mItemHeightHalf;
private Drawable mTrashcan;
// SPRD 476966
private View mView;
public TouchInterceptor(Context context, AttributeSet attrs) {
super(context, attrs);
SharedPreferences pref = context.getSharedPreferences("Music", 3);
mRemoveMode = pref.getInt("deletemode", -1);
mTouchSlop = ViewConfiguration.get(context).getScaledTouchSlop();
Resources res = getResources();
mItemHeightNormal = res.getDimensionPixelSize(R.dimen.normal_height);
mItemHeightHalf = mItemHeightNormal / 2;
mItemHeightExpanded = res.getDimensionPixelSize(R.dimen.expanded_height);
}
@Override
public boolean onInterceptTouchEvent(MotionEvent ev) {
if (mRemoveListener != null && mGestureDetector == null) {
if (mRemoveMode == FLING) {
mGestureDetector = new GestureDetector(getContext(), new SimpleOnGestureListener() {
@Override
public boolean onFling(MotionEvent e1, MotionEvent e2, float velocityX,
float velocityY) {
if (mDragView != null) {
if (velocityX > 1000) {
Rect r = mTempRect;
mDragView.getDrawingRect(r);
if ( e2.getX() > r.right * 2 / 3) {
// fast fling right with release near the right edge of the screen
stopDragging();
mRemoveListener.remove(mSrcDragPos);
unExpandViews(true);
}
}
// flinging while dragging should have no effect
return true;
}
return false;
}
});
}
}
if (mDragListener != null || mDropListener != null) {
switch (ev.getAction()) {
case MotionEvent.ACTION_DOWN:
int x = (int) ev.getX();
int y = (int) ev.getY();
int itemnum = pointToPosition(x, y);
if (itemnum == AdapterView.INVALID_POSITION) {
break;
}
ViewGroup item = (ViewGroup) getChildAt(itemnum - getFirstVisiblePosition());
mDragPointX = x - item.getLeft();
mDragPointY = y - item.getTop();
mXOffset = ((int)ev.getRawX()) - x;
mYOffset = ((int)ev.getRawY()) - y;
// The left side of the item is the grabber for dragging the item
if (x < 64) {
item.setDrawingCacheEnabled(true);
// Create a copy of the drawing cache so that it does not get recycled
// by the framework when the list tries to clean up memory
Bitmap bitmap = Bitmap.createBitmap(item.getDrawingCache());
// SPRD 476966
item.setDrawingCacheEnabled(false);
startDragging(bitmap, x, y);
mDragPos = itemnum;
mSrcDragPos = mDragPos;
mHeight = getHeight();
int touchSlop = mTouchSlop;
mUpperBound = Math.min(y - touchSlop, mHeight / 3);
mLowerBound = Math.max(y + touchSlop, mHeight * 2 /3);
return false;
}
stopDragging();
break;
}
}
return super.onInterceptTouchEvent(ev);
}
/*
* pointToPosition() doesn't consider invisible views, but we
* need to, so implement a slightly different version.
*/
private int myPointToPosition(int x, int y) {
if (y < 0) {
// when dragging off the top of the screen, calculate position
// by going back from a visible item
int pos = myPointToPosition(x, y + mItemHeightNormal);
if (pos > 0) {
return pos - 1;
}
}
Rect frame = mTempRect;
final int count = getChildCount();
for (int i = count - 1; i >= 0; i--) {
final View child = getChildAt(i);
child.getHitRect(frame);
if (frame.contains(x, y)) {
return getFirstVisiblePosition() + i;
}
}
return INVALID_POSITION;
}
private int getItemForPosition(int y) {
int adjustedy = y - mDragPointY - mItemHeightHalf;
int pos = myPointToPosition(0, adjustedy);
if (pos >= 0) {
if (pos <= mSrcDragPos) {
pos += 1;
}
} else if (adjustedy < 0) {
// this shouldn't happen anymore now that myPointToPosition deals
// with this situation
pos = 0;
}
return pos;
}
private void adjustScrollBounds(int y) {
if (y >= mHeight / 3) {
mUpperBound = mHeight / 3;
}
if (y <= mHeight * 2 / 3) {
mLowerBound = mHeight * 2 / 3;
}
}
/*
* Restore size and visibility for all listitems
*/
private void unExpandViews(boolean deletion) {
for (int i = 0;; i++) {
View v = getChildAt(i);
if (v == null) {
if (deletion) {
// HACK force update of mItemCount
int position = getFirstVisiblePosition();
int y = getChildAt(0).getTop();
setAdapter(getAdapter());
setSelectionFromTop(position, y);
// end hack
}
try {
layoutChildren(); // force children to be recreated where needed
v = getChildAt(i);
} catch (IllegalStateException ex) {
// layoutChildren throws this sometimes, presumably because we're
// in the process of being torn down but are still getting touch
// events
}
if (v == null) {
return;
}
}
ViewGroup.LayoutParams params = v.getLayoutParams();
params.height = mItemHeightNormal;
v.setLayoutParams(params);
v.setVisibility(View.VISIBLE);
}
}
/* Adjust visibility and size to make it appear as though
* an item is being dragged around and other items are making
* room for it:
* If dropping the item would result in it still being in the
* same place, then make the dragged listitem's size normal,
* but make the item invisible.
* Otherwise, if the dragged listitem is still on screen, make
* it as small as possible and expand the item below the insert
* point.
* If the dragged item is not on screen, only expand the item
* below the current insertpoint.
*/
private void doExpansion() {
int childnum = mDragPos - getFirstVisiblePosition();
if (mDragPos > mSrcDragPos) {
childnum++;
}
int numheaders = getHeaderViewsCount();
View first = getChildAt(mSrcDragPos - getFirstVisiblePosition());
for (int i = 0;; i++) {
View vv = getChildAt(i);
if (vv == null) {
break;
}
int height = mItemHeightNormal;
int visibility = View.VISIBLE;
if (mDragPos < numheaders && i == numheaders) {
// dragging on top of the header item, so adjust the item below
// instead
if (vv.equals(first)) {
// SPRD 476966
mView = vv;
visibility = View.INVISIBLE;
} else {
height = mItemHeightExpanded;
}
} else if (vv.equals(first)) {
// processing the item that is being dragged
if (mDragPos == mSrcDragPos || getPositionForView(vv) == getCount() - 1) {
// hovering over the original location
// SPRD 476966
mView = vv;
visibility = View.INVISIBLE;
} else {
// not hovering over it
// Ideally the item would be completely gone, but neither
// setting its size to 0 nor settings visibility to GONE
// has the desired effect.
height = 1;
}
} else if (i == childnum) {
if (mDragPos >= numheaders && mDragPos < getCount() - 1) {
height = mItemHeightExpanded;
}
}
ViewGroup.LayoutParams params = vv.getLayoutParams();
params.height = height;
vv.setLayoutParams(params);
vv.setVisibility(visibility);
}
}
@Override
public boolean onTouchEvent(MotionEvent ev) {
if (mGestureDetector != null) {
mGestureDetector.onTouchEvent(ev);
}
if ((mDragListener != null || mDropListener != null) && mDragView != null) {
int action = ev.getAction();
switch (action) {
case MotionEvent.ACTION_UP:
case MotionEvent.ACTION_CANCEL:
Rect r = mTempRect;
mDragView.getDrawingRect(r);
stopDragging();
if (mRemoveMode == SLIDE && ev.getX() > r.right * 3 / 4) {
if (mRemoveListener != null) {
mRemoveListener.remove(mSrcDragPos);
}
unExpandViews(true);
} else {
if (mDropListener != null && mDragPos >= 0 && mDragPos < getCount()) {
mDropListener.drop(mSrcDragPos, mDragPos);
}
unExpandViews(false);
}
break;
case MotionEvent.ACTION_DOWN:
case MotionEvent.ACTION_MOVE:
int x = (int) ev.getX();
int y = (int) ev.getY();
dragView(x, y);
int itemnum = getItemForPosition(y);
if (itemnum >= 0) {
if (action == MotionEvent.ACTION_DOWN || itemnum != mDragPos) {
if (mDragListener != null) {
mDragListener.drag(mDragPos, itemnum);
}
mDragPos = itemnum;
doExpansion();
}
int speed = 0;
adjustScrollBounds(y);
if (y > mLowerBound) {
// scroll the list up a bit
if (getLastVisiblePosition() < getCount() - 1) {
speed = y > (mHeight + mLowerBound) / 2 ? 16 : 4;
} else {
speed = 1;
}
} else if (y < mUpperBound) {
// scroll the list down a bit
speed = y < mUpperBound / 2 ? -16 : -4;
if (getFirstVisiblePosition() == 0
&& getChildAt(0).getTop() >= getPaddingTop()) {
// if we're already at the top, don't try to scroll, because
// it causes the framework to do some extra drawing that messes
// up our animation
speed = 0;
}
}
if (speed != 0) {
smoothScrollBy(speed, 30);
}
}
break;
}
return true;
}
return super.onTouchEvent(ev);
}
private void startDragging(Bitmap bm, int x, int y) {
stopDragging();
mWindowParams = new WindowManager.LayoutParams();
mWindowParams.gravity = Gravity.TOP | Gravity.LEFT;
mWindowParams.x = x - mDragPointX + mXOffset;
mWindowParams.y = y - mDragPointY + mYOffset;
mWindowParams.height = WindowManager.LayoutParams.WRAP_CONTENT;
mWindowParams.width = WindowManager.LayoutParams.WRAP_CONTENT;
mWindowParams.flags = WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE
| WindowManager.LayoutParams.FLAG_NOT_TOUCHABLE
| WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON
| WindowManager.LayoutParams.FLAG_LAYOUT_IN_SCREEN
| WindowManager.LayoutParams.FLAG_LAYOUT_NO_LIMITS;
mWindowParams.format = PixelFormat.TRANSLUCENT;
mWindowParams.windowAnimations = 0;
Context context = getContext();
ImageView v = new ImageView(context);
//int backGroundColor = context.getResources().getColor(R.color.dragndrop_background);
//v.setBackgroundColor(backGroundColor);
v.setBackgroundResource(R.drawable.playlist_tile_drag);
v.setPadding(0, 0, 0, 0);
v.setImageBitmap(bm);
mDragBitmap = bm;
mWindowManager = (WindowManager)context.getSystemService(Context.WINDOW_SERVICE);
mWindowManager.addView(v, mWindowParams);
mDragView = v;
}
private void dragView(int x, int y) {
if (mRemoveMode == SLIDE) {
float alpha = 1.0f;
int width = mDragView.getWidth();
if (x > width / 2) {
alpha = ((float)(width - x)) / (width / 2);
}
mWindowParams.alpha = alpha;
}
if (mRemoveMode == FLING || mRemoveMode == TRASH) {
mWindowParams.x = x - mDragPointX + mXOffset;
} else {
mWindowParams.x = 0;
}
mWindowParams.y = y - mDragPointY + mYOffset;
mWindowManager.updateViewLayout(mDragView, mWindowParams);
if (mTrashcan != null) {
int width = mDragView.getWidth();
if (y > getHeight() * 3 / 4) {
mTrashcan.setLevel(2);
} else if (width > 0 && x > width / 4) {
mTrashcan.setLevel(1);
} else {
mTrashcan.setLevel(0);
}
}
}
private void stopDragging() {
if (mDragView != null) {
mDragView.setVisibility(GONE);
WindowManager wm = (WindowManager)getContext().getSystemService(Context.WINDOW_SERVICE);
wm.removeView(mDragView);
mDragView.setImageDrawable(null);
mDragView = null;
}
if (mDragBitmap != null) {
mDragBitmap.recycle();
mDragBitmap = null;
}
if (mTrashcan != null) {
mTrashcan.setLevel(0);
}
/* SPRD 476972 @{ */
if (mView != null) {
ViewGroup.LayoutParams params = mView.getLayoutParams();
params.height = mItemHeightNormal;
mView.setLayoutParams(params);
mView.setVisibility(View.VISIBLE);
mView.requestLayout();
}
/* @} */
}
public void setTrashcan(Drawable trash) {
mTrashcan = trash;
mRemoveMode = TRASH;
}
public void setDragListener(DragListener l) {
mDragListener = l;
}
public void setDropListener(DropListener l) {
mDropListener = l;
}
public void setRemoveListener(RemoveListener l) {
mRemoveListener = l;
}
public interface DragListener {
void drag(int from, int to);
}
public interface DropListener {
void drop(int from, int to);
}
public interface RemoveListener {
void remove(int which);
}
}
| apache-2.0 |
loolooyyyy/netty-functional | src/main/java/io/koosha/nettyfunctional/log/Slf4jNettyFuncLogger.java | 452 | package io.koosha.nettyfunctional.log;
public final class Slf4jNettyFuncLogger implements NettyFuncLogger {
private final org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(Slf4jNettyFuncLogger.class);
@Override
public void error(String msg, Throwable t) {
log.error(msg, SerrLogger.causeMsg(t));
}
@Override
public void warn(String msg, Throwable t) {
log.warn(msg, SerrLogger.causeMsg(t));
}
}
| apache-2.0 |
DWCS2333/coolweather | app/src/main/java/com/example/wow/coolweather/util/Utility.java | 3549 | package com.example.wow.coolweather.util;
import android.text.TextUtils;
import android.util.Log;
import com.example.wow.coolweather.db.City;
import com.example.wow.coolweather.db.County;
import com.example.wow.coolweather.db.Province;
import com.example.wow.coolweather.gson.Weather;
import com.google.gson.Gson;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/**
* Created by wow on 2017/7/30.
*/
public class Utility {
/**
* Parse and handle the province-level data returned by the server
*/
public static boolean handleProvinceResponse(String response){
if (!TextUtils.isEmpty(response)){
try{
JSONArray allProvinces = new JSONArray(response);
for (int i = 0;i < allProvinces.length();i++){
JSONObject provinceObject = allProvinces.getJSONObject(i);
Province province = new Province();
province.setProvinceName(provinceObject.getString("name"));
province.setProvinceCode(provinceObject.getInt("id"));
province.save();
}
return true;
}catch (JSONException e){
e.printStackTrace();
}
}
return false;
}
/**
* Parse and handle the city-level data returned by the server
*/
public static boolean handleCityResponse(String response,int provinceId){
if (!TextUtils.isEmpty(response)) {
try {
JSONArray allCities = new JSONArray(response);
for (int i = 0; i < allCities.length(); i++) {
JSONObject cityObject = allCities.getJSONObject(i);
City city = new City();
city.setCityName(cityObject.getString("name"));
city.setCityCode(cityObject.getInt("id"));
city.setProvinceId(provinceId);
city.save();
}
return true;
} catch (JSONException e) {
e.printStackTrace();
}
}
return false;
}
/**
* Parse and handle the county-level data returned by the server
*/
public static boolean handleCountyResponse(String response,int cityId){
if (!TextUtils.isEmpty(response)) {
try {
JSONArray allCounties = new JSONArray(response);
for (int i = 0; i < allCounties.length(); i++) {
JSONObject countyObject = allCounties.getJSONObject(i);
County county = new County();
county.setCountyName(countyObject.getString("name"));
county.setWeatherId(countyObject.getString("weather_id"));
county.setCityId(cityId);
county.save();
}
return true;
} catch (JSONException e) {
e.printStackTrace();
}
}
return false;
}
/**
* Parse the returned JSON data into a Weather entity class
*/
public static Weather handleWeatherResponse(String response){
try{
JSONObject jsonObject = new JSONObject(response);
JSONArray jsonArray = jsonObject.getJSONArray("HeWeather");
String weatherContent = jsonArray.getJSONObject(0).toString();
return new Gson().fromJson(weatherContent,Weather.class);
}catch (JSONException e){
e.printStackTrace();
}
return null;
}
}
| apache-2.0 |
leborety/CJia | CJia.EvaluationProject/CJia.Evaluation.Web/Backstage/AddDept.aspx.cs | 2348 | using System;
using System.Collections.Generic;
using System.Data;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using ExtAspNet;
namespace CJia.Evaluation.Web.Backstage
{
public partial class AddDept : CJia.Evaluation.Tools.Page,CJia.Evaluation.Views.Backstage.IAddDept
{
CJia.Evaluation.Views.Backstage.AddDeptArgs addDeptArgs = new Views.Backstage.AddDeptArgs();
protected void Page_Load(object sender, EventArgs e)
{
if (!IsPostBack)
{
OnQueryAllDept(null, null);
}
}
protected override object CreatePresenter()
{
return new Presenters.Backstage.AddDeptPresenter(this);
}
#region Event handler methods
protected void btn_Save_Click(object sender, EventArgs e)
{
DataTable dtUser = Session["User"] as DataTable;
addDeptArgs.DeptName = txt_Dept_Name.Text;
addDeptArgs.ParentId = ddl_Parent_Dept.SelectedValue;
addDeptArgs.UserId = dtUser.Rows[0]["USER_ID"].ToString();
OnInsertDept(null, addDeptArgs);
}
protected void btn_Cancle_Click(object sender, EventArgs e)
{
PageContext.RegisterStartupScript(ActiveWindow.GetHidePostBackReference());
}
#endregion
#region Interface implementation
public event EventHandler<Views.Backstage.AddDeptArgs> OnQueryAllDept;
public event EventHandler<Views.Backstage.AddDeptArgs> OnInsertDept;
public void ExeReturnAddDeptMsg(bool isInsert)
{
if (!isInsert)
{
Session["IsDeptUpdate"] = "1";
ExtAspNet.Alert.ShowInTop("添加失败!", ExtAspNet.MessageBoxIcon.Error);
}
else
{
Session["IsDeptUpdate"] = "1";
PageContext.RegisterStartupScript(ActiveWindow.GetHidePostBackReference());
}
}
public void ExeBindDept(System.Data.DataTable dtDept)
{
ddl_Parent_Dept.DataValueField = "DEPT_ID";
ddl_Parent_Dept.DataTextField = "DEPT_NAME";
ddl_Parent_Dept.DataSource = dtDept;
ddl_Parent_Dept.DataBind();
}
#endregion
}
} | apache-2.0 |
ExpediaDotCom/haystack-ui | server/connectors/serviceGraph/zipkin/serviceGraphConnector.js | 1509 | /*
* Copyright 2018 Expedia Group
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const Q = require('q');
const fetcher = require('../../operations/restFetcher');
const config = require('../../../config/config');
const converter = require('./converter');
const extractor = require('../haystack/graphDataExtractor');
const dependenciesFetcher = fetcher('getDependencies');
const connector = {};
const baseZipkinUrl = config.connectors.serviceGraph.zipkinUrl;
function fetchServiceGraph(from, to) {
const endTs = parseInt(to, 10);
const lookback = endTs - parseInt(from, 10);
return dependenciesFetcher
.fetch(`${baseZipkinUrl}/dependencies?endTs=${endTs}&lookback=${lookback}`)
.then(data => extractor.extractGraphFromEdges(converter.toHaystackServiceEdges(data)));
}
connector.getServiceGraphForTimeLine = (from, to) => Q.fcall(() => fetchServiceGraph(from, to));
module.exports = connector;
| apache-2.0 |
Just4pLeisure/T5SuiteII | Trionic5Controls/frmFirmwareSettings.Designer.cs | 3914 | namespace Trionic5Controls
{
partial class frmFirmwareSettings
{
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.IContainer components = null;
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
private void InitializeComponent()
{
this.propertyGrid1 = new System.Windows.Forms.PropertyGrid();
this.simpleButton1 = new DevExpress.XtraEditors.SimpleButton();
this.simpleButton2 = new DevExpress.XtraEditors.SimpleButton();
this.SuspendLayout();
//
// propertyGrid1
//
this.propertyGrid1.Anchor = ((System.Windows.Forms.AnchorStyles)((((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Bottom)
| System.Windows.Forms.AnchorStyles.Left)
| System.Windows.Forms.AnchorStyles.Right)));
this.propertyGrid1.Location = new System.Drawing.Point(12, 12);
this.propertyGrid1.Name = "propertyGrid1";
this.propertyGrid1.Size = new System.Drawing.Size(687, 470);
this.propertyGrid1.TabIndex = 0;
//
// simpleButton1
//
this.simpleButton1.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
this.simpleButton1.Location = new System.Drawing.Point(624, 488);
this.simpleButton1.Name = "simpleButton1";
this.simpleButton1.Size = new System.Drawing.Size(75, 23);
this.simpleButton1.TabIndex = 1;
this.simpleButton1.Text = "Ok";
this.simpleButton1.Click += new System.EventHandler(this.simpleButton1_Click);
//
// simpleButton2
//
this.simpleButton2.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Right)));
this.simpleButton2.Location = new System.Drawing.Point(543, 488);
this.simpleButton2.Name = "simpleButton2";
this.simpleButton2.Size = new System.Drawing.Size(75, 23);
this.simpleButton2.TabIndex = 2;
this.simpleButton2.Text = "Cancel";
this.simpleButton2.Click += new System.EventHandler(this.simpleButton2_Click);
//
// frmFirmwareSettings
//
this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
this.ClientSize = new System.Drawing.Size(711, 523);
this.Controls.Add(this.simpleButton2);
this.Controls.Add(this.simpleButton1);
this.Controls.Add(this.propertyGrid1);
this.Name = "frmFirmwareSettings";
this.StartPosition = System.Windows.Forms.FormStartPosition.CenterScreen;
this.Text = "Trionic options";
this.ResumeLayout(false);
}
#endregion
private System.Windows.Forms.PropertyGrid propertyGrid1;
private DevExpress.XtraEditors.SimpleButton simpleButton1;
private DevExpress.XtraEditors.SimpleButton simpleButton2;
}
} | apache-2.0 |
Kerbores/blog | blog/src/main/java/com/kerbores/utils/file/FileUtils.java | 4817 | package com.kerbores.utils.file;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import org.nutz.lang.Files;
import org.nutz.lang.Strings;
import org.nutz.log.Log;
import org.nutz.log.Logs;
/**
* @author 贵源 <br>
* created at 2014-05-10
*/
public class FileUtils {
private static final Log log = Logs.get();
/**
* Clean out SVN (.svn) folders
*
* @param dir
* the directory to clean
* @return flag indicating whether the cleanup succeeded
*/
public static boolean cleanSvn(File dir) {
try {
Files.cleanAllFolderInSubFolderes(dir, ".svn");
} catch (IOException e) {
log.error(e.getMessage());
return false;
}
return true;
}
/**
* Count the lines of Java code in a file or directory
*
* @param file
* the file or directory
* @return number of Java code lines
*/
public static long countJAVACodeLines(File file) {
return countLines(file, ".java");
}
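// Usage sketch (illustrative; the "src" path is an assumption, not from the original code):
// long javaLines = FileUtils.countJAVACodeLines(new File("src"));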
/**
* Count the lines of files with the given extension in a file or directory
*
* @param file
* the file or directory
* @param suf
* the file extension
* @return number of lines
*/
public static long countLines(File file, String suf) {
long target = 0;
if (file.isFile() && file.getName().endsWith(suf)) {
return countLine(file);
} else if (file.isFile()) {
return 0;
} else if (file.isDirectory()) {
File[] files = file.listFiles();
for (File f : files) {
target += countJAVACodeLines(f);
}
}
return target;
}
/**
* Count the number of lines in a file
*
* @param file
* the file (not a directory)
* @return number of lines
*/
public static long countLine(File file) {
long target = 0;
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(
new FileInputStream(file)));
if (reader != null) {
while (reader.readLine() != null) {
target++;
}
}
} catch (FileNotFoundException e) {
log.error(e.getMessage());
} catch (IOException e) {
log.error(e.getMessage());
} finally {
try {
reader.close();
} catch (IOException e) {
log.error(e.getMessage());
}
}
return target;
}
// /**
// * Get the APK version information
// *
// * @param filePath
// * the file path
// * @return the version information embedded in the APK
// */
// public static String getApkVersionInfo(String filePath) {
// try {
// return GetApkInfo.getApkInfoByFilePath(filePath).getVersionName();
// } catch (IOException e) {
// log.error(e.getMessage());
// return null;
// }
// }
/**
* Basic implementation of file search
*
* @param dir
* the directory to start searching from
* @param name
* the name of the file to find
* @return the file
*/
public static File findFile(File dir, String name) {
File target = null;
File[] files = dir.listFiles();
if (files != null) {
for (File file : files) {
if (file.isFile() && Strings.equals(file.getName(), name)) {
return file;
} else if (file.isDirectory()) {
target = findFile(file, name);
if (target != null) {
return target;
}
}
}
}
return target;
}
/**
* Fast search
*
* @param dir
* the base directory
* @param name
* the name of the file to find
* @return the file
*/
public static File fastFindFile(File dir, String name) {
return fastFindFile(dir, name, 1);
}
public static File fastFindFile(String dir, String name) {
return fastFindFile(new File(dir), name, 1);
}
/**
* Fast search
*
* @param dir
* the base directory
* @param name
* the name of the file to find
* @return the file
*/
public static File fastFindFileLikeName(File dir, String name) {
return fastFindFile(dir, name, 2);
}
/**
* Fast file search
*
* @param dir
* the base directory
* @param name
* the file name
* @param method
* search method: 1 = exact match, 2 = fuzzy (suffix) match, 3 = case-insensitive exact match, 4 = case-insensitive fuzzy match
* @return the file
*/
public static File fastFindFile(File dir, String name, int method) {
File target = null;
File[] dirs = Files.dirs(dir);// get directories
File[] files = Files.files(dir, name);// get files
// scan files first
if (files != null) {
for (File file : files) {
if (method == 1 ? Strings.equals(file.getName(), name)
: method == 2 ? file.getName().endsWith(name)
: method == 3 ? Strings.equals(file.getName()
.toUpperCase(), name.toUpperCase())
: file.getName().toUpperCase()
.endsWith(name.toUpperCase())) {
return file;
}
}
}
// then scan directories
if (dirs != null) {
for (File file : dirs) {
target = findFile(file, name);
if (target != null) {
return target;
}
}
}
return target;
}
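// Usage sketch for the method codes above (illustrative; directory and file names are assumptions):
// File exact = FileUtils.fastFindFile(new File("/opt/project"), "pom.xml", 1); // 1 = exact name match
// File fuzzy = FileUtils.fastFindFile(new File("/opt/project"), ".properties", 2); // 2 = suffix (fuzzy) match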
}
| apache-2.0 |
aws/aws-sdk-java | aws-java-sdk-kinesisanalyticsv2/src/main/java/com/amazonaws/services/kinesisanalyticsv2/model/CreateApplicationPresignedUrlResult.java | 3896 | /*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.kinesisanalyticsv2.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/CreateApplicationPresignedUrl"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CreateApplicationPresignedUrlResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable,
Cloneable {
/**
* <p>
* The URL of the extension.
* </p>
*/
private String authorizedUrl;
/**
* <p>
* The URL of the extension.
* </p>
*
* @param authorizedUrl
* The URL of the extension.
*/
public void setAuthorizedUrl(String authorizedUrl) {
this.authorizedUrl = authorizedUrl;
}
/**
* <p>
* The URL of the extension.
* </p>
*
* @return The URL of the extension.
*/
public String getAuthorizedUrl() {
return this.authorizedUrl;
}
/**
* <p>
* The URL of the extension.
* </p>
*
* @param authorizedUrl
* The URL of the extension.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CreateApplicationPresignedUrlResult withAuthorizedUrl(String authorizedUrl) {
setAuthorizedUrl(authorizedUrl);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getAuthorizedUrl() != null)
sb.append("AuthorizedUrl: ").append(getAuthorizedUrl());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CreateApplicationPresignedUrlResult == false)
return false;
CreateApplicationPresignedUrlResult other = (CreateApplicationPresignedUrlResult) obj;
if (other.getAuthorizedUrl() == null ^ this.getAuthorizedUrl() == null)
return false;
if (other.getAuthorizedUrl() != null && other.getAuthorizedUrl().equals(this.getAuthorizedUrl()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getAuthorizedUrl() == null) ? 0 : getAuthorizedUrl().hashCode());
return hashCode;
}
@Override
public CreateApplicationPresignedUrlResult clone() {
try {
return (CreateApplicationPresignedUrlResult) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
}
| apache-2.0 |
ralphbean/moksha | moksha/tests/test_feed.py | 5059 | # This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tg import config
from paste.deploy.converters import asbool
import tw.api
import tw2.core as twc
from moksha.api.widgets.feed import Feed
# Monkey-patch moksha.utils.feed_cache so we don't have to actually
# fetch any feeds to run them
import moksha
import moksha.utils
class FakeCache(object):
def fetch(self, url):
from bunch import Bunch
feed = Bunch()
feed.link = 'http://lewk.org/rss'
feed.title = 'l e w k . o r g'
feed.status = 200
feed.feed = feed
feed.entries = [feed, feed]
def get(key, *args, **kw):
return getattr(feed, key, '')
feed.get = get
return feed
moksha.utils.feed_cache = FakeCache()
class TestFeed(object):
def test_feed_subclassing(self):
""" Ensure that we can easily subclass our Feed widget """
moksha.utils.feed_cache = FakeCache()
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.get_entries():
pass
def test_widget_children(self):
""" Ensure that we can easily set Feeds as ToscaWidget children """
moksha.utils.feed_cache = FakeCache()
if asbool(config.get('moksha.use_tw2', False)):
class MyWidget(twc.Widget):
myfeedurl = 'http://lewk.org/rss'
myfeed = Feed(url=myfeedurl)
template = "mako:moksha.tests.templates.myfeed"
widget = MyWidget
assert len(widget.children) > 0
else:
class MyWidget(tw.api.Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
engine_name = 'mako'
template = "${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
rendered = widget.display()
print rendered
assert '<div id="myfeed"' in rendered
def test_widget_child_with_dynamic_url(self):
moksha.utils.feed_cache = FakeCache()
if asbool(config.get('moksha.use_tw2', False)):
class MyWidget(twc.Widget):
url = twc.Param("a url")
feed = Feed
template = "mako:moksha.tests.templates.dynfeed"
else:
class MyWidget(tw.api.Widget):
params = ['url']
children = [Feed('feed')]
template = "mako:moksha.tests.templates.dynfeed"
widget = MyWidget()
rendered = widget.display(url='http://lewk.org/rss')
assert '<div id="feed"' in rendered
def test_genshi_widget(self):
""" Ensure that our Feed widget can be rendered in a Genshi widget """
moksha.utils.feed_cache = FakeCache()
if asbool(config.get('moksha.use_tw2', False)):
class MyWidget(twc.Widget):
myfeed = Feed(url='http://lewk.org/rss')
template = "genshi:moksha.tests.templates.myfeed"
else:
class MyWidget(tw.api.Widget):
children = [Feed('myfeed', url='http://lewk.org/rss')]
engine_name = 'genshi'
template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude">
${c.myfeed()}
</html>
"""
widget = MyWidget()
rendered = widget.display()
assert '<div id="myfeed"' in rendered
def test_feed_generator(self):
""" Ensure that our Feed object can return a generator """
moksha.utils.feed_cache = FakeCache()
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
def test_feed_render_url(self):
""" Ensure that a generic feed can be rendered with a url """
moksha.utils.feed_cache = FakeCache()
feed = Feed()
rendered = feed.display(url='http://lewk.org/rss')
assert 'l e w k . o r g' in rendered, rendered
| apache-2.0 |
jbousquie/Babylon.js | src/Physics/Plugins/oimoJSPlugin.ts | 20488 | import { IPhysicsEnginePlugin, PhysicsImpostorJoint } from "../../Physics/IPhysicsEngine";
import { PhysicsImpostor, IPhysicsEnabledObject } from "../../Physics/physicsImpostor";
import { PhysicsJoint, IMotorEnabledJoint, DistanceJointData, SpringJointData } from "../../Physics/physicsJoint";
import { PhysicsEngine } from "../../Physics/physicsEngine";
import { AbstractMesh } from "../../Meshes/abstractMesh";
import { Vector3, Quaternion } from "../../Maths/math.vector";
import { Nullable } from "../../types";
import { Logger } from "../../Misc/logger";
import { PhysicsRaycastResult } from "../physicsRaycastResult";
declare var OIMO: any;
/** @hidden */
export class OimoJSPlugin implements IPhysicsEnginePlugin {
public world: any;
public name: string = "OimoJSPlugin";
public BJSOIMO: any;
private _raycastResult: PhysicsRaycastResult;
private _fixedTimeStep: number = 1 / 60;
constructor(private _useDeltaForWorldStep: boolean = true, iterations?: number, oimoInjection = OIMO) {
this.BJSOIMO = oimoInjection;
this.world = new this.BJSOIMO.World({
iterations: iterations
});
this.world.clear();
this._raycastResult = new PhysicsRaycastResult();
}
public setGravity(gravity: Vector3) {
this.world.gravity.set(gravity.x, gravity.y, gravity.z);
}
public setTimeStep(timeStep: number) {
this.world.timeStep = timeStep;
}
public getTimeStep(): number {
return this.world.timeStep;
}
private _tmpImpostorsArray: Array<PhysicsImpostor> = [];
public executeStep(delta: number, impostors: Array<PhysicsImpostor>) {
impostors.forEach(function(impostor) {
impostor.beforeStep();
});
this.world.timeStep = this._useDeltaForWorldStep ? delta : this._fixedTimeStep;
this.world.step();
impostors.forEach((impostor) => {
impostor.afterStep();
//update the ordered impostors array
this._tmpImpostorsArray[impostor.uniqueId] = impostor;
});
//check for collisions
var contact = this.world.contacts;
while (contact !== null) {
if (contact.touching && !contact.body1.sleeping && !contact.body2.sleeping) {
contact = contact.next;
continue;
}
//is this body colliding with any other? get the impostor
var mainImpostor = this._tmpImpostorsArray[+contact.body1.name];
var collidingImpostor = this._tmpImpostorsArray[+contact.body2.name];
if (!mainImpostor || !collidingImpostor) {
contact = contact.next;
continue;
}
mainImpostor.onCollide({ body: collidingImpostor.physicsBody });
collidingImpostor.onCollide({ body: mainImpostor.physicsBody });
contact = contact.next;
}
}
public applyImpulse(impostor: PhysicsImpostor, force: Vector3, contactPoint: Vector3) {
var mass = impostor.physicsBody.mass;
impostor.physicsBody.applyImpulse(contactPoint.scale(this.world.invScale), force.scale(this.world.invScale * mass));
}
public applyForce(impostor: PhysicsImpostor, force: Vector3, contactPoint: Vector3) {
Logger.Warn("Oimo doesn't support applying force. Using impule instead.");
this.applyImpulse(impostor, force, contactPoint);
}
public generatePhysicsBody(impostor: PhysicsImpostor) {
        //parent-child relationship. Does this impostor have a parent impostor?
if (impostor.parent) {
if (impostor.physicsBody) {
this.removePhysicsBody(impostor);
//TODO is that needed?
impostor.forceUpdate();
}
return;
}
if (impostor.isBodyInitRequired()) {
var bodyConfig: any = {
name: impostor.uniqueId,
//Oimo must have mass, also for static objects.
config: [impostor.getParam("mass") || 0.001, impostor.getParam("friction"), impostor.getParam("restitution")],
size: [],
type: [],
pos: [],
posShape: [],
rot: [],
rotShape: [],
move: impostor.getParam("mass") !== 0,
density: impostor.getParam("mass"),
friction: impostor.getParam("friction"),
restitution: impostor.getParam("restitution"),
//Supporting older versions of Oimo
world: this.world
};
var impostors = [impostor];
let addToArray = (parent: IPhysicsEnabledObject) => {
if (!parent.getChildMeshes) { return; }
parent.getChildMeshes().forEach(function(m) {
if (m.physicsImpostor) {
impostors.push(m.physicsImpostor);
//m.physicsImpostor._init();
}
});
};
addToArray(impostor.object);
let checkWithEpsilon = (value: number): number => {
return Math.max(value, PhysicsEngine.Epsilon);
};
let globalQuaternion: Quaternion = new Quaternion();
impostors.forEach((i) => {
if (!i.object.rotationQuaternion) {
return;
}
//get the correct bounding box
var oldQuaternion = i.object.rotationQuaternion;
globalQuaternion = oldQuaternion.clone();
i.object.rotationQuaternion.set(0, 0, 0, 1);
i.object.computeWorldMatrix(true);
var rot = oldQuaternion.toEulerAngles();
var extendSize = i.getObjectExtendSize();
const radToDeg = 57.295779513082320876;
if (i === impostor) {
var center = impostor.getObjectCenter();
impostor.object.getAbsolutePivotPoint().subtractToRef(center, this._tmpPositionVector);
this._tmpPositionVector.divideInPlace(impostor.object.scaling);
//Can also use Array.prototype.push.apply
bodyConfig.pos.push(center.x);
bodyConfig.pos.push(center.y);
bodyConfig.pos.push(center.z);
bodyConfig.posShape.push(0, 0, 0);
bodyConfig.rotShape.push(0, 0, 0);
} else {
let localPosition = i.object.position.clone();
bodyConfig.posShape.push(localPosition.x);
bodyConfig.posShape.push(localPosition.y);
bodyConfig.posShape.push(localPosition.z);
// bodyConfig.pos.push(0, 0, 0);
bodyConfig.rotShape.push(rot.x * radToDeg);
bodyConfig.rotShape.push(rot.y * radToDeg);
bodyConfig.rotShape.push(rot.z * radToDeg);
}
i.object.rotationQuaternion.copyFrom(globalQuaternion);
// register mesh
switch (i.type) {
case PhysicsImpostor.ParticleImpostor:
Logger.Warn("No Particle support in OIMO.js. using SphereImpostor instead");
case PhysicsImpostor.SphereImpostor:
var radiusX = extendSize.x;
var radiusY = extendSize.y;
var radiusZ = extendSize.z;
var size = Math.max(
checkWithEpsilon(radiusX),
checkWithEpsilon(radiusY),
checkWithEpsilon(radiusZ)) / 2;
bodyConfig.type.push('sphere');
//due to the way oimo works with compounds, add 3 times
bodyConfig.size.push(size);
bodyConfig.size.push(size);
bodyConfig.size.push(size);
break;
case PhysicsImpostor.CylinderImpostor:
var sizeX = checkWithEpsilon(extendSize.x) / 2;
var sizeY = checkWithEpsilon(extendSize.y);
bodyConfig.type.push('cylinder');
bodyConfig.size.push(sizeX);
bodyConfig.size.push(sizeY);
//due to the way oimo works with compounds, add one more value.
bodyConfig.size.push(sizeY);
break;
case PhysicsImpostor.PlaneImpostor:
case PhysicsImpostor.BoxImpostor:
default:
var sizeX = checkWithEpsilon(extendSize.x);
var sizeY = checkWithEpsilon(extendSize.y);
var sizeZ = checkWithEpsilon(extendSize.z);
bodyConfig.type.push('box');
//if (i === impostor) {
bodyConfig.size.push(sizeX);
bodyConfig.size.push(sizeY);
bodyConfig.size.push(sizeZ);
//} else {
// bodyConfig.size.push(0,0,0);
//}
break;
}
//actually not needed, but hey...
i.object.rotationQuaternion = oldQuaternion;
});
impostor.physicsBody = this.world.add(bodyConfig);
// set the quaternion, ignoring the previously defined (euler) rotation
impostor.physicsBody.resetQuaternion(globalQuaternion);
            // update with delta 0, so the body will receive the new rotation.
impostor.physicsBody.updatePosition(0);
} else {
this._tmpPositionVector.copyFromFloats(0, 0, 0);
}
impostor.setDeltaPosition(this._tmpPositionVector);
//this._tmpPositionVector.addInPlace(impostor.mesh.getBoundingInfo().boundingBox.center);
//this.setPhysicsBodyTransformation(impostor, this._tmpPositionVector, impostor.mesh.rotationQuaternion);
}
private _tmpPositionVector: Vector3 = Vector3.Zero();
public removePhysicsBody(impostor: PhysicsImpostor) {
//impostor.physicsBody.dispose();
//Same as : (older oimo versions)
this.world.removeRigidBody(impostor.physicsBody);
}
public generateJoint(impostorJoint: PhysicsImpostorJoint) {
var mainBody = impostorJoint.mainImpostor.physicsBody;
var connectedBody = impostorJoint.connectedImpostor.physicsBody;
if (!mainBody || !connectedBody) {
return;
}
var jointData = impostorJoint.joint.jointData;
var options = jointData.nativeParams || {};
var type;
var nativeJointData: any = {
body1: mainBody,
body2: connectedBody,
axe1: options.axe1 || (jointData.mainAxis ? jointData.mainAxis.asArray() : null),
axe2: options.axe2 || (jointData.connectedAxis ? jointData.connectedAxis.asArray() : null),
pos1: options.pos1 || (jointData.mainPivot ? jointData.mainPivot.asArray() : null),
pos2: options.pos2 || (jointData.connectedPivot ? jointData.connectedPivot.asArray() : null),
min: options.min,
max: options.max,
collision: options.collision || jointData.collision,
spring: options.spring,
//supporting older version of Oimo
world: this.world
};
switch (impostorJoint.joint.type) {
case PhysicsJoint.BallAndSocketJoint:
type = "jointBall";
break;
case PhysicsJoint.SpringJoint:
Logger.Warn("OIMO.js doesn't support Spring Constraint. Simulating using DistanceJoint instead");
var springData = <SpringJointData>jointData;
nativeJointData.min = springData.length || nativeJointData.min;
//Max should also be set, just make sure it is at least min
nativeJointData.max = Math.max(nativeJointData.min, nativeJointData.max);
case PhysicsJoint.DistanceJoint:
type = "jointDistance";
nativeJointData.max = (<DistanceJointData>jointData).maxDistance;
break;
case PhysicsJoint.PrismaticJoint:
type = "jointPrisme";
break;
case PhysicsJoint.SliderJoint:
type = "jointSlide";
break;
case PhysicsJoint.WheelJoint:
type = "jointWheel";
break;
case PhysicsJoint.HingeJoint:
default:
type = "jointHinge";
break;
}
nativeJointData.type = type;
impostorJoint.joint.physicsJoint = this.world.add(nativeJointData);
}
public removeJoint(impostorJoint: PhysicsImpostorJoint) {
//Bug in Oimo prevents us from disposing a joint in the playground
//joint.joint.physicsJoint.dispose();
//So we will bruteforce it!
try {
this.world.removeJoint(impostorJoint.joint.physicsJoint);
} catch (e) {
Logger.Warn(e);
}
}
public isSupported(): boolean {
return this.BJSOIMO !== undefined;
}
public setTransformationFromPhysicsBody(impostor: PhysicsImpostor) {
if (!impostor.physicsBody.sleeping) {
if (impostor.physicsBody.shapes.next) {
let parent = impostor.physicsBody.shapes;
while (parent.next) {
parent = parent.next;
}
impostor.object.position.set(parent.position.x, parent.position.y, parent.position.z);
} else {
const pos = impostor.physicsBody.getPosition();
impostor.object.position.set(pos.x, pos.y, pos.z);
}
//}
if (impostor.object.rotationQuaternion) {
const quat = impostor.physicsBody.getQuaternion();
impostor.object.rotationQuaternion.set(quat.x, quat.y, quat.z, quat.w);
}
}
}
public setPhysicsBodyTransformation(impostor: PhysicsImpostor, newPosition: Vector3, newRotation: Quaternion) {
var body = impostor.physicsBody;
// disable bidirectional for compound meshes
if (impostor.physicsBody.shapes.next) {
return;
}
body.position.set(newPosition.x, newPosition.y, newPosition.z);
body.orientation.set(newRotation.x, newRotation.y, newRotation.z, newRotation.w);
body.syncShapes();
body.awake();
}
/*private _getLastShape(body: any): any {
var lastShape = body.shapes;
while (lastShape.next) {
lastShape = lastShape.next;
}
return lastShape;
}*/
public setLinearVelocity(impostor: PhysicsImpostor, velocity: Vector3) {
impostor.physicsBody.linearVelocity.set(velocity.x, velocity.y, velocity.z);
}
public setAngularVelocity(impostor: PhysicsImpostor, velocity: Vector3) {
impostor.physicsBody.angularVelocity.set(velocity.x, velocity.y, velocity.z);
}
public getLinearVelocity(impostor: PhysicsImpostor): Nullable<Vector3> {
var v = impostor.physicsBody.linearVelocity;
if (!v) {
return null;
}
return new Vector3(v.x, v.y, v.z);
}
public getAngularVelocity(impostor: PhysicsImpostor): Nullable<Vector3> {
var v = impostor.physicsBody.angularVelocity;
if (!v) {
return null;
}
return new Vector3(v.x, v.y, v.z);
}
public setBodyMass(impostor: PhysicsImpostor, mass: number) {
var staticBody: boolean = mass === 0;
//this will actually set the body's density and not its mass.
//But this is how oimo treats the mass variable.
impostor.physicsBody.shapes.density = staticBody ? 1 : mass;
impostor.physicsBody.setupMass(staticBody ? 0x2 : 0x1);
}
public getBodyMass(impostor: PhysicsImpostor): number {
return impostor.physicsBody.shapes.density;
}
public getBodyFriction(impostor: PhysicsImpostor): number {
return impostor.physicsBody.shapes.friction;
}
public setBodyFriction(impostor: PhysicsImpostor, friction: number) {
impostor.physicsBody.shapes.friction = friction;
}
public getBodyRestitution(impostor: PhysicsImpostor): number {
return impostor.physicsBody.shapes.restitution;
}
public setBodyRestitution(impostor: PhysicsImpostor, restitution: number) {
impostor.physicsBody.shapes.restitution = restitution;
}
public sleepBody(impostor: PhysicsImpostor) {
impostor.physicsBody.sleep();
}
public wakeUpBody(impostor: PhysicsImpostor) {
impostor.physicsBody.awake();
}
public updateDistanceJoint(joint: PhysicsJoint, maxDistance: number, minDistance?: number) {
joint.physicsJoint.limitMotor.upperLimit = maxDistance;
if (minDistance !== void 0) {
joint.physicsJoint.limitMotor.lowerLimit = minDistance;
}
}
public setMotor(joint: IMotorEnabledJoint, speed: number, force?: number, motorIndex?: number) {
if (force !== undefined) {
Logger.Warn("OimoJS plugin currently has unexpected behavior when using setMotor with force parameter");
} else {
force = 1e6;
}
speed *= -1;
        //TODO separate rotational and translational motors.
var motor = motorIndex ? joint.physicsJoint.rotationalLimitMotor2 : joint.physicsJoint.rotationalLimitMotor1 || joint.physicsJoint.rotationalLimitMotor || joint.physicsJoint.limitMotor;
if (motor) {
motor.setMotor(speed, force);
}
}
public setLimit(joint: IMotorEnabledJoint, upperLimit: number, lowerLimit?: number, motorIndex?: number) {
        //TODO separate rotational and translational motors.
var motor = motorIndex ? joint.physicsJoint.rotationalLimitMotor2 : joint.physicsJoint.rotationalLimitMotor1 || joint.physicsJoint.rotationalLimitMotor || joint.physicsJoint.limitMotor;
if (motor) {
motor.setLimit(upperLimit, lowerLimit === void 0 ? -upperLimit : lowerLimit);
}
}
public syncMeshWithImpostor(mesh: AbstractMesh, impostor: PhysicsImpostor) {
var body = impostor.physicsBody;
mesh.position.x = body.position.x;
mesh.position.y = body.position.y;
mesh.position.z = body.position.z;
if (mesh.rotationQuaternion) {
mesh.rotationQuaternion.x = body.orientation.x;
mesh.rotationQuaternion.y = body.orientation.y;
mesh.rotationQuaternion.z = body.orientation.z;
mesh.rotationQuaternion.w = body.orientation.s;
}
}
public getRadius(impostor: PhysicsImpostor): number {
return impostor.physicsBody.shapes.radius;
}
public getBoxSizeToRef(impostor: PhysicsImpostor, result: Vector3): void {
var shape = impostor.physicsBody.shapes;
result.x = shape.halfWidth * 2;
result.y = shape.halfHeight * 2;
result.z = shape.halfDepth * 2;
}
public dispose() {
this.world.clear();
}
/**
* Does a raycast in the physics world
* @param from when should the ray start?
* @param to when should the ray end?
* @returns PhysicsRaycastResult
*/
public raycast(from: Vector3, to: Vector3): PhysicsRaycastResult {
Logger.Warn("raycast is not currently supported by the Oimo physics plugin");
this._raycastResult.reset(from, to);
return this._raycastResult;
}
}
| apache-2.0 |
Tgy31/PIMS | src/controller/InspectionsServlet.java | 16147 | package controller;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import org.json.JSONException;
import org.json.JSONObject;
import model.dao.InspectionDAO;
import model.dao.InspectionweekDAO;
import model.dao.InspectorDAO;
import model.dao.InspectorKeywordDAO;
import model.dao.ModuleDAO;
import model.dao.StudentDAO;
import model.dao.StudentKeywordDAO;
import model.entity.Inspection;
import model.entity.Inspectionweek;
import model.entity.Inspector;
import model.entity.Keyword;
import model.entity.Module;
import model.entity.Student;
import model.entity.User;
/**
* Servlet implementation class InspectionsServlet
*/
@WebServlet("/InspectionsServlet")
public class InspectionsServlet extends BootstrapServlet {
private static final long serialVersionUID = 1L;
private static final int minimumSuggestedInspectors = 3; // Minimum number of inspector returned by the suggestions
private static final int maximumSuggestedInspectors = 5; // Maximum number of inspector returned by the suggestions
/**
* @see HttpServlet#HttpServlet()
*/
public InspectionsServlet() {
super();
this.addJavascriptFile("moment.min.js");
this.addJavascriptFile("fullcalendar.min.js");
this.addJavascriptFile("knockout-3.2.0.js");
this.addJavascriptFile("inspection-calendar.js");
this.addJavascriptFile("inspection.js");
}
/**
* @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response)
*/
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
String contentType = request.getParameter("content");
if (contentType == null) {
contentType = "";
}
switch (contentType) {
case "json": {
this.doJSON(request, response);
break;
}
case "view": {
this.doView(request, response);
break;
}
default: {
this.doView(request, response);
}
}
}
/**
* @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response)
*/
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
String firstInspectorSlug = request.getParameter("firstInspector");
String secondInspectorSlug = request.getParameter("secondInspector");
int firstInspectorID = -1;
int secondInspectorID = -1;
try {
firstInspectorID = Integer.parseInt(firstInspectorSlug);
secondInspectorID = Integer.parseInt(secondInspectorSlug);
} catch (NumberFormatException e) {
response.setStatus(500);
PrintWriter out = response.getWriter();
out.println("Invalid users");
out.close();
e.printStackTrace();
}
Date start = null;
Date end = null;
try {
String sSlot = request.getParameter("slot");
System.out.println(sSlot);
JSONObject slot = new JSONObject(sSlot);
String sStart = slot.getString("start");
String sEnd = slot.getString("end");
start = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").parse(sStart);
end = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss").parse(sEnd);
} catch (JSONException e) {
response.setStatus(500);
PrintWriter out = response.getWriter();
out.println("Invalid data");
out.close();
e.printStackTrace();
} catch (ParseException e) {
response.setStatus(500);
PrintWriter out = response.getWriter();
out.println("Invalid time");
out.close();
e.printStackTrace();
}
int inspectionWeekID = -1;
int studentID = -1;
try {
inspectionWeekID = Integer.parseInt(this.getObjectSlug(request));
studentID = Integer.parseInt(this.getSecondObjectSlug(request));
} catch (NumberFormatException e) {
response.setStatus(500);
PrintWriter out = response.getWriter();
out.println("Invalid inspection");
out.close();
e.printStackTrace();
}
InspectionDAO inspectionDAO = new InspectionDAO();
Inspection inspection = inspectionDAO.findByStudentAndInspectionWeek(studentID, inspectionWeekID);
if (inspection == null) {
inspection = new Inspection();
inspection.setStudent_id(studentID);
inspection.setInspectionweek_id(inspectionWeekID);
}
inspection.setFirst_inspector_id(firstInspectorID);
inspection.setSecond_inspector_id(secondInspectorID);
inspection.setStart_date(start);
inspection.setEnd_date(end);
if (inspection.getInspection_id() > 0) {
System.out.println("Update "+inspection.getInspection_id());
inspectionDAO.update(inspection);
} else {
System.out.println("Save "+inspection.getInspection_id());
inspectionDAO.save(inspection);
}
}
private void doView(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
try {
int inspectionWeekID = Integer.parseInt(this.getObjectSlug(request));
int studentID = Integer.parseInt(this.getSecondObjectSlug(request));
InspectionDAO inspectionDAO = new InspectionDAO();
Inspection inspection = inspectionDAO.findByStudentAndInspectionWeek(studentID, inspectionWeekID);
if (inspection != null) {
this.proceedSingleInspection(inspection, request, response);
} else {
StudentDAO studentDAO = new StudentDAO();
Student student = studentDAO.findByStudentID(studentID);
if (student != null) { // create inspection if student ok
inspection = new Inspection();
inspection.setInspectionweek_id(inspectionWeekID);
inspection.setStudent_id(studentID);
this.proceedSingleInspection(inspection, request, response);
} else { // Inspection not found either
this.setAlertView(AlertType.AlertTypeDanger, "Inspection not found", request);
this.proceedSingleInspection(inspection, request, response);
}
}
} catch (NumberFormatException e) {
this.proceedInspectionList(request, response);
}
}
private void doJSON(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
String inspectionWeekSlug = this.getModuleSlug(request);
int inspectionWeekID = Integer.parseInt(inspectionWeekSlug);
InspectionweekDAO inspectionWeekDAO = new InspectionweekDAO();
Inspectionweek inspectionWeek = inspectionWeekDAO.findByID(inspectionWeekID);
request.setAttribute("inspectionWeek", inspectionWeek);
ModuleDAO moduleDAO = new ModuleDAO();
Module module = moduleDAO.findByModuleID(inspectionWeek.getModule_id());
StudentDAO studentDAO = new StudentDAO();
List<Student> students = studentDAO.findByModuleID(module.getModule_id());
request.setAttribute("students", students);
request.setAttribute("servlet", this);
this.layoutType = LayoutType.JSON;
response.setContentType("application/json");
this.proceedGet("/InspectionsJSON.jsp", request, response);
}
protected void proceedInspectionList(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
Module module = this.getSelectedModule(request);
this.setBreadcrumbTitles("Modules%"+ module.getModule_name() +"%Inspections", request);
this.setBreadcrumbLinks("/PIMS/modules/%/PIMS/modules/"+ module.getModule_id() +"/", request);
this.relatedMenuClass = "inspections inspection-weeks";
this.layoutType = LayoutType.Grid;
this.proceedGet("/Inspections.jsp", request, response);
}
protected void proceedSingleInspection(Inspection inspection, HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
this.relatedMenuClass = "inspections inspection inspection-weeks";
// DAOs
StudentDAO studentDAO = new StudentDAO();
StudentKeywordDAO studentKeywordDAO = new StudentKeywordDAO();
InspectorDAO inspectorDAO = new InspectorDAO();
InspectionweekDAO inspectionWeekDAO = new InspectionweekDAO();
// Inspection
request.setAttribute("inspection", inspection);
// Inspection week
Inspectionweek inspectionWeek = inspectionWeekDAO.findByID(inspection.getInspectionweek_id());
request.setAttribute("inspectionWeek", inspectionWeek);
// Student
Student student = studentDAO.findByStudentID(inspection.getStudent_id());
request.setAttribute("student", student);
// Keywords
List<Keyword> studentKeywords = studentKeywordDAO.findByStudentID(student.getStudent_id());
String lKeywords = null;
if (studentKeywords.size() > 0) {
for (Keyword keyword : studentKeywords) {
if (lKeywords == null) {
lKeywords = new String(keyword.getKeyword_name());
} else {
lKeywords += ", " + keyword.getKeyword_name();
}
}
}
request.setAttribute("keywords", lKeywords);
// Supervisor
Inspector supervisor = inspectorDAO.findByUsername(student.getSupervisor());
request.setAttribute("supervisor", supervisor);
// Inspectors
List<Inspector> allInspectors = inspectorDAO.findAll();
List<Inspector> suggestedInspectors = this.suggestedInspectorsForStudent(student, allInspectors, inspection);
List<Inspector> otherInspectors = new ArrayList<Inspector>(allInspectors);
otherInspectors.removeAll(suggestedInspectors);
request.setAttribute("otherInspectors", otherInspectors);
request.setAttribute("suggestedInspectors", suggestedInspectors);
// First inspector
Inspector firstInspector = inspectorDAO.findByInspectorID(inspection.getFirst_inspector_id());
if (firstInspector != null) {
request.setAttribute("firstInspector", firstInspector.getInspector_id());
} else {
request.setAttribute("firstInspector", -1);
}
// Second inspector
Inspector secondInspector = inspectorDAO.findByInspectorID(inspection.getSecond_inspector_id());
if (secondInspector != null) {
request.setAttribute("secondInspector", secondInspector.getInspector_id());
} else {
request.setAttribute("secondInspector", -1);
}
Module module = this.getSelectedModule(request);
this.setBreadcrumbTitles("Modules%"+
module.getModule_name()+"%"+
"Inspection weeks%"+
inspectionWeek.getInspection_title() +"%"+
student.getUsername(), request);
this.setBreadcrumbLinks("/PIMS/modules/%"+
"/PIMS/modules/"+ module.getModule_id() +"/%"+
"/PIMS/inspectionweeks/"+ module.getModule_id() +"/%"+
"/PIMS/inspectionweeks/"+ module.getModule_id() +"/"+ inspection.getInspectionweek_id()+"/", request);
request.setAttribute("servlet", this);
this.layoutType = LayoutType.Grid;
this.proceedGet("/Inspection.jsp", request, response);
}
protected void proceedSingleInspectionError(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
Module module = this.getSelectedModule(request);
this.setBreadcrumbTitles("Modules%"+ module.getModule_name() +"%Inspections%Error", request);
this.setBreadcrumbLinks("/PIMS/modules/%/PIMS/modules/"+ module.getModule_id() +"/%/PIMS/inspections/"+ module.getModule_id() +"/", request);
this.layoutType = LayoutType.Grid;
this.proceedGet("/Inspection.jsp", request, response);
}
public Inspection inspectionForStudent(Student student, Inspectionweek inspectionWeek) {
InspectionDAO inspectionDAO = new InspectionDAO();
Inspection inspection = inspectionDAO.findByStudentAndInspectionWeek(student.getStudent_id(), inspectionWeek.getInspectionweek_id());
return inspection;
}
public String studentHasInspection(Student student, Inspectionweek inspectionWeek) {
if(this.inspectionForStudent(student, inspectionWeek) != null) {
return "true";
} else {
return "false";
}
}
public int loadForInspector(Inspector inspector, Inspection inspection) {
InspectionDAO inspectionDAO = new InspectionDAO();
List<Inspection> firstInspectorInspections = inspectionDAO.inspectionAsFirstInspectorOrSupervisor(inspector);
return firstInspectorInspections.size();
}
public String matchedKeywords(Student student, Inspector inspector) {
StudentKeywordDAO studentKeywordDAO = new StudentKeywordDAO();
List<Keyword> studentKeywords = studentKeywordDAO.findByStudentID(student.getStudent_id());
List<Keyword> matchedKeywords = this.matchedKeywordsFromList(studentKeywords, inspector);
String result = null;
for (Keyword keyword : matchedKeywords) {
if (result == null) {
result = new String(keyword.getKeyword_name());
} else {
result += ", " + keyword.getKeyword_name();
}
}
return result != null ? result : "None";
}
public List<Keyword> matchedKeywordsFromList(List<Keyword> keywords, Inspector inspector) {
InspectorKeywordDAO inspectorKeywordDAO = new InspectorKeywordDAO();
List<Keyword> inspectorKeywords = inspectorKeywordDAO.findByInspectorID(inspector.getInspector_id());
List<Keyword> matchedKeywords = new ArrayList<Keyword>();
for (Keyword keyword : keywords) {
if (inspectorKeywords.contains(keyword)) {
matchedKeywords.add(keyword);
}
}
return matchedKeywords;
}
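    // Illustrative walk-through of the suggestion logic below (hypothetical numbers): with
    // matched-keyword counts {A:3, B:3, C:1, D:0} and the limits above (minimum 3, maximum 5),
    // orderedInspectors becomes {3:[A,B], 1:[C], 0:[D]}; picking buckets from the highest count
    // downwards gives [A,B], then [A,B,C], which reaches the minimum, so [A,B,C] is returned
    // (and would be truncated to the first 5 entries if it grew longer).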
private List<Inspector> suggestedInspectorsForStudent(Student student, List<Inspector> allInspectors, Inspection inspection) {
StudentKeywordDAO studentKeywordDAO = new StudentKeywordDAO();
List<Keyword> studentKeywords = studentKeywordDAO.findByStudentID(student.getStudent_id());
List<Inspector> suggestedInspectors = new ArrayList<Inspector>();
HashMap<Integer, List<Inspector>> orderedInspectors = new HashMap<Integer, List<Inspector>>();
if (allInspectors.size() <= InspectionsServlet.minimumSuggestedInspectors) {
return allInspectors;
} else {
            // Order inspectors by the number of keywords matched
for (Inspector inspector : allInspectors) {
                // Get the number of keywords matched
List<Keyword> matchedKeywords = this.matchedKeywordsFromList(studentKeywords, inspector);
Integer matching = matchedKeywords.size();
// Get the list of inspector with the same amount
List<Inspector> rangeMatching = orderedInspectors.get(matching);
                if (rangeMatching == null) { // Create this list if it does not exist yet
rangeMatching = new ArrayList<Inspector>();
orderedInspectors.put(matching, rangeMatching);
}
// Add the inspector
rangeMatching.add(inspector);
} // end for
System.out.println("-------------------- orderedInspectors --------------------");
System.out.println(orderedInspectors);
System.out.println("-------------------- /orderedInspectors -------------------");
// Pick inspectors until lower limit reached
Integer maxMatch = studentKeywords.size();
System.out.println("-- MaxMatch = "+maxMatch);
System.out.println("-- Size = "+suggestedInspectors.size());
System.out.println("-------------------- rangeInspectors ----------------------");
for (Integer match = maxMatch; match >= 0 && suggestedInspectors.size() < InspectionsServlet.minimumSuggestedInspectors; match--) {
List<Inspector> rangeInspectors = orderedInspectors.get(match);
System.out.println(rangeInspectors);
if (rangeInspectors != null) {
suggestedInspectors.addAll(rangeInspectors);
}
}
System.out.println("-------------------- /rangeInspectors ---------------------");
if (suggestedInspectors.size() > InspectionsServlet.maximumSuggestedInspectors) {
                suggestedInspectors = suggestedInspectors.subList(0, InspectionsServlet.maximumSuggestedInspectors); // Truncate if too long
}
return suggestedInspectors;
}
    }
@Override
public Boolean shouldDenyAcces(HttpServletRequest request) {
if (super.shouldDenyAcces(request)) {
return true;
}
HttpSession session = request.getSession();
User user = (User) session.getAttribute("user");
return !user.isCoordinator();
}
}
| apache-2.0 |
ingve/IncludeOS | lib/microLB/micro_lb/serialize.cpp | 3732 | #include "balancer.hpp"
#include <stdexcept>
#define LB_VERBOSE 0
#if LB_VERBOSE
#define LBOUT(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define LBOUT(fmt, ...) /** **/
#endif
using namespace liu;
namespace microLB
{
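  // Rough sketch of the layout persisted below (ids as used in this file):
  //   Balancer: [0] throw_counter, marker(0), [1] wait-queue size, then each waiting client
  //             ([10] connection, [11] readq size, [12] buffers, marker(10)).
  //   Nodes:    [100] session_total, [100] session_timeouts, marker(100), [102] live session
  //             count, then per session [120] incoming and [121] outgoing connections, marker(120).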
void Nodes::serialize(Storage& store)
{
store.add<int64_t>(100, this->session_total);
store.add_int(100, this->session_timeouts);
store.put_marker(100);
const int tot_sessions = sessions.size() - free_sessions.size();
LBOUT("Serialize %llu sessions\n", tot_sessions);
store.add_int(102, tot_sessions);
int alive = 0;
for(auto& session : sessions)
{
if(session.is_alive())
{
session.serialize(store);
++alive;
}
}
assert(alive == tot_sessions
&& "Mismatch between number of said serialized sessions and the actual number serialized.");
}
void Session::serialize(Storage& store)
{
store.add_connection(120, incoming);
store.add_connection(121, outgoing);
store.put_marker(120);
}
void Nodes::deserialize(netstack_t& in, netstack_t& out, Restore& store)
{
/// nodes member fields ///
this->session_total = store.as_type<int64_t>(); store.go_next();
this->session_timeouts = store.as_int(); store.go_next();
store.pop_marker(100);
/// sessions ///
auto& tcp_in = in.tcp();
auto& tcp_out = out.tcp();
const int tot_sessions = store.as_int(); store.go_next();
// since we are remaking all the sessions, reduce total
this->session_total -= tot_sessions;
LBOUT("Deserialize %llu sessions\n", tot_sessions);
for(auto i = 0; i < static_cast<int>(tot_sessions); i++)
{
auto incoming = store.as_tcp_connection(tcp_in); store.go_next();
auto outgoing = store.as_tcp_connection(tcp_out); store.go_next();
store.pop_marker(120);
create_session(true /* no readq atm */, incoming, outgoing);
}
}
void Waiting::serialize(liu::Storage& store)
{
store.add_connection(10, this->conn);
store.add_int(11, (int) readq.size());
for (auto buffer : readq) {
store.add_buffer(12, buffer->data(), buffer->size());
}
store.put_marker(10);
}
Waiting::Waiting(liu::Restore& store, net::TCP& stack)
{
this->conn = store.as_tcp_connection(stack); store.go_next();
int qsize = store.as_int(); store.go_next();
for (int i = 0; i < qsize; i++)
{
auto buf = store.as_buffer(); store.go_next();
readq.push_back(net::tcp::construct_buffer(buf.begin(), buf.end()));
}
store.pop_marker(10);
}
void Balancer::serialize(Storage& store, const buffer_t*)
{
store.add_int(0, this->throw_counter);
store.put_marker(0);
/// wait queue
store.add_int(1, (int) queue.size());
for (auto& client : queue) {
client.serialize(store);
}
/// nodes
nodes.serialize(store);
}
void Balancer::deserialize(Restore& store)
{
this->throw_counter = store.as_int(); store.go_next();
store.pop_marker(0);
/// wait queue
int wsize = store.as_int(); store.go_next();
for (int i = 0; i < wsize; i++) {
queue.emplace_back(store, this->netin.tcp());
}
/// nodes
nodes.deserialize(netin, netout, store);
}
void Balancer::resume_callback(liu::Restore& store)
{
try {
this->deserialize(store);
}
catch (std::exception& e) {
printf("\n!!! Error during microLB resume !!!\n");
printf("REASON: %s\n", e.what());
}
}
void Balancer::init_liveupdate()
{
liu::LiveUpdate::register_partition("microlb", {this, &Balancer::serialize});
if(liu::LiveUpdate::is_resumable())
{
liu::LiveUpdate::resume("microlb", {this, &Balancer::resume_callback});
}
}
}
| apache-2.0 |
shakamunyi/beam | sdk/src/main/java/com/google/cloud/dataflow/sdk/util/ReduceFnRunner.java | 37408 | /*
* Copyright (C) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.dataflow.sdk.util;
import com.google.cloud.dataflow.sdk.options.PipelineOptions;
import com.google.cloud.dataflow.sdk.transforms.Aggregator;
import com.google.cloud.dataflow.sdk.transforms.DoFn;
import com.google.cloud.dataflow.sdk.transforms.GroupByKey.GroupByKeyOnly;
import com.google.cloud.dataflow.sdk.transforms.windowing.AfterWatermark;
import com.google.cloud.dataflow.sdk.transforms.windowing.BoundedWindow;
import com.google.cloud.dataflow.sdk.transforms.windowing.OutputTimeFn;
import com.google.cloud.dataflow.sdk.transforms.windowing.PaneInfo;
import com.google.cloud.dataflow.sdk.transforms.windowing.PaneInfo.Timing;
import com.google.cloud.dataflow.sdk.transforms.windowing.Window.ClosingBehavior;
import com.google.cloud.dataflow.sdk.transforms.windowing.WindowFn;
import com.google.cloud.dataflow.sdk.util.ReduceFnContextFactory.OnTriggerCallbacks;
import com.google.cloud.dataflow.sdk.util.ReduceFnContextFactory.StateStyle;
import com.google.cloud.dataflow.sdk.util.TimerInternals.TimerData;
import com.google.cloud.dataflow.sdk.util.WindowingStrategy.AccumulationMode;
import com.google.cloud.dataflow.sdk.util.state.ReadableState;
import com.google.cloud.dataflow.sdk.util.state.StateInternals;
import com.google.cloud.dataflow.sdk.util.state.StateNamespaces.WindowNamespace;
import com.google.cloud.dataflow.sdk.values.KV;
import com.google.cloud.dataflow.sdk.values.PCollection;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.joda.time.Duration;
import org.joda.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.annotation.Nullable;
/**
* Manages the execution of a {@link ReduceFn} after a {@link GroupByKeyOnly} has partitioned the
* {@link PCollection} by key.
*
* <p>The {@link #onTrigger} relies on a {@link TriggerRunner} to manage the execution of
* the triggering logic. The {@code ReduceFnRunner}s responsibilities are:
*
* <ul>
* <li>Tracking the windows that are active (have buffered data) as elements arrive and
* triggers are fired.
* <li>Holding the watermark based on the timestamps of elements in a pane and releasing it
* when the trigger fires.
* <li>Calling the appropriate callbacks on {@link ReduceFn} based on trigger execution, timer
* firings, etc, and providing appropriate contexts to the {@link ReduceFn} for actions
* such as output.
* <li>Scheduling garbage collection of state associated with a specific window, and making that
* happen when the appropriate timer fires.
* </ul>
*
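 * <p>A rough usage sketch (illustrative only; construction of the runner and the surrounding
 * key, state and timer plumbing is assumed and elided):
 *
 * <pre>{@code
 * ReduceFnRunner<String, Integer, Iterable<Integer>, IntervalWindow> runner = ...;
 * runner.processElements(elementsForThisKey);  // buffer values, set holds and timers, maybe fire
 * for (TimerData timer : firedTimers) {
 *   runner.onTimer(timer);                     // end-of-window, garbage collection, trigger timers
 * }
 * runner.persist();                            // flush active window bookkeeping
 * }</pre>
 *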
* @param <K> The type of key being processed.
* @param <InputT> The type of values associated with the key.
* @param <OutputT> The output type that will be produced for each key.
* @param <W> The type of windows this operates on.
*/
public class ReduceFnRunner<K, InputT, OutputT, W extends BoundedWindow> {
/**
* The {@link ReduceFnRunner} depends on most aspects of the {@link WindowingStrategy}.
*
* <ul>
* <li>It runs the trigger from the {@link WindowingStrategy}.</li>
* <li>It merges windows according to the {@link WindowingStrategy}.</li>
* <li>It chooses how to track active windows and clear out expired windows
* according to the {@link WindowingStrategy}, based on the allowed lateness and
* whether windows can merge.</li>
* <li>It decides whether to emit empty final panes according to whether the
   *       {@link WindowingStrategy} requires it.</li>
* <li>It uses discarding or accumulation mode according to the {@link WindowingStrategy}.</li>
* </ul>
*/
private final WindowingStrategy<Object, W> windowingStrategy;
private final OutputWindowedValue<KV<K, OutputT>> outputter;
private final StateInternals<K> stateInternals;
private final Aggregator<Long, Long> droppedDueToClosedWindow;
private final K key;
private final OnMergeCallback onMergeCallback = new OnMergeCallback();
/**
* Track which windows are still active and which 'state address' windows contain state
* for a merged window.
*
* <ul>
* <li>State: Global map for all active windows for this computation and key.
* <li>Lifetime: Cleared when no active windows need to be tracked. A window lives within
* the active window set until its trigger is closed or the window is garbage collected.
* </ul>
*/
private final ActiveWindowSet<W> activeWindows;
/**
* Always a {@link SystemReduceFn}.
*
* <ul>
* <li>State: A bag of accumulated values, or the intermediate result of a combiner.
* <li>State style: RENAMED
* <li>Merging: Concatenate or otherwise combine the state from each merged window.
* <li>Lifetime: Cleared when a pane fires if DISCARDING_FIRED_PANES. Otherwise cleared
* when trigger is finished or when the window is garbage collected.
* </ul>
*/
private final ReduceFn<K, InputT, OutputT, W> reduceFn;
/**
* Manage the setting and firing of timer events.
*
* <ul>
* <li>Merging: End-of-window and garbage collection timers are cancelled when windows are
* merged away. Timers created by triggers are never garbage collected and are left to
* fire and be ignored.
* <li>Lifetime: Timers automatically disappear after they fire.
* </ul>
*/
private final TimerInternals timerInternals;
/**
* Manage the execution and state for triggers.
*
* <ul>
* <li>State: Tracks which sub-triggers have finished, and any additional state needed to
* determine when the trigger should fire.
* <li>State style: DIRECT
* <li>Merging: Finished bits are explicitly managed. Other state is eagerly merged as
* needed.
* <li>Lifetime: Most trigger state is cleared when the final pane is emitted. However
* the finished bits are left behind and must be cleared when the window is
* garbage collected.
* </ul>
*/
private final TriggerRunner<W> triggerRunner;
/**
* Store the output watermark holds for each window.
*
* <ul>
* <li>State: Bag of hold timestamps.
* <li>State style: RENAMED
* <li>Merging: Depending on {@link OutputTimeFn}, may need to be recalculated on merging.
* When a pane fires it may be necessary to add (back) an end-of-window or garbage collection
* hold.
* <li>Lifetime: Cleared when a pane fires or when the window is garbage collected.
* </ul>
*/
private final WatermarkHold<W> watermarkHold;
private final ReduceFnContextFactory<K, InputT, OutputT, W> contextFactory;
/**
* Store the previously emitted pane (if any) for each window.
*
* <ul>
* <li>State: The previous {@link PaneInfo} passed to the user's {@link DoFn#processElement},
* if any.
   * <li>State style: DIRECT
* <li>Merging: Always keyed by actual window, so does not depend on {@link #activeWindows}.
* Cleared when window is merged away.
* <li>Lifetime: Cleared when trigger is closed or window is garbage collected.
* </ul>
*/
private final PaneInfoTracker paneInfoTracker;
/**
* Store whether we've seen any elements for a window since the last pane was emitted.
*
* <ul>
* <li>State: Unless DISCARDING_FIRED_PANES, a count of number of elements added so far.
* <li>State style: RENAMED.
* <li>Merging: Counts are summed when windows are merged.
* <li>Lifetime: Cleared when pane fires or window is garbage collected.
* </ul>
*/
private final NonEmptyPanes<K, W> nonEmptyPanes;
public ReduceFnRunner(
K key,
WindowingStrategy<?, W> windowingStrategy,
StateInternals<K> stateInternals,
TimerInternals timerInternals,
WindowingInternals<?, KV<K, OutputT>> windowingInternals,
Aggregator<Long, Long> droppedDueToClosedWindow,
ReduceFn<K, InputT, OutputT, W> reduceFn,
PipelineOptions options) {
this.key = key;
this.timerInternals = timerInternals;
this.paneInfoTracker = new PaneInfoTracker(timerInternals);
this.stateInternals = stateInternals;
this.outputter = new OutputViaWindowingInternals<>(windowingInternals);
this.droppedDueToClosedWindow = droppedDueToClosedWindow;
this.reduceFn = reduceFn;
@SuppressWarnings("unchecked")
WindowingStrategy<Object, W> objectWindowingStrategy =
(WindowingStrategy<Object, W>) windowingStrategy;
this.windowingStrategy = objectWindowingStrategy;
this.nonEmptyPanes = NonEmptyPanes.create(this.windowingStrategy, this.reduceFn);
// Note this may incur I/O to load persisted window set data.
this.activeWindows = createActiveWindowSet();
this.contextFactory =
new ReduceFnContextFactory<K, InputT, OutputT, W>(key, reduceFn, this.windowingStrategy,
stateInternals, this.activeWindows, timerInternals, windowingInternals, options);
this.watermarkHold = new WatermarkHold<>(timerInternals, windowingStrategy);
this.triggerRunner =
new TriggerRunner<>(
windowingStrategy.getTrigger(),
new TriggerContextFactory<>(windowingStrategy, stateInternals, activeWindows));
}
private ActiveWindowSet<W> createActiveWindowSet() {
return windowingStrategy.getWindowFn().isNonMerging()
? new NonMergingActiveWindowSet<W>()
: new MergingActiveWindowSet<W>(windowingStrategy.getWindowFn(), stateInternals);
}
@VisibleForTesting
boolean isFinished(W window) {
return triggerRunner.isClosed(contextFactory.base(window, StateStyle.DIRECT).state());
}
/**
* Incorporate {@code values} into the underlying reduce function, and manage holds, timers,
* triggers, and window merging.
*
* <p>The general strategy is:
* <ol>
* <li>Use {@link WindowedValue#getWindows} (itself determined using
* {@link WindowFn#assignWindows}) to determine which windows each element belongs to. Some
* of those windows will already have state associated with them. The rest are considered
* NEW.
* <li>Use {@link WindowFn#mergeWindows} to attempt to merge currently ACTIVE and NEW windows.
* Each NEW window will become either ACTIVE, MERGED, or EPHEMERAL. (See {@link
* ActiveWindowSet} for definitions of these terms.)
* <li>If at all possible, eagerly substitute EPHEMERAL windows with their ACTIVE state address
* windows before any state is associated with the EPHEMERAL window. In the common case that
* windows for new elements are merged into existing ACTIVE windows then no additional
* storage or merging overhead will be incurred.
* <li>Otherwise, keep track of the state address windows for ACTIVE windows so that their
* states can be merged on-demand when a pane fires.
   *   <li>Process the element for each of the windows into which its original windows were merged, according
* to {@link ActiveWindowSet}. Processing may require running triggers, setting timers,
* setting holds, and invoking {@link ReduceFn#onTrigger}.
* </ol>
*/
public void processElements(Iterable<WindowedValue<InputT>> values) throws Exception {
// If an incoming element introduces a new window, attempt to merge it into an existing
// window eagerly. The outcome is stored in the ActiveWindowSet.
collectAndMergeWindows(values);
Set<W> windowsToConsider = new HashSet<>();
// Process each element, using the updated activeWindows determined by collectAndMergeWindows.
for (WindowedValue<InputT> value : values) {
windowsToConsider.addAll(processElement(value));
}
// Trigger output from any window for which the trigger is ready
for (W mergedWindow : windowsToConsider) {
ReduceFn<K, InputT, OutputT, W>.Context directContext =
contextFactory.base(mergedWindow, StateStyle.DIRECT);
ReduceFn<K, InputT, OutputT, W>.Context renamedContext =
contextFactory.base(mergedWindow, StateStyle.RENAMED);
triggerRunner.prefetchShouldFire(mergedWindow, directContext.state());
emitIfAppropriate(directContext, renamedContext);
}
// We're all done with merging and emitting elements so can compress the activeWindow state.
activeWindows.removeEphemeralWindows();
}
public void persist() {
activeWindows.persist();
}
/**
* Extract the windows associated with the values, and invoke merge.
*/
private void collectAndMergeWindows(Iterable<WindowedValue<InputT>> values) throws Exception {
// No-op if no merging can take place
if (windowingStrategy.getWindowFn().isNonMerging()) {
return;
}
// Collect the windows from all elements (except those which are too late) and
// make sure they are already in the active window set or are added as NEW windows.
for (WindowedValue<?> value : values) {
for (BoundedWindow untypedWindow : value.getWindows()) {
@SuppressWarnings("unchecked")
W window = (W) untypedWindow;
ReduceFn<K, InputT, OutputT, W>.Context directContext =
contextFactory.base(window, StateStyle.DIRECT);
if (triggerRunner.isClosed(directContext.state())) {
// This window has already been closed.
// We will update the counter for this in the corresponding processElement call.
continue;
}
if (activeWindows.isActive(window)) {
Set<W> stateAddressWindows = activeWindows.readStateAddresses(window);
if (stateAddressWindows.size() > 1) {
            // This is a legacy window whose state has not been eagerly merged.
// Do that now.
ReduceFn<K, InputT, OutputT, W>.OnMergeContext premergeContext =
contextFactory.forPremerge(window);
reduceFn.onMerge(premergeContext);
watermarkHold.onMerge(premergeContext);
activeWindows.merged(window);
}
}
// Add this window as NEW if we've not yet seen it.
activeWindows.addNew(window);
}
}
// Merge all of the active windows and retain a mapping from source windows to result windows.
mergeActiveWindows();
}
private class OnMergeCallback implements ActiveWindowSet.MergeCallback<W> {
/**
* Called from the active window set to indicate {@code toBeMerged} (of which only
* {@code activeToBeMerged} are ACTIVE and thus have state associated with them) will later
* be merged into {@code mergeResult}.
*/
@Override
public void prefetchOnMerge(
Collection<W> toBeMerged, Collection<W> activeToBeMerged, W mergeResult) throws Exception {
ReduceFn<K, InputT, OutputT, W>.OnMergeContext directMergeContext =
contextFactory.forMerge(activeToBeMerged, mergeResult, StateStyle.DIRECT);
ReduceFn<K, InputT, OutputT, W>.OnMergeContext renamedMergeContext =
contextFactory.forMerge(activeToBeMerged, mergeResult, StateStyle.RENAMED);
// Prefetch various state.
triggerRunner.prefetchForMerge(mergeResult, activeToBeMerged, directMergeContext.state());
reduceFn.prefetchOnMerge(renamedMergeContext.state());
watermarkHold.prefetchOnMerge(renamedMergeContext.state());
nonEmptyPanes.prefetchOnMerge(renamedMergeContext.state());
}
/**
* Called from the active window set to indicate {@code toBeMerged} (of which only
* {@code activeToBeMerged} are ACTIVE and thus have state associated with them) are about
* to be merged into {@code mergeResult}.
*/
@Override
public void onMerge(Collection<W> toBeMerged, Collection<W> activeToBeMerged, W mergeResult)
throws Exception {
// At this point activeWindows has NOT incorporated the results of the merge.
ReduceFn<K, InputT, OutputT, W>.OnMergeContext directMergeContext =
contextFactory.forMerge(activeToBeMerged, mergeResult, StateStyle.DIRECT);
ReduceFn<K, InputT, OutputT, W>.OnMergeContext renamedMergeContext =
contextFactory.forMerge(activeToBeMerged, mergeResult, StateStyle.RENAMED);
// Run the reduceFn to perform any needed merging.
reduceFn.onMerge(renamedMergeContext);
// Merge the watermark holds.
watermarkHold.onMerge(renamedMergeContext);
// Merge non-empty pane state.
nonEmptyPanes.onMerge(renamedMergeContext.state());
// Have the trigger merge state as needed
triggerRunner.onMerge(
directMergeContext.window(), directMergeContext.timers(), directMergeContext.state());
for (W active : activeToBeMerged) {
if (active.equals(mergeResult)) {
// Not merged away.
continue;
}
// Cleanup flavor A: Currently ACTIVE window is about to become MERGED.
// Clear any state not already cleared by the onMerge calls above.
WindowTracing.debug("ReduceFnRunner.onMerge: Merging {} into {}", active, mergeResult);
ReduceFn<K, InputT, OutputT, W>.Context directClearContext =
contextFactory.base(active, StateStyle.DIRECT);
// No need for the end-of-window or garbage collection timers.
// We will establish a new end-of-window or garbage collection timer for the mergeResult
// window in processElement below. There must be at least one element for the mergeResult
// window since a new element with a new window must have triggered this onMerge.
cancelEndOfWindowAndGarbageCollectionTimers(directClearContext);
// We no longer care about any previous panes of merged away windows. The
// merge result window gets to start fresh if it is new.
paneInfoTracker.clear(directClearContext.state());
}
}
}
private void mergeActiveWindows() throws Exception {
activeWindows.merge(onMergeCallback);
}
/**
* Process an element.
* @param value the value being processed
*
* @return the set of windows in which the element was actually processed
*/
private Collection<W> processElement(WindowedValue<InputT> value) throws Exception {
// Redirect element windows to the ACTIVE windows they have been merged into.
// The compressed representation (value, {window1, window2, ...}) actually represents
// distinct elements (value, window1), (value, window2), ...
// so if window1 and window2 merge, the resulting window will contain both copies
// of the value.
Collection<W> windows = new ArrayList<>();
for (BoundedWindow untypedWindow : value.getWindows()) {
@SuppressWarnings("unchecked")
W window = (W) untypedWindow;
W active = activeWindows.representative(window);
Preconditions.checkState(active != null, "Window %s should have been added", window);
windows.add(active);
}
// Prefetch in each of the windows if we're going to need to process triggers
for (W window : windows) {
ReduceFn<K, InputT, OutputT, W>.ProcessValueContext directContext = contextFactory.forValue(
window, value.getValue(), value.getTimestamp(), StateStyle.DIRECT);
triggerRunner.prefetchForValue(window, directContext.state());
}
// Process the element for each (representative) window it belongs to.
for (W window : windows) {
ReduceFn<K, InputT, OutputT, W>.ProcessValueContext directContext = contextFactory.forValue(
window, value.getValue(), value.getTimestamp(), StateStyle.DIRECT);
ReduceFn<K, InputT, OutputT, W>.ProcessValueContext renamedContext = contextFactory.forValue(
window, value.getValue(), value.getTimestamp(), StateStyle.RENAMED);
// Check to see if the triggerRunner thinks the window is closed. If so, drop that window.
if (triggerRunner.isClosed(directContext.state())) {
droppedDueToClosedWindow.addValue(1L);
WindowTracing.debug(
"ReduceFnRunner.processElement: Dropping element at {} for key:{}; window:{} "
+ "since window is no longer active at inputWatermark:{}; outputWatermark:{}",
value.getTimestamp(), key, window, timerInternals.currentInputWatermarkTime(),
timerInternals.currentOutputWatermarkTime());
continue;
}
nonEmptyPanes.recordContent(renamedContext.state());
// Make sure we've scheduled the end-of-window or garbage collection timer for this window.
Instant timer = scheduleEndOfWindowOrGarbageCollectionTimer(directContext);
// Hold back progress of the output watermark until we have processed the pane this
// element will be included within. If the element is too late for that, place a hold at
// the end-of-window or garbage collection time to allow empty panes to contribute elements
// which won't be dropped due to lateness by a following computation (assuming the following
// computation uses the same allowed lateness value...)
@Nullable Instant hold = watermarkHold.addHolds(renamedContext);
if (hold != null) {
// Assert that holds have a proximate timer.
boolean holdInWindow = !hold.isAfter(window.maxTimestamp());
boolean timerInWindow = !timer.isAfter(window.maxTimestamp());
Preconditions.checkState(
holdInWindow == timerInWindow,
"set a hold at %s, a timer at %s, which disagree as to whether they are in window %s",
hold,
timer,
directContext.window());
}
// Execute the reduceFn, which will buffer the value as appropriate
reduceFn.processValue(renamedContext);
// Run the trigger to update its state
triggerRunner.processValue(
directContext.window(),
directContext.timestamp(),
directContext.timers(),
directContext.state());
}
return windows;
}
/**
* Called when an end-of-window, garbage collection, or trigger-specific timer fires.
*/
public void onTimer(TimerData timer) throws Exception {
// Which window is the timer for?
Preconditions.checkArgument(timer.getNamespace() instanceof WindowNamespace,
"Expected timer to be in WindowNamespace, but was in %s", timer.getNamespace());
@SuppressWarnings("unchecked")
WindowNamespace<W> windowNamespace = (WindowNamespace<W>) timer.getNamespace();
W window = windowNamespace.getWindow();
ReduceFn<K, InputT, OutputT, W>.Context directContext =
contextFactory.base(window, StateStyle.DIRECT);
ReduceFn<K, InputT, OutputT, W>.Context renamedContext =
contextFactory.base(window, StateStyle.RENAMED);
// Has this window had its trigger finish?
// - The trigger may implement isClosed as constant false.
// - If the window function does not support windowing then all windows will be considered
// active.
// So we must take conjunction of activeWindows and triggerRunner state.
boolean windowIsActive =
activeWindows.isActive(window) && !triggerRunner.isClosed(directContext.state());
if (!windowIsActive) {
WindowTracing.debug(
"ReduceFnRunner.onTimer: Note that timer {} is for non-ACTIVE window {}", timer, window);
}
// If this is a garbage collection timer then we should trigger and garbage collect the window.
Instant cleanupTime = window.maxTimestamp().plus(windowingStrategy.getAllowedLateness());
boolean isGarbageCollection =
TimeDomain.EVENT_TIME == timer.getDomain() && timer.getTimestamp().equals(cleanupTime);
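    // For example (illustrative values only): a window ending at 12:00 with 10 minutes of allowed
    // lateness has cleanupTime 12:10, so an event-time timer firing at exactly 12:10 is treated
    // as garbage collection rather than as an ordinary end-of-window firing.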
if (isGarbageCollection) {
WindowTracing.debug(
"ReduceFnRunner.onTimer: Cleaning up for key:{}; window:{} at {} with "
+ "inputWatermark:{}; outputWatermark:{}",
key, window, timer.getTimestamp(), timerInternals.currentInputWatermarkTime(),
timerInternals.currentOutputWatermarkTime());
if (windowIsActive) {
// We need to call onTrigger to emit the final pane if required.
// The final pane *may* be ON_TIME if no prior ON_TIME pane has been emitted,
// and the watermark has passed the end of the window.
onTrigger(directContext, renamedContext, true/* isFinished */);
}
// Cleanup flavor B: Clear all the remaining state for this window since we'll never
// see elements for it again.
clearAllState(directContext, renamedContext, windowIsActive);
} else {
WindowTracing.debug(
"ReduceFnRunner.onTimer: Triggering for key:{}; window:{} at {} with "
+ "inputWatermark:{}; outputWatermark:{}",
key, window, timer.getTimestamp(), timerInternals.currentInputWatermarkTime(),
timerInternals.currentOutputWatermarkTime());
if (windowIsActive) {
emitIfAppropriate(directContext, renamedContext);
}
      // If this is an end-of-window timer then we need to set a GC timer
boolean isEndOfWindow = TimeDomain.EVENT_TIME == timer.getDomain()
&& timer.getTimestamp().equals(window.maxTimestamp());
if (isEndOfWindow) {
// Since we are processing an on-time firing we should schedule the garbage collection
// timer. (If getAllowedLateness is zero then the timer event will be considered a
// cleanup event and handled by the above).
// Note we must do this even if the trigger is finished so that we are sure to cleanup
// any final trigger tombstones.
Preconditions.checkState(
windowingStrategy.getAllowedLateness().isLongerThan(Duration.ZERO),
"Unexpected zero getAllowedLateness");
WindowTracing.debug(
"ReduceFnRunner.onTimer: Scheduling cleanup timer for key:{}; window:{} at {} with "
+ "inputWatermark:{}; outputWatermark:{}",
key, directContext.window(), cleanupTime, timerInternals.currentInputWatermarkTime(),
timerInternals.currentOutputWatermarkTime());
directContext.timers().setTimer(cleanupTime, TimeDomain.EVENT_TIME);
}
}
}
/**
* Clear all the state associated with {@code context}'s window.
* Should only be invoked if we know all future elements for this window will be considered
* beyond allowed lateness.
* This is a superset of the clearing done by {@link #emitIfAppropriate} below since:
* <ol>
* <li>We can clear the trigger state tombstone since we'll never need to ask about it again.
* <li>We can clear any remaining garbage collection hold.
* </ol>
*/
private void clearAllState(
ReduceFn<K, InputT, OutputT, W>.Context directContext,
ReduceFn<K, InputT, OutputT, W>.Context renamedContext,
boolean windowIsActive)
throws Exception {
if (windowIsActive) {
// Since both the window is in the active window set AND the trigger was not yet closed,
// it is possible we still have state.
reduceFn.clearState(renamedContext);
watermarkHold.clearHolds(renamedContext);
nonEmptyPanes.clearPane(renamedContext.state());
triggerRunner.clearState(
directContext.window(), directContext.timers(), directContext.state());
} else {
// Needed only for backwards compatibility over UPDATE.
// Clear any end-of-window or garbage collection holds keyed by the current window.
// Only needed if:
// - We have merging windows.
// - We are DISCARDING_FIRED_PANES.
// - A pane has fired.
// - But the trigger is not (yet) closed.
if (windowingStrategy.getMode() == AccumulationMode.DISCARDING_FIRED_PANES
&& !windowingStrategy.getWindowFn().isNonMerging()) {
watermarkHold.clearHolds(directContext);
}
}
paneInfoTracker.clear(directContext.state());
if (activeWindows.isActive(directContext.window())) {
// Don't need to track address state windows anymore.
activeWindows.remove(directContext.window());
}
// We'll never need to test for the trigger being closed again.
triggerRunner.clearFinished(directContext.state());
}
/** Should the reduce function state be cleared? */
private boolean shouldDiscardAfterFiring(boolean isFinished) {
if (isFinished) {
// This is the last firing for trigger.
return true;
}
if (windowingStrategy.getMode() == AccumulationMode.DISCARDING_FIRED_PANES) {
// Nothing should be accumulated between panes.
return true;
}
return false;
}
/**
* Possibly emit a pane if a trigger is ready to fire or timers require it, and cleanup state.
*/
private void emitIfAppropriate(ReduceFn<K, InputT, OutputT, W>.Context directContext,
ReduceFn<K, InputT, OutputT, W>.Context renamedContext)
throws Exception {
if (!triggerRunner.shouldFire(
directContext.window(), directContext.timers(), directContext.state())) {
// Ignore unless trigger is ready to fire
return;
}
// Inform the trigger of the transition to see if it is finished
triggerRunner.onFire(directContext.window(), directContext.timers(), directContext.state());
boolean isFinished = triggerRunner.isClosed(directContext.state());
    // Will we be able to clear all element state after triggering?
boolean shouldDiscard = shouldDiscardAfterFiring(isFinished);
// Run onTrigger to produce the actual pane contents.
// As a side effect it will clear all element holds, but not necessarily any
// end-of-window or garbage collection holds.
onTrigger(directContext, renamedContext, isFinished);
// Now that we've triggered, the pane is empty.
nonEmptyPanes.clearPane(renamedContext.state());
// Cleanup buffered data if appropriate
if (shouldDiscard) {
// Cleanup flavor C: The user does not want any buffered data to persist between panes.
reduceFn.clearState(renamedContext);
}
if (isFinished) {
// Cleanup flavor D: If trigger is closed we will ignore all new incoming elements.
// Clear state not otherwise cleared by onTrigger and clearPane above.
// Remember the trigger is, indeed, closed until the window is garbage collected.
triggerRunner.clearState(
directContext.window(), directContext.timers(), directContext.state());
paneInfoTracker.clear(directContext.state());
activeWindows.remove(directContext.window());
}
}
/**
* Do we need to emit a pane?
*/
private boolean needToEmit(boolean isEmpty, boolean isFinished, PaneInfo.Timing timing) {
if (!isEmpty) {
// The pane has elements.
return true;
}
if (timing == Timing.ON_TIME) {
// This is the unique ON_TIME pane.
return true;
}
if (isFinished && windowingStrategy.getClosingBehavior() == ClosingBehavior.FIRE_ALWAYS) {
// This is known to be the final pane, and the user has requested it even when empty.
return true;
}
return false;
}
/**
* Run the {@link ReduceFn#onTrigger} method and produce any necessary output.
*/
private void onTrigger(
final ReduceFn<K, InputT, OutputT, W>.Context directContext,
ReduceFn<K, InputT, OutputT, W>.Context renamedContext,
boolean isFinished)
throws Exception {
// Prefetch necessary states
ReadableState<Instant> outputTimestampFuture =
watermarkHold.extractAndRelease(renamedContext, isFinished).readLater();
ReadableState<PaneInfo> paneFuture =
paneInfoTracker.getNextPaneInfo(directContext, isFinished).readLater();
ReadableState<Boolean> isEmptyFuture =
nonEmptyPanes.isEmpty(renamedContext.state()).readLater();
reduceFn.prefetchOnTrigger(directContext.state());
triggerRunner.prefetchOnFire(directContext.window(), directContext.state());
// Calculate the pane info.
final PaneInfo pane = paneFuture.read();
// Extract the window hold, and as a side effect clear it.
final Instant outputTimestamp = outputTimestampFuture.read();
// Only emit a pane if it has data or empty panes are observable.
if (needToEmit(isEmptyFuture.read(), isFinished, pane.getTiming())) {
// Run reduceFn.onTrigger method.
final List<W> windows = Collections.singletonList(directContext.window());
ReduceFn<K, InputT, OutputT, W>.OnTriggerContext renamedTriggerContext =
contextFactory.forTrigger(directContext.window(), paneFuture, StateStyle.RENAMED,
new OnTriggerCallbacks<OutputT>() {
@Override
public void output(OutputT toOutput) {
// We're going to output panes, so commit the (now used) PaneInfo.
// TODO: This is unnecessary if the trigger isFinished since the saved
// state will be immediately deleted.
paneInfoTracker.storeCurrentPaneInfo(directContext, pane);
// Output the actual value.
outputter.outputWindowedValue(
KV.of(key, toOutput), outputTimestamp, windows, pane);
}
});
reduceFn.onTrigger(renamedTriggerContext);
}
}
/**
* Make sure we'll eventually have a timer fire which will tell us to garbage collect
* the window state. For efficiency we may need to do this in two steps rather
* than one. Return the time at which the timer will fire.
*
* <ul>
* <li>If allowedLateness is zero then we'll garbage collect at the end of the window.
* For simplicity we'll set our own timer for this situation even though an
* {@link AfterWatermark} trigger may have also set an end-of-window timer.
* ({@code setTimer} is idempotent.)
* <li>If allowedLateness is non-zero then we could just always set a timer for the garbage
 * collection time. However, if the windows are large (e.g. hourly) and the allowedLateness is small
 * (e.g. seconds) then we'll end up with nearly twice the number of timers in-flight. So we
* instead set an end-of-window timer and then roll that forward to a garbage collection timer
* when it fires. We use the input watermark to distinguish those cases.
* </ul>
*/
private Instant scheduleEndOfWindowOrGarbageCollectionTimer(
ReduceFn<?, ?, ?, W>.Context directContext) {
Instant inputWM = timerInternals.currentInputWatermarkTime();
Instant endOfWindow = directContext.window().maxTimestamp();
Instant fireTime;
String which;
if (inputWM != null && endOfWindow.isBefore(inputWM)) {
fireTime = endOfWindow.plus(windowingStrategy.getAllowedLateness());
which = "garbage collection";
} else {
fireTime = endOfWindow;
which = "end-of-window";
}
WindowTracing.trace(
"ReduceFnRunner.scheduleEndOfWindowOrGarbageCollectionTimer: Scheduling {} timer at {} for "
+ "key:{}; window:{} where inputWatermark:{}; outputWatermark:{}",
which,
fireTime,
key,
directContext.window(),
inputWM,
timerInternals.currentOutputWatermarkTime());
directContext.timers().setTimer(fireTime, TimeDomain.EVENT_TIME);
return fireTime;
}
private void cancelEndOfWindowAndGarbageCollectionTimers(ReduceFn<?, ?, ?, W>.Context context) {
WindowTracing.debug(
"ReduceFnRunner.cancelEndOfWindowAndGarbageCollectionTimers: Deleting timers for "
+ "key:{}; window:{} where inputWatermark:{}; outputWatermark:{}",
key, context.window(), timerInternals.currentInputWatermarkTime(),
timerInternals.currentOutputWatermarkTime());
Instant timer = context.window().maxTimestamp();
context.timers().deleteTimer(timer, TimeDomain.EVENT_TIME);
if (windowingStrategy.getAllowedLateness().isLongerThan(Duration.ZERO)) {
timer = timer.plus(windowingStrategy.getAllowedLateness());
context.timers().deleteTimer(timer, TimeDomain.EVENT_TIME);
}
}
/**
* An object that can output a value with all of its windowing information. This is a deliberately
* restricted subinterface of {@link WindowingInternals} to express how it is used here.
*/
private interface OutputWindowedValue<OutputT> {
void outputWindowedValue(OutputT output, Instant timestamp,
Collection<? extends BoundedWindow> windows, PaneInfo pane);
}
private static class OutputViaWindowingInternals<OutputT>
implements OutputWindowedValue<OutputT> {
private final WindowingInternals<?, OutputT> windowingInternals;
public OutputViaWindowingInternals(WindowingInternals<?, OutputT> windowingInternals) {
this.windowingInternals = windowingInternals;
}
@Override
public void outputWindowedValue(
OutputT output,
Instant timestamp,
Collection<? extends BoundedWindow> windows,
PaneInfo pane) {
windowingInternals.outputWindowedValue(output, timestamp, windows, pane);
}
}
}
| apache-2.0 |
lessthanoptimal/ejml | main/ejml-ddense/src/org/ejml/dense/row/decomposition/qr/QrHelperFunctions_DDRM.java | 12927 | /*
* Copyright (c) 2009-2020, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ejml.dense.row.decomposition.qr;
import org.ejml.data.DMatrixRMaj;
//CONCURRENT_INLINE import org.ejml.concurrency.EjmlConcurrency;
/**
* <p>
* Contains different functions that are useful for computing the QR decomposition of a matrix.
* </p>
*
* <p>
* Two different families of functions are provided for help in computing reflectors. Internally
* both of these functions switch between normalization by division or multiplication. Multiplication
* is most often significantly faster than division (2 or 3 times) but produces less accurate results
 * on very small numbers.  Each function checks whether round-off error would be significant and
 * decides which approach to use.
* </p>
*
* <p>
* Tests were done using the stability benchmark in jmatbench and there doesn't seem to be
* any advantage to always dividing by the max instead of checking and deciding. The most
* noticeable difference between the two methods is with very small numbers.
* </p>
*
* @author Peter Abeles
*/
public class QrHelperFunctions_DDRM {
//CONCURRENT_OMIT_BEGIN
public static double findMax( double[] u, int startU, int length ) {
double max = -1;
int index = startU;
int stopIndex = startU + length;
for (; index < stopIndex; index++) {
double val = u[index];
val = (val < 0.0) ? -val : val;
if (val > max)
max = val;
}
return max;
}
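    // A minimal sketch (not from this file) of how these helpers typically combine in a
    // Householder QR step; the variables j, numRows and u are assumed to come from the caller:
    //   double max = findMax(u, j, numRows - j);
    //   double tau = computeTauAndDivide(j, numRows, u, max); // norm of the scaled column
    //   double u_0 = u[j] + tau;
    //   divideElements(j + 1, numRows, u, u_0);               // leading element implicitly 1
    //   double gamma = u_0/tau;
    //   tau *= max;                                           // undo the earlier scaling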
public static void divideElements( final int j, final int numRows,
final double[] u, final double u_0 ) {
// double div_u = 1.0/u_0;
//
// if( Double.isInfinite(div_u)) {
for (int i = j; i < numRows; i++) {
u[i] /= u_0;
}
// } else {
// for( int i = j; i < numRows; i++ ) {
// u[i] *= div_u;
// }
// }
}
public static void divideElements( int j, int numRows, double[] u, int startU, double u_0 ) {
// double div_u = 1.0/u_0;
//
// if( Double.isInfinite(div_u)) {
for (int i = j; i < numRows; i++) {
u[i + startU] /= u_0;
}
// } else {
// for( int i = j; i < numRows; i++ ) {
// u[i+startU] *= div_u;
// }
// }
}
public static void divideElements_Brow( int j, int numRows, double[] u,
double[] b, int startB,
double u_0 ) {
// double div_u = 1.0/u_0;
//
// if( Double.isInfinite(div_u)) {
for (int i = j; i < numRows; i++) {
u[i] = b[i + startB] /= u_0;
}
// } else {
// for( int i = j; i < numRows; i++ ) {
// u[i] = b[i+startB] *= div_u;
// }
// }
}
public static void divideElements_Bcol( int j, int numRows, int numCols,
double[] u,
double[] b, int startB,
double u_0 ) {
// double div_u = 1.0/u_0;
//
// if( Double.isInfinite(div_u)) {
int indexB = j*numCols + startB;
for (int i = j; i < numRows; i++, indexB += numCols) {
b[indexB] = u[i] /= u_0;
}
// } else {
// int indexB = j*numCols+startB;
// for( int i = j; i < numRows; i++ , indexB += numCols ) {
// b[indexB] = u[i] *= div_u;
// }
// }
}
public static double computeTauAndDivide( int j, int numRows, double[] u, int startU, double max ) {
// compute the norm2 of the matrix, with each element
// normalized by the max value to avoid overflow problems
double tau = 0;
// double div_max = 1.0/max;
// if( Double.isInfinite(div_max)) {
// more accurate
for (int i = j; i < numRows; i++) {
double d = u[startU + i] /= max;
tau += d*d;
}
// } else {
// // faster
// for( int i = j; i < numRows; i++ ) {
// double d = u[startU+i] *= div_max;
// tau += d*d;
// }
// }
tau = Math.sqrt(tau);
if (u[startU + j] < 0)
tau = -tau;
return tau;
}
/**
* Normalizes elements in 'u' by dividing by max and computes the norm2 of the normalized
* array u. Adjust the sign of the returned value depending on the size of the first
* element in 'u'. Normalization is done to avoid overflow.
*
* <pre>
* for i=j:numRows
* u[i] = u[i] / max
* tau = tau + u[i]*u[i]
* end
* tau = sqrt(tau)
* if( u[j] < 0 )
* tau = -tau;
* </pre>
*
* @param j Element in 'u' that it starts at.
* @param numRows Element in 'u' that it stops at.
* @param u Array
* @param max Max value in 'u' that is used to normalize it.
* @return norm2 of 'u'
*/
public static double computeTauAndDivide( final int j, final int numRows,
final double[] u, final double max ) {
double tau = 0;
// double div_max = 1.0/max;
// if( Double.isInfinite(div_max)) {
for (int i = j; i < numRows; i++) {
double d = u[i] /= max;
tau += d*d;
}
// } else {
// for( int i = j; i < numRows; i++ ) {
// double d = u[i] *= div_max;
// tau += d*d;
// }
// }
tau = Math.sqrt(tau);
if (u[j] < 0)
tau = -tau;
return tau;
}
//CONCURRENT_OMIT_END
/**
* <p>
* Performs a rank-1 update operation on the submatrix specified by w with the multiply on the right.<br>
* <br>
* A = (I - γ*u*u<sup>T</sup>)*A<br>
* </p>
* <p>
* The order that matrix multiplies are performed has been carefully selected
* to minimize the number of operations.
* </p>
*
* <p>
* Before this can become a truly generic operation the submatrix specification needs
* to be made more generic.
* </p>
*/
public static void rank1UpdateMultR( DMatrixRMaj A, double[] u, double gamma,
int colA0,
int w0, int w1,
double[] _temp ) {
// for( int i = colA0; i < A.numCols; i++ ) {
// double val = 0;
//
// for( int k = w0; k < w1; k++ ) {
// val += u[k]*A.data[k*A.numCols +i];
// }
// _temp[i] = gamma*val;
// }
// reordered to reduce cpu cache issues
for (int i = colA0; i < A.numCols; i++) {
_temp[i] = u[w0]*A.data[w0*A.numCols + i];
}
for (int k = w0 + 1; k < w1; k++) {
int indexA = k*A.numCols + colA0;
double valU = u[k];
for (int i = colA0; i < A.numCols; i++) {
_temp[i] += valU*A.data[indexA++];
}
}
for (int i = colA0; i < A.numCols; i++) {
_temp[i] *= gamma;
}
// end of reorder
//CONCURRENT_BELOW EjmlConcurrency.loopFor(w0, w1, i->{
for (int i = w0; i < w1; i++) {
double valU = u[i];
int indexA = i*A.numCols + colA0;
for (int j = colA0; j < A.numCols; j++) {
A.data[indexA++] -= valU*_temp[j];
}
}
//CONCURRENT_ABOVE });
}
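    // A minimal usage sketch (hypothetical values): apply the reflector stored in u[w0, w1)
    // to the trailing columns of A, with _temp at least A.numCols long:
    //   double[] temp = new double[A.numCols];
    //   QrHelperFunctions_DDRM.rank1UpdateMultR(A, u, gamma, k, k, A.numRows, temp);
    // which overwrites that submatrix of A with (I - gamma*u*u^T)*A.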
// Useful for concurrent implementations where you don't want to modify u[0] to set it to 1.0
public static void rank1UpdateMultR_u0( DMatrixRMaj A, double[] u, final double u_0,
final double gamma,
final int colA0,
final int w0, final int w1,
final double[] _temp ) {
// for( int i = colA0; i < A.numCols; i++ ) {
// double val = 0;
//
// for( int k = w0; k < w1; k++ ) {
// val += u[k]*A.data[k*A.numCols +i];
// }
// _temp[i] = gamma*val;
// }
// reordered to reduce cpu cache issues
for (int i = colA0; i < A.numCols; i++) {
_temp[i] = u_0*A.data[w0*A.numCols + i];
}
for (int k = w0 + 1; k < w1; k++) {
int indexA = k*A.numCols + colA0;
double valU = u[k];
for (int i = colA0; i < A.numCols; i++) {
_temp[i] += valU*A.data[indexA++];
}
}
for (int i = colA0; i < A.numCols; i++) {
_temp[i] *= gamma;
}
// end of reorder
{
int indexA = w0*A.numCols + colA0;
for (int j = colA0; j < A.numCols; j++) {
A.data[indexA++] -= u_0*_temp[j];
}
}
//CONCURRENT_BELOW EjmlConcurrency.loopFor(w0+1, w1, i->{
for (int i = w0 + 1; i < w1; i++) {
final double valU = u[i];
int indexA = i*A.numCols + colA0;
for (int j = colA0; j < A.numCols; j++) {
A.data[indexA++] -= valU*_temp[j];
}
}
//CONCURRENT_ABOVE });
}
public static void rank1UpdateMultR( DMatrixRMaj A,
double[] u, int offsetU,
double gamma,
int colA0,
int w0, int w1,
double[] _temp ) {
// for( int i = colA0; i < A.numCols; i++ ) {
// double val = 0;
//
// for( int k = w0; k < w1; k++ ) {
// val += u[k+offsetU]*A.data[k*A.numCols +i];
// }
// _temp[i] = gamma*val;
// }
// reordered to reduce cpu cache issues
for (int i = colA0; i < A.numCols; i++) {
_temp[i] = u[w0 + offsetU]*A.data[w0*A.numCols + i];
}
for (int k = w0 + 1; k < w1; k++) {
int indexA = k*A.numCols + colA0;
double valU = u[k + offsetU];
for (int i = colA0; i < A.numCols; i++) {
_temp[i] += valU*A.data[indexA++];
}
}
for (int i = colA0; i < A.numCols; i++) {
_temp[i] *= gamma;
}
// end of reorder
//CONCURRENT_BELOW EjmlConcurrency.loopFor(w0, w1, i->{
for (int i = w0; i < w1; i++) {
double valU = u[i + offsetU];
int indexA = i*A.numCols + colA0;
for (int j = colA0; j < A.numCols; j++) {
A.data[indexA++] -= valU*_temp[j];
}
}
//CONCURRENT_ABOVE });
}
/**
* <p>
* Performs a rank-1 update operation on the submatrix specified by w with the multiply on the left.<br>
* <br>
* A = A(I - γ*u*u<sup>T</sup>)<br>
* </p>
* <p>
* The order that matrix multiplies are performed has been carefully selected
* to minimize the number of operations.
* </p>
*
* <p>
* Before this can become a truly generic operation the submatrix specification needs
* to be made more generic.
* </p>
*/
public static void rank1UpdateMultL( DMatrixRMaj A, double[] u,
double gamma,
int colA0,
int w0, int w1 ) {
//CONCURRENT_BELOW EjmlConcurrency.loopFor(colA0, A.numRows, i->{
for (int i = colA0; i < A.numRows; i++) {
int startIndex = i*A.numCols + w0;
double sum = 0;
int rowIndex = startIndex;
for (int j = w0; j < w1; j++) {
sum += A.data[rowIndex++]*u[j];
}
sum = -gamma*sum;
rowIndex = startIndex;
for (int j = w0; j < w1; j++) {
A.data[rowIndex++] += sum*u[j];
}
}
//CONCURRENT_ABOVE });
}
}
| apache-2.0 |
13808796047/hdcms | hd/Hdcms/Admin/Tpl/User/js/add.js | 1413 | $(function () {
$("form").validate({
username: {
rule: {
required: true,
ajax: {url: CONTROL + "&m=check_username", field: ['uid']}
},
error: {
required: "密码不能为空",
ajax: '帐号已经存在'
}
},
password: {
rule: {
required: true,
regexp: /^\w{5,}$/
},
error: {
required: "密码不能为空",
regexp: '密码不能小于5位'
}
},
'password_c': {
rule: {
confirm: 'password'
},
error: {
                confirm: 'The two passwords do not match'
}
},
credits: {
rule: {
required: true,
regexp: /^\d+$/
},
error: {
required: "积分不能为空",
regexp: "积分必须为数字"
}
},
email: {
rule: {
required: true,
email: true,
ajax: {url: CONTROL + "&m=check_email"}
},
error: {
required: "邮箱不能为空",
email: '邮箱格式不正确',
ajax: '邮箱已经使用'
}
}
})
}) | apache-2.0 |
tendermint/tendermint | config/config.go | 41697 | package config
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
)
const (
	// FuzzModeDrop is a mode in which we randomly drop reads/writes or connections, or sleep
FuzzModeDrop = iota
// FuzzModeDelay is a mode in which we randomly sleep
FuzzModeDelay
// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"
ModeFull = "full"
ModeValidator = "validator"
ModeSeed = "seed"
)
// NOTE: Most of the structs & relevant comments + the
// default configuration options were used to manually
// generate the config.toml. Please reflect any changes
// made here in the defaultConfigTemplate constant in
// config/toml.go
// NOTE: libs/cli must know to look in the config dir!
var (
DefaultTendermintDir = ".tendermint"
defaultConfigDir = "config"
defaultDataDir = "data"
defaultConfigFileName = "config.toml"
defaultGenesisJSONName = "genesis.json"
defaultMode = ModeFull
defaultPrivValKeyName = "priv_validator_key.json"
defaultPrivValStateName = "priv_validator_state.json"
defaultNodeKeyName = "node_key.json"
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
)
// Config defines the top level configuration for a Tendermint node
type Config struct {
// Top level options use an anonymous struct
BaseConfig `mapstructure:",squash"`
// Options for services
RPC *RPCConfig `mapstructure:"rpc"`
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
PrivValidator *PrivValidatorConfig `mapstructure:"priv-validator"`
}
// DefaultConfig returns a default configuration for a Tendermint node
func DefaultConfig() *Config {
return &Config{
BaseConfig: DefaultBaseConfig(),
RPC: DefaultRPCConfig(),
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
// DefaultValidatorConfig returns default config with mode as validator
func DefaultValidatorConfig() *Config {
cfg := DefaultConfig()
cfg.Mode = ModeValidator
return cfg
}
// TestConfig returns a configuration that can be used for testing
func TestConfig() *Config {
return &Config{
BaseConfig: TestBaseConfig(),
RPC: TestRPCConfig(),
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
// SetRoot sets the RootDir for all Config structs
func (cfg *Config) SetRoot(root string) *Config {
cfg.BaseConfig.RootDir = root
cfg.RPC.RootDir = root
cfg.P2P.RootDir = root
cfg.Mempool.RootDir = root
cfg.Consensus.RootDir = root
cfg.PrivValidator.RootDir = root
return cfg
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *Config) ValidateBasic() error {
if err := cfg.BaseConfig.ValidateBasic(); err != nil {
return err
}
if err := cfg.RPC.ValidateBasic(); err != nil {
return fmt.Errorf("error in [rpc] section: %w", err)
}
if err := cfg.Mempool.ValidateBasic(); err != nil {
return fmt.Errorf("error in [mempool] section: %w", err)
}
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
return fmt.Errorf("error in [consensus] section: %w", err)
}
if err := cfg.Instrumentation.ValidateBasic(); err != nil {
return fmt.Errorf("error in [instrumentation] section: %w", err)
}
return nil
}
//-----------------------------------------------------------------------------
// BaseConfig
// BaseConfig defines the base configuration for a Tendermint node
type BaseConfig struct { //nolint: maligned
// chainID is unexposed and immutable but here for convenience
chainID string
// The root directory for all data.
// This should be set in viper so it can unmarshal into this struct
RootDir string `mapstructure:"home"`
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy-app"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// Mode of Node: full | validator | seed
// * validator
// - all reactors
// - with priv_validator_key.json, priv_validator_state.json
// * full
// - all reactors
// - No priv_validator_key.json, priv_validator_state.json
// * seed
// - only P2P, PEX Reactor
// - No priv_validator_key.json, priv_validator_state.json
Mode string `mapstructure:"mode"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
// - pure go
// - stable
// * cleveldb (uses levigo wrapper)
// - fast
// - requires gcc
// - use cleveldb build tag (go build -tags cleveldb)
// * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
// - EXPERIMENTAL
	//   - may be faster in some use-cases (random reads - indexer)
// - use boltdb build tag (go build -tags boltdb)
// * rocksdb (uses github.com/tecbot/gorocksdb)
// - EXPERIMENTAL
// - requires gcc
// - use rocksdb build tag (go build -tags rocksdb)
// * badgerdb (uses github.com/dgraph-io/badger)
// - EXPERIMENTAL
// - use badgerdb build tag (go build -tags badgerdb)
DBBackend string `mapstructure:"db-backend"`
// Database directory
DBPath string `mapstructure:"db-dir"`
// Output level for logging
LogLevel string `mapstructure:"log-level"`
// Output format: 'plain' (colored text) or 'json'
LogFormat string `mapstructure:"log-format"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis-file"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node-key-file"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter-peers"` // false
Other map[string]interface{} `mapstructure:",remain"`
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
return BaseConfig{
Genesis: defaultGenesisJSONPath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
}
}
// TestBaseConfig returns a base configuration for testing a Tendermint node
func TestBaseConfig() BaseConfig {
cfg := DefaultBaseConfig()
cfg.chainID = "tendermint_test"
cfg.Mode = ModeValidator
cfg.ProxyApp = "kvstore"
cfg.DBBackend = "memdb"
return cfg
}
func (cfg BaseConfig) ChainID() string {
return cfg.chainID
}
// GenesisFile returns the full path to the genesis.json file
func (cfg BaseConfig) GenesisFile() string {
return rootify(cfg.Genesis, cfg.RootDir)
}
// NodeKeyFile returns the full path to the node_key.json file
func (cfg BaseConfig) NodeKeyFile() string {
return rootify(cfg.NodeKey, cfg.RootDir)
}
// LoadNodeKeyID loads the node key from the configured file and returns its ID.
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
jsonBytes, err := os.ReadFile(cfg.NodeKeyFile())
if err != nil {
return "", err
}
nodeKey := types.NodeKey{}
err = json.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey.ID, nil
}
// LoadOrGenNodeKeyID attempts to load the NodeKey from the configured file and
// returns its ID. If the file does not exist, it generates and saves a new NodeKey.
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
if tmos.FileExists(cfg.NodeKeyFile()) {
nodeKey, err := cfg.LoadNodeKeyID()
if err != nil {
return "", err
}
return nodeKey, nil
}
nodeKey := types.GenNodeKey()
if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
return "", err
}
return nodeKey.ID, nil
}
// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
return rootify(cfg.DBPath, cfg.RootDir)
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case log.LogFormatJSON, log.LogFormatText, log.LogFormatPlain:
default:
return errors.New("unknown log format (must be 'plain', 'text' or 'json')")
}
switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")
default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
return nil
}
//-----------------------------------------------------------------------------
// PrivValidatorConfig
// PrivValidatorConfig defines the configuration parameters for running a validator
type PrivValidatorConfig struct {
RootDir string `mapstructure:"home"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
Key string `mapstructure:"key-file"`
// Path to the JSON file containing the last sign state of a validator
State string `mapstructure:"state-file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
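	// (for example "tcp://0.0.0.0:26659" or "unix:///var/run/privval.sock"; the
	// scheme, host, and port shown here are illustrative, not defaults)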
ListenAddr string `mapstructure:"laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
ClientCertificate string `mapstructure:"client-certificate-file"`
// Client key generated while creating certificates for secure connection
ClientKey string `mapstructure:"client-key-file"`
// Path Root Certificate Authority used to sign both client and server certificates
RootCA string `mapstructure:"root-ca-file"`
}
// DefaultPrivValidatorConfig returns a default private validator configuration
// for a Tendermint node.
func DefaultPrivValidatorConfig() *PrivValidatorConfig {
return &PrivValidatorConfig{
Key: defaultPrivValKeyPath,
State: defaultPrivValStatePath,
}
}
// ClientKeyFile returns the full path to the validator client key file
func (cfg *PrivValidatorConfig) ClientKeyFile() string {
return rootify(cfg.ClientKey, cfg.RootDir)
}
// ClientCertificateFile returns the full path to the validator client certificate file
func (cfg *PrivValidatorConfig) ClientCertificateFile() string {
return rootify(cfg.ClientCertificate, cfg.RootDir)
}
// RootCAFile returns the full path to the root certificate authority file
func (cfg *PrivValidatorConfig) RootCAFile() string {
return rootify(cfg.RootCA, cfg.RootDir)
}
// KeyFile returns the full path to the priv_validator_key.json file
func (cfg *PrivValidatorConfig) KeyFile() string {
return rootify(cfg.Key, cfg.RootDir)
}
// StateFile returns the full path to the priv_validator_state.json file
func (cfg *PrivValidatorConfig) StateFile() string {
return rootify(cfg.State, cfg.RootDir)
}
func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool {
switch {
case cfg.RootCA == "":
return false
case cfg.ClientKey == "":
return false
case cfg.ClientCertificate == "":
return false
default:
return true
}
}
//-----------------------------------------------------------------------------
// RPCConfig
// RPCConfig defines the configuration options for the Tendermint RPC server
type RPCConfig struct {
RootDir string `mapstructure:"home"`
// TCP or UNIX socket address for the RPC server to listen on
ListenAddress string `mapstructure:"laddr"`
// A list of origins a cross-domain request can be executed from.
// If the special '*' value is present in the list, all origins will be allowed.
// An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com).
// Only one wildcard can be used per origin.
CORSAllowedOrigins []string `mapstructure:"cors-allowed-origins"`
// A list of methods the client is allowed to use with cross-domain requests.
CORSAllowedMethods []string `mapstructure:"cors-allowed-methods"`
// A list of non simple headers the client is allowed to use with cross-domain requests.
CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
Unsafe bool `mapstructure:"unsafe"`
// Maximum number of simultaneous connections (including WebSocket).
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
// 1024 - 40 - 10 - 50 = 924 = ~900
MaxOpenConnections int `mapstructure:"max-open-connections"`
// Maximum number of unique clientIDs that can /subscribe
// If you're using /broadcast_tx_commit, set to the estimated maximum number
// of broadcast_tx_commit calls per block.
MaxSubscriptionClients int `mapstructure:"max-subscription-clients"`
// Maximum number of unique queries a given client can /subscribe to
// If you're using a Local RPC client and /broadcast_tx_commit, set this
// to the estimated maximum number of broadcast_tx_commit calls per block.
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"`
// How long to wait for a tx to be committed during /broadcast_tx_commit
// WARNING: Using a value larger than 10s will result in increasing the
// global HTTP write timeout, which applies to all connections and endpoints.
// See https://github.com/tendermint/tendermint/issues/3435
TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout-broadcast-tx-commit"`
// Maximum size of request body, in bytes
MaxBodyBytes int64 `mapstructure:"max-body-bytes"`
// Maximum size of request header, in bytes
MaxHeaderBytes int `mapstructure:"max-header-bytes"`
// The path to a file containing certificate that is used to create the HTTPS server.
// Might be either absolute path or path related to Tendermint's config directory.
//
// If the certificate is signed by a certificate authority,
// the certFile should be the concatenation of the server's certificate, any intermediates,
// and the CA's certificate.
//
// NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSCertFile string `mapstructure:"tls-cert-file"`
// The path to a file containing matching private key that is used to create the HTTPS server.
// Might be either absolute path or path related to tendermint's config directory.
//
// NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSKeyFile string `mapstructure:"tls-key-file"`
// pprof listen address (https://golang.org/pkg/net/http/pprof)
PprofListenAddress string `mapstructure:"pprof-laddr"`
}
// DefaultRPCConfig returns a default configuration for the RPC server
func DefaultRPCConfig() *RPCConfig {
return &RPCConfig{
ListenAddress: "tcp://127.0.0.1:26657",
CORSAllowedOrigins: []string{},
CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost},
CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
Unsafe: false,
MaxOpenConnections: 900,
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
TimeoutBroadcastTxCommit: 10 * time.Second,
MaxBodyBytes: int64(1000000), // 1MB
MaxHeaderBytes: 1 << 20, // same as the net/http default
TLSCertFile: "",
TLSKeyFile: "",
}
}
// TestRPCConfig returns a configuration for testing the RPC server
func TestRPCConfig() *RPCConfig {
cfg := DefaultRPCConfig()
cfg.ListenAddress = "tcp://127.0.0.1:36657"
cfg.Unsafe = true
return cfg
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *RPCConfig) ValidateBasic() error {
if cfg.MaxOpenConnections < 0 {
return errors.New("max-open-connections can't be negative")
}
if cfg.MaxSubscriptionClients < 0 {
return errors.New("max-subscription-clients can't be negative")
}
if cfg.MaxSubscriptionsPerClient < 0 {
return errors.New("max-subscriptions-per-client can't be negative")
}
if cfg.TimeoutBroadcastTxCommit < 0 {
return errors.New("timeout-broadcast-tx-commit can't be negative")
}
if cfg.MaxBodyBytes < 0 {
return errors.New("max-body-bytes can't be negative")
}
if cfg.MaxHeaderBytes < 0 {
return errors.New("max-header-bytes can't be negative")
}
return nil
}
// IsCorsEnabled returns true if cross-origin resource sharing is enabled.
func (cfg *RPCConfig) IsCorsEnabled() bool {
return len(cfg.CORSAllowedOrigins) != 0
}
func (cfg RPCConfig) KeyFile() string {
path := cfg.TLSKeyFile
if filepath.IsAbs(path) {
return path
}
return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}
func (cfg RPCConfig) CertFile() string {
path := cfg.TLSCertFile
if filepath.IsAbs(path) {
return path
}
return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}
func (cfg RPCConfig) IsTLSEnabled() bool {
return cfg.TLSCertFile != "" && cfg.TLSKeyFile != ""
}
//-----------------------------------------------------------------------------
// P2PConfig
// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer
type P2PConfig struct { //nolint: maligned
RootDir string `mapstructure:"home"`
// Address to listen for incoming connections
ListenAddress string `mapstructure:"laddr"`
// Address to advertise to peers for them to dial
ExternalAddress string `mapstructure:"external-address"`
// Comma separated list of seed nodes to connect to
// We only use these if we can’t connect to peers in the addrbook
//
// Deprecated: This value is not used by the new PEX reactor. Use
// BootstrapPeers instead.
//
// TODO(#5670): Remove once the p2p refactor is complete.
Seeds string `mapstructure:"seeds"`
// Comma separated list of peers to be added to the peer store
// on startup. Either BootstrapPeers or PersistentPeers are
// needed for peer discovery
BootstrapPeers string `mapstructure:"bootstrap-peers"`
// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `mapstructure:"persistent-peers"`
// UPNP port forwarding
UPNP bool `mapstructure:"upnp"`
// MaxConnections defines the maximum number of connected peers (inbound and
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`
// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
// Set true to enable the peer-exchange reactor
PexReactor bool `mapstructure:"pex"`
// Comma separated list of peer IDs to keep private (will not be gossiped to
// other peers)
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
// Toggle to disable guard against peers connecting from the same ip.
AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"`
// Time to wait before flushing messages out on the connection
FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"`
// Maximum size of a message packet payload, in bytes
MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"`
// Rate at which packets can be sent, in bytes/second
SendRate int64 `mapstructure:"send-rate"`
// Rate at which packets can be received, in bytes/second
RecvRate int64 `mapstructure:"recv-rate"`
// Peer connection configuration.
HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"`
DialTimeout time.Duration `mapstructure:"dial-timeout"`
// Testing params.
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo" and "priority",
// with the default being "priority".
QueueType string `mapstructure:"queue-type"`
}
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
func DefaultP2PConfig() *P2PConfig {
return &P2PConfig{
ListenAddress: "tcp://0.0.0.0:26656",
ExternalAddress: "",
UPNP: false,
MaxConnections: 64,
MaxIncomingConnectionAttempts: 100,
FlushThrottleTimeout: 100 * time.Millisecond,
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
// The IP header and the TCP header take up 20 bytes each at least (unless
// optional header fields are used) and thus the max for (non-Jumbo frame)
		// Ethernet is 1500 - 20 - 20 = 1460
// Source: https://stackoverflow.com/a/3074427/820520
MaxPacketMsgPayloadSize: 1400,
SendRate: 5120000, // 5 mB/s
RecvRate: 5120000, // 5 mB/s
PexReactor: true,
AllowDuplicateIP: false,
HandshakeTimeout: 20 * time.Second,
DialTimeout: 3 * time.Second,
TestDialFail: false,
QueueType: "priority",
}
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *P2PConfig) ValidateBasic() error {
if cfg.FlushThrottleTimeout < 0 {
return errors.New("flush-throttle-timeout can't be negative")
}
if cfg.MaxPacketMsgPayloadSize < 0 {
return errors.New("max-packet-msg-payload-size can't be negative")
}
if cfg.SendRate < 0 {
return errors.New("send-rate can't be negative")
}
if cfg.RecvRate < 0 {
return errors.New("recv-rate can't be negative")
}
return nil
}
// TestP2PConfig returns a configuration for testing the peer-to-peer layer
func TestP2PConfig() *P2PConfig {
cfg := DefaultP2PConfig()
cfg.ListenAddress = "tcp://127.0.0.1:36656"
cfg.AllowDuplicateIP = true
cfg.FlushThrottleTimeout = 10 * time.Millisecond
return cfg
}
//-----------------------------------------------------------------------------
// MempoolConfig
// MempoolConfig defines the configuration options for the Tendermint mempool.
type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
	// has existed in the mempool at least TTLNumBlocks number of blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
	// its insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Recheck: true,
Broadcast: true,
		// Each signature verification takes .5ms; Size is reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
}
}
// TestMempoolConfig returns a configuration for testing the Tendermint mempool
func TestMempoolConfig() *MempoolConfig {
cfg := DefaultMempoolConfig()
cfg.CacheSize = 1000
return cfg
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.Size < 0 {
return errors.New("size can't be negative")
}
if cfg.MaxTxsBytes < 0 {
return errors.New("max-txs-bytes can't be negative")
}
if cfg.CacheSize < 0 {
return errors.New("cache-size can't be negative")
}
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}
return nil
}
//-----------------------------------------------------------------------------
// StateSyncConfig
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
// State sync rapidly bootstraps a new node by discovering, fetching, and restoring a
// state machine snapshot from peers instead of fetching and replaying historical
// blocks. Requires some peers in the network to take and serve state machine
// snapshots. State sync is not attempted if the node has any local state
// (LastBlockHeight > 0). The node will have a truncated block history, starting from
// the height of the snapshot.
Enable bool `mapstructure:"enable"`
// State sync uses light client verification to verify state. This can be done either
// through the P2P layer or the RPC layer. Set this to true to use the P2P layer. If
// false (default), the RPC layer will be used.
UseP2P bool `mapstructure:"use-p2p"`
// If using RPC, at least two addresses need to be provided. They should be compatible
// with net.Dial, for example: "host.example.com:2125".
RPCServers []string `mapstructure:"rpc-servers"`
// The hash and height of a trusted block. Must be within the trust-period.
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
// The trust period should be set so that Tendermint can detect and gossip
// misbehavior before it is considered expired. For chains based on the Cosmos SDK,
// one day less than the unbonding period should suffice.
TrustPeriod time.Duration `mapstructure:"trust-period"`
// Time to spend discovering snapshots before initiating a restore.
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
// Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
// The synchronizer will create a new, randomly named directory within this directory
// and remove it when the sync is complete.
TempDir string `mapstructure:"temp-dir"`
// The timeout duration before re-requesting a chunk, possibly from a different
// peer (default: 15 seconds).
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
// The number of concurrent chunk and block fetchers to run (default: 4).
Fetchers int32 `mapstructure:"fetchers"`
}
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
// validated in ValidateBasic, so we can safely panic here
bytes, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
panic(err)
}
return bytes
}
// DefaultStateSyncConfig returns a default configuration for the state sync service
func DefaultStateSyncConfig() *StateSyncConfig {
return &StateSyncConfig{
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
ChunkRequestTimeout: 15 * time.Second,
Fetchers: 4,
}
}
// TestStateSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *StateSyncConfig) ValidateBasic() error {
if !cfg.Enable {
return nil
}
// If we're not using the P2P stack then we need to validate the
// RPCServers
if !cfg.UseP2P {
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers must be specified")
}
for _, server := range cfg.RPCServers {
if server == "" {
return errors.New("found empty rpc-servers entry")
}
}
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}
if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
}
return nil
}
//-----------------------------------------------------------------------------
// ConsensusConfig
// ConsensusConfig defines the configuration for the Tendermint consensus service,
// including timeouts and details about the WAL and the block structure.
type ConsensusConfig struct {
RootDir string `mapstructure:"home"`
WalPath string `mapstructure:"wal-file"`
walFile string // overrides WalPath if set
// TODO: remove timeout configs, these should be global not local
// How long we wait for a proposal block before prevoting nil
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
// How much timeout-propose increases with each round
TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"`
// How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil)
TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"`
// How much the timeout-prevote increases with each round
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"`
// How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil)
TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"`
// How much the timeout-precommit increases with each round
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"`
// How long we wait after committing a block, before starting on the new
// height (this gives us a chance to receive some more precommits, even
// though we already have +2/3).
TimeoutCommit time.Duration `mapstructure:"timeout-commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"`
// EmptyBlocks mode and possible interval between empty blocks
CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"`
// Reactor sleep duration parameters
PeerGossipSleepDuration time.Duration `mapstructure:"peer-gossip-sleep-duration"`
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"`
DoubleSignCheckHeight int64 `mapstructure:"double-sign-check-height"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
TimeoutPropose: 3000 * time.Millisecond,
TimeoutProposeDelta: 500 * time.Millisecond,
TimeoutPrevote: 1000 * time.Millisecond,
TimeoutPrevoteDelta: 500 * time.Millisecond,
TimeoutPrecommit: 1000 * time.Millisecond,
TimeoutPrecommitDelta: 500 * time.Millisecond,
TimeoutCommit: 1000 * time.Millisecond,
SkipTimeoutCommit: false,
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0 * time.Second,
PeerGossipSleepDuration: 100 * time.Millisecond,
PeerQueryMaj23SleepDuration: 2000 * time.Millisecond,
DoubleSignCheckHeight: int64(0),
}
}
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
cfg := DefaultConsensusConfig()
cfg.TimeoutPropose = 40 * time.Millisecond
cfg.TimeoutProposeDelta = 1 * time.Millisecond
cfg.TimeoutPrevote = 10 * time.Millisecond
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
cfg.TimeoutPrecommit = 10 * time.Millisecond
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
cfg.TimeoutCommit = 10 * time.Millisecond
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
cfg.DoubleSignCheckHeight = int64(0)
return cfg
}
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
}
// Propose returns the amount of time to wait for a proposal
func (cfg *ConsensusConfig) Propose(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
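// With the defaults above (timeout-propose = 3000ms and timeout-propose-delta = 500ms),
// round 0 waits 3s for a proposal, round 1 waits 3.5s, round 2 waits 4s, and so on.
// Prevote and Precommit below scale the same way from their own base and delta values.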
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
func (cfg *ConsensusConfig) Prevote(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
func (cfg *ConsensusConfig) Precommit(round int32) time.Duration {
return time.Duration(
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
// for a single block (ie. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(cfg.TimeoutCommit)
}
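// Illustrative sketch (not part of the original file): with the defaults above, each
// timeout grows linearly with the round number, e.g.
//
//	cfg := DefaultConsensusConfig()
//	_ = cfg.Propose(0)   // 3000ms + 0*500ms = 3s
//	_ = cfg.Propose(2)   // 3000ms + 2*500ms = 4s
//	_ = cfg.Precommit(1) // 1000ms + 1*500ms = 1.5s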
// WalFile returns the full path to the write-ahead log file
func (cfg *ConsensusConfig) WalFile() string {
if cfg.walFile != "" {
return cfg.walFile
}
return rootify(cfg.WalPath, cfg.RootDir)
}
// SetWalFile sets the path to the write-ahead log file
func (cfg *ConsensusConfig) SetWalFile(walFile string) {
cfg.walFile = walFile
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
if cfg.TimeoutPropose < 0 {
return errors.New("timeout-propose can't be negative")
}
if cfg.TimeoutProposeDelta < 0 {
return errors.New("timeout-propose-delta can't be negative")
}
if cfg.TimeoutPrevote < 0 {
return errors.New("timeout-prevote can't be negative")
}
if cfg.TimeoutPrevoteDelta < 0 {
return errors.New("timeout-prevote-delta can't be negative")
}
if cfg.TimeoutPrecommit < 0 {
return errors.New("timeout-precommit can't be negative")
}
if cfg.TimeoutPrecommitDelta < 0 {
return errors.New("timeout-precommit-delta can't be negative")
}
if cfg.TimeoutCommit < 0 {
return errors.New("timeout-commit can't be negative")
}
if cfg.CreateEmptyBlocksInterval < 0 {
return errors.New("create-empty-blocks-interval can't be negative")
}
if cfg.PeerGossipSleepDuration < 0 {
return errors.New("peer-gossip-sleep-duration can't be negative")
}
if cfg.PeerQueryMaj23SleepDuration < 0 {
return errors.New("peer-query-maj23-sleep-duration can't be negative")
}
if cfg.DoubleSignCheckHeight < 0 {
return errors.New("double-sign-check-height can't be negative")
}
return nil
}
//-----------------------------------------------------------------------------
// TxIndexConfig
// Remember that Event has the following structure:
// type: [
// key: value,
// ...
// ]
//
// CompositeKeys are constructed by `type.key`
// TxIndexConfig defines the configuration for the transaction indexer,
// including composite keys to index.
type TxIndexConfig struct {
// The backend database list to back the indexer.
	// If the list contains `null`, no indexer services will be used.
//
// Options:
// 1) "null" - no indexer services.
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
// 3) "psql" - the indexer services backed by PostgreSQL.
Indexer []string `mapstructure:"indexer"`
// The PostgreSQL connection configuration, the connection format:
// postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
PsqlConn string `mapstructure:"psql-conn"`
}
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{
Indexer: []string{"kv"},
}
}
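// Illustrative sketch (not part of the original file): enabling the PostgreSQL-backed
// indexer; the connection string below is a placeholder.
//
//	cfg := DefaultTxIndexConfig()
//	cfg.Indexer = []string{"psql"}
//	cfg.PsqlConn = "postgresql://user:secret@localhost:5432/tendermint?sslmode=disable"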
// TestTxIndexConfig returns a default configuration for the transaction indexer.
func TestTxIndexConfig() *TxIndexConfig {
return DefaultTxIndexConfig()
}
//-----------------------------------------------------------------------------
// InstrumentationConfig
// InstrumentationConfig defines the configuration for metrics reporting.
type InstrumentationConfig struct {
// When true, Prometheus metrics are served under /metrics on
// PrometheusListenAddr.
// Check out the documentation for the list of available metrics.
Prometheus bool `mapstructure:"prometheus"`
// Address to listen for Prometheus collector(s) connections.
PrometheusListenAddr string `mapstructure:"prometheus-listen-addr"`
// Maximum number of simultaneous connections.
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
MaxOpenConnections int `mapstructure:"max-open-connections"`
// Instrumentation namespace.
Namespace string `mapstructure:"namespace"`
}
// DefaultInstrumentationConfig returns a default configuration for metrics
// reporting.
func DefaultInstrumentationConfig() *InstrumentationConfig {
return &InstrumentationConfig{
Prometheus: false,
PrometheusListenAddr: ":26660",
MaxOpenConnections: 3,
Namespace: "tendermint",
}
}
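// Illustrative sketch (not part of the original file): exposing Prometheus metrics on the
// default listen address.
//
//	cfg := DefaultInstrumentationConfig()
//	cfg.Prometheus = true // metrics are then served under /metrics on PrometheusListenAddr (":26660")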
// TestInstrumentationConfig returns a default configuration for metrics
// reporting.
func TestInstrumentationConfig() *InstrumentationConfig {
return DefaultInstrumentationConfig()
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *InstrumentationConfig) ValidateBasic() error {
if cfg.MaxOpenConnections < 0 {
return errors.New("max-open-connections can't be negative")
}
return nil
}
//-----------------------------------------------------------------------------
// Utils
// helper function to make config creation independent of root dir
func rootify(path, root string) string {
if filepath.IsAbs(path) {
return path
}
return filepath.Join(root, path)
}
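// For example (illustrative): rootify("data/cs.wal/wal", "/home/user/.tendermint") returns
// "/home/user/.tendermint/data/cs.wal/wal", while an absolute path is returned unchanged.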
//-----------------------------------------------------------------------------
// Moniker
var defaultMoniker = getDefaultMoniker()
// getDefaultMoniker returns a default moniker, which is the host name. If runtime
// fails to get the host name, "anonymous" will be returned.
func getDefaultMoniker() string {
moniker, err := os.Hostname()
if err != nil {
moniker = "anonymous"
}
return moniker
}
| apache-2.0 |
java110/MicroCommunity | service-front/src/main/java/com/java110/front/smo/notice/IAddNoticeSMO.java | 388 | package com.java110.front.smo.notice;
import com.java110.core.context.IPageData;
import org.springframework.http.ResponseEntity;
/**
 * Interface for adding a notice.
 *
 * added by wuxw 2019-06-30
 */
public interface IAddNoticeSMO {
    /**
     * Adds a notice.
     * @param pd page data wrapper
     * @return ResponseEntity object
     */
ResponseEntity<String> saveNotice(IPageData pd);
}
| apache-2.0 |
sriks/titanium_mobile | apidoc/lib/common.js | 4676 | /**
* Copyright (c) 2015 Appcelerator, Inc. All Rights Reserved.
* Licensed under the terms of the Apache Public License.
*
* Common Library for Doctools
*/
var yaml = require('js-yaml'),
fs = require('fs'),
nodeappc = require('node-appc'),
colors = require('colors'),
pagedown = require('pagedown'),
converter = new pagedown.Converter(),
ignoreList = ['node_modules', '.travis.yml'],
LOG_INFO = 0,
LOG_WARN = LOG_INFO + 1,
LOG_ERROR = LOG_WARN + 1,
logLevel = LOG_INFO;
exports.VALID_PLATFORMS = ['android', 'blackberry', 'iphone', 'ipad', 'mobileweb', 'windowsphone'];
exports.VALID_OSES = ['android', 'blackberry', 'ios', 'mobileweb', 'windowsphone'];
exports.DEFAULT_VERSIONS = {
'android' : '0.8',
'iphone' : '0.8',
'ipad' : '0.8',
'mobileweb' : '1.8'
};
exports.ADDON_VERSIONS = {
'blackberry' : '3.1.2',
'windowsphone' : '4.1.0'
};
exports.DATA_TYPES = ['Array', 'Boolean', 'Callback', 'Date', 'Dictionary', 'Number', 'Object', 'String'];
exports.PRETTY_PLATFORM = {
'android': 'Android',
'blackberry': 'BlackBerry',
'ios': 'iOS',
'iphone': 'iPhone',
'ipad': 'iPad',
'mobileweb': 'Mobile Web',
'tizen': 'Tizen',
'windowsphone' : 'Windows Phone'
};
// Matches FOO_CONSTANT
exports.REGEXP_CONSTANTS = /^[A-Z_0-9]*$/;
// Matches <a href="...">Foo</a>
exports.REGEXP_HREF_LINK = /<a href="(.+?)">(.+?)<\/a>/;
exports.REGEXP_HREF_LINKS = /<a href="(.+?)">(.+?)<\/a>/g;
// Matches <code>, </code>, etc.
exports.REGEXP_HTML_TAG = /<\/?[a-z]+[^>]*>/;
// Matches <Titanium.UI.Window>, <ItemTemplate>, etc. (and HTML tags)
exports.REGEXP_CHEVRON_LINK = /<([^>]+?)>/;
exports.REGEXP_CHEVRON_LINKS = /(?!`)<[^>]+?>(?!`)/g;
/**
* Converts a Markdown string to HTML
*/
exports.markdownToHTML = function markdownToHTML(text) {
return converter.makeHtml(text);
};
exports.LOG_INFO = LOG_INFO;
exports.LOG_WARN = LOG_WARN;
exports.LOG_ERROR = LOG_ERROR;
/**
* Logs output
*/
exports.log = function log (level, message) {
var args = [];
if (level < logLevel) {
return;
}
if (arguments.length >= 3) {
for (var key in arguments) {
args.push(arguments[key]);
}
args.splice(0, 2);
}
if (typeof level === 'string') {
console.info.apply(this, arguments);
} else {
switch (level) {
case LOG_INFO:
message = '[INFO] ' + message;
args.unshift(message.white);
console.info.apply(this, args);
break;
case LOG_WARN:
message = '[WARN] ' + message;
args.unshift(message.yellow);
console.warn.apply(this, args);
break;
case LOG_ERROR:
message = '[ERROR] ' + message;
args.unshift(message.red);
console.error.apply(this, args);
break;
}
}
};
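// Illustrative usage (not part of the original file); the module name, message and arguments
// are placeholders:
//   common.setLogLevel(common.LOG_WARN);
//   common.log(common.LOG_WARN, 'Duplicate key: %s', 'Titanium.UI.Window');
//   common.log(common.LOG_INFO, 'parsing done'); // below the current log level, so not printed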
/**
* Sets the log level for output
*/
exports.setLogLevel = function setLogLevel (level) {
logLevel = level;
};
/**
* Determines if the key exists in the object and is defined
* Also if it's array, make sure the array is not empty
*/
exports.assertObjectKey = function assertObjectKey(obj, key) {
if (key in obj && obj[key]) {
if (Array.isArray(obj[key])) {
if (obj[key].length > 0) {
return true;
}
} else {
return true;
}
}
return false;
};
/**
* Error message
*/
function errorMessage () {
return 'ERROR: Missing name for doc in file';
}
/**
* Recursively find, load and parse YAML files
 * @param {String} path Root path to start the search
* @returns {Object} Dictionary containing the parsed data and any YAML errors
*/
exports.parseYAML = function parseYAML(path) {
var rv = {data : {}, errors : []},
currentFile = path;
try {
var fsArray = fs.readdirSync(path),
i = 0,
len = fsArray.length;
fsArray.forEach(function (fsElement) {
var elem = path + '/' + fsElement,
stat = fs.statSync(elem);
currentFile = elem;
if (~ignoreList.indexOf(fsElement)) {
return;
}
if (stat.isDirectory()) {
nodeappc.util.mixObj(rv, parseYAML(elem));
} else if (stat.isFile()) {
if (elem.split('.').pop() === 'yml') {
try {
var fileBuffer = fs.readFileSync(elem, 'utf8');
					// remove comments (assign the result back; String.prototype.replace does not modify in place)
					fileBuffer = fileBuffer.replace(/\w*\#.*/, '');
yaml.safeLoadAll(fileBuffer, function (doc) {
if (!doc.name) {
rv.errors.push({toString: errorMessage(), __file: currentFile});
return;
}
// data does not exist in doc
if (rv.data[doc.name] == null) {
rv.data[doc.name] = doc;
rv.data[doc.name].__file = currentFile;
} else {
console.warn('WARNING: Duplicate key: %s', doc.name);
}
});
}
catch (e) {
e.__file = currentFile;
rv.errors.push(e);
}
}
}
});
return rv;
}
catch (e) {
e.__file = currentFile;
rv.errors.push(e);
	}
	return rv;
};
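// Illustrative usage (not part of the original file); the required path and type name are placeholders:
//   var result = require('./common').parseYAML('apidoc');
//   result.errors.forEach(function (err) { console.error('Problem in', err.__file); });
//   var windowDoc = result.data['Titanium.UI.Window'];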
| apache-2.0 |
fhueske/flink | flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/StreamingFileSink.java | 19889 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.functions.sink.filesystem;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.serialization.BulkWriter;
import org.apache.flink.api.common.serialization.Encoder;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.OperatorStateStore;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.api.common.typeutils.base.array.BytePrimitiveArraySerializer;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.CheckpointListener;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback;
import org.apache.flink.streaming.runtime.tasks.ProcessingTimeService;
import org.apache.flink.util.Preconditions;
import java.io.IOException;
import java.io.Serializable;
/**
* Sink that emits its input elements to {@link FileSystem} files within buckets. This is
* integrated with the checkpointing mechanism to provide exactly once semantics.
*
*
* <p>When creating the sink a {@code basePath} must be specified. The base directory contains
* one directory for every bucket. The bucket directories themselves contain several part files,
* with at least one for each parallel subtask of the sink which is writing data to that bucket.
* These part files contain the actual output data.
*
*
* <p>The sink uses a {@link BucketAssigner} to determine in which bucket directory each element should
* be written to inside the base directory. The {@code BucketAssigner} can, for example, use time or
* a property of the element to determine the bucket directory. The default {@code BucketAssigner} is a
* {@link DateTimeBucketAssigner} which will create one new bucket every hour. You can specify
* a custom {@code BucketAssigner} using the {@code setBucketAssigner(bucketAssigner)} method, after calling
* {@link StreamingFileSink#forRowFormat(Path, Encoder)} or
* {@link StreamingFileSink#forBulkFormat(Path, BulkWriter.Factory)}.
*
*
* <p>The filenames of the part files contain the part prefix, "part-", the parallel subtask index of the sink
* and a rolling counter. For example the file {@code "part-1-17"} contains the data from
* {@code subtask 1} of the sink and is the {@code 17th} bucket created by that subtask.
* Part files roll based on the user-specified {@link RollingPolicy}. By default, a {@link DefaultRollingPolicy}
* is used.
*
* <p>In some scenarios, the open buckets are required to change based on time. In these cases, the user
* can specify a {@code bucketCheckInterval} (by default 1m) and the sink will check periodically and roll
* the part file if the specified rolling policy says so.
*
* <p>Part files can be in one of three states: {@code in-progress}, {@code pending} or {@code finished}.
* The reason for this is how the sink works together with the checkpointing mechanism to provide exactly-once
* semantics and fault-tolerance. The part file that is currently being written to is {@code in-progress}. Once
* a part file is closed for writing it becomes {@code pending}. When a checkpoint is successful the currently
* pending files will be moved to {@code finished}.
*
*
 * <p>In case of a failure, and in order to guarantee exactly-once semantics, the sink should roll back to the state it
* had when that last successful checkpoint occurred. To this end, when restoring, the restored files in {@code pending}
* state are transferred into the {@code finished} state while any {@code in-progress} files are rolled back, so that
* they do not contain data that arrived after the checkpoint from which we restore.
*
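 * <p>A minimal usage sketch (illustrative only, not part of the original documentation; the output
 * path is a placeholder, {@code stream} stands for any {@code DataStream<String>}, and Flink's
 * {@code SimpleStringEncoder} is assumed to be available):
 * <pre>{@code
 * StreamingFileSink<String> sink = StreamingFileSink
 *     .forRowFormat(new Path("/tmp/output"), new SimpleStringEncoder<String>("UTF-8"))
 *     .withRollingPolicy(DefaultRollingPolicy.builder().build())
 *     .build();
 * stream.addSink(sink);
 * }</pre>
 *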
* @param <IN> Type of the elements emitted by this sink
*/
@PublicEvolving
public class StreamingFileSink<IN>
extends RichSinkFunction<IN>
implements CheckpointedFunction, CheckpointListener, ProcessingTimeCallback {
private static final long serialVersionUID = 1L;
// -------------------------- state descriptors ---------------------------
private static final ListStateDescriptor<byte[]> BUCKET_STATE_DESC =
new ListStateDescriptor<>("bucket-states", BytePrimitiveArraySerializer.INSTANCE);
private static final ListStateDescriptor<Long> MAX_PART_COUNTER_STATE_DESC =
new ListStateDescriptor<>("max-part-counter", LongSerializer.INSTANCE);
// ------------------------ configuration fields --------------------------
private final long bucketCheckInterval;
private final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder;
// --------------------------- runtime fields -----------------------------
private transient Buckets<IN, ?> buckets;
private transient ProcessingTimeService processingTimeService;
// --------------------------- State Related Fields -----------------------------
private transient ListState<byte[]> bucketStates;
private transient ListState<Long> maxPartCountersState;
/**
* Creates a new {@code StreamingFileSink} that writes files to the given base directory.
*/
protected StreamingFileSink(
final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder,
final long bucketCheckInterval) {
Preconditions.checkArgument(bucketCheckInterval > 0L);
this.bucketsBuilder = Preconditions.checkNotNull(bucketsBuilder);
this.bucketCheckInterval = bucketCheckInterval;
}
// ------------------------------------------------------------------------
// --------------------------- Sink Builders -----------------------------
/**
* Creates the builder for a {@code StreamingFileSink} with row-encoding format.
* @param basePath the base path where all the buckets are going to be created as sub-directories.
* @param encoder the {@link Encoder} to be used when writing elements in the buckets.
* @param <IN> the type of incoming elements
	 * @return The builder where the remaining configuration parameters for the sink can be configured.
* In order to instantiate the sink, call {@link RowFormatBuilder#build()} after specifying the desired parameters.
*/
public static <IN> StreamingFileSink.RowFormatBuilder<IN, String> forRowFormat(
final Path basePath, final Encoder<IN> encoder) {
return new StreamingFileSink.RowFormatBuilder<>(basePath, encoder, new DateTimeBucketAssigner<>());
}
/**
	 * Creates the builder for a {@link StreamingFileSink} with bulk-encoding format.
* @param basePath the base path where all the buckets are going to be created as sub-directories.
* @param writerFactory the {@link BulkWriter.Factory} to be used when writing elements in the buckets.
* @param <IN> the type of incoming elements
	 * @return The builder where the remaining configuration parameters for the sink can be configured.
	 * In order to instantiate the sink, call {@link BulkFormatBuilder#build()} after specifying the desired parameters.
*/
public static <IN> StreamingFileSink.BulkFormatBuilder<IN, String> forBulkFormat(
final Path basePath, final BulkWriter.Factory<IN> writerFactory) {
return new StreamingFileSink.BulkFormatBuilder<>(basePath, writerFactory, new DateTimeBucketAssigner<>());
}
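	// Illustrative sketch (not part of the original class): wiring a bulk-encoded sink, where
	// "factory" stands for any BulkWriter.Factory<MyEvent> (e.g. a Parquet writer factory) and
	// MyEvent is a placeholder element type.
	//
	//   StreamingFileSink<MyEvent> sink = StreamingFileSink
	//       .forBulkFormat(new Path("/tmp/bulk-output"), factory)
	//       .withBucketAssigner(new DateTimeBucketAssigner<>())
	//       .build();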
/**
* The base abstract class for the {@link RowFormatBuilder} and {@link BulkFormatBuilder}.
*/
protected abstract static class BucketsBuilder<IN, BucketID> implements Serializable {
private static final long serialVersionUID = 1L;
abstract Buckets<IN, BucketID> createBuckets(final int subtaskIndex) throws IOException;
}
/**
* A builder for configuring the sink for row-wise encoding formats.
*/
@PublicEvolving
public static class RowFormatBuilder<IN, BucketID> extends StreamingFileSink.BucketsBuilder<IN, BucketID> {
private static final long serialVersionUID = 1L;
private final long bucketCheckInterval;
private final Path basePath;
private final Encoder<IN> encoder;
private final BucketAssigner<IN, BucketID> bucketAssigner;
private final RollingPolicy<IN, BucketID> rollingPolicy;
private final BucketFactory<IN, BucketID> bucketFactory;
private final String partFilePrefix;
private final String partFileSuffix;
RowFormatBuilder(Path basePath, Encoder<IN> encoder, BucketAssigner<IN, BucketID> bucketAssigner) {
this(basePath, encoder, bucketAssigner, DefaultRollingPolicy.builder().build(), 60L * 1000L, new DefaultBucketFactoryImpl<>(), PartFileConfig.DEFAULT_PART_PREFIX, PartFileConfig.DEFAULT_PART_SUFFIX);
}
private RowFormatBuilder(
Path basePath,
Encoder<IN> encoder,
BucketAssigner<IN, BucketID> assigner,
RollingPolicy<IN, BucketID> policy,
long bucketCheckInterval,
BucketFactory<IN, BucketID> bucketFactory,
String partFilePrefix,
String partFileSuffix) {
this.basePath = Preconditions.checkNotNull(basePath);
this.encoder = Preconditions.checkNotNull(encoder);
this.bucketAssigner = Preconditions.checkNotNull(assigner);
this.rollingPolicy = Preconditions.checkNotNull(policy);
this.bucketCheckInterval = bucketCheckInterval;
this.bucketFactory = Preconditions.checkNotNull(bucketFactory);
this.partFilePrefix = Preconditions.checkNotNull(partFilePrefix);
this.partFileSuffix = Preconditions.checkNotNull(partFileSuffix);
}
/**
* Creates a new builder instance with the specified bucket check interval. The interval specifies how often
* time based {@link RollingPolicy}s will be checked/executed for the open buckets.
* @param interval Time interval in milliseconds
* @return A new builder with the check interval set.
*/
public StreamingFileSink.RowFormatBuilder<IN, BucketID> withBucketCheckInterval(final long interval) {
return new RowFormatBuilder<>(basePath, encoder, bucketAssigner, rollingPolicy, interval, bucketFactory, partFilePrefix, partFileSuffix);
}
/**
* Creates a new builder instance with the specified {@link BucketAssigner}.
		 * @param assigner {@link BucketAssigner} to be used.
* @return A new builder with the assigner set.
*/
public StreamingFileSink.RowFormatBuilder<IN, BucketID> withBucketAssigner(final BucketAssigner<IN, BucketID> assigner) {
return new RowFormatBuilder<>(basePath, encoder, Preconditions.checkNotNull(assigner), rollingPolicy, bucketCheckInterval, bucketFactory, partFilePrefix, partFileSuffix);
}
/**
* Creates a new builder instance with the specified {@link RollingPolicy} set for the bucketing logic.
* @param policy {@link RollingPolicy} to be applied
		 * @return A new builder with the rolling policy set.
*/
public StreamingFileSink.RowFormatBuilder<IN, BucketID> withRollingPolicy(final RollingPolicy<IN, BucketID> policy) {
return new RowFormatBuilder<>(basePath, encoder, bucketAssigner, Preconditions.checkNotNull(policy), bucketCheckInterval, bucketFactory, partFilePrefix, partFileSuffix);
}
public <ID> StreamingFileSink.RowFormatBuilder<IN, ID> withBucketAssignerAndPolicy(final BucketAssigner<IN, ID> assigner, final RollingPolicy<IN, ID> policy) {
return new RowFormatBuilder<>(basePath, encoder, Preconditions.checkNotNull(assigner), Preconditions.checkNotNull(policy), bucketCheckInterval, new DefaultBucketFactoryImpl<>(), partFilePrefix, partFileSuffix);
}
public StreamingFileSink.RowFormatBuilder<IN, BucketID> withPartFilePrefix(final String partPrefix) {
return new RowFormatBuilder<>(basePath, encoder, bucketAssigner, rollingPolicy, bucketCheckInterval, bucketFactory, partPrefix, partFileSuffix);
}
public StreamingFileSink.RowFormatBuilder<IN, BucketID> withPartFileSuffix(final String partSuffix) {
return new RowFormatBuilder<>(basePath, encoder, bucketAssigner, rollingPolicy, bucketCheckInterval, bucketFactory, partFilePrefix, partSuffix);
}
/** Creates the actual sink. */
public StreamingFileSink<IN> build() {
return new StreamingFileSink<>(this, bucketCheckInterval);
}
@Override
Buckets<IN, BucketID> createBuckets(int subtaskIndex) throws IOException {
return new Buckets<>(
basePath,
bucketAssigner,
bucketFactory,
new RowWisePartWriter.Factory<>(encoder),
rollingPolicy,
subtaskIndex,
new PartFileConfig(partFilePrefix, partFileSuffix));
}
@VisibleForTesting
StreamingFileSink.RowFormatBuilder<IN, BucketID> withBucketFactory(final BucketFactory<IN, BucketID> factory) {
return new RowFormatBuilder<>(basePath, encoder, bucketAssigner, rollingPolicy, bucketCheckInterval, Preconditions.checkNotNull(factory), partFilePrefix, partFileSuffix);
}
}
/**
* A builder for configuring the sink for bulk-encoding formats, e.g. Parquet/ORC.
*/
@PublicEvolving
public static class BulkFormatBuilder<IN, BucketID> extends StreamingFileSink.BucketsBuilder<IN, BucketID> {
private static final long serialVersionUID = 1L;
private final long bucketCheckInterval;
private final Path basePath;
private final BulkWriter.Factory<IN> writerFactory;
private final BucketAssigner<IN, BucketID> bucketAssigner;
private final BucketFactory<IN, BucketID> bucketFactory;
private final String partFilePrefix;
private final String partFileSuffix;
BulkFormatBuilder(Path basePath, BulkWriter.Factory<IN> writerFactory, BucketAssigner<IN, BucketID> assigner) {
this(basePath, writerFactory, assigner, 60L * 1000L, new DefaultBucketFactoryImpl<>(), PartFileConfig.DEFAULT_PART_PREFIX, PartFileConfig.DEFAULT_PART_SUFFIX);
}
private BulkFormatBuilder(
Path basePath,
BulkWriter.Factory<IN> writerFactory,
BucketAssigner<IN, BucketID> assigner,
long bucketCheckInterval,
BucketFactory<IN, BucketID> bucketFactory,
String partFilePrefix,
String partFileSuffix) {
this.basePath = Preconditions.checkNotNull(basePath);
this.writerFactory = writerFactory;
this.bucketAssigner = Preconditions.checkNotNull(assigner);
this.bucketCheckInterval = bucketCheckInterval;
this.bucketFactory = Preconditions.checkNotNull(bucketFactory);
this.partFilePrefix = Preconditions.checkNotNull(partFilePrefix);
this.partFileSuffix = Preconditions.checkNotNull(partFileSuffix);
}
/**
		 * Currently bulk formats always use the {@link OnCheckpointRollingPolicy}, therefore this setting does
		 * not have any effect.
*/
public StreamingFileSink.BulkFormatBuilder<IN, BucketID> withBucketCheckInterval(long interval) {
return new BulkFormatBuilder<>(basePath, writerFactory, bucketAssigner, interval, bucketFactory, partFilePrefix, partFileSuffix);
}
/**
* Creates a new builder instance with the specified {@link BucketAssigner}.
		 * @param assigner {@link BucketAssigner} to be used.
* @return A new builder with the assigner set.
*/
public <ID> StreamingFileSink.BulkFormatBuilder<IN, ID> withBucketAssigner(BucketAssigner<IN, ID> assigner) {
return new BulkFormatBuilder<>(basePath, writerFactory, Preconditions.checkNotNull(assigner), bucketCheckInterval, new DefaultBucketFactoryImpl<>(), partFilePrefix, partFileSuffix);
}
@VisibleForTesting
StreamingFileSink.BulkFormatBuilder<IN, BucketID> withBucketFactory(final BucketFactory<IN, BucketID> factory) {
return new BulkFormatBuilder<>(basePath, writerFactory, bucketAssigner, bucketCheckInterval, Preconditions.checkNotNull(factory), partFilePrefix, partFileSuffix);
}
public StreamingFileSink.BulkFormatBuilder<IN, BucketID> withPartFilePrefix(final String partPrefix) {
return new BulkFormatBuilder<>(basePath, writerFactory, bucketAssigner, bucketCheckInterval, bucketFactory, partPrefix, partFileSuffix);
}
public StreamingFileSink.BulkFormatBuilder<IN, BucketID> withPartFileSuffix(final String partSuffix) {
return new BulkFormatBuilder<>(basePath, writerFactory, bucketAssigner, bucketCheckInterval, bucketFactory, partFilePrefix, partSuffix);
}
/** Creates the actual sink. */
public StreamingFileSink<IN> build() {
return new StreamingFileSink<>(this, bucketCheckInterval);
}
@Override
Buckets<IN, BucketID> createBuckets(int subtaskIndex) throws IOException {
return new Buckets<>(
basePath,
bucketAssigner,
bucketFactory,
new BulkPartWriter.Factory<>(writerFactory),
OnCheckpointRollingPolicy.build(),
subtaskIndex,
new PartFileConfig(partFilePrefix, partFileSuffix));
}
}
// --------------------------- Sink Methods -----------------------------
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
final int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
this.buckets = bucketsBuilder.createBuckets(subtaskIndex);
final OperatorStateStore stateStore = context.getOperatorStateStore();
bucketStates = stateStore.getListState(BUCKET_STATE_DESC);
maxPartCountersState = stateStore.getUnionListState(MAX_PART_COUNTER_STATE_DESC);
if (context.isRestored()) {
buckets.initializeState(bucketStates, maxPartCountersState);
}
}
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
buckets.commitUpToCheckpoint(checkpointId);
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
Preconditions.checkState(bucketStates != null && maxPartCountersState != null, "sink has not been initialized");
buckets.snapshotState(
context.getCheckpointId(),
bucketStates,
maxPartCountersState);
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
this.processingTimeService = ((StreamingRuntimeContext) getRuntimeContext()).getProcessingTimeService();
long currentProcessingTime = processingTimeService.getCurrentProcessingTime();
processingTimeService.registerTimer(currentProcessingTime + bucketCheckInterval, this);
}
@Override
public void onProcessingTime(long timestamp) throws Exception {
final long currentTime = processingTimeService.getCurrentProcessingTime();
buckets.onProcessingTime(currentTime);
processingTimeService.registerTimer(currentTime + bucketCheckInterval, this);
}
@Override
public void invoke(IN value, SinkFunction.Context context) throws Exception {
buckets.onElement(value, context);
}
@Override
public void close() throws Exception {
if (buckets != null) {
buckets.close();
}
}
}
| apache-2.0 |
annarev/tensorflow | tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc | 10868 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/base/casts.h"
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/stream_executor.h"
namespace xla {
namespace {
class CpuInfeedBuffer : public cpu::runtime::XfeedBuffer {
public:
explicit CpuInfeedBuffer(int32 length)
: length_(length),
buffer_(new char[length]),
device_memory_(buffer_, length_) {}
~CpuInfeedBuffer() override { delete[] buffer_; }
int32 length() override { return length_; }
void* data() override { return buffer_; }
void Done(StatusOr<Shape> /*shape*/) override { delete this; }
se::DeviceMemoryBase* device_memory() { return &device_memory_; }
private:
int32 length_;
char* buffer_;
se::DeviceMemoryBase device_memory_;
};
class CpuOutfeedBuffer : public cpu::runtime::XfeedBuffer {
public:
CpuOutfeedBuffer(void* destination, int32 length)
: destination_(destination), length_(length) {}
StatusOr<Shape> WaitForNotification() {
done_.WaitForNotification();
return status_;
}
int32 length() override { return length_; }
void* data() override { return destination_; }
void Done(StatusOr<Shape> shape) override {
status_ = std::move(shape);
done_.Notify();
}
private:
void* destination_;
int32 length_;
StatusOr<Shape> status_;
tensorflow::Notification done_;
};
} // namespace
CpuTransferManager::CpuTransferManager()
: GenericTransferManager(se::host::kHostPlatformId,
/*pointer_size=*/sizeof(void*)) {}
Status CpuTransferManager::TransferLiteralToInfeed(
se::StreamExecutor* executor, const LiteralSlice& literal) {
const Shape& shape = literal.shape();
VLOG(2) << "Transferring literal to infeed with shape: "
<< ShapeUtil::HumanString(shape);
if (!shape.IsTuple()) {
int64 size = GetByteSizeRequirement(shape);
return TransferBufferToInfeed(executor, size, literal.untyped_data());
}
if (ShapeUtil::IsNestedTuple(shape)) {
return Unimplemented(
"Infeed with a nested tuple shape is not supported: %s",
ShapeUtil::HumanString(literal.shape()));
}
// For a tuple, we transfer each of its elements to the device and
// enqueue the resulting destination device addresses with the
// infeed manager.
std::vector<cpu::runtime::XfeedBuffer*> buffers;
buffers.reserve(ShapeUtil::TupleElementCount(shape));
auto cleanup = tensorflow::gtl::MakeCleanup([&buffers]() {
for (cpu::runtime::XfeedBuffer* b : buffers) {
b->Done(Cancelled("Failed to infeed buffer to device."));
}
});
for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& tuple_element_shape = ShapeUtil::GetSubshape(shape, {i});
int64 tuple_element_size = GetByteSizeRequirement(tuple_element_shape);
TF_ASSIGN_OR_RETURN(
cpu::runtime::XfeedBuffer * buffer,
TransferBufferToInfeedInternal(executor, tuple_element_size,
literal.untyped_data({i})));
buffers.push_back(buffer);
}
cpu::runtime::XfeedManager* xfeed_manager =
cpu::runtime::GetXfeedManager(executor->device_ordinal());
xfeed_manager->infeed()->EnqueueBuffersAtomically(buffers);
cleanup.release();
return Status::OK();
}
Status CpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
int64 size,
const void* source) {
TF_ASSIGN_OR_RETURN(cpu::runtime::XfeedBuffer * buffer,
TransferBufferToInfeedInternal(executor, size, source));
cpu::runtime::XfeedManager* xfeed_manager =
cpu::runtime::GetXfeedManager(executor->device_ordinal());
xfeed_manager->infeed()->EnqueueBuffersAtomically({buffer});
return Status::OK();
}
StatusOr<cpu::runtime::XfeedBuffer*>
CpuTransferManager::TransferBufferToInfeedInternal(se::StreamExecutor* executor,
int64 size,
const void* source) {
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("CPU infeed of %d bytes exceeds maximum of %d bytes",
size, std::numeric_limits<int32>::max());
}
if (size <= 0) {
return InvalidArgument("Infeed shape must have positive size; got %d",
size);
}
int32 size_32 = static_cast<int32>(size);
CpuInfeedBuffer* queued_buffer = new CpuInfeedBuffer(size_32);
Status s = executor->SynchronousMemcpyH2D(
/*host_src=*/source, /*size=*/size, queued_buffer->device_memory());
if (!s.ok()) {
queued_buffer->Done(s);
return s;
}
return queued_buffer;
}
Status CpuTransferManager::TransferLiteralFromOutfeed(
se::StreamExecutor* executor, MutableBorrowingLiteral literal) {
if (!literal.shape().IsTuple()) {
int64 size = GetByteSizeRequirement(literal.shape());
// Note: OSS build didn't like implicit conversion from
// literal.shape().dimensions() to the array slice on 2017-07-10.
absl::Span<const int64> dimensions(
absl::bit_cast<const int64*>(literal.shape().dimensions().data()),
literal.shape().dimensions().size());
TF_ASSIGN_OR_RETURN(
Shape received_shape,
TransferArrayBufferFromOutfeed(executor, literal.untyped_data(), size));
TF_RET_CHECK(ShapeUtil::Compatible(received_shape, literal.shape()))
<< "Shape received from outfeed "
<< ShapeUtil::HumanString(received_shape)
<< " did not match the shape that was requested for outfeed: "
<< ShapeUtil::HumanString(literal.shape());
TF_RET_CHECK(size == GetByteSizeRequirement(received_shape));
*literal.mutable_shape_do_not_use() = received_shape;
return Status::OK();
}
if (ShapeUtil::IsNestedTuple(literal.shape())) {
return Unimplemented(
"Nested tuple outfeeds are not yet implemented on CPU.");
}
std::vector<std::pair<void*, int64>> buffer_data;
for (int64 i = 0; i < literal.shape().tuple_shapes_size(); ++i) {
const Shape& tuple_element_shape =
ShapeUtil::GetTupleElementShape(literal.shape(), i);
int64 size = GetByteSizeRequirement(tuple_element_shape);
buffer_data.push_back({literal.untyped_data({i}), size});
}
TF_ASSIGN_OR_RETURN(Shape received_shape,
TransferTupleBuffersFromOutfeed(executor, buffer_data));
TF_RET_CHECK(ShapeUtil::Compatible(received_shape, literal.shape()))
<< "Shape received from outfeed "
<< ShapeUtil::HumanString(received_shape)
<< " did not match the shape that was requested for outfeed: "
<< ShapeUtil::HumanString(literal.shape());
TF_RET_CHECK(GetByteSizeRequirement(literal.shape()) ==
GetByteSizeRequirement(received_shape));
TF_RET_CHECK(ShapeUtil::Equal(literal.shape(), literal.shape()));
return Status::OK();
}
StatusOr<Shape> CpuTransferManager::TransferTupleBuffersFromOutfeed(
se::StreamExecutor* executor,
absl::Span<const std::pair<void*, int64>> buffer_data) {
return TransferBuffersFromOutfeedInternal(executor, buffer_data,
/*is_tuple=*/true);
}
StatusOr<Shape> CpuTransferManager::TransferArrayBufferFromOutfeed(
se::StreamExecutor* executor, void* destination, int64 size_bytes) {
return TransferBuffersFromOutfeedInternal(
executor, {{destination, size_bytes}}, /*is_tuple=*/false);
}
StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
se::StreamExecutor* executor,
absl::Span<const std::pair<void*, int64>> buffer_data, bool is_tuple) {
std::vector<std::unique_ptr<CpuOutfeedBuffer>> buffers;
for (auto b : buffer_data) {
int64 size = b.second;
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Outfeed shape is too large: needs %d bytes",
size);
}
if (size < 0) {
return InvalidArgument(
"Outfeed shape must have non-negative size; got %d", size);
}
int32 size_32 = static_cast<int32>(size);
VLOG(2)
<< "Enqueueing outfeed buffer (for the device to populate) of length "
<< size_32 << "B";
buffers.emplace_back(absl::make_unique<CpuOutfeedBuffer>(b.first, size_32));
}
std::vector<cpu::runtime::XfeedBuffer*> buffer_pointers;
buffer_pointers.reserve(buffers.size());
for (auto& b : buffers) {
buffer_pointers.push_back(b.get());
}
cpu::runtime::XfeedManager* xfeed_manager =
cpu::runtime::GetXfeedManager(executor->device_ordinal());
xfeed_manager->outfeed()->EnqueueBuffersAtomically(buffer_pointers);
VLOG(2) << "Waiting for buffer to be notified as populated.";
std::vector<Shape> outfed_shapes;
for (auto& buffer : buffers) {
TF_ASSIGN_OR_RETURN(Shape outfed_shape, buffer->WaitForNotification());
outfed_shapes.push_back(std::move(outfed_shape));
}
if (is_tuple) {
return ShapeUtil::MakeTupleShape(outfed_shapes);
}
TF_RET_CHECK(outfed_shapes.size() == 1);
return std::move(outfed_shapes[0]);
}
} // namespace xla
static std::unique_ptr<xla::TransferManager> CreateCpuTransferManager() {
return absl::make_unique<xla::CpuTransferManager>();
}
static bool InitModule() {
xla::TransferManager::RegisterTransferManager(
stream_executor::host::kHostPlatformId, &CreateCpuTransferManager);
return true;
}
static bool module_initialized = InitModule();
| apache-2.0 |
ToCSharp/AsyncChromeDriver | ChromeDevToolsClient/Network/ResponseReceivedExtraInfoEvent.cs | 1852 | namespace Zu.ChromeDevTools.Network
{
using Newtonsoft.Json;
/// <summary>
/// Fired when additional information about a responseReceived event is available from the network
/// stack. Not every responseReceived event will have an additional responseReceivedExtraInfo for
/// it, and responseReceivedExtraInfo may be fired before or after responseReceived.
/// </summary>
public sealed class ResponseReceivedExtraInfoEvent : IEvent
{
/// <summary>
/// Request identifier. Used to match this information to another responseReceived event.
/// </summary>
[JsonProperty("requestId")]
public string RequestId
{
get;
set;
}
/// <summary>
/// A list of cookies which were not stored from the response along with the corresponding
/// reasons for blocking. The cookies here may not be valid due to syntax errors, which
/// are represented by the invalid cookie line string instead of a proper cookie.
/// </summary>
[JsonProperty("blockedCookies")]
public BlockedSetCookieWithReason[] BlockedCookies
{
get;
set;
}
/// <summary>
/// Raw response headers as they were received over the wire.
/// </summary>
[JsonProperty("headers")]
public Headers Headers
{
get;
set;
}
/// <summary>
/// Raw response header text as it was received over the wire. The raw text may not always be
/// available, such as in the case of HTTP/2 or QUIC.
/// </summary>
[JsonProperty("headersText", DefaultValueHandling = DefaultValueHandling.Ignore)]
public string HeadersText
{
get;
set;
}
}
} | apache-2.0 |
ebyhr/presto | core/trino-main/src/test/java/io/trino/server/security/oauth2/TestDynamicCallbackOAuth2Service.java | 3970 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.server.security.oauth2;
import com.github.scribejava.core.model.OAuth2AccessToken;
import com.google.common.collect.ImmutableList;
import io.airlift.http.client.HttpClient;
import io.airlift.http.client.HttpClientConfig;
import io.airlift.http.client.jetty.JettyHttpClient;
import io.airlift.units.Duration;
import io.jsonwebtoken.Claims;
import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.SigningKeyResolver;
import io.trino.server.security.jwt.JwkService;
import io.trino.server.security.jwt.JwkSigningKeyResolver;
import io.trino.server.security.oauth2.ScribeJavaOAuth2Client.DynamicCallbackOAuth2Service;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.net.URI;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static com.google.common.io.Resources.getResource;
import static io.airlift.testing.Closeables.closeAll;
import static io.trino.server.security.oauth2.TokenEndpointAuthMethod.CLIENT_SECRET_BASIC;
import static org.assertj.core.api.Assertions.assertThat;
public class TestDynamicCallbackOAuth2Service
{
private static final String CLIENT_ID = "client";
private static final String CLIENT_SECRET = "secret";
private final TestingHydraIdentityProvider hydraIdP = new TestingHydraIdentityProvider();
private final HttpClient httpClient = new JettyHttpClient(new HttpClientConfig()
.setTrustStorePath(getResource("cert/localhost.pem").getPath()));
private String hydraUrl;
private SigningKeyResolver signingKeyResolver;
@BeforeClass
public void setUp()
throws Exception
{
hydraIdP.start();
hydraUrl = "https://localhost:" + hydraIdP.getAuthPort();
hydraIdP.createClient(
CLIENT_ID,
CLIENT_SECRET,
CLIENT_SECRET_BASIC,
ImmutableList.of("https://localhost:8080"),
"https://localhost:8080/oauth2/callback");
signingKeyResolver = new JwkSigningKeyResolver(new JwkService(
URI.create(hydraUrl + "/.well-known/jwks.json"),
httpClient,
new Duration(5, TimeUnit.MINUTES)));
}
@AfterClass(alwaysRun = true)
public void tearDown()
throws Exception
{
closeAll(hydraIdP, httpClient);
}
@Test
public void testMultipleScopes()
throws Exception
{
DynamicCallbackOAuth2Service service = new DynamicCallbackOAuth2Service(
new OAuth2Config()
.setIssuer(hydraUrl)
.setAuthUrl(hydraUrl + "/oauth2/auth")
.setTokenUrl(hydraUrl + "/oauth2/token")
.setJwksUrl(hydraUrl + "/.well-known/jwks.json")
.setClientId(CLIENT_ID)
.setClientSecret(CLIENT_SECRET)
.setScopes("openid,offline"),
httpClient);
OAuth2AccessToken token = service.getAccessTokenClientCredentialsGrant();
Claims claims = Jwts.parserBuilder()
.setSigningKeyResolver(signingKeyResolver)
.build()
.parseClaimsJws(token.getAccessToken())
.getBody();
assertThat(claims.get("scp", List.class)).containsExactlyInAnyOrder("openid", "offline");
}
}
| apache-2.0 |
reactor/reactor-netty | reactor-netty-http/src/main/java/reactor/netty/http/server/HttpServerOperations.java | 32184 | /*
* Copyright (c) 2011-2022 VMware, Inc. or its affiliates, All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactor.netty.http.server;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiFunction;
import java.util.function.BiPredicate;
import java.util.function.Consumer;
import java.util.function.Function;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.DefaultHeaders;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpHeaders;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.DefaultLastHttpContent;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMessage;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.cookie.Cookie;
import io.netty.handler.codec.http.cookie.ServerCookieDecoder;
import io.netty.handler.codec.http.cookie.ServerCookieEncoder;
import io.netty.handler.codec.http.multipart.HttpData;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.websocketx.CloseWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketCloseStatus;
import io.netty.handler.codec.http2.HttpConversionUtil;
import io.netty.util.AsciiString;
import io.netty.util.ReferenceCountUtil;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscription;
import reactor.core.CoreSubscriber;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.netty.Connection;
import reactor.netty.ConnectionObserver;
import reactor.netty.FutureMono;
import reactor.netty.NettyOutbound;
import reactor.netty.NettyPipeline;
import reactor.netty.channel.AbortedException;
import reactor.netty.channel.ChannelOperations;
import reactor.netty.http.HttpOperations;
import reactor.netty.http.websocket.WebsocketInbound;
import reactor.netty.http.websocket.WebsocketOutbound;
import reactor.util.Logger;
import reactor.util.Loggers;
import reactor.util.annotation.Nullable;
import reactor.util.context.Context;
import static io.netty.buffer.Unpooled.EMPTY_BUFFER;
import static io.netty.handler.codec.http.HttpUtil.isTransferEncodingChunked;
import static reactor.netty.ReactorNetty.format;
import static reactor.netty.http.server.HttpServerFormDecoderProvider.DEFAULT_FORM_DECODER_SPEC;
import static reactor.netty.http.server.HttpServerState.REQUEST_DECODING_FAILED;
/**
 * Conversion between Netty types and Reactor types ({@link HttpOperations}).
*
 * @author Stephane Maldini
*/
class HttpServerOperations extends HttpOperations<HttpServerRequest, HttpServerResponse>
implements HttpServerRequest, HttpServerResponse {
final BiPredicate<HttpServerRequest, HttpServerResponse> compressionPredicate;
final ConnectionInfo connectionInfo;
final ServerCookieDecoder cookieDecoder;
final ServerCookieEncoder cookieEncoder;
final ServerCookies cookieHolder;
final HttpServerFormDecoderProvider formDecoderProvider;
final BiFunction<? super Mono<Void>, ? super Connection, ? extends Mono<Void>> mapHandle;
final HttpRequest nettyRequest;
final HttpResponse nettyResponse;
final String path;
final HttpHeaders responseHeaders;
final String scheme;
Function<? super String, Map<String, String>> paramsResolver;
Consumer<? super HttpHeaders> trailerHeadersConsumer;
volatile Context currentContext;
HttpServerOperations(HttpServerOperations replaced) {
super(replaced);
this.compressionPredicate = replaced.compressionPredicate;
this.connectionInfo = replaced.connectionInfo;
this.cookieDecoder = replaced.cookieDecoder;
this.cookieEncoder = replaced.cookieEncoder;
this.cookieHolder = replaced.cookieHolder;
this.currentContext = replaced.currentContext;
this.formDecoderProvider = replaced.formDecoderProvider;
this.mapHandle = replaced.mapHandle;
this.nettyRequest = replaced.nettyRequest;
this.nettyResponse = replaced.nettyResponse;
this.paramsResolver = replaced.paramsResolver;
this.path = replaced.path;
this.responseHeaders = replaced.responseHeaders;
this.scheme = replaced.scheme;
this.trailerHeadersConsumer = replaced.trailerHeadersConsumer;
}
HttpServerOperations(Connection c, ConnectionObserver listener, HttpRequest nettyRequest,
@Nullable BiPredicate<HttpServerRequest, HttpServerResponse> compressionPredicate,
@Nullable ConnectionInfo connectionInfo,
ServerCookieDecoder decoder,
ServerCookieEncoder encoder,
HttpServerFormDecoderProvider formDecoderProvider,
@Nullable BiFunction<? super Mono<Void>, ? super Connection, ? extends Mono<Void>> mapHandle,
boolean secured) {
this(c, listener, nettyRequest, compressionPredicate, connectionInfo, decoder, encoder, formDecoderProvider,
mapHandle, true, secured);
}
HttpServerOperations(Connection c, ConnectionObserver listener, HttpRequest nettyRequest,
@Nullable BiPredicate<HttpServerRequest, HttpServerResponse> compressionPredicate,
@Nullable ConnectionInfo connectionInfo,
ServerCookieDecoder decoder,
ServerCookieEncoder encoder,
HttpServerFormDecoderProvider formDecoderProvider,
@Nullable BiFunction<? super Mono<Void>, ? super Connection, ? extends Mono<Void>> mapHandle,
boolean resolvePath,
boolean secured) {
super(c, listener);
this.compressionPredicate = compressionPredicate;
this.connectionInfo = connectionInfo;
this.cookieDecoder = decoder;
this.cookieEncoder = encoder;
this.cookieHolder = ServerCookies.newServerRequestHolder(nettyRequest.headers(), decoder);
this.currentContext = Context.empty();
this.formDecoderProvider = formDecoderProvider;
this.mapHandle = mapHandle;
this.nettyRequest = nettyRequest;
this.nettyResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
if (resolvePath) {
this.path = resolvePath(nettyRequest.uri());
}
else {
this.path = null;
}
this.responseHeaders = nettyResponse.headers();
this.responseHeaders.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
this.scheme = secured ? "https" : "http";
}
@Override
public NettyOutbound sendHeaders() {
if (hasSentHeaders()) {
return this;
}
return then(Mono.empty());
}
@Override
public HttpServerOperations withConnection(Consumer<? super Connection> withConnection) {
Objects.requireNonNull(withConnection, "withConnection");
withConnection.accept(this);
return this;
}
@Override
protected HttpMessage newFullBodyMessage(ByteBuf body) {
HttpResponse res =
new DefaultFullHttpResponse(version(), status(), body);
if (!HttpMethod.HEAD.equals(method())) {
responseHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING);
if (!HttpResponseStatus.NOT_MODIFIED.equals(status())) {
if (HttpUtil.getContentLength(nettyResponse, -1) == -1) {
responseHeaders.setInt(HttpHeaderNames.CONTENT_LENGTH, body.readableBytes());
}
}
}
// For HEAD requests:
// - if there is Transfer-Encoding and Content-Length, Transfer-Encoding will be removed
// - if there is only Transfer-Encoding, it will be kept and not replaced by
// Content-Length: body.readableBytes()
// For HEAD requests, the I/O handler may decide to provide only the headers and complete
// the response. In that case body will be EMPTY_BUFFER and if we set Content-Length: 0,
// this will not be correct
// https://github.com/reactor/reactor-netty/issues/1333
else if (HttpUtil.getContentLength(nettyResponse, -1) != -1) {
responseHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING);
}
res.headers().set(responseHeaders);
return res;
}
@Override
public HttpServerResponse addCookie(Cookie cookie) {
if (!hasSentHeaders()) {
this.responseHeaders.add(HttpHeaderNames.SET_COOKIE,
cookieEncoder.encode(cookie));
}
else {
throw new IllegalStateException("Status and headers already sent");
}
return this;
}
@Override
public HttpServerResponse addHeader(CharSequence name, CharSequence value) {
if (!hasSentHeaders()) {
this.responseHeaders.add(name, value);
}
else {
throw new IllegalStateException("Status and headers already sent");
}
return this;
}
@Override
public HttpServerOperations chunkedTransfer(boolean chunked) {
if (!hasSentHeaders() && isTransferEncodingChunked(nettyResponse) != chunked) {
responseHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING);
HttpUtil.setTransferEncodingChunked(nettyResponse, chunked);
}
return this;
}
@Override
public Map<CharSequence, Set<Cookie>> cookies() {
if (cookieHolder != null) {
return cookieHolder.getCachedCookies();
}
throw new IllegalStateException("request not parsed");
}
@Override
public Map<CharSequence, List<Cookie>> allCookies() {
if (cookieHolder != null) {
return cookieHolder.getAllCachedCookies();
}
throw new IllegalStateException("request not parsed");
}
@Override
public Context currentContext() {
return currentContext;
}
@Override
public HttpServerResponse header(CharSequence name, CharSequence value) {
if (!hasSentHeaders()) {
this.responseHeaders.set(name, value);
}
else {
throw new IllegalStateException("Status and headers already sent");
}
return this;
}
@Override
public HttpServerResponse headers(HttpHeaders headers) {
if (!hasSentHeaders()) {
this.responseHeaders.set(headers);
}
else {
throw new IllegalStateException("Status and headers already sent");
}
return this;
}
@Override
public boolean isFormUrlencoded() {
CharSequence mimeType = HttpUtil.getMimeType(nettyRequest);
return mimeType != null &&
HttpHeaderValues.APPLICATION_X_WWW_FORM_URLENCODED.contentEqualsIgnoreCase(mimeType.toString().trim());
}
@Override
public boolean isKeepAlive() {
return HttpUtil.isKeepAlive(nettyRequest);
}
@Override
public boolean isMultipart() {
return HttpPostRequestDecoder.isMultipart(nettyRequest);
}
@Override
public boolean isWebsocket() {
return get(channel()) instanceof WebsocketServerOperations;
}
final boolean isHttp2() {
return requestHeaders().contains(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text());
}
@Override
public HttpServerResponse keepAlive(boolean keepAlive) {
HttpUtil.setKeepAlive(nettyResponse, keepAlive);
return this;
}
@Override
public HttpMethod method() {
return nettyRequest.method();
}
@Override
@Nullable
public String param(CharSequence key) {
Objects.requireNonNull(key, "key");
Map<String, String> params = null;
if (paramsResolver != null) {
params = this.paramsResolver.apply(uri());
}
return null != params ? params.get(key.toString()) : null;
}
@Override
@Nullable
public Map<String, String> params() {
return null != paramsResolver ? paramsResolver.apply(uri()) : null;
}
@Override
public HttpServerRequest paramsResolver(Function<? super String, Map<String, String>> paramsResolver) {
this.paramsResolver = paramsResolver;
return this;
}
@Override
public Flux<HttpData> receiveForm() {
return receiveFormInternal(formDecoderProvider);
}
@Override
public Flux<HttpData> receiveForm(Consumer<HttpServerFormDecoderProvider.Builder> formDecoderBuilder) {
Objects.requireNonNull(formDecoderBuilder, "formDecoderBuilder");
HttpServerFormDecoderProvider.Build builder = new HttpServerFormDecoderProvider.Build();
formDecoderBuilder.accept(builder);
HttpServerFormDecoderProvider config = builder.build();
return receiveFormInternal(config);
}
@Override
public Flux<?> receiveObject() {
// Handle the 'Expect: 100-continue' header if necessary.
// TODO: Respond with 413 Request Entity Too Large
// and discard the traffic or close the connection.
// No need to notify the upstream handlers - just log.
// If decoding a response, just throw an error.
if (HttpUtil.is100ContinueExpected(nettyRequest)) {
return FutureMono.deferFuture(() -> {
if (!hasSentHeaders()) {
return channel().writeAndFlush(CONTINUE);
}
return channel().newSucceededFuture();
})
.thenMany(super.receiveObject());
}
else {
return super.receiveObject();
}
}
@Override
@Nullable
public InetSocketAddress hostAddress() {
if (connectionInfo != null) {
return this.connectionInfo.getHostAddress();
}
else {
return null;
}
}
@Override
@Nullable
public InetSocketAddress remoteAddress() {
if (connectionInfo != null) {
return this.connectionInfo.getRemoteAddress();
}
else {
return null;
}
}
@Override
public HttpHeaders requestHeaders() {
if (nettyRequest != null) {
return nettyRequest.headers();
}
throw new IllegalStateException("request not parsed");
}
@Override
public String scheme() {
if (connectionInfo != null) {
return this.connectionInfo.getScheme();
}
else {
return scheme;
}
}
@Override
public HttpHeaders responseHeaders() {
return responseHeaders;
}
@Override
public Mono<Void> send() {
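// If nothing has been written yet, reply with the headers and an empty body; otherwise complete without writing anything.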
if (markSentHeaderAndBody()) {
HttpMessage response = newFullBodyMessage(EMPTY_BUFFER);
return FutureMono.deferFuture(() -> channel().writeAndFlush(response));
}
else {
return Mono.empty();
}
}
@Override
public NettyOutbound sendFile(Path file) {
try {
return sendFile(file, 0L, Files.size(file));
}
catch (IOException e) {
if (log.isDebugEnabled()) {
log.debug(format(channel(), "Path not resolved"), e);
}
return then(sendNotFound());
}
}
@Override
public Mono<Void> sendNotFound() {
return this.status(HttpResponseStatus.NOT_FOUND)
.send();
}
@Override
public Mono<Void> sendRedirect(String location) {
Objects.requireNonNull(location, "location");
return this.status(HttpResponseStatus.FOUND)
.header(HttpHeaderNames.LOCATION, location)
.send();
}
/**
 * Marks this response as a Server-Sent Events stream by setting the
 * {@code Content-Type} header to {@code text/event-stream}.
 *
 * @return this {@link HttpServerResponse}
 */
@Override
public HttpServerResponse sse() {
header(HttpHeaderNames.CONTENT_TYPE, EVENT_STREAM);
return this;
}
@Override
public HttpResponseStatus status() {
return this.nettyResponse.status();
}
@Override
public HttpServerResponse status(HttpResponseStatus status) {
if (!hasSentHeaders()) {
this.nettyResponse.setStatus(status);
}
else {
throw new IllegalStateException("Status and headers already sent");
}
return this;
}
@Override
public HttpServerResponse trailerHeaders(Consumer<? super HttpHeaders> trailerHeaders) {
this.trailerHeadersConsumer = Objects.requireNonNull(trailerHeaders, "trailerHeaders");
return this;
}
@Override
public Mono<Void> sendWebsocket(
BiFunction<? super WebsocketInbound, ? super WebsocketOutbound, ? extends Publisher<Void>> websocketHandler,
WebsocketServerSpec configurer) {
return withWebsocketSupport(uri(), configurer, websocketHandler);
}
@Override
public String uri() {
if (nettyRequest != null) {
return nettyRequest.uri();
}
throw new IllegalStateException("request not parsed");
}
@Override
public String fullPath() {
if (path != null) {
return path;
}
throw new IllegalStateException("request not parsed");
}
@Override
public HttpVersion version() {
if (nettyRequest != null) {
return nettyRequest.protocolVersion();
}
throw new IllegalStateException("request not parsed");
}
@Override
public HttpServerResponse compression(boolean compress) {
if (!compress) {
removeHandler(NettyPipeline.CompressionHandler);
}
else if (channel().pipeline()
.get(NettyPipeline.CompressionHandler) == null) {
SimpleCompressionHandler handler = new SimpleCompressionHandler();
try {
//Do not invoke handler.channelRead as it will trigger ctx.fireChannelRead
handler.decode(channel().pipeline().context(NettyPipeline.ReactiveBridge), nettyRequest);
addHandlerFirst(NettyPipeline.CompressionHandler, handler);
}
catch (Throwable e) {
log.error(format(channel(), ""), e);
}
}
return this;
}
@Override
protected void onInboundNext(ChannelHandlerContext ctx, Object msg) {
if (msg instanceof HttpRequest) {
try {
listener().onStateChange(this, HttpServerState.REQUEST_RECEIVED);
}
catch (Exception e) {
onInboundError(e);
ReferenceCountUtil.release(msg);
return;
}
if (msg instanceof FullHttpRequest) {
FullHttpRequest request = (FullHttpRequest) msg;
if (request.content().readableBytes() > 0) {
super.onInboundNext(ctx, msg);
}
else {
request.release();
}
if (isHttp2()) {
//force auto read to enable more accurate close selection now inbound is done
channel().config().setAutoRead(true);
onInboundComplete();
}
}
return;
}
if (msg instanceof HttpContent) {
if (msg != LastHttpContent.EMPTY_LAST_CONTENT) {
super.onInboundNext(ctx, msg);
}
if (msg instanceof LastHttpContent) {
//force auto read to enable more accurate close selection now inbound is done
channel().config().setAutoRead(true);
onInboundComplete();
}
}
else {
super.onInboundNext(ctx, msg);
}
}
@Override
protected void onInboundClose() {
discardWhenNoReceiver();
if (!(isInboundCancelled() || isInboundDisposed())) {
onInboundError(new AbortedException("Connection has been closed"));
}
terminate();
}
@Override
protected void afterMarkSentHeaders() {
if (HttpResponseStatus.NOT_MODIFIED.equals(status())) {
responseHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING)
.remove(HttpHeaderNames.CONTENT_LENGTH);
}
if (compressionPredicate != null && compressionPredicate.test(this, this)) {
compression(true);
}
}
@Override
protected void beforeMarkSentHeaders() {
//noop
}
@Override
protected void onHeadersSent() {
//noop
}
@Override
protected void onOutboundComplete() {
if (isWebsocket()) {
return;
}
final ChannelFuture f;
if (log.isDebugEnabled()) {
log.debug(format(channel(), "Last HTTP response frame"));
}
if (markSentHeaderAndBody()) {
if (log.isDebugEnabled()) {
log.debug(format(channel(), "No sendHeaders() called before complete, sending " +
"zero-length header"));
}
f = channel().writeAndFlush(newFullBodyMessage(EMPTY_BUFFER));
}
else if (markSentBody()) {
LastHttpContent lastHttpContent = LastHttpContent.EMPTY_LAST_CONTENT;
// https://datatracker.ietf.org/doc/html/rfc7230#section-4.1.2
// A trailer allows the sender to include additional fields at the end
// of a chunked message in order to supply metadata that might be
// dynamically generated while the message body is sent, such as a
// message integrity check, digital signature, or post-processing
// status.
if (trailerHeadersConsumer != null && isTransferEncodingChunked(nettyResponse)) {
// https://datatracker.ietf.org/doc/html/rfc7230#section-4.4
// When a message includes a message body encoded with the chunked
// transfer coding and the sender desires to send metadata in the form
// of trailer fields at the end of the message, the sender SHOULD
// generate a Trailer header field before the message body to indicate
// which fields will be present in the trailers.
String declaredHeaderNames = responseHeaders.get(HttpHeaderNames.TRAILER);
if (declaredHeaderNames != null) {
HttpHeaders trailerHeaders = new TrailerHeaders(declaredHeaderNames);
try {
trailerHeadersConsumer.accept(trailerHeaders);
}
catch (IllegalArgumentException e) {
// A sender MUST NOT generate a trailer when header names are
// HttpServerOperations.TrailerHeaders.DISALLOWED_TRAILER_HEADER_NAMES
log.error(format(channel(), "Cannot apply trailer headers [{0}]"), declaredHeaderNames, e);
}
if (!trailerHeaders.isEmpty()) {
lastHttpContent = new DefaultLastHttpContent();
lastHttpContent.trailingHeaders().set(trailerHeaders);
}
}
}
f = channel().writeAndFlush(lastHttpContent);
}
else {
discard();
return;
}
f.addListener(s -> {
discard();
if (!s.isSuccess() && log.isDebugEnabled()) {
log.debug(format(channel(), "Failed flushing last frame"), s.cause());
}
});
}
static void cleanHandlerTerminate(Channel ch) {
ChannelOperations<?, ?> ops = get(ch);
if (ops == null) {
return;
}
ops.discard();
//Try to defer the disposing to leave a chance for any synchronous complete following this callback
if (!ops.isSubscriptionDisposed()) {
ch.eventLoop()
.execute(((HttpServerOperations) ops)::terminate);
}
else {
//if already disposed, we can immediately call terminate
((HttpServerOperations) ops).terminate();
}
}
static long requestsCounter(Channel channel) {
HttpServerOperations ops = Connection.from(channel).as(HttpServerOperations.class);
if (ops == null) {
return -1;
}
return ((AtomicLong) ops.connection()).get();
}
static void sendDecodingFailures(
ChannelHandlerContext ctx,
ConnectionObserver listener,
boolean secure,
Throwable t,
Object msg) {
Connection conn = Connection.from(ctx.channel());
Throwable cause = t.getCause() != null ? t.getCause() : t;
if (log.isWarnEnabled()) {
log.warn(format(ctx.channel(), "Decoding failed: " + msg + " : "), cause);
}
ReferenceCountUtil.release(msg);
HttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0,
cause instanceof TooLongFrameException ? HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE :
HttpResponseStatus.BAD_REQUEST);
response.headers()
.setInt(HttpHeaderNames.CONTENT_LENGTH, 0)
.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
ctx.writeAndFlush(response)
.addListener(ChannelFutureListener.CLOSE);
HttpRequest request = null;
if (msg instanceof HttpRequest) {
request = (HttpRequest) msg;
}
listener.onStateChange(new FailedHttpServerRequest(conn, listener, request, response, secure), REQUEST_DECODING_FAILED);
}
/**
* There is no need to invoke {@link #discard()}; the inbound will
* be canceled on channel inactive event if there is no subscriber available
*
* @param err the {@link Throwable} cause
*/
@Override
protected void onOutboundError(Throwable err) {
if (!channel().isActive()) {
super.onOutboundError(err);
return;
}
if (markSentHeaders()) {
log.error(format(channel(), "Error starting response. Replying error status"), err);
nettyResponse.setStatus(HttpResponseStatus.INTERNAL_SERVER_ERROR);
responseHeaders.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
channel().writeAndFlush(newFullBodyMessage(EMPTY_BUFFER))
.addListener(ChannelFutureListener.CLOSE);
return;
}
markSentBody();
log.error(format(channel(), "Error finishing response. Closing connection"), err);
channel().writeAndFlush(EMPTY_BUFFER)
.addListener(ChannelFutureListener.CLOSE);
}
@Override
protected HttpMessage outboundHttpMessage() {
return nettyResponse;
}
final Flux<HttpData> receiveFormInternal(HttpServerFormDecoderProvider config) {
boolean isMultipart = isMultipart();
if (!Objects.equals(method(), HttpMethod.POST) || !(isFormUrlencoded() || isMultipart)) {
return Flux.error(new IllegalStateException(
"Request is not POST or does not have Content-Type " +
"with value 'application/x-www-form-urlencoded' or 'multipart/form-data'"));
}
return Flux.defer(() ->
config.newHttpPostRequestDecoder(nettyRequest, isMultipart).flatMapMany(decoder ->
receiveObject() // receiveContent uses filter operator, this operator buffers, but we don't want it
.concatMap(object -> {
if (!(object instanceof HttpContent)) {
return Mono.empty();
}
HttpContent httpContent = (HttpContent) object;
if (config.maxInMemorySize > -1) {
httpContent.retain();
}
return config.maxInMemorySize == -1 ?
Flux.using(
() -> decoder.offer(httpContent),
d -> Flux.fromIterable(decoder.currentHttpData(!config.streaming)),
d -> decoder.cleanCurrentHttpData(!config.streaming)) :
Flux.usingWhen(
Mono.fromCallable(() -> decoder.offer(httpContent))
.subscribeOn(config.scheduler)
.doFinally(sig -> httpContent.release()),
d -> Flux.fromIterable(decoder.currentHttpData(true)),
// FIXME Can we have cancellation for the resourceSupplier that will
// cause this one to not be invoked?
d -> Mono.fromRunnable(() -> decoder.cleanCurrentHttpData(true)));
}, 0) // There is no need to prefetch, we already have the buffers in the Reactor Netty inbound queue
.doFinally(sig -> decoder.destroy())));
}
final Mono<Void> withWebsocketSupport(String url,
WebsocketServerSpec websocketServerSpec,
BiFunction<? super WebsocketInbound, ? super WebsocketOutbound, ? extends Publisher<Void>> websocketHandler) {
Objects.requireNonNull(websocketServerSpec, "websocketServerSpec");
Objects.requireNonNull(websocketHandler, "websocketHandler");
if (markSentHeaders()) {
WebsocketServerOperations ops = new WebsocketServerOperations(url, websocketServerSpec, this);
return FutureMono.from(ops.handshakerResult)
.doOnEach(signal -> {
if (!signal.hasError() && (websocketServerSpec.protocols() == null || ops.selectedSubprotocol() != null)) {
websocketHandler.apply(ops, ops)
.subscribe(new WebsocketSubscriber(ops, Context.of(signal.getContextView())));
}
});
}
else {
log.error(format(channel(), "Cannot enable websocket if headers have already been sent"));
}
return Mono.error(new IllegalStateException("Failed to upgrade to websocket"));
}
static final class WebsocketSubscriber implements CoreSubscriber<Void>, ChannelFutureListener {
final WebsocketServerOperations ops;
final Context context;
WebsocketSubscriber(WebsocketServerOperations ops, Context context) {
this.ops = ops;
this.context = context;
}
@Override
public void onSubscribe(Subscription s) {
s.request(Long.MAX_VALUE);
}
@Override
public void onNext(Void aVoid) {
}
@Override
public void onError(Throwable t) {
ops.onError(t);
}
@Override
public void operationComplete(ChannelFuture future) {
ops.terminate();
}
@Override
public void onComplete() {
if (ops.channel()
.isActive()) {
ops.sendCloseNow(new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE), this);
}
}
@Override
public Context currentContext() {
return context;
}
}
static final Logger log = Loggers.getLogger(HttpServerOperations.class);
final static AsciiString EVENT_STREAM = new AsciiString("text/event-stream");
final static FullHttpResponse CONTINUE =
new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
HttpResponseStatus.CONTINUE,
EMPTY_BUFFER);
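// Minimal operations instance used when request decoding fails; it only exposes the error response that was already written back to the client.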
static final class FailedHttpServerRequest extends HttpServerOperations {
final HttpResponse customResponse;
FailedHttpServerRequest(
Connection c,
ConnectionObserver listener,
@Nullable HttpRequest nettyRequest,
HttpResponse nettyResponse,
boolean secure) {
super(c, listener, nettyRequest, null, null, ServerCookieDecoder.STRICT, ServerCookieEncoder.STRICT,
DEFAULT_FORM_DECODER_SPEC, null, false, secure);
this.customResponse = nettyResponse;
}
@Override
protected HttpMessage outboundHttpMessage() {
return customResponse;
}
@Override
public HttpResponseStatus status() {
return customResponse.status();
}
}
static final class TrailerHeaders extends DefaultHttpHeaders {
static final Set<String> DISALLOWED_TRAILER_HEADER_NAMES = new HashSet<>(14);
static {
// https://datatracker.ietf.org/doc/html/rfc7230#section-4.1.2
// A sender MUST NOT generate a trailer that contains a field necessary
// for message framing (e.g., Transfer-Encoding and Content-Length),
// routing (e.g., Host), request modifiers (e.g., controls and
// conditionals in Section 5 of [RFC7231]), authentication (e.g., see
// [RFC7235] and [RFC6265]), response control data (e.g., see Section
// 7.1 of [RFC7231]), or determining how to process the payload (e.g.,
// Content-Encoding, Content-Type, Content-Range, and Trailer).
DISALLOWED_TRAILER_HEADER_NAMES.add("age");
DISALLOWED_TRAILER_HEADER_NAMES.add("cache-control");
DISALLOWED_TRAILER_HEADER_NAMES.add("content-encoding");
DISALLOWED_TRAILER_HEADER_NAMES.add("content-length");
DISALLOWED_TRAILER_HEADER_NAMES.add("content-range");
DISALLOWED_TRAILER_HEADER_NAMES.add("content-type");
DISALLOWED_TRAILER_HEADER_NAMES.add("date");
DISALLOWED_TRAILER_HEADER_NAMES.add("expires");
DISALLOWED_TRAILER_HEADER_NAMES.add("location");
DISALLOWED_TRAILER_HEADER_NAMES.add("retry-after");
DISALLOWED_TRAILER_HEADER_NAMES.add("trailer");
DISALLOWED_TRAILER_HEADER_NAMES.add("transfer-encoding");
DISALLOWED_TRAILER_HEADER_NAMES.add("vary");
DISALLOWED_TRAILER_HEADER_NAMES.add("warning");
}
TrailerHeaders(String declaredHeaderNames) {
super(true, new TrailerNameValidator(filterHeaderNames(declaredHeaderNames)));
}
static Set<String> filterHeaderNames(String declaredHeaderNames) {
Objects.requireNonNull(declaredHeaderNames, "declaredHeaderNames");
Set<String> result = new HashSet<>();
String[] names = declaredHeaderNames.split(",", -1);
for (String name : names) {
String trimmedStr = name.trim();
if (trimmedStr.isEmpty() ||
DISALLOWED_TRAILER_HEADER_NAMES.contains(trimmedStr.toLowerCase(Locale.ENGLISH))) {
continue;
}
result.add(trimmedStr);
}
return result;
}
static final class TrailerNameValidator implements DefaultHeaders.NameValidator<CharSequence> {
/**
* Contains the header names specified with {@link HttpHeaderNames#TRAILER}
*/
final Set<String> declaredHeaderNames;
TrailerNameValidator(Set<String> declaredHeaderNames) {
this.declaredHeaderNames = declaredHeaderNames;
}
@Override
public void validateName(CharSequence name) {
if (!declaredHeaderNames.contains(name.toString())) {
throw new IllegalArgumentException("Trailer header name [" + name +
"] not declared with [Trailer] header, or it is not a valid trailer header name");
}
}
}
}
}
| apache-2.0 |
maxml/sample-apps | cityguide/source/app/src/main/java/org/kaaproject/kaa/demo/cityguide/adapter/PlacesAdapter.java | 2755 | /**
* Copyright 2014-2016 CyberVision, Inc.
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaaproject.kaa.demo.cityguide.adapter;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.BaseAdapter;
import android.widget.TextView;
import org.kaaproject.kaa.demo.cityguide.Place;
import org.kaaproject.kaa.demo.cityguide.R;
import org.kaaproject.kaa.demo.cityguide.ui.ProgressImageView;
import java.util.List;
/**
* The implementation of the {@link BaseAdapter} class. Used as an adapter class for the places list view.
* Provides list item views containing a photo, name and description of each place.
*/
public class PlacesAdapter extends ArrayAdapter<Place> {
private Context mContext;
public PlacesAdapter(Context context, List<Place> places) {
super(context, R.layout.place_list_item, places);
mContext = context;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
Place place = getItem(position);
ViewHolder holder;
if (convertView == null) {
LayoutInflater inflater = (LayoutInflater) mContext
.getSystemService(Context.LAYOUT_INFLATER_SERVICE);
convertView = inflater.inflate(R.layout.place_list_item, null);
holder = new ViewHolder();
holder.photo = (ProgressImageView) convertView.findViewById(R.id.placePhoto);
holder.name = (TextView) convertView.findViewById(R.id.placeName);
holder.description = (TextView) convertView.findViewById(R.id.placeDesc);
convertView.setTag(holder);
} else {
holder = (ViewHolder) convertView.getTag();
}
// mImageLoader.loadImage(place.getPhotoUrl(), holder.photo, ImageType.THUMBNAIL);
holder.photo.setImage(place.getPhotoUrl());
holder.name.setText(place.getTitle());
holder.description.setText(place.getDescription());
return convertView;
}
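// Caches the child view lookups so getView() can reuse them instead of calling findViewById on every bind.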
class ViewHolder {
TextView name;
TextView description;
ProgressImageView photo;
}
}
| apache-2.0 |
MissionCriticalCloud/cosmic | cosmic-core/engine/schema/src/main/java/com/cloud/domain/DomainVO.java | 4945 | package com.cloud.domain;
import com.cloud.legacymodel.domain.Domain;
import com.cloud.utils.db.GenericDao;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;
import java.util.Date;
import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Entity
@Table(name = "domain")
public class DomainVO implements Domain {
public static final Logger s_logger = LoggerFactory.getLogger(DomainVO.class.getName());
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "id")
private long id;
@Column(name = "parent")
private Long parent;
@Column(name = "name")
private String name;
@Column(name = "owner")
private long accountId;
@Column(name = "path")
private String path;
@Column(name = "level")
private int level;
@Column(name = GenericDao.REMOVED_COLUMN)
private Date removed;
@Column(name = "child_count")
private int childCount = 0;
@Column(name = "next_child_seq")
private long nextChildSeq = 1L;
@Column(name = "state")
private Domain.State state;
@Column(name = "network_domain")
private String networkDomain;
@Column(name = "uuid")
private String uuid;
@Column(name = "email")
private String email;
@Column(name = "slack_channel_name")
private String slackChannelName;
public DomainVO() {
}
public DomainVO(final String name, final long owner, final Long parentId, final String networkDomain) {
this.parent = parentId;
this.name = name;
this.accountId = owner;
this.path = "";
this.level = 0;
this.state = Domain.State.Active;
this.networkDomain = networkDomain;
this.uuid = UUID.randomUUID().toString();
}
public DomainVO(final String name, final long owner, final Long parentId, final String networkDomain, final String uuid, final String email, final String slackChannelName) {
this.parent = parentId;
this.name = name;
this.accountId = owner;
this.path = "";
this.level = 0;
this.state = Domain.State.Active;
this.networkDomain = networkDomain;
this.uuid = uuid;
this.email = email;
this.slackChannelName = slackChannelName;
}
@Override
public long getId() {
return id;
}
@Override
public Long getParent() {
return parent;
}
@Override
public void setParent(final Long parent) {
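// A missing or out-of-range parent id falls back to the ROOT domain.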
if (parent == null) {
this.parent = Domain.ROOT_DOMAIN;
} else {
if (parent.longValue() <= Domain.ROOT_DOMAIN) {
this.parent = Domain.ROOT_DOMAIN;
} else {
this.parent = parent;
}
}
}
@Override
public String getName() {
return name;
}
@Override
public void setName(final String name) {
this.name = name;
}
@Override
public Date getRemoved() {
return removed;
}
@Override
public String getPath() {
return path;
}
@Override
public void setPath(final String path) {
this.path = path;
}
@Override
public int getLevel() {
return level;
}
public void setLevel(final int level) {
this.level = level;
}
@Override
public int getChildCount() {
return childCount;
}
public void setChildCount(final int count) {
childCount = count;
}
@Override
public long getNextChildSeq() {
return nextChildSeq;
}
public void setNextChildSeq(final long seq) {
nextChildSeq = seq;
}
@Override
public Domain.State getState() {
return state;
}
@Override
public void setState(final Domain.State state) {
this.state = state;
}
@Override
public String getNetworkDomain() {
return networkDomain;
}
public void setNetworkDomain(final String domainSuffix) {
this.networkDomain = domainSuffix;
}
@Override
public long getAccountId() {
return accountId;
}
@Override
public String toString() {
return new StringBuilder("Domain:").append(id).append(path).toString();
}
@Override
public String getUuid() {
return this.uuid;
}
public void setUuid(final String uuid) {
this.uuid = uuid;
}
@Override
public String getEmail() {
return this.email;
}
public void setEmail(final String email) {
this.email = email;
}
public String getSlackChannelName() {
return slackChannelName;
}
public void setSlackChannelName(final String slackChannelName) {
this.slackChannelName = slackChannelName;
}
}
| apache-2.0 |
cmuhirwa/uplus | casual/vendors/smooth-scroll/SmoothScroll.js | 12102 | // SmoothScroll v1.2.1
// Licensed under the terms of the MIT license.
// People involved
// - Balazs Galambosi (maintainer)
// - Patrick Brunner (original idea)
// - Michael Herf (Pulse Algorithm)
// - Justin Force (Resurrect)
if (navigator.appVersion.indexOf("Mac") == -1) {
// Scroll Variables (tweakable)
var framerate = 150; // [Hz]
var animtime = 500; // [ms]
var stepsize = 150; // [px]
// Pulse (less tweakable)
// ratio of "tail" to "acceleration"
var pulseAlgorithm = true;
var pulseScale = 8;
var pulseNormalize = 1;
// Acceleration
var acceleration = true;
var accelDelta = 10; // 20
var accelMax = 1; // 1
// Keyboard Settings
var keyboardsupport = true; // option
var disableKeyboard = false; // other reasons
var arrowscroll = 50; // [px]
// Excluded pages
var exclude = "";
var disabled = false;
// Other Variables
var frame = false;
var direction = { x: 0, y: 0 };
var initdone = false;
var fixedback = true;
var root = document.documentElement;
var activeElement;
var key = { left: 37, up: 38, right: 39, down: 40, spacebar: 32, pageup: 33, pagedown: 34, end: 35, home: 36 };
/**
* Sets up scrolls array, determines if frames are involved.
*/
function init() {
if (!document.body) return;
var body = document.body;
var html = document.documentElement;
var windowHeight = window.innerHeight;
var scrollHeight = body.scrollHeight;
// check compat mode for root element
root = (document.compatMode.indexOf('CSS') >= 0) ? html : body;
activeElement = body;
initdone = true;
// Checks if this script is running in a frame
if (top != self) {
frame = true;
}
/**
* This fixes a bug where the areas left and right to
* the content does not trigger the onmousewheel event
* on some pages. e.g.: html, body { height: 100% }
*/
else if (scrollHeight > windowHeight &&
(body.offsetHeight <= windowHeight ||
html.offsetHeight <= windowHeight)) {
// DOMChange (throttle): fix height
var pending = false;
var refresh = function() {
if (!pending && html.scrollHeight != document.height) {
pending = true; // add a new pending action
setTimeout(function(){
html.style.height = document.height + 'px';
pending = false;
}, 500); // act rarely to stay fast
}
};
html.style.height = '';
setTimeout(refresh, 10);
addEvent("DOMNodeInserted", refresh);
addEvent("DOMNodeRemoved", refresh);
// clearfix
if (root.offsetHeight <= windowHeight) {
var underlay = document.createElement("div");
underlay.style.clear = "both";
body.appendChild(underlay);
}
}
// gmail performance fix
if (document.URL.indexOf("mail.google.com") > -1) {
var s = document.createElement("style");
s.innerHTML = ".iu { visibility: hidden }";
(document.getElementsByTagName("head")[0] || html).appendChild(s);
}
// disable fixed background
if (!fixedback && !disabled) {
body.style.backgroundAttachment = "scroll";
html.style.backgroundAttachment = "scroll";
}
}
/************************************************
* SCROLLING
************************************************/
var que = [];
var pending = false;
var lastScroll = +new Date;
/**
* Pushes scroll actions to the scrolling queue.
*/
function scrollArray(elem, left, top, delay) {
delay || (delay = 1000);
directionCheck(left, top);
if (acceleration) {
var now = +new Date;
var elapsed = now - lastScroll;
if (elapsed < accelDelta) {
var factor = (1 + (30 / elapsed)) / 2;
if (factor > 1) {
factor = Math.min(factor, accelMax);
left *= factor;
top *= factor;
}
}
lastScroll = +new Date;
}
// push a scroll command
que.push({
x: left,
y: top,
lastX: (left < 0) ? 0.99 : -0.99,
lastY: (top < 0) ? 0.99 : -0.99,
start: +new Date
});
// don't act if there's a pending queue
if (pending) {
return;
}
var scrollWindow = (elem === document.body);
var step = function() {
var now = +new Date;
var scrollX = 0;
var scrollY = 0;
for (var i = 0; i < que.length; i++) {
var item = que[i];
var elapsed = now - item.start;
var finished = (elapsed >= animtime);
// scroll position: [0, 1]
var position = (finished) ? 1 : elapsed / animtime;
// easing [optional]
if (pulseAlgorithm) {
position = pulse(position);
}
// only need the difference
var x = (item.x * position - item.lastX) >> 0;
var y = (item.y * position - item.lastY) >> 0;
// add this to the total scrolling
scrollX += x;
scrollY += y;
// update last values
item.lastX += x;
item.lastY += y;
// delete and step back if it's over
if (finished) {
que.splice(i, 1); i--;
}
}
// scroll left and top
if (scrollWindow) {
window.scrollBy(scrollX, scrollY)
}
else {
if (scrollX) elem.scrollLeft += scrollX;
if (scrollY) elem.scrollTop += scrollY;
}
// clean up if there's nothing left to do
if (!left && !top) {
que = [];
}
if (que.length) {
requestFrame(step, elem, (delay / framerate + 1));
} else {
pending = false;
}
}
// start a new queue of actions
requestFrame(step, elem, 0);
pending = true;
}
/***********************************************
* EVENTS
***********************************************/
/**
* Mouse wheel handler.
* @param {Object} event
*/
function wheel(event) {
if (!initdone) {
init();
}
var target = event.target;
var overflowing = overflowingAncestor(target);
// use default if there's no overflowing
// element or default action is prevented
if (!overflowing || event.defaultPrevented ||
isNodeName(activeElement, "embed") ||
(isNodeName(target, "embed") && /\.pdf/i.test(target.src))) {
return true;
}
var deltaX = event.wheelDeltaX || 0;
var deltaY = event.wheelDeltaY || 0;
// use wheelDelta if deltaX/Y is not available
if (!deltaX && !deltaY) {
deltaY = event.wheelDelta || 0;
}
// scale by step size
// delta is 120 most of the time
// synaptics seems to send 1 sometimes
if (Math.abs(deltaX) > 1.2) {
deltaX *= stepsize / 120;
}
if (Math.abs(deltaY) > 1.2) {
deltaY *= stepsize / 120;
}
scrollArray(overflowing, -deltaX, -deltaY);
event.preventDefault();
}
/**
* Keydown event handler.
* @param {Object} event
*/
function keydown(event) {
var target = event.target;
var modifier = event.ctrlKey || event.altKey || event.metaKey ||
(event.shiftKey && event.keyCode !== key.spacebar);
// do nothing if user is editing text
// or using a modifier key (except shift)
// or in a dropdown
if ( /input|textarea|select|embed/i.test(target.nodeName) ||
target.isContentEditable ||
event.defaultPrevented ||
modifier ) {
return true;
}
// spacebar should trigger button press
if (isNodeName(target, "button") &&
event.keyCode === key.spacebar) {
return true;
}
var shift, x = 0, y = 0;
var elem = overflowingAncestor(activeElement);
var clientHeight = elem.clientHeight;
if (elem == document.body) {
clientHeight = window.innerHeight;
}
switch (event.keyCode) {
case key.up:
y = -arrowscroll;
break;
case key.down:
y = arrowscroll;
break;
case key.spacebar: // (+ shift)
shift = event.shiftKey ? 1 : -1;
y = -shift * clientHeight * 0.9;
break;
case key.pageup:
y = -clientHeight * 0.9;
break;
case key.pagedown:
y = clientHeight * 0.9;
break;
case key.home:
y = -elem.scrollTop;
break;
case key.end:
var damt = elem.scrollHeight - elem.scrollTop - clientHeight;
y = (damt > 0) ? damt+10 : 0;
break;
case key.left:
x = -arrowscroll;
break;
case key.right:
x = arrowscroll;
break;
default:
return true; // a key we don't care about
}
scrollArray(elem, x, y);
event.preventDefault();
}
/**
* Mousedown event only for updating activeElement
*/
function mousedown(event) {
activeElement = event.target;
}
/***********************************************
* OVERFLOW
***********************************************/
var cache = {}; // cleared out every once in a while
setInterval(function(){ cache = {}; }, 10 * 1000);
var uniqueID = (function() {
var i = 0;
return function (el) {
return el.uniqueID || (el.uniqueID = i++);
};
})();
function setCache(elems, overflowing) {
for (var i = elems.length; i--;)
cache[uniqueID(elems[i])] = overflowing;
return overflowing;
}
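/**
 * Finds the closest ancestor (or the element itself) that overflows vertically
 * and is allowed to scroll (overflow-y: scroll or auto); the scrolling root is
 * mapped to document.body. Results are memoized per element in the cache above.
 */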
function overflowingAncestor(el) {
var elems = [];
var rootScrollHeight = root.scrollHeight;
do {
var cached = cache[uniqueID(el)];
if (cached) {
return setCache(elems, cached);
}
elems.push(el);
if (rootScrollHeight === el.scrollHeight) {
if (!frame || root.clientHeight + 10 < rootScrollHeight) {
return setCache(elems, document.body); // scrolling root in WebKit
}
} else if (el.clientHeight + 10 < el.scrollHeight) {
var overflow = getComputedStyle(el, "").getPropertyValue("overflow-y");
if (overflow === "scroll" || overflow === "auto") {
return setCache(elems, el);
}
}
} while (el = el.parentNode);
}
/***********************************************
* HELPERS
***********************************************/
function addEvent(type, fn, bubble) {
window.addEventListener(type, fn, (bubble||false));
}
function removeEvent(type, fn, bubble) {
window.removeEventListener(type, fn, (bubble||false));
}
function isNodeName(el, tag) {
return (el.nodeName||"").toLowerCase() === tag.toLowerCase();
}
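/**
 * Resets the queued scroll animations when the scroll direction flips,
 * so opposing animations do not fight each other.
 */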
function directionCheck(x, y) {
x = (x > 0) ? 1 : -1;
y = (y > 0) ? 1 : -1;
if (direction.x !== x || direction.y !== y) {
direction.x = x;
direction.y = y;
que = [];
lastScroll = 0;
}
}
var requestFrame = (function(){
return window.requestAnimationFrame ||
window.webkitRequestAnimationFrame ||
function(callback, element, delay){
window.setTimeout(callback, delay || (1000/60));
};
})();
/***********************************************
* PULSE
***********************************************/
/**
* Viscous fluid with a pulse for part and decay for the rest.
* - Applies a fixed force over an interval (a damped acceleration), and
* - Lets the exponential bleed away the velocity over a longer interval
* - Michael Herf, http://stereopsis.com/stopping/
*/
function pulse_(x) {
var val, start, expx;
// test
x = x * pulseScale;
if (x < 1) { // acceleration
val = x - (1 - Math.exp(-x));
} else { // tail
// the previous animation ended here:
start = Math.exp(-1);
// simple viscous drag
x -= 1;
expx = 1 - Math.exp(-x);
val = start + (expx * (1 - start));
}
return val * pulseNormalize;
}
function pulse(x) {
if (x >= 1) return 1;
if (x <= 0) return 0;
if (pulseNormalize == 1) {
pulseNormalize /= pulse_(1);
}
return pulse_(x);
}
addEvent("mousedown", mousedown);
addEvent("mousewheel", wheel);
addEvent("load", init);
}
| apache-2.0 |
marianosz/azure-mobile-services-test | Runtime/DotNetE2EServerApp/App_Start/WebApiConfig.cs | 4411 | // ----------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// ----------------------------------------------------------------------------
using AutoMapper;
using Microsoft.WindowsAzure.Mobile.Service;
using Microsoft.WindowsAzure.Mobile.Service.Config;
using Microsoft.WindowsAzure.Mobile.Service.Security;
using Newtonsoft.Json;
using System;
using System.Collections;
using System.Data.Entity;
using System.Diagnostics;
using System.Linq;
using System.Web.Http;
using ZumoE2EServerApp.DataObjects;
using ZumoE2EServerApp.Models;
using Newtonsoft.Json.Linq;
using ZumoE2EServerApp.Utils;
namespace ZumoE2EServerApp
{
public static class WebApiConfig
{
public static void Register()
{
ConfigOptions options = new ConfigOptions
{
PushAuthorization = AuthorizationLevel.Application,
DiagnosticsAuthorization = AuthorizationLevel.Anonymous,
};
HttpConfiguration config = ServiceConfig.Initialize(new ConfigBuilder(options));
// Now add any missing connection strings and app settings from the environment.
// Any environment variables found with names that match existing connection
// string and app setting names will be used to replace the value.
// This allows the Web.config (which typically would contain secrets) to be
// checked in, but requires people running the tests to configure their environment.
IServiceSettingsProvider settingsProvider = config.DependencyResolver.GetServiceSettingsProvider();
ServiceSettingsDictionary settings = settingsProvider.GetServiceSettings();
IDictionary environmentVariables = Environment.GetEnvironmentVariables();
foreach (var conKey in settings.Connections.Keys.ToArray())
{
var envKey = environmentVariables.Keys.OfType<string>().FirstOrDefault(p => p == conKey);
if (!string.IsNullOrEmpty(envKey))
{
settings.Connections[conKey].ConnectionString = (string)environmentVariables[envKey];
}
}
foreach (var setKey in settings.Keys.ToArray())
{
var envKey = environmentVariables.Keys.OfType<string>().FirstOrDefault(p => p == setKey);
if (!string.IsNullOrEmpty(envKey))
{
settings[setKey] = (string)environmentVariables[envKey];
}
}
// Emulate the auth behavior of the server: default is application unless explicitly set.
config.Properties["MS_IsHosted"] = true;
config.Formatters.JsonFormatter.SerializerSettings.DateFormatHandling = DateFormatHandling.IsoDateFormat;
Mapper.Initialize(cfg =>
{
cfg.CreateMap<IntIdRoundTripTableItem, IntIdRoundTripTableItemDto>()
.ForMember(dto => dto.Id, map => map.MapFrom(db => MySqlFuncs.LTRIM(MySqlFuncs.StringConvert(db.Id))));
cfg.CreateMap<IntIdRoundTripTableItemDto, IntIdRoundTripTableItem>()
.ForMember(db => db.Id, map => map.MapFrom(dto => MySqlFuncs.LongParse(dto.Id)));
cfg.CreateMap<IntIdMovie, IntIdMovieDto>()
.ForMember(dto => dto.Id, map => map.MapFrom(db => MySqlFuncs.LTRIM(MySqlFuncs.StringConvert(db.Id))));
cfg.CreateMap<IntIdMovieDto, IntIdMovie>()
.ForMember(db => db.Id, map => map.MapFrom(dto => MySqlFuncs.LongParse(dto.Id)));
});
Database.SetInitializer(new DbInitializer());
}
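// Recreates the database schema on startup and seeds it with the movie fixtures used by the e2e tests.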
class DbInitializer : ClearDatabaseSchemaAlways<SDKClientTestContext>
{
protected override void Seed(SDKClientTestContext context)
{
foreach (var movie in TestMovies.GetTestMovies())
{
context.Set<Movie>().Add(movie);
}
foreach (var movie in TestMovies.TestIntIdMovies)
{
context.Set<IntIdMovie>().Add(movie);
}
base.Seed(context);
}
}
}
}
| apache-2.0 |
jentfoo/aws-sdk-java | aws-java-sdk-mediaconvert/src/main/java/com/amazonaws/services/mediaconvert/model/AncillarySourceSettings.java | 5104 | /*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.mediaconvert.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* Settings for ancillary captions source.
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/mediaconvert-2017-08-29/AncillarySourceSettings"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AncillarySourceSettings implements Serializable, Cloneable, StructuredPojo {
/**
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*/
private Integer sourceAncillaryChannelNumber;
/**
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*
* @param sourceAncillaryChannelNumber
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*/
public void setSourceAncillaryChannelNumber(Integer sourceAncillaryChannelNumber) {
this.sourceAncillaryChannelNumber = sourceAncillaryChannelNumber;
}
/**
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*
* @return Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*/
public Integer getSourceAncillaryChannelNumber() {
return this.sourceAncillaryChannelNumber;
}
/**
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
*
* @param sourceAncillaryChannelNumber
* Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for
* passthrough.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public AncillarySourceSettings withSourceAncillaryChannelNumber(Integer sourceAncillaryChannelNumber) {
setSourceAncillaryChannelNumber(sourceAncillaryChannelNumber);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getSourceAncillaryChannelNumber() != null)
sb.append("SourceAncillaryChannelNumber: ").append(getSourceAncillaryChannelNumber());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof AncillarySourceSettings == false)
return false;
AncillarySourceSettings other = (AncillarySourceSettings) obj;
if (other.getSourceAncillaryChannelNumber() == null ^ this.getSourceAncillaryChannelNumber() == null)
return false;
if (other.getSourceAncillaryChannelNumber() != null && other.getSourceAncillaryChannelNumber().equals(this.getSourceAncillaryChannelNumber()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getSourceAncillaryChannelNumber() == null) ? 0 : getSourceAncillaryChannelNumber().hashCode());
return hashCode;
}
@Override
public AncillarySourceSettings clone() {
try {
return (AncillarySourceSettings) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.mediaconvert.model.transform.AncillarySourceSettingsMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}
| apache-2.0 |
Endika/omim | drape_frontend/backend_renderer.cpp | 11022 | #include "drape_frontend/gui/drape_gui.hpp"
#include "drape_frontend/backend_renderer.hpp"
#include "drape_frontend/batchers_pool.hpp"
#include "drape_frontend/gps_track_shape.hpp"
#include "drape_frontend/map_shape.hpp"
#include "drape_frontend/message_subclasses.hpp"
#include "drape_frontend/read_manager.hpp"
#include "drape_frontend/route_builder.hpp"
#include "drape_frontend/user_mark_shapes.hpp"
#include "drape_frontend/visual_params.hpp"
#include "indexer/scales.hpp"
#include "drape/texture_manager.hpp"
#include "platform/platform.hpp"
#include "base/logging.hpp"
#include "std/bind.hpp"
namespace df
{
BackendRenderer::BackendRenderer(Params const & params)
: BaseRenderer(ThreadsCommutator::ResourceUploadThread, params)
, m_model(params.m_model)
, m_readManager(make_unique_dp<ReadManager>(params.m_commutator, m_model, params.m_allow3dBuildings))
, m_requestedTiles(params.m_requestedTiles)
{
#ifdef DEBUG
m_isTeardowned = false;
#endif
gui::DrapeGui::Instance().SetRecacheCountryStatusSlot([this]()
{
m_commutator->PostMessage(ThreadsCommutator::ResourceUploadThread,
make_unique_dp<CountryStatusRecacheMessage>(),
MessagePriority::High);
});
m_routeBuilder = make_unique_dp<RouteBuilder>([this](drape_ptr<RouteData> && routeData)
{
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<FlushRouteMessage>(move(routeData)),
MessagePriority::Normal);
}, [this](drape_ptr<RouteSignData> && routeSignData)
{
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<FlushRouteSignMessage>(move(routeSignData)),
MessagePriority::Normal);
});
StartThread();
}
BackendRenderer::~BackendRenderer()
{
ASSERT(m_isTeardowned, ());
}
void BackendRenderer::Teardown()
{
gui::DrapeGui::Instance().ClearRecacheCountryStatusSlot();
StopThread();
#ifdef DEBUG
m_isTeardowned = true;
#endif
}
unique_ptr<threads::IRoutine> BackendRenderer::CreateRoutine()
{
return make_unique<Routine>(*this);
}
void BackendRenderer::RecacheGui(gui::TWidgetsInitInfo const & initInfo, gui::TWidgetsSizeInfo & sizeInfo)
{
drape_ptr<gui::LayerRenderer> layerRenderer = m_guiCacher.RecacheWidgets(initInfo, sizeInfo, m_texMng);
drape_ptr<Message> outputMsg = make_unique_dp<GuiLayerRecachedMessage>(move(layerRenderer));
m_commutator->PostMessage(ThreadsCommutator::RenderThread, move(outputMsg), MessagePriority::Normal);
}
void BackendRenderer::RecacheCountryStatus()
{
drape_ptr<gui::LayerRenderer> layerRenderer = m_guiCacher.RecacheCountryStatus(m_texMng);
drape_ptr<Message> outputMsg = make_unique_dp<GuiLayerRecachedMessage>(move(layerRenderer));
m_commutator->PostMessage(ThreadsCommutator::RenderThread, move(outputMsg), MessagePriority::Normal);
}
void BackendRenderer::AcceptMessage(ref_ptr<Message> message)
{
switch (message->GetType())
{
case Message::UpdateReadManager:
{
TTilesCollection tiles = m_requestedTiles->GetTiles();
if (!tiles.empty())
{
ScreenBase const screen = m_requestedTiles->GetScreen();
bool const is3dBuildings = m_requestedTiles->Is3dBuildings();
m_readManager->UpdateCoverage(screen, is3dBuildings, tiles, m_texMng);
gui::CountryStatusHelper & helper = gui::DrapeGui::Instance().GetCountryStatusHelper();
if ((*tiles.begin()).m_zoomLevel > scales::GetUpperWorldScale())
m_model.UpdateCountryIndex(helper.GetCountryIndex(), screen.ClipRect().Center());
else
helper.Clear();
}
break;
}
case Message::InvalidateReadManagerRect:
{
ref_ptr<InvalidateReadManagerRectMessage> msg = message;
if (msg->NeedInvalidateAll())
m_readManager->InvalidateAll();
else
m_readManager->Invalidate(msg->GetTilesForInvalidate());
break;
}
case Message::CountryStatusRecache:
{
RecacheCountryStatus();
break;
}
case Message::GuiRecache:
{
ref_ptr<GuiRecacheMessage> msg = message;
RecacheGui(msg->GetInitInfo(), msg->GetSizeInfoMap());
break;
}
case Message::GuiLayerLayout:
{
ref_ptr<GuiLayerLayoutMessage> msg = message;
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<GuiLayerLayoutMessage>(msg->AcceptLayoutInfo()),
MessagePriority::Normal);
RecacheCountryStatus();
break;
}
case Message::TileReadStarted:
{
m_batchersPool->ReserveBatcher(static_cast<ref_ptr<BaseTileMessage>>(message)->GetKey());
break;
}
case Message::TileReadEnded:
{
ref_ptr<TileReadEndMessage> msg = message;
m_batchersPool->ReleaseBatcher(msg->GetKey());
break;
}
case Message::FinishReading:
{
ref_ptr<FinishReadingMessage> msg = message;
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<FinishReadingMessage>(move(msg->MoveTiles())),
MessagePriority::Normal);
break;
}
case Message::MapShapeReaded:
{
ref_ptr<MapShapeReadedMessage> msg = message;
auto const & tileKey = msg->GetKey();
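// Batch the shapes only while the tile is still requested and still tracked by the read manager.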
if (m_requestedTiles->CheckTileKey(tileKey) && m_readManager->CheckTileKey(tileKey))
{
ref_ptr<dp::Batcher> batcher = m_batchersPool->GetTileBatcher(tileKey);
for (drape_ptr<MapShape> const & shape : msg->GetShapes())
shape->Draw(batcher, m_texMng);
}
break;
}
case Message::UpdateUserMarkLayer:
{
ref_ptr<UpdateUserMarkLayerMessage> msg = message;
TileKey const & key = msg->GetKey();
UserMarksProvider const * marksProvider = msg->StartProcess();
if (marksProvider->IsDirty())
{
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<ClearUserMarkLayerMessage>(key),
MessagePriority::Normal);
m_batchersPool->ReserveBatcher(key);
CacheUserMarks(marksProvider, m_batchersPool->GetTileBatcher(key), m_texMng);
m_batchersPool->ReleaseBatcher(key);
}
msg->EndProcess();
break;
}
case Message::CountryInfoUpdate:
{
ref_ptr<CountryInfoUpdateMessage> msg = message;
gui::CountryStatusHelper & helper = gui::DrapeGui::Instance().GetCountryStatusHelper();
if (!msg->NeedShow())
{
// Country is already loaded, so there is no need to show status GUI
// even if this country is updating.
helper.Clear();
}
else
{
gui::CountryInfo const & info = msg->GetCountryInfo();
if (msg->IsCurrentCountry() || helper.GetCountryIndex() == info.m_countryIndex)
{
helper.SetCountryInfo(info);
}
}
break;
}
case Message::AddRoute:
{
ref_ptr<AddRouteMessage> msg = message;
m_routeBuilder->Build(msg->GetRoutePolyline(), msg->GetTurns(), msg->GetColor(), m_texMng);
break;
}
case Message::CacheRouteSign:
{
ref_ptr<CacheRouteSignMessage> msg = message;
m_routeBuilder->BuildSign(msg->GetPosition(), msg->IsStart(), msg->IsValid(), m_texMng);
break;
}
case Message::RemoveRoute:
{
ref_ptr<RemoveRouteMessage> msg = message;
// we have to resend the message to the frontend renderer (FR), because it guarantees that
// RemoveRouteMessage will be processed after FlushRouteMessage
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<RemoveRouteMessage>(msg->NeedDeactivateFollowing()),
MessagePriority::Normal);
break;
}
case Message::InvalidateTextures:
{
m_texMng->Invalidate(VisualParams::Instance().GetResourcePostfix());
RecacheMyPosition();
break;
}
case Message::CacheGpsTrackPoints:
{
ref_ptr<CacheGpsTrackPointsMessage> msg = message;
drape_ptr<GpsTrackRenderData> data = make_unique_dp<GpsTrackRenderData>();
data->m_pointsCount = msg->GetPointsCount();
GpsTrackShape::Draw(m_texMng, *data.get());
m_commutator->PostMessage(ThreadsCommutator::RenderThread,
make_unique_dp<FlushGpsTrackPointsMessage>(move(data)),
MessagePriority::Normal);
break;
}
case Message::StopRendering:
{
ProcessStopRenderingMessage();
break;
}
case Message::Allow3dBuildings:
{
ref_ptr<Allow3dBuildingsMessage> msg = message;
m_readManager->Allow3dBuildings(msg->Allow3dBuildings());
break;
}
default:
ASSERT(false, ());
break;
}
}
void BackendRenderer::ReleaseResources()
{
m_readManager->Stop();
m_readManager.reset();
m_batchersPool.reset();
m_routeBuilder.reset();
m_texMng->Release();
m_contextFactory->getResourcesUploadContext()->doneCurrent();
}
BackendRenderer::Routine::Routine(BackendRenderer & renderer) : m_renderer(renderer) {}
void BackendRenderer::Routine::Do()
{
m_renderer.m_contextFactory->getResourcesUploadContext()->makeCurrent();
GLFunctions::Init();
m_renderer.InitGLDependentResource();
while (!IsCancelled())
{
m_renderer.ProcessSingleMessage();
m_renderer.CheckRenderingEnabled();
}
m_renderer.ReleaseResources();
}
void BackendRenderer::InitGLDependentResource()
{
m_batchersPool = make_unique_dp<BatchersPool>(ReadManager::ReadCount(), bind(&BackendRenderer::FlushGeometry, this, _1));
dp::TextureManager::Params params;
params.m_resPostfix = VisualParams::Instance().GetResourcePostfix();
params.m_visualScale = df::VisualParams::Instance().GetVisualScale();
params.m_colors = "colors.txt";
params.m_patterns = "patterns.txt";
params.m_glyphMngParams.m_uniBlocks = "unicode_blocks.txt";
params.m_glyphMngParams.m_whitelist = "fonts_whitelist.txt";
params.m_glyphMngParams.m_blacklist = "fonts_blacklist.txt";
params.m_glyphMngParams.m_sdfScale = VisualParams::Instance().GetGlyphSdfScale();
params.m_glyphMngParams.m_baseGlyphHeight = VisualParams::Instance().GetGlyphBaseSize();
GetPlatform().GetFontNames(params.m_glyphMngParams.m_fonts);
m_texMng->Init(params);
RecacheMyPosition();
}
void BackendRenderer::RecacheMyPosition()
{
auto msg = make_unique_dp<MyPositionShapeMessage>(make_unique_dp<MyPosition>(m_texMng),
make_unique_dp<SelectionShape>(m_texMng));
GLFunctions::glFlush();
m_commutator->PostMessage(ThreadsCommutator::RenderThread, move(msg), MessagePriority::High);
}
void BackendRenderer::FlushGeometry(drape_ptr<Message> && message)
{
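// Flush the upload context so the geometry is visible to the render thread before the message is posted.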
GLFunctions::glFlush();
m_commutator->PostMessage(ThreadsCommutator::RenderThread, move(message), MessagePriority::Normal);
}
} // namespace df
| apache-2.0 |
jacksonic/foamTableTop | bower_components/foam2-experimental/src/foam/u2/ReadWriteView.js | 1810 | /**
* @license
* Copyright 2015 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO: doc
foam.CLASS({
package: 'foam.u2',
name: 'ReadWriteView',
extends: 'foam.u2.View',
requires: [ 'foam.u2.tag.Input' ],
methods: [
function initE() {
// Don't create ReadView if no data (saves memory and startup time).
if ( this.isLoaded() ) {
this.initReadView();
} else {
this.listenForLoad();
}
},
// Template Methods
function isLoaded() {
/** Return true iff data is available for this view. **/
return this.data;
},
function listenForLoad() {
this.data$.sub(this.onDataLoad);
},
function toReadE() {
return this.E('span').add(this.data$);
},
function toWriteE() {
this.data$.sub(this.onDataLoad);
return this.Input.create({data$: this.data$});
}
],
listeners: [
function onDataLoad() {
this.data$.unsub(this.onDataLoad);
this.initReadView();
},
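// Clicking the read-only element swaps in the editable input; blurring the input swaps the read view back.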
function initReadView() {
this.removeAllChildren().add(this.toReadE().on('click', this.initWriteView));
},
function initWriteView() {
this.removeAllChildren().add(this.toWriteE().on('blur', this.initReadView).focus());
}
]
});
| apache-2.0 |
praveenkumar/minishift | cmd/minishift/cmd/root.go | 17319 | /*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
goflag "flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/docker/machine/libmachine/log"
"github.com/golang/glog"
"github.com/minishift/minishift/cmd/minishift/cmd/addon"
cmdAddon "github.com/minishift/minishift/cmd/minishift/cmd/addon"
configCmd "github.com/minishift/minishift/cmd/minishift/cmd/config"
"github.com/minishift/minishift/cmd/minishift/cmd/dns"
hostfolderCmd "github.com/minishift/minishift/cmd/minishift/cmd/hostfolder"
"github.com/minishift/minishift/cmd/minishift/cmd/image"
cmdImage "github.com/minishift/minishift/cmd/minishift/cmd/image"
cmdOpenshift "github.com/minishift/minishift/cmd/minishift/cmd/openshift"
cmdProfile "github.com/minishift/minishift/cmd/minishift/cmd/profile"
cmdUtil "github.com/minishift/minishift/cmd/minishift/cmd/util"
"github.com/minishift/minishift/pkg/minikube/constants"
minishiftConfig "github.com/minishift/minishift/pkg/minishift/config"
minishiftConstants "github.com/minishift/minishift/pkg/minishift/constants"
profileActions "github.com/minishift/minishift/pkg/minishift/profile"
"github.com/minishift/minishift/pkg/util/filehelper"
"github.com/minishift/minishift/pkg/util/os/atexit"
"github.com/minishift/minishift/pkg/version"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/minishift/minishift/cmd/minishift/state"
)
const (
showLibmachineLogs = "show-libmachine-logs"
profileCmd = "profile"
profileFlag = "profile"
profileSetCmd = "set"
enableExperimentalEnv = "MINISHIFT_ENABLE_EXPERIMENTAL"
invalidProfileName = "Profile names must consist of alphanumeric characters only."
)
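// Flags defined by external packages (glog) whose values may also be supplied through viper.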
var viperWhiteList = []string{
"v",
"alsologtostderr",
"log_dir",
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "minishift",
Short: "Minishift is a tool for application development in local OpenShift clusters.",
Long: `Minishift is a command-line tool that provisions and manages single-node OpenShift clusters optimized for development workflows.`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
var (
err error
isAddonInstallRequired bool
)
// If profile name is 'minishift' then ignore the valid profile check.
if constants.ProfileName != constants.DefaultProfileName {
checkForValidProfileOrExit(cmd)
}
constants.MachineName = constants.ProfileName
constants.Minipath = constants.GetProfileHomeDir(constants.ProfileName)
// Initialize the instance directory structure
state.InstanceDirs = state.NewMinishiftDirs(constants.Minipath)
constants.KubeConfigPath = filepath.Join(state.InstanceDirs.Machines, constants.MachineName+"_kubeconfig")
if !filehelper.Exists(state.InstanceDirs.Addons) {
isAddonInstallRequired = true
}
// creating all directories for minishift run
createMinishiftDirs(state.InstanceDirs)
// Ensure the global viper config file exists.
ensureConfigFileExists(constants.GlobalConfigFile)
// Ensure the viper config file exists.
ensureConfigFileExists(constants.ConfigFile)
		// Read the config file and get the details about the existing image cache.
		// This should be removed after 2-3 releases of minishift.
cfg, err := minishiftConfig.ReadViperConfig(constants.ConfigFile)
if err != nil {
atexit.ExitWithMessage(1, err.Error())
}
cacheImages := cmdImage.GetConfiguredCachedImages(cfg)
addonConfig := cmdAddon.GetAddOnConfiguration()
// If AllInstanceConfig is not defined we should define it now.
if minishiftConfig.AllInstancesConfig == nil {
ensureAllInstanceConfigPath(constants.AllInstanceConfigPath)
minishiftConfig.AllInstancesConfig, err = minishiftConfig.NewAllInstancesConfig(constants.AllInstanceConfigPath)
if err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error creating all instance config: %s", err.Error()))
}
}
// If older instance state config file exists, rename the file else create a new file
_, err = os.Stat(minishiftConstants.GetInstanceStateConfigOldPath())
if err == nil {
if err := os.Rename(minishiftConstants.GetInstanceStateConfigOldPath(), minishiftConstants.GetInstanceStateConfigPath()); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error moving old state config to new one: %s", err.Error()))
}
}
minishiftConfig.InstanceStateConfig, err = minishiftConfig.NewInstanceStateConfig(minishiftConstants.GetInstanceStateConfigPath())
if err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error creating config for VM: %s", err.Error()))
}
// Create MACHINE_NAME.json (machine config file)
minishiftConfig.InstanceConfig, err = minishiftConfig.NewInstanceConfig(minishiftConstants.GetInstanceConfigPath())
if err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error creating config for VM: %s", err.Error()))
}
		// If a hostfolder config exists for an instance then copy it to the new instance state config.
		// This change should be removed after a few releases.
hostfolder := minishiftConfig.InstanceStateConfig.HostFolders
if len(hostfolder) != 0 {
minishiftConfig.InstanceConfig.HostFolders = hostfolder
			if err := minishiftConfig.InstanceConfig.Write(); err != nil {
				atexit.ExitWithMessage(1, fmt.Sprintf("Error copying existing hostfolder to new instance config: %s", err.Error()))
}
}
		// If a cache-config exists then copy it to the new instance config.
		// This should be removed after 2-3 releases of Minishift.
if len(cacheImages) != 0 {
minishiftConfig.InstanceConfig.CacheImages = cacheImages
			if err := minishiftConfig.InstanceConfig.Write(); err != nil {
				atexit.ExitWithMessage(1, fmt.Sprintf("Error copying existing cache images to new instance config: %s", err.Error()))
}
delete(cfg, "cache-images")
			if err := minishiftConfig.WriteViperConfig(constants.ConfigFile, cfg); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error removing the cache-images entry from older config %s: %s", constants.ConfigFile, err.Error()))
}
}
		// If an addon config exists then copy it to the new instance config.
		// This should be removed after 2-3 releases of Minishift.
if len(addonConfig) != 0 {
minishiftConfig.InstanceConfig.AddonConfig = addonConfig
			if err := minishiftConfig.InstanceConfig.Write(); err != nil {
				atexit.ExitWithMessage(1, fmt.Sprintf("Error copying existing addon config to new instance config: %s", err.Error()))
}
delete(cfg, "addons")
			if err := minishiftConfig.WriteViperConfig(constants.ConfigFile, cfg); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error removing the addon config entry from older config %s: %s", constants.ConfigFile, err.Error()))
}
}
if isAddonInstallRequired {
if err := cmdUtil.UnpackAddons(state.InstanceDirs.Addons); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error installing default add-ons : %s", err))
}
}
// Check marker file created by update command and perform post update execution steps
if filehelper.Exists(filepath.Join(constants.Minipath, constants.UpdateMarkerFileName)) {
if err := performPostUpdateExecution(filepath.Join(constants.Minipath, constants.UpdateMarkerFileName)); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error in performing post update exeuction: %s", err))
}
}
if minishiftConfig.EnableExperimental {
glog.Info("Experimental features are enabled")
}
shouldShowLibmachineLogs := viper.GetBool(showLibmachineLogs)
if glog.V(3) {
log.SetDebug(true)
}
if !shouldShowLibmachineLogs {
log.SetOutWriter(ioutil.Discard)
log.SetErrWriter(ioutil.Discard)
}
setDefaultActiveProfile()
// Adding minishift version information to debug logs
if glog.V(2) {
fmt.Println(fmt.Sprintf("-- minishift version: v%s+%s", version.GetMinishiftVersion(), version.GetCommitSha()))
}
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := RootCmd.Execute(); err != nil {
atexit.ExitWithMessage(1, err.Error())
}
}
// Handle config values for flags used in external packages (e.g. glog)
// by setting them directly, using values from viper when not passed in as args
func setFlagsUsingViper() {
for _, config := range viperWhiteList {
var a = pflag.Lookup(config)
viper.SetDefault(a.Name, a.DefValue)
// If the flag is set, override viper value
if a.Changed {
viper.Set(a.Name, a.Value.String())
}
// Viper will give precedence first to calls to the Set command,
// then to values from the config.yml
a.Value.Set(viper.GetString(a.Name))
a.Changed = true
}
}
func processEnvVariables() {
enableExperimental, err := cmdUtil.GetBoolEnv(enableExperimentalEnv)
if err == cmdUtil.BooleanFormatError {
atexit.ExitWithMessage(1, fmt.Sprintf("Error enabling experimental features: %s", err))
}
minishiftConfig.EnableExperimental = enableExperimental
}
func init() {
processEnvVariables()
RootCmd.PersistentFlags().Bool(showLibmachineLogs, false, "Show logs from libmachine.")
RootCmd.PersistentFlags().String(profileFlag, constants.DefaultProfileName, "Profile name")
RootCmd.AddCommand(configCmd.ConfigCmd)
RootCmd.AddCommand(cmdOpenshift.OpenShiftCmd)
RootCmd.AddCommand(hostfolderCmd.HostFolderCmd)
RootCmd.AddCommand(addon.AddonsCmd)
RootCmd.AddCommand(image.ImageCmd)
RootCmd.AddCommand(cmdProfile.ProfileCmd)
if minishiftConfig.EnableExperimental {
RootCmd.AddCommand(dns.DnsCmd)
}
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
logDir := pflag.Lookup("log_dir")
if !logDir.Changed {
logDir.Value.Set(constants.MakeMiniPath("logs"))
}
viper.BindPFlags(RootCmd.PersistentFlags())
cobra.OnInitialize(initConfig)
verbosity := pflag.Lookup("v")
verbosity.Usage += ". Level varies from 1 to 5 (default 1)."
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
profile := initializeProfile()
if (profile != "") && (profile != constants.DefaultProfileName) {
constants.ProfileName = profile
constants.ConfigFile = constants.MakeMiniPath("profiles", profile, "config", "config.json")
}
// Initializing the global config file with Viper
viper.SetConfigFile(constants.GlobalConfigFile)
viper.SetConfigType("json")
err := viper.ReadInConfig()
if err != nil {
glog.Warningf("Error reading config file at '%s': %s", constants.GlobalConfigFile, err)
}
configPath := constants.ConfigFile
viper.SetConfigFile(configPath)
viper.SetConfigType("json")
err = viper.MergeInConfig()
if err != nil {
glog.Warningf("Error reading config file at '%s': %s", configPath, err)
}
setupViper()
}
// initializeProfile always returns a profile name based on the checks below.
// 1. If profile set <PROFILE_NAME> is used then return PROFILE_NAME
// 2. If --profile <PROFILE_NAME> then return PROFILE_NAME
// 3. If no profile command or flag then return active profile name.
func initializeProfile() string {
var (
profileName string
err error
activeProfile string
profileCmdAlias = []string{
"profiles",
"instance",
}
)
	// Check if profileCmd is part of os.Args so that it takes precedence over the `--profile` argument
var isProfileCmdUsed bool
for _, arg := range os.Args {
if arg == profileCmd || arg == profileCmdAlias[0] || arg == profileCmdAlias[1] {
isProfileCmdUsed = true
}
}
for i, arg := range os.Args {
if !isProfileCmdUsed {
// This will match if `--profile` flag is used
if arg == "--"+profileFlag {
profileName = os.Args[i+1]
break
}
}
		// This will match if we used the profile command or its alias commands
if arg == profileCmd || arg == profileCmdAlias[0] || arg == profileCmdAlias[1] {
			// This makes sure that if the user specifies the profile command without any subcommand
			// it does not panic with an index-out-of-range error
if len(os.Args) <= i+2 {
break
}
// For use cases when minishift profile set PROFILE_NAME is used
if os.Args[i+1] == profileSetCmd {
profileName = os.Args[i+2]
}
break
}
}
// Check if the allinstance config is present. If present we need to check active profile information.
_, err = os.Stat(constants.AllInstanceConfigPath)
if !os.IsNotExist(err) {
minishiftConfig.AllInstancesConfig, err = minishiftConfig.NewAllInstancesConfig(constants.AllInstanceConfigPath)
if err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error initializing all instance config: %s", err.Error()))
}
activeProfile = profileActions.GetActiveProfile()
}
if profileName != "" {
return profileName
}
if activeProfile != "" {
return activeProfile
}
return ""
}
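// A hedged illustration of the precedence implemented by initializeProfile above
// (the command lines and profile names are examples only, not values used by this package):
//
//	minishift profile set myprof    -> "myprof"  (explicit profile set subcommand)
//	minishift start --profile dev   -> "dev"     (--profile flag when no profile subcommand is present)
//	minishift start                 -> the active profile from the all-instances config, or "" if none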
func setupViper() {
viper.SetEnvPrefix(constants.MiniShiftEnvPrefix)
// Replaces '-' in flags with '_' in env variables
// e.g. show-libmachine-logs => $ENVPREFIX_SHOW_LIBMACHINE_LOGS
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
setFlagsUsingViper()
}
func ensureConfigFileExists(configPath string) {
if _, err := os.Stat(configPath); os.IsNotExist(err) {
jsonRoot := []byte("{}")
f, err := os.Create(configPath)
if err != nil {
glog.Exitf("Cannot create file '%s': %s", configPath, err)
}
defer f.Close()
_, err = f.Write(jsonRoot)
if err != nil {
glog.Exitf("Cannot encode config '%s': %s", configPath, err)
}
}
}
// performPostUpdateExecution executes the post update actions like unpacking the default addons
// if user chose to update addons during `minishift update` command.
// It also removes the marker file created by the update command to avoid repeating the post update execution process
func performPostUpdateExecution(markerPath string) error {
var markerData UpdateMarker
file, err := ioutil.ReadFile(markerPath)
if err != nil {
return err
}
json.Unmarshal(file, &markerData)
if markerData.InstallAddon {
fmt.Println(fmt.Sprintf("Minishift was upgraded from v%s to v%s. Running post update actions.", markerData.PreviousVersion, version.GetMinishiftVersion()))
fmt.Print("--- Updating default add-ons ... ")
cmdUtil.UnpackAddons(state.InstanceDirs.Addons)
fmt.Println("OK")
fmt.Println(fmt.Sprintf("Default add-ons '%s' installed", strings.Join(cmdUtil.DefaultAssets, ", ")))
}
// Delete the marker file once post update execution is done
if err := os.Remove(markerPath); err != nil {
return err
}
return nil
}
func ensureAllInstanceConfigPath(configPath string) {
configDir := filepath.Dir(configPath)
if err := os.MkdirAll(configDir, 0777); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error creating directory: %s", configDir))
}
}
func createMinishiftDirs(dirs *state.MinishiftDirs) {
dirPaths := reflect.ValueOf(*dirs)
for i := 0; i < dirPaths.NumField(); i++ {
path := dirPaths.Field(i).Interface().(string)
if err := os.MkdirAll(path, 0777); err != nil {
atexit.ExitWithMessage(1, fmt.Sprintf("Error creating directory: %s", path))
}
}
}
// If there is no active profile we need to set minishift as the default profile.
// This keeps the profile behaviour backward compatible and consistent with user expectations.
func setDefaultActiveProfile() {
if minishiftConfig.AllInstancesConfig == nil {
atexit.ExitWithMessage(1, "Error: All instance config is not initialized")
}
activeProfile := profileActions.GetActiveProfile()
if activeProfile == "" {
err := profileActions.SetDefaultProfileActive()
if err != nil {
atexit.ExitWithMessage(1, err.Error())
}
// Only set oc context to default profile when user is looking for default profile
// i.e. "minishift start" with minishift as active profile or "minishift start --profile minishift"
// Otherwise minishift will be the active profile irrespective of what user chooses
if constants.ProfileName == constants.DefaultProfileName {
cmdUtil.SetOcContext(constants.DefaultProfileName)
}
}
}
// checkForValidProfileOrExit checks whether a profile exists when the --profile flag is used.
// If the profile does not exist then it errors out with a message.
func checkForValidProfileOrExit(cmd *cobra.Command) {
if !cmdUtil.IsValidProfileName(constants.ProfileName) {
atexit.ExitWithMessage(1, invalidProfileName)
}
if cmd.Parent() != nil {
		// This condition is true for every command except `minishift profile <subcommand>` and `minishift start ...`
if cmd.Parent().Name() != profileCmd && cmd.Name() != startCmd.Name() {
if !cmdUtil.IsValidProfile(constants.ProfileName) {
atexit.ExitWithMessage(1, fmt.Sprintf("Profile '%s' doesn't exist, Use 'minishift profile set %s' or 'minishift start --profile %s' to create", constants.ProfileName, constants.ProfileName, constants.ProfileName))
}
}
}
}
| apache-2.0 |
pluto-build/pluto | src/build/pluto/stamp/FileContentStamper.java | 1438 | package build.pluto.stamp;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import org.sugarj.common.FileCommands;
/**
* @author Sebastian Erdweg
*/
public class FileContentStamper implements Stamper {
private static final long serialVersionUID = 7688772212399111636L;
private FileContentStamper() {}
public static final Stamper instance = new FileContentStamper();
/**
   * @see build.pluto.stamp.Stamper#stampOf(java.io.File)
*/
@Override
public Stamp stampOf(File p) {
if (!p.exists())
return new ValueStamp<>(this, new byte[0]);
if (p.isDirectory()) {
Map<File, Stamp> stamps = new HashMap<>();
stamps.put(p, new ValueStamp<>(this,p.lastModified()));
for (Path sub : FileCommands.listFilesRecursive(p.toPath()))
if (Files.isDirectory(sub))
stamps.put(sub.toFile(), new ValueStamp<>(this, sub.toFile().lastModified()));
else
stamps.put(sub.toFile(), fileContentStamp(sub));
return new ValueStamp<>(this, stamps);
}
return fileContentStamp(p.toPath());
}
private Stamp fileContentStamp(Path p) {
try {
return new ByteArrayStamp(this, Files.readAllBytes(p));
} catch (IOException e) {
e.printStackTrace();
return new ByteArrayStamp(this, null);
}
}
}
| apache-2.0 |
aosp-mirror/platform_frameworks_support | leanback/src/androidTest/java/androidx/leanback/app/wizard/GuidedStepAttributesTestFragment.java | 3243 | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package androidx.leanback.app.wizard;
import android.os.Bundle;
import androidx.leanback.app.GuidedStepFragment;
import androidx.leanback.widget.GuidanceStylist;
import androidx.leanback.widget.GuidedAction;
import java.util.HashMap;
import java.util.List;
public class GuidedStepAttributesTestFragment extends GuidedStepFragment {
private static String TAG = "GuidedStepAttributesTestFragment";
static class Callback {
public void onActionClicked(GuidedStepFragment fragment, long id) {
}
}
static HashMap<Long, Callback> sCallbacks = new HashMap();
public static GuidanceStylist.Guidance GUIDANCE = null;
public static List<GuidedAction> ACTION_LIST = null;
public static long LAST_CLICKED_ACTION_ID = -1;
public static long LAST_SELECTED_ACTION_ID = -1;
@Override
public GuidanceStylist.Guidance onCreateGuidance(Bundle savedInstanceState) {
if (GUIDANCE == null ) {
return new GuidanceStylist.Guidance("", "", "", null);
}
return GUIDANCE;
}
@Override
public void onCreateActions(List<GuidedAction> actions, Bundle savedInstanceState) {
if (ACTION_LIST == null)
return;
actions.addAll(ACTION_LIST);
}
@Override
public void onGuidedActionClicked(GuidedAction action) {
        super.onGuidedActionClicked(action);
Callback callback = sCallbacks.get(action.getId());
if (callback != null) {
callback.onActionClicked(this, action.getId());
} else {
LAST_CLICKED_ACTION_ID = action.getId();
}
}
@Override
public void onGuidedActionFocused(GuidedAction action) {
super.onGuidedActionFocused(action);
LAST_SELECTED_ACTION_ID = action.getId();
}
@Override
public boolean onSubGuidedActionClicked(GuidedAction action) {
super.onSubGuidedActionClicked(action);
LAST_CLICKED_ACTION_ID = action.getId();
return true;
}
@Override
public long onGuidedActionEditedAndProceed(GuidedAction action) {
Callback callback = sCallbacks.get(action.getId());
if (callback != null) {
callback.onActionClicked(this, action.getId());
} else {
super.onGuidedActionEditedAndProceed(action);
}
return GuidedAction.ACTION_ID_CURRENT;
}
public static void setActionClickCallback(long id, Callback callback) {
sCallbacks.put(id, callback);
}
public static void clear() {
LAST_CLICKED_ACTION_ID = -1;
LAST_SELECTED_ACTION_ID = -1;
sCallbacks.clear();
}
}
| apache-2.0 |
equella/Equella | Source/Plugins/Core/com.equella.base/src/com/tle/beans/usermanagement/standard/wrapper/UserWrapperSettings.java | 1309 | /*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.beans.usermanagement.standard.wrapper;
import com.tle.beans.ump.UserManagementSettings;
import com.tle.common.settings.annotation.Property;
public class UserWrapperSettings extends UserManagementSettings {
private static final long serialVersionUID = 1L;
@Property(key = "wrapper.user.enabled")
private boolean enabled;
@Override
public boolean isEnabled() {
return enabled;
}
@Override
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
}
| apache-2.0 |
aslanbekirov/crate | sql/src/main/java/io/crate/operation/scalar/regex/MatchesFunction.java | 5986 | /*
* Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.operation.scalar.regex;
import com.google.common.base.Preconditions;
import io.crate.analyze.symbol.Function;
import io.crate.analyze.symbol.Literal;
import io.crate.analyze.symbol.Symbol;
import io.crate.analyze.symbol.SymbolType;
import io.crate.metadata.*;
import io.crate.operation.Input;
import io.crate.operation.scalar.ScalarFunctionModule;
import io.crate.types.ArrayType;
import io.crate.types.DataType;
import io.crate.types.DataTypes;
import org.apache.lucene.util.BytesRef;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
public class MatchesFunction extends Scalar<BytesRef[], Object> implements DynamicFunctionResolver {
public static final String NAME = "regexp_matches";
private static final DataType arrayStringType = new ArrayType(DataTypes.STRING);
private static FunctionInfo createInfo(List<DataType> types) {
return new FunctionInfo(new FunctionIdent(NAME, types), new ArrayType(types.get(0)));
}
public static void register(ScalarFunctionModule module) {
module.register(NAME, new MatchesFunction());
}
private FunctionInfo info;
private RegexMatcher regexMatcher;
private MatchesFunction() {
}
public MatchesFunction(FunctionInfo info) {
this.info = info;
}
@Override
public FunctionInfo info() {
return info;
}
public RegexMatcher regexMatcher() {
return regexMatcher;
}
@Override
public Symbol normalizeSymbol(Function symbol) {
final int size = symbol.arguments().size();
assert (size >= 2 && size <= 3);
if (anyNonLiterals(symbol.arguments())) {
return symbol;
}
final Symbol input = symbol.arguments().get(0);
final Symbol pattern = symbol.arguments().get(1);
final Object inputValue = ((Input) input).value();
final Object patternValue = ((Input) pattern).value();
if (inputValue == null || patternValue == null) {
return Literal.NULL;
}
Input[] args = new Input[size];
args[0] = (Input) input;
args[1] = (Input) pattern;
if (size == 3) {
args[2] = (Input)symbol.arguments().get(2);
}
return Literal.newLiteral(evaluate(args), arrayStringType);
}
@Override
public Scalar<BytesRef[], Object> compile(List<Symbol> arguments) {
assert arguments.size() > 1;
String pattern = null;
if (arguments.get(1).symbolType() == SymbolType.LITERAL) {
Literal literal = (Literal) arguments.get(1);
Object patternVal = literal.value();
if (patternVal == null) {
return this;
}
pattern = ((BytesRef) patternVal).utf8ToString();
}
BytesRef flags = null;
if (arguments.size() == 3) {
assert arguments.get(2).symbolType() == SymbolType.LITERAL;
flags = (BytesRef) ((Literal) arguments.get(2)).value();
}
if (pattern != null) {
regexMatcher = new RegexMatcher(pattern, flags);
} else {
regexMatcher = null;
}
return this;
}
@Override
public BytesRef[] evaluate(Input[] args) {
assert (args.length > 1 && args.length < 4);
Object val = args[0].value();
final Object patternValue = args[1].value();
if (val == null || patternValue == null) {
return null;
}
assert patternValue instanceof BytesRef;
// value can be a string if e.g. result is retrieved by ESSearchTask
if (val instanceof String) {
val = new BytesRef((String)val);
}
RegexMatcher matcher;
if (regexMatcher == null) {
String pattern = ((BytesRef) patternValue).utf8ToString();
BytesRef flags = null;
if (args.length == 3) {
flags = (BytesRef) args[2].value();
}
matcher = new RegexMatcher(pattern, flags);
} else {
matcher = regexMatcher;
}
if (matcher.match((BytesRef)val)) {
return matcher.groups();
}
return null;
}
@Override
public FunctionImplementation<Function> getForTypes(List<DataType> dataTypes) throws IllegalArgumentException {
Preconditions.checkArgument(dataTypes.size() > 1 && dataTypes.size() < 4
&& dataTypes.get(0) == DataTypes.STRING && dataTypes.get(1) == DataTypes.STRING,
String.format(Locale.ENGLISH,
"[%s] Function implementation not found for argument types %s",
NAME, Arrays.toString(dataTypes.toArray())));
if (dataTypes.size() == 3) {
Preconditions.checkArgument(dataTypes.get(2) == DataTypes.STRING, "flags must be of type string");
}
return new MatchesFunction(createInfo(dataTypes));
}
}
| apache-2.0 |
LegNeato/buck | test/com/facebook/buck/cli/BuildPrehookTest.java | 5186 | /*
* Copyright 2018-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.cli;
import static org.junit.Assert.assertThat;
import com.facebook.buck.config.BuckConfig;
import com.facebook.buck.config.FakeBuckConfig;
import com.facebook.buck.core.cell.Cell;
import com.facebook.buck.core.cell.TestCellBuilder;
import com.facebook.buck.event.BuckEventBus;
import com.facebook.buck.event.BuckEventBusForTests;
import com.facebook.buck.event.ConsoleEvent;
import com.facebook.buck.event.FakeBuckEventListener;
import com.facebook.buck.testutil.FakeProjectFilesystem;
import com.facebook.buck.util.FakeListeningProcessExecutor;
import com.facebook.buck.util.FakeListeningProcessState;
import com.facebook.buck.util.ProcessExecutorParams;
import com.facebook.buck.util.timing.SettableFakeClock;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.logging.Level;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Matchers;
import org.junit.Before;
import org.junit.Test;
public class BuildPrehookTest {
private Collection<FakeListeningProcessState> processStates;
private FakeListeningProcessExecutor processExecutor;
private Cell cell;
private BuckEventBus eventBus;
private BuckConfig buckConfig;
private FakeBuckEventListener eventListener;
private ProcessExecutorParams params;
@Before
public void setUp() {
FakeProjectFilesystem filesystem = new FakeProjectFilesystem();
cell = new TestCellBuilder().setFilesystem(filesystem).build();
String pathToScript =
cell.getFilesystem().getPathForRelativePath("script.sh").toAbsolutePath().toString();
buckConfig =
FakeBuckConfig.builder()
.setSections(ImmutableMap.of("build", ImmutableMap.of("prehook_script", pathToScript)))
.build();
processExecutor =
new FakeListeningProcessExecutor(
params -> {
this.params = params;
return processStates;
},
SettableFakeClock.DO_NOT_CARE);
eventBus = BuckEventBusForTests.newInstance();
eventListener = new FakeBuckEventListener();
eventBus.register(eventListener);
}
@Test
public void presubmitHookPostsAWarningOnStderr() throws Exception {
String warningMessage = "some_warning";
FakeListeningProcessState stderrState =
FakeListeningProcessState.builder()
.setStderr(StandardCharsets.UTF_8.encode(warningMessage))
.setType(FakeListeningProcessState.Type.STDERR)
.build();
FakeListeningProcessState exitState = FakeListeningProcessState.ofExit(0);
processStates = Arrays.asList(stderrState, exitState);
try (BuildPrehook buildPrehook = newBuildHook()) {
buildPrehook.startPrehookScript();
processExecutor.waitForProcess(buildPrehook.process);
}
ConsoleEvent warning = (ConsoleEvent) Iterables.getOnlyElement(eventListener.getEvents());
assertThat(warning.getLevel(), CoreMatchers.equalTo(Level.WARNING));
assertThat(warning.getMessage(), CoreMatchers.equalTo(warningMessage));
}
@Test
public void presubmitHookPostsNothingOnNoStdErr() throws Exception {
processStates = Collections.singleton(FakeListeningProcessState.ofExit(0));
try (BuildPrehook buildPrehook = newBuildHook()) {
buildPrehook.startPrehookScript();
processExecutor.waitForProcess(buildPrehook.process);
}
assertThat(eventListener.getEvents(), Matchers.empty());
}
@Test
public void buildArgumentsArePassed() throws Exception {
processStates = Collections.singleton(FakeListeningProcessState.ofExit(0));
try (BuildPrehook buildPrehook = newBuildHook(ImmutableList.of("target"))) {
buildPrehook.startPrehookScript();
processExecutor.waitForProcess(buildPrehook.process);
String argumentsFile = params.getEnvironment().get().get("BUCK_BUILD_ARGUMENTS_FILE");
String argumentsJson = Iterables.getOnlyElement(Files.readAllLines(Paths.get(argumentsFile)));
assertThat(argumentsJson, Matchers.equalTo("[ \"target\" ]"));
}
}
private BuildPrehook newBuildHook() {
return newBuildHook(ImmutableList.of());
}
private BuildPrehook newBuildHook(ImmutableList<String> arguments) {
ImmutableMap<String, String> env = ImmutableMap.of();
return new BuildPrehook(processExecutor, cell, eventBus, buckConfig, env, arguments);
}
}
| apache-2.0 |
ThorsteinnAdal/webcrawls_in_singapore_shippinglane | bwi_fuels/bwi_page.py | 2171 | __author__ = 'thorsteinn'
import csv
import urllib2
import re
import json
from bs4 import BeautifulSoup
def bwi_page(country, port):
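    """Scrape the Bunkerworld price page for the given country/port code and return a dict of
    price tables keyed 'T0'..'T11' (plus a 'port' entry such as 'NLRTM')."""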
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
url = 'http://www.bunkerworld.com/prices/port/{c}/{p}/'.format(c = country, p = port)
page = opener.open(url)
soup = BeautifulSoup(page.read(), 'html.parser')
fuelHeaders = soup.find_all('div', {'class':"content_block_title"}) # here are all the headers that matter
    db = {'T0': {'fuel': 'IFO380', 'iso': 'RMG380 RMH380', 'report': 'daily', 'table': []},
          'T1': {'fuel': 'IFO180', 'iso': 'RME180 RMF180', 'report': 'daily', 'table': []},
          'T2': {'fuel': 'MGO', 'iso': 'DMA DMX', 'report': 'daily', 'table': []},
          'T3': {'fuel': 'LSMGO', 'iso': '0.1% sulfur', 'report': 'daily', 'table': []},
          'T4': {'fuel': 'IFO380', 'iso': 'RMG380 RMH380', 'report': 'monthly', 'table': []},
          'T5': {'fuel': 'IFO180', 'iso': 'RME180 RMF180', 'report': 'monthly', 'table': []},
          'T6': {'fuel': 'MGO', 'iso': 'DMA DMX', 'report': 'monthly', 'table': []},
          'T7': {'fuel': 'LSMGO', 'iso': '0.1% sulfur', 'report': 'monthly', 'table': []},
          'T8': {'fuel': 'IFO380', 'iso': 'RMG380 RMH380', 'report': '30 day', 'table': []},
          'T9': {'fuel': 'IFO180', 'iso': 'RME180 RMF180', 'report': '30 day', 'table': []},
          'T10': {'fuel': 'MGO', 'iso': 'DMA DMX', 'report': '30 day', 'table': []},
          'T11': {'fuel': 'LSMGO', 'iso': '0.1% sulfur', 'report': '30 day', 'table': []}}
tables = soup.find_all('table', {'class':"item_table row_borders"})
this_table = []
index = 0
for table in tables:
rows = table.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
this_table.append([ele for ele in cols if ele]) # Get rid of empty values
db['port'] = "{}{}".format(country.upper(),port.upper())
        db['T%s' % index]['table'] = this_table
this_table = []
        index += 1
return db
# print bwi_page('nl', 'rtm')  # others: Singapore sg/sin/, Houston us/hou/, Fujairah ae/fjr/
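# Hedged usage sketch: the port code follows the examples in the comment above; the output
# filename is an assumption, not something defined elsewhere in this script.
if __name__ == '__main__':
    prices = bwi_page('sg', 'sin')
    with open('bwi_prices_sg_sin.json', 'w') as fh:
        json.dump(prices, fh, indent=2)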
| apache-2.0 |
goodwinnk/intellij-community | plugins/xpath/xpath-view/src/org/intellij/plugins/xpathView/ui/InputExpressionDialog.java | 22584 | /*
* Copyright 2007 Sascha Weinreuter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.intellij.plugins.xpathView.ui;
import com.intellij.codeInsight.daemon.DaemonCodeAnalyzer;
import com.intellij.codeInsight.intention.IntentionAction;
import com.intellij.javaee.ExternalResourceManager;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.event.DocumentEvent;
import com.intellij.openapi.editor.event.DocumentListener;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.util.Pair;
import com.intellij.psi.*;
import com.intellij.psi.xml.XmlElement;
import com.intellij.psi.xml.XmlFile;
import com.intellij.ui.EditorComboBox;
import com.intellij.ui.ListCellRendererWrapper;
import com.intellij.uiDesigner.core.GridConstraints;
import com.intellij.util.ArrayUtil;
import com.intellij.util.IncorrectOperationException;
import com.intellij.util.LocalTimeCounter;
import com.intellij.util.containers.BidirectionalMap;
import org.intellij.lang.xpath.XPathFileType;
import org.intellij.lang.xpath.context.*;
import org.intellij.lang.xpath.psi.PrefixReference;
import org.intellij.lang.xpath.psi.QNameElement;
import org.intellij.lang.xpath.psi.XPathElement;
import org.intellij.plugins.xpathView.Config;
import org.intellij.plugins.xpathView.HistoryElement;
import org.intellij.plugins.xpathView.eval.EvalExpressionDialog;
import org.intellij.plugins.xpathView.support.XPathSupport;
import org.intellij.plugins.xpathView.util.Namespace;
import org.intellij.plugins.xpathView.util.NamespaceCollector;
import org.intellij.plugins.xpathView.util.Variable;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.event.ListDataEvent;
import javax.swing.event.ListDataListener;
import javax.xml.namespace.QName;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.util.*;
@SuppressWarnings("unchecked")
public abstract class InputExpressionDialog<FormType extends InputForm> extends ModeSwitchableDialog {
protected final Project myProject;
protected final FormType myForm;
protected final Config mySettings;
private final HistoryModel myModel;
private final Document myDocument;
private final MultilineEditor myEditor;
private final EditorComboBox myComboBox;
private JComponent myEditorComponent;
@Nullable private Set<Namespace> myNamespaceCache;
private InteractiveContextProvider myContextProvider;
private final PsiFile myXPathFile;
public InputExpressionDialog(final Project project, Config settings, final HistoryElement[] _history, FormType form) {
super(project, false);
myProject = project;
myForm = form;
setResizable(true);
setModal(true);
setHorizontalStretch(1.3f);
mySettings = settings;
myDocument = createXPathDocument(project, _history.length > 0 ? _history[_history.length - 1] : null);
myXPathFile = PsiDocumentManager.getInstance(myProject).getPsiFile(myDocument);
myModel = new HistoryModel(_history, myDocument);
myEditor = new MultilineEditor(myDocument, project, XPathFileType.XPATH, myModel);
myModel.addListDataListener(new ListDataListener() {
final DaemonCodeAnalyzer analyzer = DaemonCodeAnalyzer.getInstance(project);
@Override
public void intervalAdded(ListDataEvent e) {
}
@Override
public void intervalRemoved(ListDataEvent e) {
}
@Override
public void contentsChanged(ListDataEvent e) {
final HistoryElement item = myModel.getSelectedItem();
if (item != null) {
myContextProvider.getNamespaceContext().setMap(asMap(item.namespaces));
if (myXPathFile != null) {
analyzer.restart(myXPathFile);
}
}
}
});
myComboBox = new EditorComboBox(myDocument, project, XPathFileType.XPATH);
myComboBox.setRenderer(new ListCellRendererWrapper<HistoryElement>() {
@Override
public void customize(JList list, HistoryElement value, int index, boolean selected, boolean hasFocus) {
setText(value != null ? value.expression : "");
}
});
myComboBox.setModel(myModel);
myComboBox.setEditable(true);
myDocument.addDocumentListener(new DocumentListener() {
@Override
public void documentChanged(@NotNull DocumentEvent e) {
updateOkAction();
}
});
init();
}
@Override
protected void init() {
myForm.getIcon().setText(null);
myForm.getIcon().setIcon(Messages.getQuestionIcon());
myForm.getEditContextButton().addActionListener(new ActionListener() {
@Override
@SuppressWarnings({"unchecked"})
public void actionPerformed(ActionEvent e) {
final HistoryElement selectedItem = myModel.getSelectedItem();
final Collection<Namespace> n;
final Collection<Variable> v;
if (selectedItem != null) {
n = selectedItem.namespaces;
v = selectedItem.variables;
}
else {
n = Collections.emptySet();
v = Collections.emptySet();
}
// FIXME
final Collection<Namespace> namespaces = myNamespaceCache != null ?
merge(myNamespaceCache, n, false) : n;
final Set<String> unresolvedPrefixes = findUnresolvedPrefixes();
final EditContextDialog dialog =
new EditContextDialog(myProject, unresolvedPrefixes, namespaces, v, myContextProvider);
if (dialog.showAndGet()) {
final Pair<Collection<Namespace>, Collection<Variable>> context = dialog.getContext();
final Collection<Namespace> newNamespaces = context.getFirst();
final Collection<Variable> newVariables = context.getSecond();
updateContext(newNamespaces, newVariables);
SwingUtilities.invokeLater(() -> {
final Editor editor = getEditor();
if (editor != null) {
editor.getContentComponent().grabFocus();
}
});
}
}
});
updateOkAction();
super.init();
}
void updateContext(Collection<Namespace> namespaces, Collection<Variable> variables) {
final HistoryElement selectedItem = myModel.getSelectedItem();
final HistoryElement newElement;
if (selectedItem != null) {
newElement = selectedItem.changeContext(namespaces, variables);
} else {
newElement = new HistoryElement(myDocument.getText(), variables, namespaces);
}
myModel.setSelectedItem(newElement);
// FIXME
if (myNamespaceCache == null) {
myContextProvider.getNamespaceContext().setMap(asMap(namespaces));
}
final DaemonCodeAnalyzer analyzer = DaemonCodeAnalyzer.getInstance(myProject);
analyzer.restart(myXPathFile);
}
private Set<String> findUnresolvedPrefixes() {
final Set<String> prefixes = new HashSet<>();
myXPathFile.accept(new PsiRecursiveElementVisitor(){
@Override
public void visitElement(PsiElement element) {
if (element instanceof QNameElement) {
final PsiReference[] references = element.getReferences();
for (PsiReference reference : references) {
if (reference instanceof PrefixReference) {
final PrefixReference prefixReference = (PrefixReference)reference;
if (prefixReference.isUnresolved()) {
prefixes.add(prefixReference.getPrefix());
}
}
}
}
super.visitElement(element);
}
});
return prefixes;
}
protected FormType getForm() {
return myForm;
}
@Override
protected JComponent createCenterPanel() {
return myForm.getComponent();
}
protected void updateOkAction() {
getOKAction().setEnabled(isOkEnabled());
}
protected boolean isOkEnabled() {
return myEditor.getField().getDocument().getTextLength() > 0;
}
@Nullable
protected Editor getEditor() {
if (getMode() == Mode.ADVANCED) {
return myEditor.getField().getEditor();
} else {
return myComboBox.getEditorEx();
}
}
@Override
protected void setModeImpl(Mode mode) {
// mySettingsPanel.setVisible(mode == Mode.ADVANCED);
myForm.getEditContextButton().setVisible(mode == Mode.ADVANCED);
if (mode == Mode.ADVANCED) {
setEditor(myEditor, GridConstraints.SIZEPOLICY_WANT_GROW);
myEditor.getField().selectAll();
} else {
setEditor(myComboBox, GridConstraints.SIZEPOLICY_FIXED);
myComboBox.setModel(myModel);
myComboBox.getEditor().selectAll();
}
SwingUtilities.invokeLater(() -> {
final Editor editor = getEditor();
if (editor != null) {
editor.getContentComponent().grabFocus();
}
});
}
private void setEditor(JComponent editor, int vSizePolicy) {
if (myEditorComponent != null) {
myForm.getEditorPanel().remove(myEditorComponent);
}
final GridConstraints gridConstraints = new GridConstraints();
gridConstraints.setFill(vSizePolicy == GridConstraints.SIZEPOLICY_WANT_GROW ? GridConstraints.FILL_BOTH : GridConstraints.FILL_HORIZONTAL);
gridConstraints.setVSizePolicy(vSizePolicy);
myForm.getEditorPanel().add(myEditorComponent = editor, gridConstraints);
}
protected static Document createXPathDocument(Project project, HistoryElement historyElement) {
final String expression = historyElement != null ? historyElement.expression : "";
final PsiFile file = PsiFileFactory.getInstance(project).createFileFromText("DummyFile.xpath", XPathFileType.XPATH, expression, LocalTimeCounter.currentTime(), true);
final Document document = PsiDocumentManager.getInstance(project).getDocument(file);
// not sure why this is required...
assert document != null;
document.setReadOnly(false);
assert document.isWritable() : "WTF, document is not writable? Text = <" + expression + ">";
return document;
}
@SuppressWarnings({ "unchecked" })
public boolean show(XmlElement contextElement) {
prepareShow(contextElement);
show();
return isOK();
}
@SuppressWarnings({"unchecked"})
private void prepareShow(XmlElement contextElement) {
final NamespaceCollector.CollectedInfo collectedInfo;
if (contextElement != null) {
collectedInfo = NamespaceCollector.collectInfo((XmlFile)contextElement.getContainingFile());
myNamespaceCache = collectedInfo.namespaces;
} else {
collectedInfo = NamespaceCollector.empty();
myNamespaceCache = null;
}
myContextProvider = new InteractiveContextProvider(contextElement, collectedInfo, myModel);
myContextProvider.attachTo(myXPathFile);
final HistoryElement historyElement = myModel.getSelectedItem();
if (historyElement != null) {
myContextProvider.getNamespaceContext().setMap(asMap(historyElement.namespaces));
} else {
myContextProvider.getNamespaceContext().setMap(asMap(null));
}
updateOkAction();
}
protected static Collection<Namespace> merge(Collection<Namespace> namespaces, Collection<Namespace> cache, boolean merge) {
if (cache == null) return namespaces;
final Set<Namespace> n;
if (merge) {
n = new HashSet<>(cache);
n.removeAll(namespaces);
n.addAll(namespaces);
} else {
n = new HashSet<>(namespaces);
for (Namespace namespace : n) {
for (Namespace cached : cache) {
if (namespace.getUri().equals(cached.getUri())) {
namespace.setPrefix(cached.prefix);
}
}
}
}
return n;
}
@SuppressWarnings({"unchecked"})
protected Map<String, String> asMap(Collection<Namespace> namespaces) {
if (namespaces == null) {
if (myNamespaceCache != null) {
return Namespace.makeMap(myNamespaceCache);
} else {
return Collections.emptyMap();
}
}
if (this.myNamespaceCache != null) {
namespaces = merge(myNamespaceCache, namespaces, false);
}
return Namespace.makeMap(namespaces);
}
@Override
public JComponent getPreferredFocusedComponent() {
final Editor editor = getEditor();
if (editor != null) {
return editor.getContentComponent();
} else {
return null;
}
}
@SuppressWarnings({"unchecked"})
public Context getContext() {
final HistoryElement context = myModel.getSelectedItem();
if (context == null || context.expression == null) {
final Set<Namespace> cache = myNamespaceCache != null ? myNamespaceCache : Collections.emptySet();
return new Context(new HistoryElement(myDocument.getText(), Collections.emptySet(), cache), getMode());
}
final Collection<Namespace> namespaces = myNamespaceCache != null ?
merge(myNamespaceCache, context.namespaces, false) : context.namespaces;
return new Context(new HistoryElement(context.expression, context.variables, namespaces), getMode());
}
public static class Context {
public final HistoryElement input;
public final Mode mode;
Context(HistoryElement context, Mode mode) {
this.input = context;
this.mode = mode;
}
}
private static class MyVariableResolver extends SimpleVariableContext {
private final HistoryModel myModel;
MyVariableResolver(HistoryModel model) {
myModel = model;
}
@Override
@NotNull
public String[] getVariablesInScope(XPathElement element) {
final HistoryElement selectedItem = myModel.getSelectedItem();
if (selectedItem != null) {
return Variable.asSet(selectedItem.variables).toArray(new String[selectedItem.variables.size()]);
} else {
return ArrayUtil.EMPTY_STRING_ARRAY;
}
}
}
private class InteractiveContextProvider extends ContextProvider {
private final XmlElement myContextElement;
private final NamespaceCollector.CollectedInfo myCollectedInfo;
private final MyVariableResolver myVariableResolver;
private final EvalExpressionDialog.MyNamespaceContext myNamespaceContext;
InteractiveContextProvider(XmlElement contextElement, NamespaceCollector.CollectedInfo collectedInfo, HistoryModel model) {
myContextElement = contextElement;
myCollectedInfo = collectedInfo;
myVariableResolver = new MyVariableResolver(model);
myNamespaceContext = new EvalExpressionDialog.MyNamespaceContext();
}
@Override
@NotNull
public ContextType getContextType() {
return XPathSupport.TYPE;
}
@Override
@Nullable
public XmlElement getContextElement() {
return myContextElement;
}
@Override
@NotNull
public EvalExpressionDialog.MyNamespaceContext getNamespaceContext() {
return myNamespaceContext;
}
@Override
public VariableContext getVariableContext() {
return myVariableResolver;
}
@Override
public Set<QName> getAttributes(boolean forValidation) {
return myCollectedInfo.attributes;
}
private Set<QName> filterDefaultNamespace(Set<QName> _set) {
final Set<QName> set = new HashSet<>(_set);
for (Iterator<QName> it = set.iterator(); it.hasNext();) {
final QName name = it.next();
final String prefix = name.getPrefix();
if (prefix == null || prefix.length() == 0) {
final String uri = name.getNamespaceURI();
if (uri != null && uri.length() > 0) {
final String assignedPrefix = myNamespaceContext.getPrefixForURI(uri, null);
if (assignedPrefix == null || assignedPrefix.length() == 0) {
it.remove();
}
}
}
}
return set;
}
@Override
public Set<QName> getElements(boolean forValidation) {
return filterDefaultNamespace(myCollectedInfo.elements);
}
}
protected class MyNamespaceContext implements NamespaceContext {
private BidirectionalMap<String, String> myMap;
@Override
@Nullable
public String getNamespaceURI(String prefix, XmlElement context) {
final String s = myMap.get(prefix);
if (s == null && prefix.length() == 0) {
return "";
}
return s;
}
@Override
@Nullable
public String getPrefixForURI(String uri, XmlElement context) {
final List<String> list = myMap.getKeysByValue(uri);
return list != null && !list.isEmpty() ? list.get(0) : null;
}
@Override
@NotNull
public Collection<String> getKnownPrefixes(XmlElement context) {
return myMap.keySet();
}
@Override
@Nullable
public PsiElement resolve(String prefix, XmlElement context) {
return null;
}
public void setMap(Map<String, String> map) {
myMap = new BidirectionalMap<>();
myMap.putAll(map);
}
@Override
public IntentionAction[] getUnresolvedNamespaceFixes(@NotNull PsiReference reference, String localName) {
return new IntentionAction[]{ new MyRegisterPrefixAction(reference) };
}
@Override
public String getDefaultNamespace(XmlElement context) {
return null;
}
}
private class MyRegisterPrefixAction implements IntentionAction {
private final PsiReference myReference;
MyRegisterPrefixAction(PsiReference reference) {
myReference = reference;
}
@Override
@NotNull
public String getText() {
return "Register namespace prefix";
}
@Override
@NotNull
public String getFamilyName() {
return getText();
}
@Override
public boolean isAvailable(@NotNull Project project, Editor editor, PsiFile file) {
return myReference instanceof PrefixReference && myReference.getElement().isValid() && ((PrefixReference)myReference).isUnresolved();
}
@Override
public void invoke(@NotNull Project project, Editor editor, PsiFile file) throws IncorrectOperationException {
final Set<String> prefix = Collections.singleton(myReference.getCanonicalText());
final Map<String, String> myMap = myContextProvider.getNamespaceContext().myMap;
final Collection<String> list;
if (myNamespaceCache == null) {
final ExternalResourceManager erm = ExternalResourceManager.getInstance();
list = new ArrayList<>(Arrays.asList(erm.getResourceUrls(null, true)));
for (String namespace : myMap.values()) {
list.remove(namespace);
}
Collections.sort((List<String>)list);
}
else {
list = myMap.values();
}
final AddNamespaceDialog dlg = new AddNamespaceDialog(project, prefix, list, myNamespaceCache == null ?
AddNamespaceDialog.Mode.URI_EDITABLE :
AddNamespaceDialog.Mode.FIXED);
if (dlg.showAndGet()) {
final Namespace namespace = new Namespace(dlg.getPrefix(), dlg.getURI());
final HistoryElement selectedItem = myModel.getSelectedItem();
final Collection<Namespace> n;
final Collection<Variable> v;
if (selectedItem != null) {
n = new HashSet<>(selectedItem.namespaces);
n.remove(namespace);
n.add(namespace);
v = selectedItem.variables;
}
else {
n = Collections.singleton(namespace);
//noinspection unchecked
v = Collections.emptySet();
}
updateContext(n, v);
}
}
@Override
public boolean startInWriteAction() {
return false;
}
}
}
| apache-2.0 |
hindog/grid-executor | grid-executor/src/main/scala/com/hindog/grid/GridConfig.scala | 2840 | package com.hindog.grid
import com.hindog.grid.repo.Repository
import org.gridkit.vicluster.ViNode
import scala.collection._
import scala.util.Random
/*
* __ _ __
* / / (_)__ ___/ /__ ____
* / _ \/ / _ \/ _ / _ \/ _ /
* /_//_/_/_//_/\_,_/\___/\_, /
* /___/
*
* This class defines a configuration to be used with GridExecutor
*
* You can add a node multiple times to a config, and for each one you get an "execution slot" on that node, ie:
* a grid config that contains nodes (server1, server1, server2, server3, server3, server3) means that
* server1 will have 2 "execution slots" available
* server2 will have 1 "execution slots" available
* server3 will have 3 "execution slots" available
*
* TODO: allow for a custom strategy to select which node/slot to execute on for each slot
*/
case class GridConfig(name: String, nodes: Seq[NodeConfig], config: ViNode => ViNode = identity, slots: Option[Int] = None, startupHooks: Seq[Hook] = Seq.empty, shutdownHooks: Seq[Hook] = Seq.empty, repository: Option[Repository] = None) extends GridConfigurable {
override type Repr = GridConfig
override def apply(configStmt: ViNode => Unit): GridConfig = copy(config = node => { configStmt(config(node)); node })
// override name to allow sourcing from different set of props
def withName(name: String): GridConfig = copy(name = name)
def withSlots(slots: Int): GridConfig = copy(slots = Option(slots))
def withConfig(configure: ViNode => ViNode): GridConfig = copy(config = node => configure(config(node)))
def addStartupHook(hook: Hook): GridConfig = copy(startupHooks = startupHooks :+ hook)
def addShutdownHook(hook: Hook): GridConfig = copy(shutdownHooks = shutdownHooks :+ hook)
def withNodes(nodes: NodeConfig*): GridConfig = copy(nodes = nodes)
def withNodes(nodes: Array[NodeConfig]): GridConfig = copy(nodes = nodes)
def addNodes(nodes: Array[NodeConfig]): GridConfig = copy(nodes = nodes)
def addNodes(addNodes: NodeConfig*): GridConfig = copy(nodes = nodes ++ addNodes)
// methods that allow us to narrow the node selection in our config
def selectNodes(filter: NodeConfig => Boolean): GridConfig = copy(nodes = nodes.filter(filter))
def selectRandomNode: GridConfig = copy(nodes = Seq(nodes(Random.nextInt(nodes.size))))
def selectUserHashedNode: GridConfig = copy(nodes = Seq(nodes(Math.abs(System.getProperty("user.name").hashCode) % nodes.size)))
}
object GridConfig {
  def apply(id: String): GridConfig = new GridConfig(id, Seq.empty)
def apply(id: String, nodes: NodeConfig*): GridConfig = GridConfig(id, nodes.toSeq)
def fork(id: String = "fork"): GridConfig = apply(id, LocalNodeConfig(id))
def isolate(id: String = "isolated", configure: IsolateNodeConfig => IsolateNodeConfig = identity): GridConfig = apply(id, configure(IsolateNodeConfig(id)))
}
| apache-2.0 |
naskogithub/google-api-nodejs-client | apis/youtubeAnalytics/v1beta1.js | 6643 | /**
* Copyright 2014 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* jshint maxlen: false */
'use strict';
var apirequest = require('../../lib/apirequest');
var createAPIRequest = apirequest.createAPIRequest;
/**
* YouTube Analytics API
*
* @classdesc Retrieve your YouTube Analytics reports.
* @namespace youtubeAnalytics
* @version v1beta1
* @variation v1beta1
* @this Youtubeanalytics
* @param {object=} options Options for Youtubeanalytics
*/
function Youtubeanalytics(options) {
var self = this;
this._options = options || {};
this.batchReportDefinitions = {
/**
* youtubeAnalytics.batchReportDefinitions.list
*
* @desc Retrieves a list of available batch report definitions.
*
* @alias youtubeAnalytics.batchReportDefinitions.list
* @memberOf! youtubeAnalytics(v1beta1)
*
* @param {object} params - Parameters for request
* @param {string} params.onBehalfOfContentOwner - The onBehalfOfContentOwner parameter identifies the content owner that the user is acting on behalf of.
* @param {callback} callback - The callback that handles the response.
* @return {object} Request object
*/
list: function(params, callback) {
var parameters = {
options: {
url: 'https://www.googleapis.com/youtube/analytics/v1beta1/batchReportDefinitions',
method: 'GET'
},
params: params,
requiredParams: ['onBehalfOfContentOwner'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
this.batchReports = {
/**
* youtubeAnalytics.batchReports.list
*
* @desc Retrieves a list of processed batch reports.
*
* @alias youtubeAnalytics.batchReports.list
* @memberOf! youtubeAnalytics(v1beta1)
*
* @param {object} params - Parameters for request
     * @param {string} params.batchReportDefinitionId - The batchReportDefinitionId parameter specifies the ID of the batch report definition for which you are retrieving reports.
* @param {string} params.onBehalfOfContentOwner - The onBehalfOfContentOwner parameter identifies the content owner that the user is acting on behalf of.
* @param {callback} callback - The callback that handles the response.
* @return {object} Request object
*/
list: function(params, callback) {
var parameters = {
options: {
url: 'https://www.googleapis.com/youtube/analytics/v1beta1/batchReports',
method: 'GET'
},
params: params,
requiredParams: ['batchReportDefinitionId', 'onBehalfOfContentOwner'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
this.reports = {
/**
* youtubeAnalytics.reports.query
*
* @desc Retrieve your YouTube Analytics reports.
*
* @alias youtubeAnalytics.reports.query
* @memberOf! youtubeAnalytics(v1beta1)
*
* @param {object} params - Parameters for request
* @param {string=} params.dimensions - A comma-separated list of YouTube Analytics dimensions, such as views or ageGroup,gender. See the Available Reports document for a list of the reports that you can retrieve and the dimensions used for those reports. Also see the Dimensions document for definitions of those dimensions.
* @param {string} params.end-date - The end date for fetching YouTube Analytics data. The value should be in YYYY-MM-DD format.
* @param {string=} params.filters - A list of filters that should be applied when retrieving YouTube Analytics data. The Available Reports document identifies the dimensions that can be used to filter each report, and the Dimensions document defines those dimensions. If a request uses multiple filters, join them together with a semicolon (;), and the returned result table will satisfy both filters. For example, a filters parameter value of video==dMH0bHeiRNg;country==IT restricts the result set to include data for the given video in Italy.
* @param {string} params.ids - Identifies the YouTube channel or content owner for which you are retrieving YouTube Analytics data. - To request data for a YouTube user, set the ids parameter value to channel==CHANNEL_ID, where CHANNEL_ID specifies the unique YouTube channel ID. - To request data for a YouTube CMS content owner, set the ids parameter value to contentOwner==OWNER_NAME, where OWNER_NAME is the CMS name of the content owner.
* @param {integer=} params.max-results - The maximum number of rows to include in the response.
* @param {string} params.metrics - A comma-separated list of YouTube Analytics metrics, such as views or likes,dislikes. See the Available Reports document for a list of the reports that you can retrieve and the metrics available in each report, and see the Metrics document for definitions of those metrics.
* @param {string=} params.sort - A comma-separated list of dimensions or metrics that determine the sort order for YouTube Analytics data. By default the sort order is ascending. The '-' prefix causes descending sort order.
* @param {string} params.start-date - The start date for fetching YouTube Analytics data. The value should be in YYYY-MM-DD format.
* @param {integer=} params.start-index - An index of the first entity to retrieve. Use this parameter as a pagination mechanism along with the max-results parameter (one-based, inclusive).
* @param {callback} callback - The callback that handles the response.
* @return {object} Request object
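     *
     * A minimal usage sketch (illustrative only; it assumes an already-instantiated client object named youtubeAnalytics, and CHANNEL_ID plus the dates, metrics, dimensions, and sort values below are placeholder choices, not the only valid ones):
     * @example
     * youtubeAnalytics.reports.query({
     *   ids: 'channel==CHANNEL_ID',
     *   'start-date': '2014-01-01',
     *   'end-date': '2014-06-30',
     *   metrics: 'views,estimatedMinutesWatched',
     *   dimensions: 'day',
     *   sort: 'day'
     * }, function(err, response) {
     *   if (err) { return console.error(err); }
     *   console.log(response);
     * });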
*/
query: function(params, callback) {
var parameters = {
options: {
url: 'https://www.googleapis.com/youtube/analytics/v1beta1/reports',
method: 'GET'
},
params: params,
requiredParams: ['ids', 'start-date', 'end-date', 'metrics'],
context: self
};
return createAPIRequest(parameters, callback);
}
};
}
/**
* Exports Youtubeanalytics object
* @type Youtubeanalytics
*/
module.exports = Youtubeanalytics; | apache-2.0 |
great-expectations/great_expectations | tests/cli/test_checkpoint.py | 137865 | import logging
import os
import shutil
import subprocess
import unittest
from typing import List, Optional, Union
from unittest import mock
import nbformat
import pandas as pd
import pytest
from click.testing import CliRunner, Result
from nbconvert.preprocessors import ExecutePreprocessor
from nbformat import NotebookNode
from ruamel.yaml import YAML
from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.core import ExpectationSuite
from great_expectations.core.usage_statistics.anonymizers.types.base import (
GETTING_STARTED_DATASOURCE_NAME,
)
from great_expectations.data_context.types.base import DataContextConfigDefaults
from great_expectations.data_context.util import file_relative_path
from great_expectations.datasource import (
Datasource,
LegacyDatasource,
SimpleSqlalchemyDatasource,
)
from tests.cli.utils import assert_no_logging_messages_or_tracebacks
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
logger = logging.getLogger(__name__)
# TODO: <Alex>ALEX -- This belongs in tests/conftest.py</Alex>
@pytest.fixture
def titanic_data_context_with_sql_datasource(
sa,
titanic_data_context_stats_enabled_config_version_3,
test_df,
):
context: DataContext = titanic_data_context_stats_enabled_config_version_3
db_file_path: str = file_relative_path(
__file__,
os.path.join("..", "test_sets", "titanic_sql_test_cases.db"),
)
sqlite_engine: sa.engine.base.Engine = sa.create_engine(f"sqlite:///{db_file_path}")
# noinspection PyUnusedLocal
conn: sa.engine.base.Connection = sqlite_engine.connect()
try:
csv_path: str = file_relative_path(
__file__, os.path.join("..", "test_sets", "Titanic.csv")
)
df: pd.DataFrame = pd.read_csv(filepath_or_buffer=csv_path)
df.to_sql(name="titanic", con=sqlite_engine)
df = df.sample(frac=0.5, replace=True, random_state=1)
df.to_sql(name="incomplete", con=sqlite_engine)
test_df.to_sql(name="wrong", con=sqlite_engine)
except ValueError as ve:
logger.warning(f"Unable to store information into database: {str(ve)}")
datasource_config: str = f"""
class_name: SimpleSqlalchemyDatasource
connection_string: sqlite:///{db_file_path}
introspection:
whole_table: {{}}
"""
try:
# noinspection PyUnusedLocal
my_sql_datasource: Optional[
Union[SimpleSqlalchemyDatasource, LegacyDatasource]
] = context.add_datasource(
"test_sqlite_db_datasource", **yaml.load(datasource_config)
)
except AttributeError:
pytest.skip("SQL Database tests require sqlalchemy to be installed.")
return context
# TODO: <Alex>ALEX -- This belongs in tests/conftest.py</Alex>
@pytest.fixture
def titanic_data_context_with_spark_datasource(
tmp_path_factory,
spark_session,
test_df,
monkeypatch,
):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path: str = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data", "titanic")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"..",
"test_fixtures",
"great_expectations_v013_no_datasource_stats_enabled.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
str(
os.path.join(
context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
),
)
shutil.copy(
file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: SparkDFExecutionEngine
data_connectors:
my_basic_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {data_path}
default_regex:
pattern: (.*)\\.csv
group_names:
- data_asset_name
my_special_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users:
base_directory: {data_path}
pattern: (.+)_(\\d+)_(\\d+)\\.csv
group_names:
- name
- timestamp
- size
my_other_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users: {{}}
"""
# noinspection PyUnusedLocal
datasource: Datasource = context.test_yaml_config(
name=GETTING_STARTED_DATASOURCE_NAME,
yaml_config=datasource_config,
pretty_print=False,
)
# noinspection PyProtectedMember
context._save_project_config()
csv_path: str
    # To fail an expectation, make the number of rows less than 1313 (the original number of rows in the "Titanic" dataset).
csv_path = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_1911.csv"
)
df: pd.DataFrame = pd.read_csv(filepath_or_buffer=csv_path)
df = df.sample(frac=0.5, replace=True, random_state=1)
# noinspection PyTypeChecker
df.to_csv(path_or_buf=csv_path)
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
# mangle the csv
with open(csv_path, "w") as f:
f.write("foo,bar\n1,2\n")
return context
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_delete_with_non_existent_checkpoint(
mock_emit,
caplog,
monkeypatch,
empty_data_context_stats_enabled,
):
context: DataContext = empty_data_context_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint delete my_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"Could not find Checkpoint `my_checkpoint` (or its configuration is invalid)."
in stdout
)
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.delete.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_delete_with_single_checkpoint_confirm_success(
mock_emit,
caplog,
monkeypatch,
empty_context_with_checkpoint_v1_stats_enabled,
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint delete my_v1_checkpoint",
input="\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert 'Checkpoint "my_v1_checkpoint" deleted.' in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.delete.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
# noinspection PyTypeChecker
result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "No Checkpoints found." in stdout
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_delete_with_single_checkpoint_assume_yes_flag(
mock_emit,
caplog,
monkeypatch,
empty_context_with_checkpoint_v1_stats_enabled,
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
checkpoint_name: str = "my_v1_checkpoint"
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api --assume-yes checkpoint delete {checkpoint_name}",
catch_exceptions=False,
)
stdout: str = result.stdout
assert result.exit_code == 0
assert (
f'Are you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?'
not in stdout
)
    # This assertion provides extra assurance, since the previous assertion would become too permissive if the confirmation message ever changes
assert "[Y/n]" not in stdout
assert 'Checkpoint "my_v1_checkpoint" deleted.' in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.delete.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
# noinspection PyTypeChecker
result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "No Checkpoints found." in stdout
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_delete_with_single_checkpoint_cancel_success(
mock_emit,
caplog,
monkeypatch,
empty_context_with_checkpoint_v1_stats_enabled,
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint delete my_v1_checkpoint",
input="n\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert 'The Checkpoint "my_v1_checkpoint" was not deleted. Exiting now.' in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.delete.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.delete.end",
"event_payload": {"cancelled": True, "api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
# noinspection PyTypeChecker
result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout = result.stdout
assert "Found 1 Checkpoint." in stdout
assert "my_v1_checkpoint" in stdout
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_list_with_no_checkpoints(
mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
context: DataContext = empty_data_context_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert "No Checkpoints found." in stdout
assert "Use the command `great_expectations checkpoint new` to create one" in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.list.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.list.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_list_with_single_checkpoint(
mock_emit,
caplog,
monkeypatch,
empty_context_with_checkpoint_v1_stats_enabled,
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert "Found 1 Checkpoint." in stdout
assert "my_v1_checkpoint" in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.list.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.list.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_list_with_eight_checkpoints(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint list",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert "Found 8 Checkpoints." in stdout
checkpoint_names_list: List[str] = [
"my_simple_checkpoint_with_slack_and_notify_with_all",
"my_nested_checkpoint_template_1",
"my_nested_checkpoint_template_3",
"my_nested_checkpoint_template_2",
"my_simple_checkpoint_with_site_names",
"my_minimal_simple_checkpoint",
"my_simple_checkpoint_with_slack",
"my_simple_template_checkpoint",
]
assert all([checkpoint_name in stdout for checkpoint_name in checkpoint_names_list])
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.list.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.list.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_new_raises_error_on_existing_checkpoint(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
"""
What does this test and why?
The `checkpoint new` CLI flow should raise an error if the Checkpoint name being created already exists in your checkpoint store.
"""
context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint new my_minimal_simple_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"A Checkpoint named `my_minimal_simple_checkpoint` already exists. Please choose a new name."
in stdout
)
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
caplog,
result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
@mock.patch("subprocess.call", return_value=True, side_effect=None)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_checkpoint_new_happy_path_generates_a_notebook_and_checkpoint(
    mock_webbrowser,
mock_subprocess,
mock_emit,
caplog,
monkeypatch,
deterministic_asset_dataconnector_context,
titanic_expectation_suite,
):
"""
What does this test and why?
The v3 (Batch Request) API `checkpoint new` CLI flow includes creating a notebook to configure the Checkpoint.
    This test builds that notebook and runs it to generate a Checkpoint, and then tests the resulting configuration in the Checkpoint file.
    The generated notebook creates a sample configuration using one of the available Data Assets; that sample is what is used to generate the Checkpoint configuration.
"""
context: DataContext = deterministic_asset_dataconnector_context
root_dir: str = context.root_directory
monkeypatch.chdir(os.path.dirname(root_dir))
assert context.list_checkpoints() == []
context.save_expectation_suite(titanic_expectation_suite)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
# Clear the "data_context.save_expectation_suite" call
mock_emit.reset_mock()
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint new passengers",
input="1\n1\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert "open a notebook for you now" in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0
expected_notebook_path: str = os.path.join(
root_dir, "uncommitted", "edit_checkpoint_passengers.ipynb"
)
assert os.path.isfile(expected_notebook_path)
with open(expected_notebook_path) as f:
nb: NotebookNode = nbformat.read(f, as_version=4)
uncommitted_dir: str = os.path.join(root_dir, "uncommitted")
# Run notebook
# TODO: <ANTHONY>We should mock the datadocs call or skip running that cell within the notebook (rather than commenting it out in the notebook)</ANTHONY>
ep: ExecutePreprocessor = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": uncommitted_dir}})
# Ensure the checkpoint file was created
expected_checkpoint_path: str = os.path.join(
root_dir, "checkpoints", "passengers.yml"
)
assert os.path.isfile(expected_checkpoint_path)
# Ensure the Checkpoint configuration in the file is as expected
with open(expected_checkpoint_path) as f:
checkpoint_config: str = f.read()
expected_checkpoint_config: str = """name: passengers
config_version: 1.0
template_name:
module_name: great_expectations.checkpoint
class_name: Checkpoint
run_name_template: '%Y%m%d-%H%M%S-my-run-name-template'
expectation_suite_name:
batch_request: {}
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
site_names: []
evaluation_parameters: {}
runtime_configuration: {}
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_other_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
profilers: []
ge_cloud_id:
expectation_suite_ge_cloud_id:
"""
assert checkpoint_config == expected_checkpoint_config
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_raises_error_if_checkpoint_is_not_found(
mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"Could not find Checkpoint `my_checkpoint` (or its configuration is invalid)."
in stdout
)
assert "Try running" in stdout
assert mock_emit.call_count == 3
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_on_checkpoint_with_not_found_suite_raises_error(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_nested_checkpoint_template_1",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "expectation_suite suite_from_template_1 not found" in stdout
assert mock_emit.call_count == 5
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "48533197103a407af37326b0224a97df",
"config_version": 1,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_expectation_suite_name": "4987b41d9e7012f6a86a8b3939739eff",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "41cc60fba42f099f878a4bb295dc08c9",
"anonymized_data_connector_name": "4cffb49069fa5fececc8032aa41ff791",
"anonymized_data_asset_name": "5dce9f4b8abd8adbb4f719e05fceecab",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
},
"anonymized_expectation_suite_name": "4987b41d9e7012f6a86a8b3939739eff",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_on_checkpoint_with_batch_load_problem_raises_error(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
suite: ExpectationSuite = context.create_expectation_suite(
expectation_suite_name="bar"
)
context.save_expectation_suite(expectation_suite=suite)
assert context.list_expectation_suite_names() == ["bar"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"bad_batch.yml",
)
checkpoint_yaml_config: str = f"""
name: bad_batch
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
batch_spec_passthrough:
path: /totally/not/a/file.csv
reader_method: read_csv
expectation_suite_name: bar
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run bad_batch",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
# TODO: <Alex>ALEX -- Investigate how to make Abe's suggestion a reality.</Alex>
# Note: Abe : 2020/09: This was a better error message, but it should live in DataContext.get_batch, not a random CLI method.
# assert "There was a problem loading a batch:" in stdout
# assert (
# "{'path': '/totally/not/a/file.csv', 'datasource': 'mydatasource', 'reader_method': 'read_csv'}"
# in stdout
# )
# assert (
# "Please verify these batch kwargs in Checkpoint bad_batch`"
# in stdout
# )
# assert "No such file or directory" in stdout
assert ("No such file or directory" in stdout) or ("does not exist" in stdout)
assert mock_emit.call_count == 7
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "f6e1151b49fceb15ae3de4eb60f62be4",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"data_connector_query_keys": ["index"],
"runtime_parameters_keys": ["path"],
"batch_spec_passthrough_keys": ["reader_method"],
},
"success": False,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "ca68117150c32e08330af3cebad565ce",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"batch_spec_passthrough_keys": ["reader_method"],
"runtime_parameters_keys": ["path"],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "f6e1151b49fceb15ae3de4eb60f62be4",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert actual_events == expected_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_on_checkpoint_with_empty_suite_list_raises_error(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
assert context.list_expectation_suite_names() == []
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"no_suite.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run no_suite",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Exception occurred while running Checkpoint" in stdout
assert (
"of Checkpoint 'no_suite': validation expectation_suite_name must be specified"
in stdout
)
assert mock_emit.call_count == 5
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {},
"success": False,
}
),
mock.call(
{
"event": "data_context.run_checkpoint",
"event_payload": {},
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_on_non_existent_validations(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
assert context.list_expectation_suite_names() == []
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"no_validations.yml",
)
checkpoint_yaml_config: str = f"""
name: my_base_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run no_validations",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
'Checkpoint "no_validations" must contain either a batch_request or validations.'
in stdout
)
assert mock_emit.call_count == 5
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_successful_validation_pandas(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert all(
[
msg in stdout
for msg in [
"Validation succeeded!",
"Titanic.warning",
"Passed",
"100.0 %",
]
]
)
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": ["data_connector_query"],
"data_connector_query_keys": ["index"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
},
"success": True,
}
),
mock.call(
{
"event": "data_context.build_data_docs",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_successful_validation_sql(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_sql_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_sql_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: test_sqlite_db_datasource
data_connector_name: whole_table
data_asset_name: titanic
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert all(
[
msg in stdout
for msg in [
"Validation succeeded!",
"Titanic.warning",
"Passed",
"100.0 %",
]
]
)
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "c30b60089ede018ad9680153ba85adaf",
},
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.build_data_docs",
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "c30b60089ede018ad9680153ba85adaf",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_successful_validation_spark(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_spark_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_spark_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: {GETTING_STARTED_DATASOURCE_NAME}
data_connector_name: my_basic_data_connector
batch_spec_passthrough:
reader_options:
header: true
data_asset_name: Titanic_1912
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert all(
[
msg in stdout
for msg in [
"Validation succeeded!",
"Titanic.warning",
"Passed",
"100.0 %",
]
]
)
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": GETTING_STARTED_DATASOURCE_NAME,
"anonymized_data_connector_name": "af09acd176f54642635a8a2975305437",
"anonymized_data_asset_name": "9104abd890c05a364f379443b9f43825",
},
"batch_request_optional_top_level_keys": ["batch_spec_passthrough"],
"batch_spec_passthrough_keys": ["reader_options"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "42ad8ec5a5ed470e596939f73f31d613",
},
"success": True,
}
),
mock.call(
{
"event": "data_context.build_data_docs",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "getting_started_datasource",
"anonymized_data_connector_name": "af09acd176f54642635a8a2975305437",
"anonymized_data_asset_name": "9104abd890c05a364f379443b9f43825",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"batch_spec_passthrough_keys": ["reader_options"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_pandas(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
    # To fail an expectation, make the number of rows less than 1313 (the original number of rows in the "Titanic" dataset).
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
df: pd.DataFrame = pd.read_csv(filepath_or_buffer=csv_path)
df = df.sample(frac=0.5, replace=True, random_state=1)
# noinspection PyTypeChecker
df.to_csv(path_or_buf=csv_path)
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Validation failed!" in stdout
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": ["data_connector_query"],
"data_connector_query_keys": ["index"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
},
"success": True,
}
),
mock.call(
{
"event": "data_context.build_data_docs",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_sql(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_sql_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_sql_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: test_sqlite_db_datasource
data_connector_name: whole_table
data_asset_name: incomplete
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Validation failed!" in stdout
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "61b23df5338c9164d0f9514847cba679",
},
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.build_data_docs",
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "61b23df5338c9164d0f9514847cba679",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_spark(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_spark_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_spark_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: {GETTING_STARTED_DATASOURCE_NAME}
data_connector_name: my_basic_data_connector
data_asset_name: Titanic_1911
data_connector_query:
index: -1
batch_spec_passthrough:
reader_options:
header: True
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Validation failed!" in stdout
assert mock_emit.call_count == 9
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.__init__",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": GETTING_STARTED_DATASOURCE_NAME,
"anonymized_data_connector_name": "af09acd176f54642635a8a2975305437",
"anonymized_data_asset_name": "38b9086d45a8746d014a0d63ad58e331",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"data_connector_query_keys": ["index"],
"batch_spec_passthrough_keys": ["reader_options"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "42ad8ec5a5ed470e596939f73f31d613",
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.build_data_docs",
"success": True,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "getting_started_datasource",
"anonymized_data_connector_name": "af09acd176f54642635a8a2975305437",
"anonymized_data_asset_name": "38b9086d45a8746d014a0d63ad58e331",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"batch_spec_passthrough_keys": ["reader_options"],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": True,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_due_to_bad_data_pandas(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
# mangle the csv
with open(csv_path, "w") as f:
f.write("foo,bar\n1,2\n")
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
catch_exceptions: False
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Exception occurred while running Checkpoint." in stdout
assert 'Error: The column "Name" in BatchData does not exist...' in stdout
assert mock_emit.call_count == 8
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event": "data_context.__init__",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": ["data_connector_query"],
"data_connector_query_keys": ["index"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
},
"success": False,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_due_to_bad_data_sql(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_sql_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_sql_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: test_sqlite_db_datasource
data_connector_name: whole_table
data_asset_name: wrong
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
catch_exceptions: False
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Exception occurred while running Checkpoint." in stdout
assert 'Error: The column "Name" in BatchData does not exist...' in stdout
assert mock_emit.call_count == 8
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event": "data_context.__init__",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "96a15275c07d53de6b4a9464704b12d8",
},
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
},
"success": False,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "d841f52415fe99e4d100fe49e7c4d0a6",
"anonymized_data_connector_name": "6a6c3e6d98f688927f5434b7c19bfb05",
"anonymized_data_asset_name": "96a15275c07d53de6b4a9464704b12d8",
},
"batch_request_optional_top_level_keys": [
"data_connector_query"
],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_run_happy_path_with_failed_validation_due_to_bad_data_spark(
mock_emit,
caplog,
monkeypatch,
titanic_data_context_with_spark_datasource,
titanic_expectation_suite,
):
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_data_context_with_spark_datasource
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
# mangle the csv
with open(csv_path, "w") as f:
f.write("foo,bar\n1,2\n")
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: {GETTING_STARTED_DATASOURCE_NAME}
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
batch_spec_passthrough:
reader_options:
header: true
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
catch_exceptions: False
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint run my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert "Exception occurred while running Checkpoint." in stdout
assert 'Error: The column "Name" in BatchData does not exist...' in stdout
assert mock_emit.call_count == 8
# noinspection PyUnresolvedReferences
expected_events: List[unittest.mock._Call] = [
mock.call(
{
"event_payload": {
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
},
"event": "data_context.save_expectation_suite",
"success": True,
}
),
mock.call(
{
"event": "data_context.__init__",
"event_payload": {},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.run.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "data_context.get_batch_list",
"event_payload": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": GETTING_STARTED_DATASOURCE_NAME,
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"data_connector_query_keys": ["index"],
"batch_spec_passthrough_keys": ["reader_options"],
},
"success": True,
}
),
mock.call(
{
"event": "data_asset.validate",
"event_payload": {
"anonymized_batch_kwarg_keys": [],
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_datasource_name": "42ad8ec5a5ed470e596939f73f31d613",
},
"success": False,
}
),
mock.call(
{
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "eb2d802f924a3e764afc605de3495c5c",
"config_version": 1.0,
"anonymized_run_name_template": "21e9677f05fd2b0d83bb9285a688d5c5",
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "getting_started_datasource",
"anonymized_data_connector_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"anonymized_data_asset_name": "2621a5230efeef1973ff373dd12b1ac4",
},
"batch_request_optional_top_level_keys": [
"batch_spec_passthrough",
"data_connector_query",
],
"batch_spec_passthrough_keys": ["reader_options"],
"data_connector_query_keys": ["index"],
},
"anonymized_expectation_suite_name": "35af1ba156bfe672f8845cb60554b138",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
}
],
"checkpoint_optional_top_level_keys": [
"evaluation_parameters",
"runtime_configuration",
],
},
"success": False,
}
),
mock.call(
{
"event_payload": {},
"event": "data_context.run_checkpoint",
"success": False,
}
),
mock.call(
{
"event": "cli.checkpoint.run.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# noinspection PyUnresolvedReferences
actual_events: List[unittest.mock._Call] = mock_emit.call_args_list
assert expected_events == actual_events
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_script_raises_error_if_checkpoint_not_found(
mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
assert context.list_checkpoints() == ["my_v1_checkpoint"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script not_a_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"Could not find Checkpoint `not_a_checkpoint` (or its configuration is invalid)."
in stdout
)
assert "Try running" in stdout
assert mock_emit.call_count == 3
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.script.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.script.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_script_raises_error_if_python_file_exists(
mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
assert context.list_checkpoints() == ["my_v1_checkpoint"]
script_path: str = os.path.join(
context.root_directory, context.GE_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py"
)
with open(script_path, "w") as f:
f.write("script here")
assert os.path.isfile(script_path)
runner: CliRunner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_v1_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"Warning! A script named run_my_v1_checkpoint.py already exists and this command will not overwrite it."
in stdout
)
assert mock_emit.call_count == 3
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.script.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.script.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
# assert the script has original contents
with open(script_path) as f:
assert f.read() == "script here"
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
@mock.patch(
"great_expectations.core.usage_statistics.usage_statistics.UsageStatisticsHandler.emit"
)
def test_checkpoint_script_happy_path_generates_script_pandas(
mock_emit, caplog, monkeypatch, empty_context_with_checkpoint_v1_stats_enabled
):
context: DataContext = empty_context_with_checkpoint_v1_stats_enabled
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_v1_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert (
"A python script was created that runs the Checkpoint named: `my_v1_checkpoint`"
in stdout
)
assert (
"The script is located in `great_expectations/uncommitted/run_my_v1_checkpoint.py`"
in stdout
)
assert (
"The script can be run with `python great_expectations/uncommitted/run_my_v1_checkpoint.py`"
in stdout
)
assert mock_emit.call_count == 3
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.script.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.script.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
expected_script: str = os.path.join(
context.root_directory, context.GE_UNCOMMITTED_DIR, "run_my_v1_checkpoint.py"
)
assert os.path.isfile(expected_script)
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
def test_checkpoint_script_happy_path_executable_successful_validation_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 0 status code
- print a success message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
suite: ExpectationSuite = context.create_expectation_suite(
expectation_suite_name="users.delivery"
)
context.save_expectation_suite(expectation_suite=suite)
assert context.list_expectation_suite_names() == ["users.delivery"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: users.delivery
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 0
assert "Validation succeeded!" in output
def test_checkpoint_script_happy_path_executable_failed_validation_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
# To fail an expectation, make number of rows less than 1313 (the original number of rows in the "Titanic" dataset).
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
df: pd.DataFrame = pd.read_csv(filepath_or_buffer=csv_path)
df = df.sample(frac=0.5, replace=True, random_state=1)
# noinspection PyTypeChecker
df.to_csv(path_or_buf=csv_path)
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 1
assert "Validation failed!" in output
def test_checkpoint_script_happy_path_executable_failed_validation_due_to_bad_data_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
# mangle the csv
with open(csv_path, "w") as f:
f.write("foo,bar\n1,2\n")
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
catch_exceptions: False
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
# noinspection PyTypeChecker
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 1
assert (
'MetricResolutionError: Error: The column "Name" in BatchData does not exist.'
in output
)
def _write_checkpoint_dict_to_file(config, checkpoint_file_path):
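    """Test helper: dump the given Checkpoint config dict as YAML to the given file path."""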
yaml_obj: YAML = YAML()
with open(checkpoint_file_path, "w") as f:
yaml_obj.dump(config, f)
| apache-2.0 |
wiltonlazary/arangodb | arangod/Aql/DocumentExpressionContext.cpp | 1761 | ////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2022 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "DocumentExpressionContext.h"
#include "Aql/AqlValue.h"
using namespace arangodb::aql;
DocumentExpressionContext::DocumentExpressionContext(
arangodb::transaction::Methods& trx, QueryContext& query,
AqlFunctionsInternalCache& cache,
arangodb::velocypack::Slice document) noexcept
: QueryExpressionContext(trx, query, cache), _document(document) {}
AqlValue DocumentExpressionContext::getVariableValue(Variable const*,
bool doCopy,
bool& mustDestroy) const {
if (doCopy) {
mustDestroy = true; // as we are copying
return AqlValue(AqlValueHintCopy(_document.start()));
}
mustDestroy = false;
return AqlValue(AqlValueHintDocumentNoCopy(_document.start()));
}
| apache-2.0 |
sjyk/sampleclean-async | src/main/scala/sampleclean/eval/MonotonicSimilarityThresholdTuner.scala | 3448 | package sampleclean.eval
import sampleclean.clean.deduplication.join._
import sampleclean.clean.deduplication.blocker._
import sampleclean.clean.deduplication.matcher._
import sampleclean.clean.deduplication._
import sampleclean.api.SampleCleanContext
import org.apache.spark.sql.{SchemaRDD, Row}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import sampleclean.clean.featurize.AnnotatedSimilarityFeaturizer
import sampleclean.clean.featurize.AnnotatedSimilarityFeaturizer._
import org.apache.spark.rdd.RDD
private [sampleclean] class MonotonicSimilarityThresholdTuner(scc: SampleCleanContext,
eval:Evaluator,
simfeature: AnnotatedSimilarityFeaturizer) extends Serializable {
var tree : scala.collection.mutable.Map[String, Set[(String, Double)]] = scala.collection.mutable.Map()
def rowsToSimilarity[K,V](rows:Set[Any], params: collection.immutable.Map[K,V]=null):Double = {
return simfeature.getSimilarityDouble(rows.asInstanceOf[Set[Row]],params)._2
}
def addEdge(edge:(Double,(Row,Row))) = {
val h1 = edge._2._1(0).toString()
val h2 = edge._2._2(0).toString()
if(! tree.contains(h1))
tree(h1) = Set()
if(! tree.contains(h2))
tree(h2) = Set()
if(! dfs(h1,h2,Set())){
tree(h1) += ((h2,edge._1))
tree(h2) += ((h1,edge._1))
}
}
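  // Depth-first search over the current forest: returns true when `start` and `end`
  // are already connected, so addEdge only links components that are not yet joined
  // and `tree` stays acyclic.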
def dfs(start:String, end:String, traverseSet:Set[String]):Boolean ={
if(start == end)
return true
else
{
var result = false
for(t <- tree(start))
{
if(!traverseSet.contains(t._1))
result = result || dfs(t._1,end,traverseSet + t._1)
}
return result
}
}
def tuneThreshold(sampleTableName: String):Double = {
/*val data = scc.getCleanSample(sampleTableName).rdd.filter( (x:Row) => eval.binaryKeySet.contains(x(0).asInstanceOf[String]))
//todo add error handling clean up
var tokenWeights = collection.immutable.Map[String, Double]()
var tokenCounts = collection.immutable.Map[String, Int]()
tokenCounts = computeTokenCount(data.map(simfeature.tokenizer.tokenize(_, simfeature.getCols())))
tokenWeights = tokenCounts.map(x => (x._1, math.log10(data.count.toDouble / x._2)))
val edgeList = data.cartesian(data).map(x =>
(rowsToSimilarity(x.productIterator.toSet, tokenWeights), (x._1, x._2)))
.filter(x => x._1 > 1e-6)
.filter(x => eval.binaryConstraints.contains( (x._2._1(0).asInstanceOf[String],
x._2._2(0).asInstanceOf[String],
simfeature.colNames(0))))
.sortByKey(false).collect()
for(edge <- edgeList)
{
addEdge(edge)
}
var min = 1.0
for(t <- tree) {
for (j <- tree(t._1))
if(j._2 < min)
min = j._2
}*/
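    // NOTE: the general tuning logic above is commented out; the returned threshold
    // is currently hard-coded per known dataset and featurizer type.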
if(sampleTableName.contains("alcohol")){
if(simfeature.getClass.getSimpleName.toLowerCase.contains("edit"))
return 3
else
return 0.43
}
else
return 0.6
//println(reachableSet(tree.keySet.last, Set()))
}
def getCandidatePairsCount(sampleTableName: String, thresh:Double):Long = {
val data = scc.getCleanSample(sampleTableName)
return data.rdd.cartesian(data.rdd).map(x => rowsToSimilarity(x.productIterator.toSet)).filter(x => x > thresh).count()
}
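  // Document frequency: for each token, the number of rows that contain it at least once.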
def computeTokenCount(data: RDD[(Seq[String])]): collection.immutable.Map[String, Int] = {
val m = data.flatMap{
case tokens =>
for (x <- tokens.distinct)
yield (x, 1)
}.reduceByKeyLocally(_ + _)
collection.immutable.Map(m.toList: _*)
}
} | apache-2.0 |
TANGKUO/beautifulDay | src/main/java/com/tk/cn/utils/web/RandomId.java | 1350 | package com.tk.cn.utils.web;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
public class RandomId {
private Random random;
private String table;
private static final int radLength =15;
	private static final String[] radArr = new String[]{"A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"};
public RandomId() {
random = new Random();
table = "0123456789";
}
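	// Resulting id string: Caesar-encoded 5-digit id, followed by the 1-digit cipher key and the 2-digit seed.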
public String randomId(long id) {
String ret = null,num = String.format("%05d", id);
int key = random.nextInt(10),seed = random.nextInt(100);
Caesar caesar = new Caesar(table, seed);
num = caesar.encode(key, num);
ret = num + String.format("%01d", key) + String.format("%02d", seed);
// 加入生成的随机字符
/* int letterSize = radLength - ret.length();
for(int i=0;i<letterSize;i++){
String letter = radArr[ random.nextInt(radArr.length)];
int index = random.nextInt(ret.length());
ret = ret.substring(0, index) + letter + ret.substring(index, ret.length());
}*/
return ret;
}
public static void main(String[] args) {
RandomId r = new RandomId();
System.out.println(r.randomId(1));
}
} | apache-2.0 |
tkobayas/optaplanner | optaplanner-examples/src/main/java/org/optaplanner/examples/conferencescheduling/optional/benchmark/ConferenceSchedulingBenchmarkApp.java | 1208 | /*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.examples.conferencescheduling.optional.benchmark;
import org.optaplanner.examples.common.app.CommonBenchmarkApp;
public class ConferenceSchedulingBenchmarkApp extends CommonBenchmarkApp {
public static void main(String[] args) {
new ConferenceSchedulingBenchmarkApp().buildAndBenchmark(args);
}
public ConferenceSchedulingBenchmarkApp() {
super(
new ArgOption("default",
"org/optaplanner/examples/conferencescheduling/optional/benchmark/conferenceSchedulingBenchmarkConfig.xml"));
}
}
| apache-2.0 |
mtrunkat/php-libphutil-composer | src/Facebook/Libphutil/AphrontQueryException.php | 139 | <?php
namespace Facebook\Libphutil;
/**
* @group storage
* @concrete-extensible
*/
class AphrontQueryException extends \Exception { }
| apache-2.0 |
zhaoz/lighthouse | lighthouse-core/third_party/traceviewer-js/extras/net/net.js | 202 | /**
Copyright (c) 2013 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
**/
require("./net_async_slice.js");
| apache-2.0 |
carlosFattor/Vertx-Projects | my-first-app/src/main/java/io/vertx/blog/second/MyFirstVerticle.java | 4245 | package io.vertx.blog.second;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Future;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.core.json.Json;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.RoutingContext;
import io.vertx.ext.web.handler.BodyHandler;
import io.vertx.ext.web.handler.StaticHandler;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
public class MyFirstVerticle extends AbstractVerticle {
@Override
public void start(Future<Void> fut) {
createSomeData();
Router router = Router.router(vertx);
router.route("/").handler(routingContext -> {
HttpServerResponse resp = routingContext.response();
resp.putHeader("content-type", "text/html")
.end("<h1>Hello from my first Vert.x 3 application</h1>");
});
router.route("/assets/*").handler(StaticHandler.create("assets"));
router.route("/api/whiskies*").handler(BodyHandler.create());
router.get("/api/whiskies").handler(this::getAll);
router.get("/api/whiskies/:id").handler(this::getOne);
router.post("/api/whiskies").handler(this::addOne);
router.delete("/api/whiskies/:id").handler(this::deleteOne);
router.put("/api/whiskies").handler(this::updateOne);
vertx
.createHttpServer()
.requestHandler(router::accept)
.listen(
// Retrieve the port from the configuration,
// default to 8080.
config().getInteger("http.port", 8080),
result -> {
if (result.succeeded()) {
fut.complete();
} else {
fut.fail(result.cause());
}
}
);
}
  private void updateOne(RoutingContext routingContext) {
    // Guard against a null decode result and end the response exactly once:
    // 200 when the product was replaced, 400 otherwise.
    final Optional<Whisky> whisky = Optional.ofNullable(Json.decodeValue(routingContext.getBodyAsString(), Whisky.class));
    if (whisky.isPresent()) {
      Whisky w = whisky.get();
      products.replace(w.getId(), w);
      routingContext.response().setStatusCode(200).end();
    } else {
      routingContext.response().setStatusCode(400).end();
    }
  }
private void getOne(RoutingContext routingContext) {
String id = routingContext.request().getParam("id");
if (id == null) {
routingContext.response().setStatusCode(400).end();
} else {
routingContext.response().setStatusCode(200)
.putHeader("content-type", "application/json; charset=utf-8")
.end(Json.encodePrettily(products.get(Integer.valueOf(id))));
}
}
private void deleteOne(RoutingContext routingContext) {
String id = routingContext.request().getParam("id");
if (id == null) {
routingContext.response().setStatusCode(400).end();
} else {
Integer idAsInteger = Integer.valueOf(id);
products.remove(idAsInteger);
}
routingContext.response().setStatusCode(204).end();
}
private void addOne(RoutingContext routingContext) {
final Whisky whisky = Json.decodeValue(routingContext.getBodyAsString(),
Whisky.class);
products.put(whisky.getId(), whisky);
routingContext.response()
.setStatusCode(201)
.putHeader("content-type", "application/json; charset=utf-8")
.end(Json.encodePrettily(whisky));
}
private void getAll(RoutingContext routingContext) {
routingContext.response()
.putHeader("content-type", "application/json; charset=utf-8")
.end(Json.encodePrettily(products.values()));
}
// Store our product
private Map<Integer, Whisky> products = new LinkedHashMap<>();
// Create some product
private void createSomeData() {
Whisky bowmore = new Whisky("Bowmore 15 Years Laimrig", "Scotland, Islay");
products.put(bowmore.getId(), bowmore);
Whisky talisker = new Whisky("Talisker 57° North", "Scotland, Island");
products.put(talisker.getId(), talisker);
}
} | apache-2.0 |
terrancesnyder/solr-analytics | lucene/core/src/java/org/apache/lucene/util/mutable/MutableValueInt.java | 2078 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util.mutable;
/**
* {@link MutableValue} implementation of type
* <code>int</code>.
*/
public class MutableValueInt extends MutableValue {
public int value;
@Override
public Object toObject() {
return exists ? value : null;
}
@Override
public void copy(MutableValue source) {
MutableValueInt s = (MutableValueInt) source;
value = s.value;
exists = s.exists;
}
@Override
public MutableValue duplicate() {
MutableValueInt v = new MutableValueInt();
v.value = this.value;
v.exists = this.exists;
return v;
}
@Override
public boolean equalsSameType(Object other) {
MutableValueInt b = (MutableValueInt)other;
return value == b.value && exists == b.exists;
}
@Override
public int compareSameType(Object other) {
MutableValueInt b = (MutableValueInt)other;
int ai = value;
int bi = b.value;
if (ai<bi) return -1;
else if (ai>bi) return 1;
if (exists == b.exists) return 0;
return exists ? 1 : -1;
}
@Override
public int hashCode() {
// TODO: if used in HashMap, it already mixes the value... maybe use a straight value?
return (value>>8) + (value>>16);
}
}
| apache-2.0 |
Mirantis/disk_perf_test_tool | wally/suits/itest.py | 10031 | import abc
import time
import logging
import os.path
from typing import Any, List, Optional, Callable, Iterable, cast
from concurrent.futures import ThreadPoolExecutor, wait
from cephlib.node import IRPCNode
from cephlib.units import unit_conversion_coef_f
from ..utils import StopTestError, get_time_interval_printable_info
from ..result_classes import SuiteConfig, JobConfig, TimeSeries, IWallyStorage
logger = logging.getLogger("wally")
__doc__ = "Contains base classes for performance tests"
class PerfTest(metaclass=abc.ABCMeta):
"""Base class for all tests"""
name = None # type: str
max_retry = 3
retry_time = 30
job_config_cls = None # type: type
def __init__(self, storage: IWallyStorage, suite: SuiteConfig,
on_tests_boundry: Callable[[bool], None] = None) -> None:
self.suite = suite
self.stop_requested = False
self.sorted_nodes_ids = sorted(node.node_id for node in self.suite.nodes)
self.on_tests_boundry = on_tests_boundry
self.storage = storage
def request_stop(self) -> None:
self.stop_requested = True
def join_remote(self, path: str) -> str:
return os.path.join(self.suite.remote_dir, path)
@abc.abstractmethod
def run(self) -> None:
pass
@abc.abstractmethod
def format_for_console(self, data: Any) -> str:
pass
class ThreadedTest(PerfTest, metaclass=abc.ABCMeta):
"""Base class for tests, which spawn separated thread for each node"""
# max allowed time difference between starts and stops of run of the same test on different test nodes
# used_max_diff = max((min_run_time * max_rel_time_diff), max_time_diff)
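    # illustrative numbers (assumed, not from the original): with max_rel_time_diff = 0.05
    # and max_time_diff = 5, a 200-second minimum run time gives
    # used_max_diff = max(200 * 0.05, 5) = 10 seconds of allowed cross-node skew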
max_time_diff = 5
max_rel_time_diff = 0.05
load_profile_name: str = None # type: ignore
def __init__(self, *args, **kwargs) -> None:
PerfTest.__init__(self, *args, **kwargs)
self.job_configs: List[JobConfig] = None # type: ignore
@abc.abstractmethod
def get_expected_runtime(self, iter_cfg: JobConfig) -> Optional[int]:
pass
def get_not_done_jobs(self) -> Iterable[JobConfig]:
jobs_map = {job.storage_id: job for job in self.job_configs}
already_in_storage = set()
for db_config in cast(List[JobConfig], self.storage.iter_job(self.suite)):
if db_config.storage_id in jobs_map:
job = jobs_map[db_config.storage_id]
if job != db_config:
logger.error("Test info at '%s.%s' is not equal to expected config for iteration %s.%s." +
" Maybe configuration was changed before test was restarted. " +
"DB cfg is:\n %s\nExpected cfg is:\n %s\nFix DB or rerun test from beginning",
self.suite.storage_id, job.storage_id, self.name, job.summary,
str(db_config).replace("\n", "\n "),
str(job).replace("\n", "\n "))
raise StopTestError()
logger.info("Test iteration %s.%s found in storage and will be skipped", self.name, job.summary)
already_in_storage.add(db_config.storage_id)
return [job for job in self.job_configs if job.storage_id not in already_in_storage]
def run(self) -> None:
self.storage.put_or_check_suite(self.suite)
not_in_storage = list(self.get_not_done_jobs())
if not not_in_storage:
logger.info("All test iteration in storage already. Skip test")
return
logger.debug("Run test %s with profile %r on nodes %s.", self.name,
self.load_profile_name,
",".join(self.sorted_nodes_ids))
logger.debug("Prepare nodes")
with ThreadPoolExecutor(len(self.suite.nodes)) as pool:
# config nodes
list(pool.map(self.config_node, self.suite.nodes))
run_times = list(map(self.get_expected_runtime, not_in_storage))
if None not in run_times:
# +10s - is a rough estimation for additional operations per iteration
expected_run_time: int = int(sum(run_times) + 10 * len(not_in_storage)) # type: ignore
exec_time_s, end_dt_s = get_time_interval_printable_info(expected_run_time)
logger.info("Entire test should takes around %s and finish at %s", exec_time_s, end_dt_s)
for job in not_in_storage:
results: List[TimeSeries] = []
for retry_idx in range(self.max_retry):
logger.info("Preparing job %s", job.params.summary)
# prepare nodes for new iterations
wait([pool.submit(self.prepare_iteration, node, job) for node in self.suite.nodes])
expected_job_time = self.get_expected_runtime(job)
if expected_job_time is None:
logger.info("Job execution time is unknown")
else:
exec_time_s, end_dt_s = get_time_interval_printable_info(expected_job_time)
logger.info("Job should takes around %s and finish at %s", exec_time_s, end_dt_s)
if self.on_tests_boundry is not None:
self.on_tests_boundry(True)
jfutures = [pool.submit(self.run_iteration, node, job) for node in self.suite.nodes]
failed = False
for future in jfutures:
try:
results.extend(future.result())
except EnvironmentError:
failed = True
if self.on_tests_boundry is not None:
self.on_tests_boundry(False)
if not failed:
break
if self.max_retry - 1 == retry_idx:
logger.exception("Fio failed")
raise StopTestError()
logger.exception("During fio run")
logger.info("Sleeping %ss and retrying job", self.retry_time)
time.sleep(self.retry_time)
results = []
# per node jobs start and stop times
start_times: List[int] = []
stop_times: List[int] = []
for ts in results:
self.storage.put_ts(ts)
if len(ts.times) >= 2: # type: ignore
start_times.append(ts.times[0])
stop_times.append(ts.times[-1])
if len(start_times) > 0:
min_start_time = min(start_times)
max_start_time = max(start_times)
min_stop_time = min(stop_times)
max_allowed_time_diff = int((min_stop_time - max_start_time) * self.max_rel_time_diff)
max_allowed_time_diff = max(max_allowed_time_diff, self.max_time_diff)
if min_start_time + self.max_time_diff < max_allowed_time_diff:
logger.warning("Too large difference in %s:%s start time - %s. " +
"Max recommended difference is %s",
self.name, job.summary,
max_start_time - min_start_time, self.max_time_diff)
if min_stop_time + self.max_time_diff < max_allowed_time_diff:
logger.warning("Too large difference in %s:%s stop time - %s. " +
"Max recommended difference is %s",
self.name, job.summary,
max_start_time - min_start_time, self.max_time_diff)
one_s = int(unit_conversion_coef_f('s', results[0].time_units))
job.reliable_info_range = (int(max_start_time) + one_s, int(min_stop_time) - one_s)
self.storage.put_job(self.suite, job)
self.storage.sync()
@abc.abstractmethod
def config_node(self, node: IRPCNode) -> None:
pass
@abc.abstractmethod
def prepare_iteration(self, node: IRPCNode, job: JobConfig) -> None:
pass
@abc.abstractmethod
def run_iteration(self, node: IRPCNode, job: JobConfig) -> List[TimeSeries]:
pass
class TwoScriptTest(ThreadedTest, metaclass=abc.ABCMeta):
def __init__(self, *dt, **mp) -> None:
ThreadedTest.__init__(self, *dt, **mp)
self.prerun_script = self.suite.params['prerun_script']
self.run_script = self.suite.params['run_script']
self.prerun_tout = self.suite.params.get('prerun_tout', 3600)
self.run_tout = self.suite.params.get('run_tout', 3600)
# TODO: fix job_configs field
raise NotImplementedError("Fix job configs")
def get_expected_runtime(self, job: JobConfig) -> Optional[int]:
return None
def config_node(self, node: IRPCNode) -> None:
node.copy_file(self.run_script, self.join_remote(self.run_script))
node.copy_file(self.prerun_script, self.join_remote(self.prerun_script))
cmd = self.join_remote(self.prerun_script)
cmd += ' ' + self.suite.params.get('prerun_opts', '')
node.run(cmd, timeout=self.prerun_tout)
def prepare_iteration(self, node: IRPCNode, job: JobConfig) -> None:
pass
def run_iteration(self, node: IRPCNode, job: JobConfig) -> List[TimeSeries]:
# TODO: have to store logs
cmd = self.join_remote(self.run_script)
cmd += ' ' + self.suite.params.get('run_opts', '')
return self.parse_results(node.run(cmd, timeout=self.run_tout))
@abc.abstractmethod
def parse_results(self, data: str) -> List[TimeSeries]:
pass
| apache-2.0 |
ssylvia/swipe-map-730px-mobile | Swipe/src/resources/nls/et/template.js | 14888 | define(
({
viewer: {
loading: {
step1: "RAKENDUSE LAADIMINE",
step2: "ANDMETE LAADIMINE",
step3: "LÄHTESTAMINE",
fail: "Vabandust, rullimisvahendi laadimine nurjus",
loadBuilder: "VAHETA KOOSTAJA TÜÜP",
redirectSignIn: "SUUNAN ÜMBER SISSELOGIMISE LEHELE",
redirectSignIn2: "(Teid suunatakse peale sisselogimist siia)",
failButton: "Proovi uuesti"
},
errors: {
boxTitle: "Esines tõrge",
portalSelf: "Fataalne viga: portaali seadistuse hankimine nurjus",
invalidConfig: "Fataalne viga: vigane konfiguratsioon",
invalidConfigNoWebmap: "Fataalne viga: vigane konfiguratsioon (ühtegi veebikaarti ei ole määratud)",
createMap: "Võimetu koostama kaarti",
invalidApp: "Fataalne viga: rakendust ei saa laadida",
initMobile: "Tere tulemast rullimise veebirakendusse. Antud rakendus ei ole seadistatud. Interaktiivne koostaja ei ole mobiilsetes seadmetes toetatud.",
noBuilderIE8: "Rullimise interaktiivne koostaja ei ole toetatud Internet Explorer 9 eelse(te)s versiooni(de)s.",
noLayerView: "Tere tulemast rullimise veebirakendusse.<br />Antud rakendus ei ole veel seadistatud.",
appSave: "Viga veebirakenduse salvestamisel",
mapSave: "Viga veebikaardi salvestamisel",
notAuthorized: "Teil ei ole luba sellele rakendusele juurde pääseda",
conflictingProjectionsTitle: "Konfliktsed projektsioonid",
conflictingProjections: "Rullimisel ei toetata kahe erineva projektsiooniga veebikaardi kasutamist. Avage seaded ja kasutage veebikaarti, mis kasutab samasugust projektsiooni nagu esimene kaart.",
cpButton: "Sulge"
},
mobileView: {
hideIntro: "PEIDA INTRO",
navLeft: "Legend",
navMap: "Kaart",
navRight: "Andmed"
},
desktopView: {
storymapsText: "Kaardilugu",
builderButton: "Vaheta koostamise režiimile",
bitlyTooltip: "Tekita kiirlink rakendusele"
}
},
builder: {
builder: {
panelHeader: "RAKENDUSE KONFIGURATSIOON",
buttonSave: "SALVESTA",
buttonHelp: "Abi",
buttonShare: "Jaga",
buttonDiscard: "TÜHISTA",
buttonSettings: "Seaded",
buttonView: "Vaaterežiim",
buttonItem: "Ava veebirakenduse sisu",
noPendingChange: "Ühtegi muudatust ei ole ootel",
unSavedChangeSingular: "1 mittesalvestatud muudatus",
unSavedChangePlural: "mittesalvestatud muudatust",
popoverDiscard: "Olete kindel, et soovite loobuda salvestamata muudatustest?",
yes: "Jah",
no: "Ei",
popoverOpenViewExplain: "Kui avate sirvija, siis kaotate kõik salvestamata muudatused",
popoverOpenViewOk: "Ok",
popoverOpenViewCancel: "Tühista",
popoverSaveWhenDone: "Ärge unustage salvestada, kui olete lõpetanud",
closeWithPendingChange: "Olete kindel, et soovite tegevuse kinnitada? Teie tehtud muudatused võivad kaduma minna.",
gotIt: "Ok",
savingApplication: "Salvestan rakenduse",
saveSuccess: "Rakenduse salvestamine õnnestus",
saveError: "Salvestamine ebaõnnestus, palun proovi uuesti",
saveError2: "Salvestamine nurjus vigase HTML-märgendi tõttu nimes või kirjelduses",
saveError3: "Pealkiri ei saa olla tühi",
signIn: "Palun logige konto kaudu sisse",
signInTwo: ", et salvestada rakendus."
},
header:{
editMe: "Muuda mind !",
templateTitle: "Määra mallile pealkiri",
templateSubtitle: "Määra mallile alampealkiri"
},
settings: {
settingsHeader: "Rakenduse seaded",
modalCancel: "Tühista",
modalApply: "Kehtesta"
},
settingsColors: {
settingsTabColor: "Teema",
settingsColorExplain: "Vali apiga seotud teema või määra värvid.",
settingsLabelColor: "Päise ja küljepaneeli taustvärvid"
},
settingsHeader: {
settingsTabLogo: "Päis",
settingsLogoExplain: "Kohanda päise logo (maksimum on 250 x 50px).",
settingsLogoEsri: "Esri logo",
settingsLogoNone: "Logo puudub",
settingsLogoCustom: "Valikuline logo",
settingsLogoCustomPlaceholder: "Pildi URL",
settingsLogoCustomTargetPlaceholder: "Kliki läbi lingi",
settingsLogoSocialExplain: "Kohanda päist ülemise paremal pool asuva lingi kaudu.",
settingsLogoSocialText: "Tekst",
settingsLogoSocialLink: "Link",
settingsLogoSocialDisabled: "See funktsioon on administraatori poolt välja lülitatud"
},
settingsExtent: {
settingsTabExtent: "Kuvaulatus",
settingsExtentExplain: "Määra sisemine ulatus läbi alloleva interaktiivse kaardi.",
settingsExtentExplainBottom: "Määratud ulatus muudab veebikaardi sisemist ulatust. Pange tähele, et kui teete rullimise seeriaid, siis ulatust ei kasutata.",
      settingsExtentDateLineError: "Ulatus ei saa ületada 180° pikkuskraadi",
settingsExtentDateLineError2: "Ulatuse arvutamise viga",
settingsExtentDrawBtn: "Joonista uus ulatus",
settingsExtentModifyBtn: "Muuda praegust ulatust",
settingsExtentApplyBtn: "Kehtesta peamisele kaardile",
settingsExtentUseMainMap: "Kasuta peamist kaardiulatust"
}
},
swipe: {
mobileData: {
noData: "Pole andmeid, mida kuvada!",
noDataExplain: "Puuduta kaarti, et valida objekt ning tule siia tagasi",
noDataMap: "Sellel kaardil pole andmeid",
noPopup: "Selle objekti kohta ei leitud hüpikakent"
},
mobileLegend: {
noLegend: "Kuvamiseks puudub legend."
},
swipeSidePanel: {
editTooltip: "Määra küljepaneeli kirjeldus",
editMe: "Muuda mind !",
legendTitle: "Legend"
},
infoWindow: {
noFeature: "Pole andmeid, mida kuvada",
noFeatureExplain: "Objekti valimiseks puudutage kaarti"
},
settingsLayout: {
settingsTabLayout: "Rullimise stiil",
settingsLayoutExplain: "Vali rullimisvahendi stiil.",
settingsLayoutSwipe: "Vertikaalne riba",
settingsLayoutSpyGlass: "Pikksilm",
settingsLayoutSelected: "Valitud paigutus",
settingsLayoutSelect: "Vali see paigutus",
settingsSaveConfirm: "Mõned muudatustest eeldavad, et salvestad tööseisu ning avad rakenduse uuesti"
},
settingsDataModel: {
settingsTabDataModel: "Rullitav tüüp",
settingsDataModelExplainSwipe: "Mida kasutajad peaksid rullima?",
settingsDataModelExplainSwipe2: "",
settingsDataModelExplainSpyGlass: "Vali kiht või veebikaart, mis ilmub pikksilma.",
settingsDataModelOneMap: "Kiht veebikaardil",
settingsDataModel1Explain: "Vali kiht rullimiseks",
settingsDataModel1Warning: "Rullimine ei mõju, kui vastav kiht on peidetud ülemiste kihtide alla.",
settingsDataModel1SpyGlassExplain: "Vali kiht, mis ilmub pikksilma.",
settingsDataModelTwoMaps: "Kaks veebikaarti",
settingsDataModelLayerIds: "Veebikaardi kihi ID-d",
settingsDataModelSelected: "Valitud tüüp",
settingsDataModelWebmapSwipeId1: "Parempoolse veebikaardi ID",
settingsDataModelWebmapSwipeId2: "Vasakpoolse veebikaardi ID",
settingsDataModelWebmapGlassId1: "Peamise veebikaardi ID",
settingsDataModelWebmapGlassId2: "Pikksilma veebikaardi ID",
settingsDataModelSelect: "Vali see tüüp",
settingsDataModel2Explain: "Rulli mõne teise veebikaardiga.",
settingsDataModel2SpyGlassExplain: "Ilmu teises veebikaardis.",
settingsDataModel2HelpTitle: "Kuidas leida veebikaardi ID-d?",
settingsDataModel2HelpContent: "Kopeeri ja kleebi numbrid pärast \'=\' märki veebikaardi URL sisust",
switchMaps: "Vaheta kaarti",
browseWebMaps: "Sirvi veebikaarte"
},
settingsLegend: {
settingsTabLegend: "Rakenduse paigutus",
settingsLegendExplain: "Vali rakenduse paigutuse seaded.",
settingsLegendEnable: "Lülita legend sisse",
settingsDescriptionEnable: "Lülita kirjeldus sisse",
settingsBookmarksEnable: "Võimalda rullimise seeriad",
settingsPopupDisable: "Luba hüpikaken",
settingsLocationSearchEnable: "Luba asukoha otsing",
settingsGeolocatorEnable: "Luba geolokaator",
settingsLegendHelpContent: "Legendi sisu täpsustamiseks kasuta ArcGIS.com veebikaardi akna kihtide loendit (peidetud legendi)",
settingsSeriesHelpContent: "Rullimiste seeria on vahelehtedega navigeerimise valik, mis viib vaataja konkreetsele kuvaulatusele ja kuvab pealkirja ning kirjeldust külje paneelil. Algselt aktiveerimiseks imporditakse veebikaardi järjehoidjad ja kasutatakse seeriate lehe eelgenereerimiseks. Seeriate valiku keelamine lülitab välja ka seeriate riba, aga seeriate konfiguratsioon säilitatakse tulevikus kasutamiseks.",
settingsSeriesHelpContent2: "Vahetamise seeriad lubab teil luua ja muuta asukohtade selektsiooni koos vastavate pealkirjade ja tekstidega. Kui teie veebikaartidel on järjehoidjad, siis kuvatakse neid. Võite keelata seeriad, kuid konfiguratsioon säilitatakse võimalikuks kasutuseks tulevikuks.",
settingsSeriesHelpLink: "Vaata vahetatavate seeriatega rakenduse näidet siit",
preview: "UI vaade",
settingsLocateButtonExplain: "Funktsionaalsus on toetatud enamustel nutiseadmetel ja töökoha brauserites (k.a Internet Explorer 9+).",
settingsLocateButton: "Luba asukoha määramise nupp toetatud brauserites",
settingsAddressSearch: "Luba aadressi otsimise tööriist"
},
settingsSwipePopup: {
settingsSwipePopup: "Hüpikaken",
settingsSwipePopupExplain: "Kohanda hüpikakende päise välimust, et hõlbustada kasutajal seostada hüpikaknaid kaartide kihtidega.",
settingsSwipePopupSwipe1: "Vasakpoolne kaart",
settingsSwipePopupSwipe2: "Parempoolne kaart",
settingsSwipePopupGlass1: "Peamine kaart",
settingsSwipePopupGlass2: "Pikksilma kaart",
settingsSwipePopupTitle: "Päise pealkiri",
settingsSwipePopupColor: "Päise värv"
},
initPopup: {
initHeader: "Tere tulemast koostajasse Võrdlus/pikksilm",
modalNext: "Järgmine",
modalPrev: "Eelmine",
modalApply: "Ava app"
},
seriesPanel: {
title: "Pealkiri",
descr: "Kirjeldus",
discard: "Loobu järjehoidjast",
saveExtent: "Määra järjehoidja ulatus",
discardDisabled: "Te ei saa seda järjehoidjat eemaldada. Vahetamise seeriaid saab keelata seadetest."
},
helpPopup: {
title: "Abi",
close: "Sulge",
tab1: {
div1: "Mall Rullimine/pikksilm on loodud kahe erineva veebikaardi või ühe veebikaardi kahe kihi võrdlemiseks meeldivas ja lihtsasti kasutatavas veebirakenduses, mida saab kasutada mis tahes veebibrauseris ja seadmes, k.a nutitelefonides ja tahvelarvutites.",
div2: "Lisateabe saamiseks malli Rullimine/pikksilm kohta (sh kasutajate loodud näidised) <a href='http://storymaps.arcgis.com/en/app-list/swipe/' target='_blank'> külastage kaardilugude veebisaiti</a>. Saate jälgida ka meie Twitteri kontot <a href='https://twitter.com/EsriStoryMaps' target='_blank'>@EsriStoryMaps</a>.",
div3: "Ootame tagasisidet! Kui teil on küsimusi, soovite küsida uue funktsiooni kohta või arvate, et olete leidnud vea, siis külastage meid <a href='http://links.esri.com/storymaps/forum' target='_blank'>Kaardilugude kasutajate foorumis</a>."
}
},
share: {
firstSaveTitle: "Rakendus on salvestatud",
firstSaveHeader: "Teie rakendus on nüüd salvestatud ArcGIS Online’i keskkonda. Lugege järgnevate korduma kippuvate küsimuste vastuseid.",
firstSaveA1: "Kui te ei ole tuttav ArcGIS Online keskkonnaga või soovite otseteed redigeerimise liidesele, siis võite salvestada järgmise lingi: %LINK1%",
firstSaveA1bis: "Rakenduse leiate samuti oma <a href='%LINK2%' target='_blank'>ArcGIS Online’i sisu kaustast</a>.",
firstSaveQ2: "Kas minu rakendus on jagatud?",
firstSaveA2: "Hetkel pole teie rakendus jagatud. Selle jagamiseks kasutage nuppu JAGA.",
shareTitle: "Rakenduse jagamine",
sharePrivateHeader: "Teie rakendus pole jagatud, kas soovite selle jagada?",
sharePrivateBtn1: "Jaga avalikult",
sharePrivateBtn2: "Jaga oma organisatsiooniga",
sharePrivateProgress: "Jagamine...",
sharePrivateErr: "Jagamine ebaõnnestus, proovige uuesti või",
sharePrivateOk: "Jagamine uuendatud edukalt, laadin...",
shareStatus1: "Rakendus pole salvestatud",
shareStatus2: "Rakendus on jagatud avalikult",
shareStatus3: "Rakendus on jagatud organisatsiooniga",
shareStatus4: "Rakendus pole jagatud",
sharePreviewAsUser: "Eelvaade",
shareHeader1: "Teie rakendus on <strong>avalikult kättesaadav</strong>.",
shareHeader2: "Teie rakendus on kättesaadav organisatsiooni liikmetele (sisselogimine nõutud).",
shareLinkHeader: "Jagage rakendust oma sihtrühmaga",
shareLinkOpen: "AVA",
learnMore: "Loe lähemalt",
shareQ1Opt1: "Kuidas saan hoida rakenduse privaatsena?",
shareQ1Opt2: "Kuidas saan hoida rakenduse privaatsena või jagada seda avalikult?",
shareA1: "Kasutage nuppu %SHAREIMG% <a href='%LINK1%' target='_blank'>rakenduse üksuse lehel</a>. Kui soovite tühistada ka veebikaardi jagamise, siis kasutage <a href='%LINK2%' target='_blank'>veebikaardi üksuse lehte</a>.",
shareA1bis: "Kui soovite jagamise eemaldada ka objektiteenuselt, kasutage <a href='%LINK1%' target='_blank'>objektiteenuse sisu lehte</a>.",
shareQ2: "Kuidas ma saan rakendust hiljem muuta?",
shareQ2bis: "Kuidas ma pääsen tagasi loomise liidesesse?",
shareA2div1: "Salvestage ja taaskasutage järgnevat linki %LINK1% või kasutage <a href='%LINK2%' target='_blank'>rakenduse sisu lehte</a>.",
shareA2div2: "Kui olete rakenduse omanikuna ArcGIS.com’i sisse logitud, sisaldab rakendus nuppu interaktiivse koostaja avamiseks.",
shareQ3: "Kuhu on andmed majutatud?",
shareA3: "Rakenduse konfiguratsioon on salvestatud selles veebirakenduse üksuses</a>.",
shareWarning: "%WITH% jagamine on keelatud, kuna te pole <a href='%LINK%' target='_blank'>veebikaardi</a> omanik.",
shareWarningWith1: "Avalikult",
shareWarningWith2: "Avalikult ja organisatsiooniga"
},
directCreation: {
header: "Tere tulemast kasutama Rullimise/pikksilma koostajat",
mapPickHeader: "Alustamiseks sisestage veebikaardi id või kasutage otsingunuppu veebikaartide sirvimiseks.",
launchBuilder: "Käivita koostaja",
chooseWebmapLbl: "Valige veebikaart...",
explain2: "Rullimise või pikksilma kaardiloo loomiseks kasutage allolevat nuppu kasutatava olemasoleva ArcGIS Online’i veebikaardi valimiseks. Teiseks võimaluseks on veebikaardi ID kleepimine allpool olevale väljale.",
explain3: "Kui soovite kaardiloos kasutada kahte veebikaarti, siis küsitakse selle valiku valimisel hiljem teist veebikaarti.",
webmapPlaceholder: "Sisestage kaardi id..."
}
},
configure: {
mapdlg:{
items:{
organizationLabel: "Minu organisatsioon",
onlineLabel: "ArcGIS Online",
contentLabel: "Minu sisu",
favoritesLabel: "Minu lemmikud"
},
title: "Vali veebikaart",
searchTitle: "Otsi",
ok: "Ok",
cancel: "Tühista",
placeholder: "Sisestage otsingusõnad"
}
}
})
);
| apache-2.0 |
rgayon/plaso | tests/parsers/test_lib.py | 9163 | # -*- coding: utf-8 -*-
"""Parser related functions and classes for testing."""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from dfvfs.file_io import fake_file_io
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.path import fake_path_spec
from dfvfs.resolver import context as dfvfs_context
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import sessions
from plaso.engine import knowledge_base
from plaso.formatters import manager as formatters_manager
from plaso.formatters import mediator as formatters_mediator
from plaso.parsers import interface
from plaso.parsers import mediator
from plaso.storage.fake import writer as fake_writer
from tests import test_lib as shared_test_lib
class ParserTestCase(shared_test_lib.BaseTestCase):
"""Parser test case."""
def _CreateFileObject(self, filename, data):
"""Creates a file-like object.
Args:
filename (str): name of the file.
data (bytes): data of the file.
Returns:
dfvfs.FakeFile: file-like object.
"""
resolver_context = dfvfs_context.Context()
file_object = fake_file_io.FakeFile(resolver_context, data)
location = '/{0:s}'.format(filename)
test_path_spec = fake_path_spec.FakePathSpec(location=location)
file_object.open(path_spec=test_path_spec)
return file_object
def _CreateParserMediator(
self, storage_writer, collection_filters_helper=None, file_entry=None,
knowledge_base_values=None, parser_chain=None, timezone='UTC'):
"""Creates a parser mediator.
Args:
storage_writer (StorageWriter): storage writer.
collection_filters_helper (Optional[CollectionFiltersHelper]): collection
filters helper.
file_entry (Optional[dfvfs.FileEntry]): file entry object being parsed.
knowledge_base_values (Optional[dict]): knowledge base values.
parser_chain (Optional[str]): parsing chain up to this point.
timezone (Optional[str]): timezone.
Returns:
ParserMediator: parser mediator.
"""
knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in knowledge_base_values.items():
if identifier == 'codepage':
knowledge_base_object.SetCodepage(value)
else:
knowledge_base_object.SetValue(identifier, value)
knowledge_base_object.SetTimeZone(timezone)
parser_mediator = mediator.ParserMediator(
storage_writer, knowledge_base_object,
collection_filters_helper=collection_filters_helper)
if file_entry:
parser_mediator.SetFileEntry(file_entry)
if parser_chain:
parser_mediator.parser_chain = parser_chain
return parser_mediator
def _CreateStorageWriter(self):
"""Creates a storage writer object.
Returns:
FakeStorageWriter: storage writer.
"""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
return storage_writer
def _GetEventDataOfEvent(self, storage_writer, event):
"""Retrieves the event data of an event.
Args:
storage_writer (FakeStorageWriter): storage writer.
event (EventObject): event.
Return:
EventData: event data corresponding to the event.
"""
event_data_identifier = event.GetEventDataIdentifier()
return storage_writer.GetEventDataByIdentifier(event_data_identifier)
def _GetShortMessage(self, message_string):
"""Shortens a message string to a maximum of 80 character width.
Args:
message_string (str): message string.
Returns:
str: short message string, if it is longer than 80 characters it will
        be shortened to its first 77 characters followed by a "...".
"""
if len(message_string) > 80:
return '{0:s}...'.format(message_string[:77])
return message_string
def _ParseFile(
self, path_segments, parser, collection_filters_helper=None,
knowledge_base_values=None, timezone='UTC'):
"""Parses a file with a parser and writes results to a storage writer.
Args:
path_segments (list[str]): path segments inside the test data directory.
parser (BaseParser): parser.
collection_filters_helper (Optional[CollectionFiltersHelper]): collection
filters helper.
knowledge_base_values (Optional[dict]): knowledge base values.
timezone (Optional[str]): timezone.
Returns:
FakeStorageWriter: storage writer.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
test_file_path = self._GetTestFilePath(path_segments)
self._SkipIfPathNotExists(test_file_path)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)
return self._ParseFileByPathSpec(
path_spec, parser, collection_filters_helper=collection_filters_helper,
knowledge_base_values=knowledge_base_values, timezone=timezone)
def _ParseFileByPathSpec(
self, path_spec, parser, collection_filters_helper=None,
knowledge_base_values=None, timezone='UTC'):
"""Parses a file with a parser and writes results to a storage writer.
Args:
path_spec (dfvfs.PathSpec): path specification.
parser (BaseParser): parser.
collection_filters_helper (Optional[CollectionFiltersHelper]): collection
filters helper.
knowledge_base_values (Optional[dict]): knowledge base values.
timezone (Optional[str]): timezone.
Returns:
FakeStorageWriter: storage writer.
"""
storage_writer = self._CreateStorageWriter()
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
parser_mediator = self._CreateParserMediator(
storage_writer, collection_filters_helper=collection_filters_helper,
file_entry=file_entry, knowledge_base_values=knowledge_base_values,
timezone=timezone)
if isinstance(parser, interface.FileEntryParser):
parser.Parse(parser_mediator)
elif isinstance(parser, interface.FileObjectParser):
file_object = file_entry.GetFileObject()
try:
parser.Parse(parser_mediator, file_object)
finally:
file_object.close()
else:
self.fail('Got unsupported parser type: {0!s}'.format(type(parser)))
return storage_writer
def _TestGetMessageStrings(
self, event_data, expected_message, expected_short_message):
"""Tests the formatting of the message strings.
This function invokes the GetMessageStrings function of the event data
formatter on the event data and compares the resulting messages strings
with those expected.
Args:
event_data (EventData): event data.
expected_message (str): expected message string.
expected_short_message (str): expected short message string.
"""
formatters_directory_path = self._GetDataFilePath(['formatters'])
formatters_manager.FormattersManager.ReadFormattersFromDirectory(
formatters_directory_path)
formatter_mediator = formatters_mediator.FormatterMediator(
data_location=shared_test_lib.DATA_PATH)
message, message_short = (
formatters_manager.FormattersManager.GetMessageStrings(
formatter_mediator, event_data))
self.assertEqual(message, expected_message)
self.assertEqual(message_short, expected_short_message)
def CheckEventValues(self, storage_writer, event, expected_event_values):
"""Asserts that an event and its event data matches the expected values.
Args:
storage_writer (StorageWriter): storage writer.
event (EventObject): event to check.
expected_event_values (dict[str, list[str]): expected values of the event
and event data attribute values per name.
"""
event_data = None
for name, expected_value in expected_event_values.items():
if name == 'timestamp' and isinstance(expected_value, str):
posix_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=event.timestamp)
value = posix_time.CopyToDateTimeString()
elif name in ('timestamp', 'timestamp_desc'):
value = getattr(event, name, None)
else:
if not event_data:
event_data = self._GetEventDataOfEvent(storage_writer, event)
value = getattr(event_data, name, None)
self.assertEqual(value, expected_value)
def CheckTimestamp(self, timestamp, expected_date_time):
"""Asserts that a timestamp value matches the expected date and time.
Args:
timestamp (int): timestamp, which contains the number of microseconds
since January 1, 1970, 00:00:00 UTC.
expected_date_time (str): expected date and time in UTC, formatted as:
YYYY-MM-DD hh:mm:ss.######
"""
posix_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
date_time = posix_time.CopyToDateTimeString()
self.assertEqual(date_time, expected_date_time)
| apache-2.0 |
LearnLib/automatalib | incremental/src/test/java/net/automatalib/incremental/mealy/IncrementalMealyDAGBuilderTest.java | 3046 | /* Copyright (C) 2013-2022 TU Dortmund
* This file is part of AutomataLib, http://www.automatalib.net/.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.automatalib.incremental.mealy;
import java.io.IOException;
import java.util.List;
import net.automatalib.commons.util.Pair;
import net.automatalib.incremental.IntegrationUtil;
import net.automatalib.incremental.IntegrationUtil.ParsedTraces;
import net.automatalib.incremental.mealy.dag.IncrementalMealyDAGBuilder;
import net.automatalib.words.Alphabet;
import net.automatalib.words.Word;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test
public class IncrementalMealyDAGBuilderTest extends AbstractIncrementalMealyBuilderTest {
@Override
protected <I, O> IncrementalMealyBuilder<I, O> createIncrementalMealyBuilder(Alphabet<I> alphabet) {
return new IncrementalMealyDAGBuilder<>(alphabet);
}
/**
     * This test case validates a set of traces from an external system which exposed an issue in confluence
* propagation.
*/
@Test
public void testIntegration() throws IOException {
validateTraces("/spa/mealy_traces.gz");
}
/**
* Test case based on <a href="https://github.com/LearnLib/learnlib/issues/76">LearnLib issue #76</a>.
*/
@Test
public void testLearnLib76() throws IOException {
validateTraces("/learnlib76/mealy.gz");
}
private void validateTraces(String pathToTraces) throws IOException {
final ParsedTraces<Integer, Word<Integer>> parsedData = IntegrationUtil.parseMealyTraces(pathToTraces);
final Alphabet<Integer> alphabet = parsedData.alphabet;
final List<Pair<Word<Integer>, Word<Integer>>> traces = parsedData.traces;
final IncrementalMealyBuilder<Integer, Integer> cache = createIncrementalMealyBuilder(alphabet);
// test insertion without errors
for (Pair<Word<Integer>, Word<Integer>> trace : traces) {
final Word<Integer> input = trace.getFirst();
final Word<Integer> value = trace.getSecond();
cache.insert(input, value);
// test direct caching behavior
Assert.assertEquals(value, cache.lookup(input));
}
// test global caching behavior
for (Pair<Word<Integer>, Word<Integer>> trace : traces) {
final Word<Integer> input = trace.getFirst();
final Word<Integer> value = trace.getSecond();
Assert.assertEquals(value, cache.lookup(input));
}
}
}
| apache-2.0 |
fengyanjava/msb-android | pulltorefresh/src/androidTest/java/com/example/pulltorefresh/ApplicationTest.java | 356 | package com.example.pulltorefresh;
import android.app.Application;
import android.test.ApplicationTestCase;
/**
* <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a>
*/
public class ApplicationTest extends ApplicationTestCase<Application> {
public ApplicationTest() {
super(Application.class);
}
} | apache-2.0 |
tkonopka/Bamformatics | src/bamfo/gui/viewers/BamfoViewer.java | 2267 | /*
* Copyright 2013 Tomasz Konopka.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bamfo.gui.viewers;
import java.awt.Component;
import java.io.File;
import javax.swing.JOptionPane;
/**
* This interface makes sure that all viewers can identify what file they are
* currently watching.
*
*
* @author tomasz
*/
public abstract class BamfoViewer extends javax.swing.JPanel {
boolean canSave = false;
boolean canSaveAs = false;
    // Once a file has been saved, the destination file is recorded here.
    // When this is not null, the save() option will be activated.
File savefile = null;
/**
* This should output an identifier of the file/object that is being
* displayed in the viewer.
*
* @return
*/
public abstract String getViewerDescriptor();
public boolean canSave() {
return (savefile != null && canSaveAs());
}
public boolean canSaveAs() {
return canSaveAs;
}
boolean overwrite(File ff) {
// if file does not exist, that's the same as overwriting a file
if (!ff.exists()) {
return true;
}
int confirm = JOptionPane.showConfirmDialog((Component) null,
"File " + ff.getName() + " already exists. Overwrite it?",
"Overwrite", JOptionPane.YES_NO_OPTION);
// abort if user aborts
if (confirm == JOptionPane.YES_OPTION) {
return true;
} else {
return false;
}
}
public void save() {
if (savefile != null) {
saveAs(savefile);
}
}
/**
     * Each implementation of the viewer defines how its contents are written to the given file.
     *
     * @param newfile the destination file to save to
*/
public abstract void saveAs(File newfile);
}
| apache-2.0 |
google/assertor | src/assertions/float.rs | 5991 | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::borrow::Borrow;
use std::fmt::Debug;
use num_traits::{Float, Zero};
use crate::base::{AssertionApi, AssertionResult, AssertionStrategy, Subject};
/// Trait for float assertion.
///
/// # Example
/// ```
/// use assertor::*;
/// assert_that!(0.1_f32).is_approx_equal_to(0.1);
/// assert_that!(0.1_f32)
/// .with_abs_tol(0.5)
/// .is_approx_equal_to(0.5);
/// assert_that!(0.1_f64)
/// .with_rel_tol(0.2)
/// .is_approx_equal_to(0.12); // 0.1 ± 0.12 * 0.2
/// ```
pub trait FloatAssertion<'a, S, R> {
/// Set the relative tolerance.
fn with_rel_tol(self, rel_tol: S) -> Subject<'a, S, FloatTolerance<S>, R>;
/// Set the absolute tolerance.
fn with_abs_tol(self, abs_tol: S) -> Subject<'a, S, FloatTolerance<S>, R>;
/// Checks the subject is equal to `expected` with tolerance.
///
/// The equality with tolerance is defined as following:
/// ```math
    /// abs(actual - expected) <= (abs_tol + rel_tol * abs(expected))
/// ```
/// See also: [numpy.isclose](https://numpy.org/doc/stable/reference/generated/numpy.isclose.html)
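    /// For example (illustrative numbers only): with the default tolerances
    /// `rel_tol = 1e-05` and `abs_tol = 1e-08`, an `expected` value of `100.0`
    /// allows a deviation of up to `1e-08 + 1e-05 * 100.0 = 0.00100001`.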
fn is_approx_equal_to<B: Borrow<S>>(&self, expected: B) -> R
where
FloatTolerance<S>: Default;
}
pub struct FloatTolerance<S> {
/// relative tolerance
rel_tol: S,
/// absolute tolerance
abs_tol: S,
}
impl<S> FloatTolerance<S> {
fn new(rel_tol: S, abs_tol: S) -> Self {
FloatTolerance { rel_tol, abs_tol }
}
fn with_rel_tol(mut self, rel_tol: S) -> Self {
self.rel_tol = rel_tol;
self
}
fn with_abs_tol(mut self, abs_tol: S) -> Self {
self.abs_tol = abs_tol;
self
}
}
impl<S: Zero> FloatTolerance<S> {
fn zeros() -> Self {
FloatTolerance::new(S::zero(), S::zero())
}
}
impl Default for FloatTolerance<f32> {
fn default() -> Self {
// from numpy.isclose()
FloatTolerance::new(1e-05, 1e-08)
}
}
impl Default for FloatTolerance<f64> {
fn default() -> Self {
// from numpy.isclose()
FloatTolerance::new(1e-05, 1e-08)
}
}
impl<'a, S, R> FloatAssertion<'a, S, R> for Subject<'a, S, FloatTolerance<S>, R>
where
S: Float + Debug,
AssertionResult: AssertionStrategy<R>,
{
fn with_rel_tol(mut self, rel_tol: S) -> Subject<'a, S, FloatTolerance<S>, R> {
self.option_mut().rel_tol = rel_tol;
self
}
fn with_abs_tol(mut self, abs_tol: S) -> Subject<'a, S, FloatTolerance<S>, R> {
self.option_mut().abs_tol = abs_tol;
self
}
fn is_approx_equal_to<B: Borrow<S>>(&self, expected: B) -> R {
let diff = (*self.actual() - *expected.borrow()).abs();
let tolerance: S = self.option().abs_tol + self.option().rel_tol * *expected.borrow();
if diff < tolerance {
self.new_result().do_ok()
} else {
self.new_result()
.add_fact("expected", format!("{:?}", expected.borrow()))
.add_fact("but was", format!("{:?}", self.actual()))
.add_fact("outside tolerance", format!("{:?}", tolerance))
.do_fail()
}
}
}
impl<'a, S, R: 'a> FloatAssertion<'a, S, R> for Subject<'a, S, (), R>
where
S: Float + Debug,
AssertionResult: AssertionStrategy<R>,
{
fn with_rel_tol(self, rel_tol: S) -> Subject<'a, S, FloatTolerance<S>, R> {
// XXX: consider to remove clone.
self.new_owned_subject(
*self.actual(),
self.description().clone(),
FloatTolerance::zeros().with_rel_tol(rel_tol),
)
}
fn with_abs_tol(self, abs_tol: S) -> Subject<'a, S, FloatTolerance<S>, R> {
// XXX: consider to remove clone.
self.new_owned_subject(
*self.actual(),
self.description().clone(),
FloatTolerance::zeros().with_abs_tol(abs_tol),
)
}
fn is_approx_equal_to<B: Borrow<S>>(&self, expected: B) -> R
where
FloatTolerance<S>: Default,
{
self.new_subject(self.actual(), None, FloatTolerance::default())
.is_approx_equal_to(expected)
}
}
#[cfg(test)]
mod tests {
use crate::testing::*;
use super::*;
#[test]
fn is_approx_equal_to() {
assert_that!(0.1_f32).is_approx_equal_to(0.1);
assert_that!(0.1_f32).is_approx_equal_to(0.1);
assert_that!(0.1_f32)
.with_abs_tol(0.5)
.is_approx_equal_to(0.5);
assert_that!(0.1_f32)
.with_rel_tol(0.2)
.is_approx_equal_to(0.12); // 0.1 ± 0.12 * 0.2
assert_that!(0.1_f64).is_approx_equal_to(0.1);
assert_that!(0.1_f64).is_approx_equal_to(0.100000001);
assert_that!(0.1_f64)
.with_abs_tol(0.5)
.is_approx_equal_to(0.5);
assert_that!(0.1_f64)
.with_rel_tol(0.2)
.is_approx_equal_to(0.12); // 0.1 ± 0.12 * 0.2
// Failures
assert_that!(check_that!(0.1).with_abs_tol(0.1).is_approx_equal_to(0.25)).facts_are(vec![
Fact::new("expected", "0.25"),
Fact::new("but was", "0.1"),
Fact::new("outside tolerance", "0.1"),
]);
assert_that!(check_that!(0.1).is_approx_equal_to(0.3)).facts_are(vec![
Fact::new("expected", "0.3"),
Fact::new("but was", "0.1"),
Fact::new("outside tolerance", "0.00000301"),
])
}
}
| apache-2.0 |
BenDol/Databind | apt/src/main/java/nz/co/doltech/databind/apt/reflect/ReflectionGenerator.java | 4742 | package nz.co.doltech.databind.apt.reflect;
import com.google.inject.Inject;
import com.google.inject.assistedinject.Assisted;
import nz.co.doltech.databind.apt.ElementUtils;
import nz.co.doltech.databind.apt.reflect.gwt.Emulation;
import nz.co.doltech.databind.apt.velocity.AbstractVelocityGenerator;
import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.exception.VelocityException;
import javax.inject.Provider;
import javax.lang.model.element.Modifier;
import javax.lang.model.element.PackageElement;
import javax.lang.model.element.TypeElement;
import javax.lang.model.type.TypeMirror;
import javax.lang.model.util.Elements;
import javax.lang.model.util.Types;
import java.util.logging.Logger;
public class ReflectionGenerator extends AbstractVelocityGenerator<TypeMirror> {
private final static Logger logger = Logger.getLogger(ReflectionGenerator.class.getName());
public static final String NAME = "_Reflection";
public interface Factory {
ReflectionGenerator createReflectionGenerator(
@Assisted("velocityTemplate") String velocityTemplate);
FieldGenerator createFieldGenerator(
@Assisted("velocityTemplate") String velocityTemplate);
ReflectionEndGenerator createReflectionEndGenerator(
@Assisted("velocityTemplate") String velocityTemplate);
ReflectionRegistryGenerator createReflectionRegistryGenerator(
@Assisted("velocityTemplate") String velocityTemplate);
}
private final Types typeUtils;
private final Elements elementUtils;
private final String velocityTemplate;
@Inject
public ReflectionGenerator(Provider<VelocityContext> velocityContextProvider,
VelocityEngine velocityEngine,
Types typeUtils,
Elements elementUtils,
@Assisted("velocityTemplate") String velocityTemplate) {
super(logger, velocityEngine, velocityContextProvider);
this.typeUtils = typeUtils;
this.elementUtils = elementUtils;
this.velocityTemplate = velocityTemplate;
}
@Override
protected String getTemplate() {
return velocityTemplate;
}
@Override
protected void populateVelocityContext(VelocityContext velocityContext, TypeMirror reflect) throws VelocityException {
TypeElement element = (TypeElement)typeUtils.asElement(reflect);
// Package name
PackageElement pkgElem = elementUtils.getPackageOf(element);
if(Emulation.isEmulated(pkgElem)) {
String pkgName = stripEmulationPrefix(pkgElem.getQualifiedName().toString());
velocityContext.put(PACKAGE, pkgName + ";");
} else {
velocityContext.put(PACKAGE, pkgElem.getQualifiedName() + ";");
}
// Implementation name
String name = element.getSimpleName().toString();
velocityContext.put(IMPL_NAME, name + NAME);
// Target name
velocityContext.put("targetName", name);
// Superclass name
TypeElement superClass = (TypeElement)typeUtils.asElement(element.getSuperclass());
if(superClass != null) {
String superClassName = superClass.getQualifiedName().toString();
if(Emulation.isEmulated(superClass)) {
superClassName = stripEmulationPrefix(superClassName);
}
if(!ReflectionAnnotationProcessor.getIgnoredClasses().contains(superClassName)) {
velocityContext.put("superClass", superClassName + ".class");
switch (superClassName) {
case "java.lang.Object":
velocityContext.put("superClassImpl", "nz.co.doltech.databind.reflect.base.ObjectClassReflection");
break;
case "com.google.gwt.core.client.JavaScriptObject":
velocityContext.put("superClassImpl", "nz.co.doltech.databind.reflect.gwt.base.JavaScriptObjectClassReflection");
break;
default:
velocityContext.put("superClassImpl", superClassName + NAME);
}
} else {
velocityContext.put("superClass", "null");
}
}
// Is abstract
velocityContext.put("abstract", ElementUtils.hasModifier(element, Modifier.ABSTRACT));
// Default constructor
velocityContext.put("defaultCtor", ElementUtils.hasDefaultCtor(element));
}
private static String stripEmulationPrefix(String name) {
return name.replace(Emulation.EMUL_PREFIX, "");
}
}
| apache-2.0 |
aws/aws-sdk-ruby | gems/aws-sdk-globalaccelerator/features/env.rb | 622 | # frozen_string_literal: true
# WARNING ABOUT GENERATED CODE
#
# This file is generated. See the contributing guide for more information:
# https://github.com/aws/aws-sdk-ruby/blob/version-3/CONTRIBUTING.md
#
# WARNING ABOUT GENERATED CODE
$:.unshift(File.expand_path('../../lib', __FILE__))
$:.unshift(File.expand_path('../../../aws-sdk-core/features', __FILE__))
$:.unshift(File.expand_path('../../../aws-sdk-core/lib', __FILE__))
$:.unshift(File.expand_path('../../../aws-sigv4/lib', __FILE__))
require 'features_helper'
require 'aws-sdk-globalaccelerator'
Aws::GlobalAccelerator::Client.add_plugin(ApiCallTracker)
| apache-2.0 |
edlectrico/android-jfact | src/uk/ac/manchester/cs/jfact/kernel/ConceptCreator.java | 198 | package uk.ac.manchester.cs.jfact.kernel;
class ConceptCreator implements NameCreator<Concept> {
@Override
public Concept makeEntry(String name) {
return new Concept(name);
}
}
| apache-2.0 |
zhangjunfang/jstorm-0.9.6.3- | jstorm-client/src/main/java/backtype/storm/command/rebalance.java | 1279 | package backtype.storm.command;
import java.security.InvalidParameterException;
import java.util.Map;
import backtype.storm.generated.RebalanceOptions;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;
/**
 * Rebalance a topology
*
* @author longda
*
*/
public class rebalance {
/**
* @param args
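	 *            command-line arguments; based on the parsing in main below the
	 *            expected form is: topologyName [delaySeconds]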
*/
@SuppressWarnings("rawtypes")
public static void main(String[] args) {
if (args == null || args.length == 0) {
throw new InvalidParameterException("Should input topology name");
}
String topologyName = args[0];
NimbusClient client = null;
try {
Map conf = Utils.readStormConfig();
client = NimbusClient.getConfiguredClient(conf);
if (args.length == 1) {
client.getClient().rebalance(topologyName, null);
} else {
int delaySeconds = Integer.parseInt(args[1]);
RebalanceOptions options = new RebalanceOptions();
options.set_wait_secs(delaySeconds);
client.getClient().rebalance(topologyName, options);
}
System.out.println("Successfully submit command rebalance "
+ topologyName);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
throw new RuntimeException(e);
} finally {
if (client != null) {
client.close();
}
}
}
}
| apache-2.0 |
ebi-uniprot/QuickGOBE | rest-common/src/test/java/uk/ac/ebi/quickgo/rest/search/solr/SolrQueryConverterTest.java | 11680 | package uk.ac.ebi.quickgo.rest.search.solr;
import uk.ac.ebi.quickgo.common.SolrCollectionName;
import uk.ac.ebi.quickgo.rest.search.query.*;
import java.util.HashSet;
import java.util.Set;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.common.params.CursorMarkParams;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static uk.ac.ebi.quickgo.rest.search.query.CursorPage.createCursorPage;
import static uk.ac.ebi.quickgo.rest.search.query.CursorPage.createFirstCursorPage;
/**
 * Tests the {@link SolrQueryConverter} implementation.
*/
public class SolrQueryConverterTest {
private static final String REQUEST_HANDLER_NAME = "/select";
private static final String COLLECTION = SolrCollectionName.COLLECTION;
private static final Set<String> WILDCARD_COMPATIBLE_FIELDS = new HashSet<>();
private SolrQueryConverter converter;
@Before
public void setUp() throws Exception {
converter = SolrQueryConverter.create(REQUEST_HANDLER_NAME);
}
@Test(expected = IllegalArgumentException.class)
public void nullRequestHandlerArgumentInConstructorThrowsException() throws Exception {
converter = SolrQueryConverter.createWithWildCardSupport(null, WILDCARD_COMPATIBLE_FIELDS);
}
@Test(expected = IllegalArgumentException.class)
public void emptyRequestHandlerArgumentInConstructorThrowsException() throws Exception {
converter = SolrQueryConverter.createWithWildCardSupport("", WILDCARD_COMPATIBLE_FIELDS);
}
@Test(expected = IllegalArgumentException.class)
public void nullWildCardListPassedToInstantiatingMethodThrowsException() throws Exception {
converter = SolrQueryConverter.createWithWildCardSupport("validValue", null);
}
@Test(expected = IllegalArgumentException.class)
public void nullSerializerInConstructorThrowsException() throws Exception {
converter = new SolrQueryConverter("validValue", null);
}
@Test(expected = IllegalArgumentException.class)
public void nullQueryRequestThrowsException() throws Exception {
converter.convert(null);
}
@Test
public void solrQueryReferencesCorrectRequestHandlerName() throws Exception {
QuickGOQuery fieldQuery = createBasicQuery();
QueryRequest request = new QueryRequest.Builder(fieldQuery, COLLECTION).build();
SolrQuery query = converter.convert(request);
assertThat(query.getRequestHandler(), is(REQUEST_HANDLER_NAME));
}
@Test
public void convertQueryRequestWithQueryAndPageParameters() throws Exception {
QuickGOQuery fieldQuery = createBasicQuery();
int currentPage = 2;
int pageSize = 25;
QueryRequest request = new QueryRequest.Builder(fieldQuery, COLLECTION)
.setPage(new RegularPage(currentPage, pageSize))
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getStart(), is(equalTo(25)));
assertThat(query.getRows(), is(equalTo(pageSize)));
}
@Test
public void convertQueryRequestWithQueryAndFacets() throws Exception {
QuickGOQuery fieldQuery = createBasicQuery();
String facetField1 = "facet1";
String facetField2 = "facet2";
QueryRequest request = new QueryRequest.Builder(fieldQuery, COLLECTION)
.addFacetField(facetField1)
.addFacetField(facetField2)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getFacetFields(), arrayContainingInAnyOrder(facetField1, facetField2));
}
@Test
public void convertQueryRequestWithQueryAndFilterQuery() throws Exception {
QuickGOQuery fieldQuery = createBasicQuery();
String filterField = "filterField1";
String filterValue = "filterValue1";
QuickGOQuery filterFieldQuery = QuickGOQuery.createQuery(filterField, filterValue);
QueryRequest request = new QueryRequest.Builder(fieldQuery, COLLECTION)
.addQueryFilter(filterFieldQuery)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getFilterQueries(), arrayContaining(buildFieldQuery(filterField, filterValue)));
}
@Test
public void defaultConvertQueryRequestDoesNotUseHighlighting() {
QuickGOQuery fieldQuery = createBasicQuery();
QueryRequest request = new QueryRequest.Builder(fieldQuery, COLLECTION).build();
SolrQuery query = converter.convert(request);
assertThat(query.getHighlight(), is(false));
}
@Test
public void convertQueryRequestWithHighlightingOffWillNotUseHighlighting() {
QuickGOQuery fieldQuery = createBasicQuery();
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getHighlight(), is(false));
}
@Test
public void convertQueryRequestWithHighlightingWillUseHighlighting() {
QuickGOQuery fieldQuery = createBasicQuery();
String highlightedField = "highlightedField";
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.addHighlightedField(highlightedField)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getHighlight(), is(true));
assertThat(query.getHighlightFields(), arrayContainingInAnyOrder(highlightedField));
}
@Test
public void convertQueryRequestWithProjectedFieldWillProjectThatField() {
QuickGOQuery fieldQuery = createBasicQuery();
String projectedField = "projectedField";
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.addProjectedField(projectedField)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getFields(), is(projectedField));
}
@Test
public void convertQueryRequestWithTwoProjectedFieldsWillProjectTwoFields() {
QuickGOQuery fieldQuery = createBasicQuery();
String projectedField1 = "projectedField1";
String projectedField2 = "projectedField2";
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.addProjectedField(projectedField1)
.addProjectedField(projectedField2)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getFields(), is(projectedField1 + "," + projectedField2));
}
@Test
public void convertQueryRequestWithNoProjectedFieldWillProjectNoFields() {
QuickGOQuery fieldQuery = createBasicQuery();
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getFields(), is(nullValue()));
}
@Test
public void convertFirstQueryRequestWithCursorUsage() {
QuickGOQuery fieldQuery = createBasicQuery();
int pageSize = 10;
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.setPage(createFirstCursorPage(pageSize))
.build();
SolrQuery query = converter.convert(request);
assertThat(query.get(CursorMarkParams.CURSOR_MARK_PARAM), is(CursorPage.FIRST_CURSOR));
assertThat(query.getRows(), is(pageSize));
assertThat(query.getStart(), is(nullValue()));
}
@Test
public void convertQueryRequestWithCursorPosition() {
QuickGOQuery fieldQuery = createBasicQuery();
int pageSize = 10;
String cursor = "fakeCursor";
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.setPage(createCursorPage(cursor, pageSize))
.build();
SolrQuery query = converter.convert(request);
assertThat(query.get(CursorMarkParams.CURSOR_MARK_PARAM), is(cursor));
assertThat(query.getRows(), is(pageSize));
assertThat(query.getStart(), is(nullValue()));
}
@Test
public void convertQueryRequestWithZeroSortCriteria() {
QuickGOQuery fieldQuery = createBasicQuery();
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getSorts(), hasSize(0));
}
@Test
public void convertQueryRequestWithSortCriterion() {
QuickGOQuery fieldQuery = createBasicQuery();
String sortField = "field";
SortCriterion.SortOrder sortOrder = SortCriterion.SortOrder.ASC;
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.addSortCriterion(sortField, sortOrder)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getSorts(), hasSize(1));
checkSortCriterion(query.getSorts().get(0), sortField, sortOrder);
}
@Test
public void convertQueryRequestWithSortCriteria() {
QuickGOQuery fieldQuery = createBasicQuery();
String sortField0 = "field0";
SortCriterion.SortOrder sortOrder0 = SortCriterion.SortOrder.ASC;
String sortField1 = "fiel1";
SortCriterion.SortOrder sortOrder1 = SortCriterion.SortOrder.DESC;
String sortField2 = "field2";
SortCriterion.SortOrder sortOrder2 = SortCriterion.SortOrder.ASC;
QueryRequest request = new QueryRequest
.Builder(fieldQuery, COLLECTION)
.addSortCriterion(sortField0, sortOrder0)
.addSortCriterion(sortField1, sortOrder1)
.addSortCriterion(sortField2, sortOrder2)
.build();
SolrQuery query = converter.convert(request);
assertThat(query.getSorts(), hasSize(3));
checkSortCriterion(query.getSorts().get(0), sortField0, sortOrder0);
checkSortCriterion(query.getSorts().get(1), sortField1, sortOrder1);
checkSortCriterion(query.getSorts().get(2), sortField2, sortOrder2);
}
private void checkSortCriterion(
SolrQuery.SortClause sortClause,
String sortField,
SortCriterion.SortOrder sortOrder) {
assertThat(sortClause.getItem(), is(sortField));
switch (sortClause.getOrder()) {
case desc:
assertThat(sortOrder, is(SortCriterion.SortOrder.DESC));
break;
case asc:
assertThat(sortOrder, is(SortCriterion.SortOrder.ASC));
break;
default:
throw new IllegalStateException("Could not verify sort criterion");
}
}
private QuickGOQuery createBasicQuery() {
String field = "field1";
String value = "value1";
return QuickGOQuery.createQuery(field, value);
}
private String buildFieldQuery(String field, String value) {
return "(" + field + SolrQueryConverter.SOLR_FIELD_SEPARATOR + value + ")";
}
}
| apache-2.0 |
livingvirus/jphp | jphp-zend-ext/src/main/tests/resources/zend/traits/property003.php | 514 | --TEST--
Conflicting properties with different visibility modifiers should result in a fatal error, since this indicates that the code is incompatible.
--FILE--
<?php
error_reporting(E_ALL);
trait THello1 {
public $hello;
}
trait THello2 {
private $hello;
}
class TraitsTest {
use THello1;
use THello2;
}
$t = new TraitsTest;
$t->hello = "foo";
?>
--EXPECTF--
Fatal error: 'THello1' and 'THello2' define the same property ($hello) in the composition of TraitsTest in %s on line %d, position %d | apache-2.0 |
danang-id/infest-operator | lib/Apache_Commons_IO/src/test/java/org/apache/commons/io/output/NullWriterTest.java | 1444 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.io.output;
import org.junit.Test;
/**
* Really not a lot to do here, but checking that no
* Exceptions are thrown.
*
* @version $Id: NullWriterTest.java 1718944 2015-12-09 19:50:30Z krosenvold $
*/
public class NullWriterTest {
@Test
public void testNull() {
final char[] chars = new char[] {'A', 'B', 'C'};
final NullWriter writer = new NullWriter();
writer.write(1);
writer.write(chars);
writer.write(chars, 1, 1);
writer.write("some string");
writer.write("some string", 2, 2);
writer.flush();
writer.close();
}
}
| apache-2.0 |
ReactiveX/RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableFromSingleTest.java | 2426 | /*
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.rxjava3.internal.operators.flowable;
import static org.junit.Assert.*;
import org.junit.Test;
import io.reactivex.rxjava3.core.*;
import io.reactivex.rxjava3.exceptions.TestException;
import io.reactivex.rxjava3.operators.QueueFuseable;
import io.reactivex.rxjava3.subjects.SingleSubject;
import io.reactivex.rxjava3.subscribers.TestSubscriber;
import io.reactivex.rxjava3.testsupport.TestSubscriberEx;
public class FlowableFromSingleTest extends RxJavaTest {
@Test
public void success() {
Flowable.fromSingle(Single.just(1).hide())
.test()
.assertResult(1);
}
@Test
public void error() {
Flowable.fromSingle(Single.error(new TestException()).hide())
.test()
.assertFailure(TestException.class);
}
@Test
public void cancelComposes() {
SingleSubject<Integer> ms = SingleSubject.create();
TestSubscriber<Integer> ts = Flowable.fromSingle(ms)
.test();
ts.assertEmpty();
assertTrue(ms.hasObservers());
ts.cancel();
assertFalse(ms.hasObservers());
}
@Test
public void asyncFusion() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
ts.setInitialFusionMode(QueueFuseable.ASYNC);
Flowable.fromSingle(Single.just(1))
.subscribe(ts);
ts
.assertFuseable()
.assertFusionMode(QueueFuseable.ASYNC)
.assertResult(1);
}
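// Fusion contract illustrated by the surrounding tests: the value is only delivered
// later via the SingleObserver callback, so fromSingle offers ASYNC fusion (asserted
// above) while a SYNC fusion request is downgraded to NONE (asserted below).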
@Test
public void syncFusionRejected() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<>();
ts.setInitialFusionMode(QueueFuseable.SYNC);
Flowable.fromSingle(Single.just(1))
.subscribe(ts);
ts
.assertFuseable()
.assertFusionMode(QueueFuseable.NONE)
.assertResult(1);
}
}
| apache-2.0 |
medit74/DeepLearning | MyPythonDeepLearning/Training/mydifferentiation.py | 1462 | '''
Created on 2017. 4. 11.
@author: Byoungho Kang
'''
import numpy as np
import matplotlib.pyplot as plt
def numericalDiff(f, x):
h = 1e-4 # 0.0001
return (f(x+h)-f(x-h))/(2*h)
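# numericalDiff above uses the central (symmetric) difference quotient
# (f(x+h) - f(x-h)) / (2*h); its truncation error is O(h**2), whereas the
# one-sided quotient (f(x+h) - f(x)) / h is only O(h) accurate.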
def sampleFunc1(x):
return 0.01*x**2 + 0.1*x
def numeiralGradient(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x) # create an array with the same shape as x
for idx in range(x.size):
tmpVal = x[idx]
x[idx] = tmpVal + h
print(idx, x)
fxh1 = f(x)
x[idx] = tmpVal - h
print(idx, x)
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmpVal # restore the original value
return grad
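# Example check: for sampleFunc4(x) = x[0]**2 + x[1]**2 the analytic gradient is
# (2*x[0], 2*x[1]), so numeiralGradient(sampleFunc4, np.array([3.0, 4.0])) should
# return approximately [6. 8.], as exercised by the print calls at the bottom of this file.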
'''
f(x) = x[0]**2 + x[1]**2
Helper functions for computing the partial derivatives at x[0] = 3, x[1] = 4
'''
def sampleFunc2(x):
return x**2 + 4**2
def sampleFunc3(x):
return 3**2 + x**2
'''
The function f(x0, x1) = x0**2 + x1**2
'''
def sampleFunc4(x):
return x[0]**2 + x[1]**2
x = np.arange(0, 20, 0.1)
y = sampleFunc1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
plt.plot(x, y)
plt.show()
print(numericalDiff(sampleFunc1, 5))
print(numericalDiff(sampleFunc1, 10))
print(numericalDiff(sampleFunc2, 3.0))
print(numericalDiff(sampleFunc3, 4.0))
print(numeiralGradient(sampleFunc4, np.array([3.0, 4.0])))
print(numeiralGradient(sampleFunc4, np.array([0.0, 2.0])))
print(numeiralGradient(sampleFunc4, np.array([3.0, 0.0]))) | apache-2.0 |
ashigeru/asakusafw-compiler | vanilla/runtime/bootstrap/src/main/java/com/asakusafw/vanilla/bootstrap/Arguments.java | 1913 | /**
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.vanilla.bootstrap;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
* Represents program arguments.
* @since 0.5.0
*/
public class Arguments {
private final List<String> entries = new ArrayList<>();
/**
* Adds an entry.
* @param entry the entry
* @return this
*/
public Arguments add(String entry) {
entries.add(entry);
return this;
}
/**
* Adds a pair of entries.
* @param e1 the first entry
* @param e2 the second entry
* @return this
*/
public Arguments add(String e1, Object e2) {
entries.add(e1);
entries.add(String.valueOf(e2));
return this;
}
/**
* Adds a series of entries.
* @param values the entries
* @return this
*/
public Arguments add(Iterable<String> values) {
values.forEach(entries::add);
return this;
}
/**
* Returns the entries as a string array.
* @return the entries
*/
public String[] toArray() {
return entries.toArray(new String[entries.size()]);
}
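// Usage sketch (illustrative only; the argument values are hypothetical):
//
// String[] argv = new Arguments()
// .add("--verbose")
// .add("--threads", 4)
// .add(java.util.Arrays.asList("a", "b"))
// .toArray(); // -> {"--verbose", "--threads", "4", "a", "b"}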
@Override
public String toString() {
return entries.stream()
.collect(Collectors.joining(", ", "Arguments(", ")"));
}
}
| apache-2.0 |
nchambers/schemas | src/main/java/nate/storycloze/StoryClozeTest.java | 1632 | package nate.storycloze;
import java.util.List;
import java.util.ArrayList;
import nate.reading.ProcessedDocument;
/**
* Holds a list of ordered sentences in a story, but also
* a list of possible endings for the story. The test is to guess
* which one of the endings is the correct ending.
*/
public class StoryClozeTest {
private List<String> rawsents;
private List<String> possibleEndings;
private int correctEnding = 0; // index in the possibleEndings list of the correct answer
public ProcessedDocument option0;
public ProcessedDocument option1;
public StoryClozeTest(ProcessedDocument doc0, ProcessedDocument doc1) {
if( doc0 != null && doc1 != null ) {
option0 = doc0;
option1 = doc1;
if( doc0.storyname.endsWith("correct") && doc1.storyname.endsWith("incorrect") )
correctEnding = 0;
else
correctEnding = 1;
}
else
System.err.println("ERROR in StoryClozeTest, null docs.");
}
/**
* Takes four story sentences, a list of possible endings, and the correct ending's index in that list.
*/
public StoryClozeTest(List<String> sents, List<String> endings, int correctEnding) {
this.rawsents = new ArrayList<>(sents);
this.possibleEndings = new ArrayList<>(endings);
this.correctEnding = correctEnding;
}
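// Usage sketch (hypothetical data, not taken from any corpus):
//
// StoryClozeTest test = new StoryClozeTest(
// Arrays.asList("s1", "s2", "s3", "s4"), // the four ordered story sentences
// Arrays.asList("ending A", "ending B"), // candidate endings
// 0); // index of the correct ending
// test.isCorrect(0); // -> true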
public List<String> getStorySentences() {
return rawsents;
}
public int numEndings() {
return possibleEndings.size();
}
public String getEnding(int index) {
return possibleEndings.get(index);
}
public boolean isCorrect(int guessIndex) {
return guessIndex == correctEnding;
}
}
| apache-2.0 |
nrogoff/EnterpriseAppSettings | src/dotNET/hms.entappsettings.webapi/hms.entappsettings.contracts/AppSettingGroupDTO.cs | 1243 | // Copyright (c) 2017 Hard Medium Soft Ltd.
// All Rights Reserved.
//
// Author: Nicholas Rogoff
// Created: 2017 - 01 - 30
//
// Project: hms.entappsettings.contracts
// Filename: AppSettingGroupDTO.cs
using System;
namespace hms.entappsettings.contracts
{
/// <summary>
/// App Setting Group
/// </summary>
public class AppSettingGroupDTO
{
/// <summary>
/// Unique App Setting Group Id
/// </summary>
public int? AppSettingGroupId { get; set; }
/// <summary>
/// Parent Group Id
/// </summary>
public int? ParentGroupId { get; set; }
/// <summary>
/// Group name
/// </summary>
public string Group { get; set; }
/// <summary>
/// Group Description
/// </summary>
public string Description { get; set; }
/// <summary>
/// Group Path
/// </summary>
public string GroupPath { get; set; }
/// <summary>
/// Date last modified
/// </summary>
public DateTime ModifiedDate { get; set; }
/// <summary>
/// Last modified by
/// </summary>
public string ModifiedBy { get; set; }
}
} | apache-2.0 |
ferminho/ludum39 | core/src/com/alienshots/ludum/GameEntitiesFactory.java | 6881 | package com.alienshots.ludum;
import com.alienshots.ludum.asset.texture.GameScreenAtlas;
import com.alienshots.ludum.component.*;
import com.badlogic.ashley.core.Component;
import com.badlogic.ashley.core.Entity;
import static com.alienshots.ludum.asset.texture.GameScreenAtlas.AtlasCoordinates;
import static com.alienshots.ludum.asset.texture.GameScreenAtlas.VerticalPosition;
public class GameEntitiesFactory {
public static final GameEntitiesFactory instance = new GameEntitiesFactory();
public Entity createWorld() {
Entity world = new Entity();
world.add(new WorldComponent());
world.add(new WorldChargeComponent());
world.add(new SawDirectionComponent(SawDirectionComponent.Direction.LEFT));
world.add(new CrateDirectionComponent(CrateDirectionComponent.Direction.RIGHT));
return world;
}
public Entity createChargeIndicator(Entity world) {
Entity chargeIndicator = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(0, 10, VerticalPosition.LOW);
chargeIndicator.add(new ChargeIndicatorComponent());
chargeIndicator.add(world.getComponent(WorldChargeComponent.class));
chargeIndicator.add(new DisplayComponent(true));
chargeIndicator.add(buildPositionComponent(ChargeIndicatorComponent.class, initialCoords));
return chargeIndicator;
}
public Entity createLever() {
Entity lever = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(4, 1, VerticalPosition.LOW);
lever.add(new LeverComponent());
lever.add(new LeverStateComponent(true));
lever.add(new DisplayComponent(true));
lever.add(buildPositionComponent(LeverComponent.class, initialCoords));
return lever;
}
public Entity createGenerator(Entity lever, Entity world) {
Entity generator = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(4, 1, VerticalPosition.LOW);
generator.add(new GeneratorComponent());
generator.add(new GeneratorLevelComponent());
generator.add(world.getComponent(WorldChargeComponent.class));
generator.add(new DisplayComponent(false));
generator.add(lever.getComponent(LeverStateComponent.class));
generator.add(buildPositionComponent(GeneratorComponent.class, initialCoords));
return generator;
}
public Entity createPlayer(Entity generator, Entity lever, Entity world) {
Entity player = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(1, 1, VerticalPosition.LOW);
player.add(new PlayerComponent());
player.add(new CollisionComponent());
player.add(new DisplayComponent(true));
player.add(buildPositionComponent(PlayerComponent.class, initialCoords));
player.add(new BatteryItemComponent(true));
player.add(new FlyingBatteryLaunchComponent());
player.add(generator.getComponent(GeneratorLevelComponent.class));
player.add(lever.getComponent(LeverStateComponent.class));
player.add(world.getComponent(SawDirectionComponent.class));
player.add(world.getComponent(CrateDirectionComponent.class));
return player;
}
public Entity createBatteryItemIndicator(Entity player) {
Entity batteryItemIndicator = new Entity();
BatteryItemComponent batteryItemComponent = player.getComponent(BatteryItemComponent.class);
AtlasCoordinates initialCoords = new AtlasCoordinates(0, 1, VerticalPosition.LOW);
batteryItemIndicator.add(batteryItemComponent);
batteryItemIndicator.add(new DisplayComponent(true));
batteryItemIndicator.add(buildPositionComponent(BatteryItemComponent.class, initialCoords));
return batteryItemIndicator;
}
public Entity createDecoSpark(Entity generator) {
Entity decoSpark = new Entity();
GeneratorLevelComponent generatorLevelComponent = generator.getComponent(GeneratorLevelComponent.class);
AtlasCoordinates initialCoords = new AtlasCoordinates(4, 1, VerticalPosition.LOW);
decoSpark.add(generatorLevelComponent);
decoSpark.add(new DecoSparkComponent());
decoSpark.add(new DisplayComponent(false));
decoSpark.add(buildPositionComponent(DecoSparkComponent.class, initialCoords));
return decoSpark;
}
public Entity createFlyingBattery(Entity player, Entity generator) {
Entity flyingBattery = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(4, 1, VerticalPosition.LOW);
flyingBattery.add(new FlyingBatteryComponent());
flyingBattery.add(player.getComponent(FlyingBatteryLaunchComponent.class));
flyingBattery.add(generator.getComponent(GeneratorLevelComponent.class));
flyingBattery.add(new DisplayComponent(true));
flyingBattery.add(buildPositionComponent(FlyingBatteryComponent.class, initialCoords));
return flyingBattery;
}
public Entity createSaw(Entity world) {
Entity saw = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(1, 13, VerticalPosition.LOW);
saw.add(new SawComponent());
saw.add(new HazardComponent());
saw.add(new CollisionComponent());
saw.add(new DisplayComponent(false));
saw.add(buildPositionComponent(SawComponent.class, initialCoords));
saw.add(world.getComponent(SawDirectionComponent.class));
return saw;
}
public Entity createDrop(int column, int delayInUpdates) {
Entity drop = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(2, column, VerticalPosition.HIGH);
drop.add(new DropComponent(Time.newTimer(delayInUpdates)));
drop.add(new HazardComponent());
drop.add(new CollisionComponent());
drop.add(new DisplayComponent(false));
drop.add(buildPositionComponent(DropComponent.class, initialCoords));
return drop;
}
public Entity createCrate(Entity world) {
Entity crate = new Entity();
AtlasCoordinates initialCoords = new AtlasCoordinates(3, 1, VerticalPosition.LOW);
crate.add(new CrateComponent());
crate.add(new HazardComponent());
crate.add(new CollisionComponent());
crate.add(new DisplayComponent(false));
crate.add(buildPositionComponent(CrateComponent.class, initialCoords));
crate.add(world.getComponent(CrateDirectionComponent.class));
return crate;
}
private PositionComponent buildPositionComponent(Class<? extends Component> componentClass,
AtlasCoordinates initialCoords) {
return new PositionComponent(initialCoords, GameScreenAtlas.instance.getScreenTexture(componentClass, initialCoords));
}
}
| apache-2.0 |
project-asap/IReS-Platform | asap-platform/asap-server/asapLibrary/operators/kmeans_mahout/kmeans_mahout.lua | 1937 | -- The command to execute.
SHELL_COMMAND = "./kmeans_mahout.sh"
-- The number of containers to run it on.
CONTAINER_INSTANCES = 1
-- The location of the jar file containing kitten's default ApplicationMaster
-- implementation.
MASTER_JAR_LOCATION = "kitten-master-0.2.0-jar-with-dependencies.jar"
-- CLASSPATH setup.
CP = "/opt/hadoop-2.7.0/etc/hadoop:/opt/hadoop-2.7.0/etc/hadoop:/opt/hadoop-2.7.0/etc/hadoop:/opt/hadoop-2.7.0/share/hadoop/common/lib/*:/opt/hadoop-2.7.0/share/hadoop/common/*:/opt/hadoop-2.7.0/share/hadoop/hdfs:/opt/hadoop-2.7.0/share/hadoop/hdfs/lib/*:/opt/hadoop-2.7.0/share/hadoop/hdfs/*:/opt/hadoop-2.7.0/share/hadoop/yarn/lib/*:/opt/hadoop-2.7.0/share/hadoop/yarn/*:/opt/hadoop-2.7.0/share/hadoop/mapreduce/lib/*:/opt/hadoop-2.7.0/share/hadoop/mapreduce/*:/contrib/capacity-scheduler/*.jar:/opt/hadoop-2.7.0/share/hadoop/yarn/*:/opt/hadoop-2.7.0/share/hadoop/yarn/lib/*"
-- Resource and environment setup.
base_resources = {
["master.jar"] = { file = MASTER_JAR_LOCATION }
}
base_env = {
CLASSPATH = table.concat({"${CLASSPATH}", CP, "./master.jar", "./kmeans_mahout.sh"}, ":"),
}
-- The actual distributed shell job.
operator = yarn {
name = "Execute Java Operator",
timeout = -1,
memory = 512,
cores = 1,
master = {
env = base_env,
resources = base_resources,
command = {
base = "${JAVA_HOME}/bin/java -Xms64m -Xmx128m com.cloudera.kitten.appmaster.ApplicationMaster",
args = { "-conf job.xml" },
}
},
container = {
instances = CONTAINER_INSTANCES,
env = base_env,
resources = {
["kmeans_mahout.sh"] = {
file = "/opt/asap-server/asapLibrary/operators/kmeans_mahout/kmeans_mahout.sh",
type = "file", -- other value: 'archive'
visibility = "application", -- other values: 'private', 'public'
}
},
command = {
base = SHELL_COMMAND,
-- args = { "1> <LOG_DIR>/stdout", "2> <LOG_DIR>/stderr" },
}
}
}
| apache-2.0 |
ahbonte/AHB-hh-app | Resources/lib/location.js | 3060 | /**
* FoodCare City
* Author: FoodCare Inc. Copyright 2013. All Rights Reserved
*/
(function() {
var showAlert = true;
var datastore = require('/lib/datastore').datastore;
exports.getLocation = function(callback) {
Ti.Geolocation.preferredProvider = "gps";
Ti.Geolocation.purpose = "Find nearby restaurants";
datastore.setPreference('haslocation', false);
if (showAlert && Titanium.Geolocation.locationServicesEnabled === false) {
showAlert = false;
datastore.setPreference('haslocation', false);
alert('Location services disabled. Using default.');
return false;
}
if (showAlert && Titanium.Platform.name != 'android') {
showAlert = false;
datastore.setPreference('haslocation', false);
var authorization = Titanium.Geolocation.locationServicesAuthorization;
if (authorization == Titanium.Geolocation.AUTHORIZATION_DENIED) {
datastore.setPreference('haslocation', false);
Ti.UI.createAlertDialog({
title : 'FoodCare City',
message : 'You have disallowed geolocation services, using zipcode instead.'
}).show();
return false;
} else if (showAlert && authorization == Titanium.Geolocation.AUTHORIZATION_RESTRICTED) {
showAlert = false;
datastore.setPreference('haslocation', false);
Ti.UI.createAlertDialog({
title : 'FoodCare City',
message : 'Your system has restricted use of geolocation services, using default zipcode instead.'
}).show();
return false;
}
}
// SET ACCURACY - THE FOLLOWING VALUES ARE SUPPORTED
//
// Titanium.Geolocation.ACCURACY_BEST
// Titanium.Geolocation.ACCURACY_NEAREST_TEN_METERS
// Titanium.Geolocation.ACCURACY_HUNDRED_METERS
// Titanium.Geolocation.ACCURACY_KILOMETER
// Titanium.Geolocation.ACCURACY_THREE_KILOMETERS
Titanium.Geolocation.accuracy = Titanium.Geolocation.ACCURACY_KILOMETER;
//
// SET DISTANCE FILTER. THIS DICTATES HOW OFTEN AN EVENT FIRES BASED ON THE DISTANCE THE DEVICE MOVES
// THIS VALUE IS IN METERS
//
Titanium.Geolocation.distanceFilter = 1000;
Titanium.Geolocation.getCurrentPosition(function(e) {
if (!e.success || e.error) {
Ti.API.info("GeoLocate Error: " + e.code);
datastore.setPreference('haslocation', false);
return;
}
datastore.setPreference('longitude', e.coords.longitude);
datastore.setPreference('latitude', e.coords.latitude);
datastore.setPreference('haslocation', true);
if (callback) {
callback(e.coords);
}
Ti.App.Logger.print("LOCATION: " + e.coords.longitude + "," + e.coords.latitude);
var longitude = e.coords.longitude;
var latitude = e.coords.latitude;
var altitude = e.coords.altitude;
var heading = e.coords.heading;
var accuracy = e.coords.accuracy;
var speed = e.coords.speed;
var timestamp = e.coords.timestamp;
var altitudeAccuracy = e.coords.altitudeAccuracy;
Ti.API.info('speed ' + speed);
Titanium.API.info('geo - current location: ' + new Date(timestamp) + ' long ' + longitude + ' lat ' + latitude + ' accuracy ' + accuracy);
});
}
})();
| apache-2.0 |
boneman1231/org.apache.felix | trunk/org.osgi.compendium/src/main/java/org/osgi/service/cm/ConfigurationPlugin.java | 5810 | /*
* Copyright (c) OSGi Alliance (2001, 2008). All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.osgi.service.cm;
import java.util.Dictionary;
import org.osgi.framework.ServiceReference;
/**
* A service interface for processing configuration dictionary before the
* update.
*
* <p>
* A bundle registers a <code>ConfigurationPlugin</code> object in order to
* process configuration updates before they reach the Managed Service or
* Managed Service Factory. The Configuration Admin service will detect
* registrations of Configuration Plugin services and must call these services
* every time before it calls the <code>ManagedService</code> or
* <code>ManagedServiceFactory</code>
* <code>updated</code> method. The
* Configuration Plugin service thus has the opportunity to view and modify the
* properties before they are passed to the Managed Service or Managed Service
* Factory.
*
* <p>
* Configuration Plugin (plugin) services have full read/write access to all
* configuration information. Therefore, bundles using this facility should be
* trusted. Access to this facility should be limited with
* <code>ServicePermission[ConfigurationPlugin,REGISTER]</code>.
* Implementations of a Configuration Plugin service should assure that they
* only act on appropriate configurations.
*
* <p>
* The <code>Integer</code> <code>service.cmRanking</code> registration
* property may be specified. Not specifying this registration property, or
* setting it to something other than an <code>Integer</code>, is the same as
* setting it to the <code>Integer</code> zero. The
* <code>service.cmRanking</code> property determines the order in which
* plugins are invoked. Lower ranked plugins are called before higher ranked
* ones. In the event of more than one plugin having the same value of
* <code>service.cmRanking</code>, then the Configuration Admin service
* arbitrarily chooses the order in which they are called.
*
* <p>
* By convention, plugins with <code>service.cmRanking &lt; 0</code> or
* <code>service.cmRanking > 1000</code> should not make modifications to
* the properties.
*
* <p>
* The Configuration Admin service has the right to hide properties from
* plugins, or to ignore some or all the changes that they make. This might be
* done for security reasons. Any such behavior is entirely implementation
* defined.
*
* <p>
* A plugin may optionally specify a <code>cm.target</code> registration
* property whose value is the PID of the Managed Service or Managed Service
* Factory whose configuration updates the plugin is intended to intercept. The
* plugin will then only be called with configuration updates that are targeted
* at the Managed Service or Managed Service Factory with the specified PID.
* Omitting the <code>cm.target</code> registration property means that the
* plugin is called for all configuration updates.
*
* @version $Revision: 5673 $
*/
public interface ConfigurationPlugin {
/**
* A service property to limit the Managed Service or Managed Service
* Factory configuration dictionaries a Configuration Plugin service
* receives.
*
* This property contains a <code>String[]</code> of PIDs. A Configuration
* Admin service must call a Configuration Plugin service only when this
* property is not set, or the target service's PID is listed in this
* property.
*/
public static final String CM_TARGET = "cm.target";
/**
* A service property to specify the order in which plugins are invoked.
*
* This property contains an <code>Integer</code> ranking of the plugin.
* Not specifying this registration property, or setting it to something
* other than an <code>Integer</code>, is the same as setting it to the
* <code>Integer</code> zero. This property determines the order in which
* plugins are invoked. Lower ranked plugins are called before higher ranked
* ones.
*
* @since 1.2
*/
public static final String CM_RANKING = "service.cmRanking";
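// Registration sketch (illustrative only; the PID, ranking and variable names are hypothetical):
//
// Dictionary<String, Object> props = new Hashtable<String, Object>();
// props.put(ConfigurationPlugin.CM_TARGET, new String[] {"com.example.managed.pid"});
// props.put(ConfigurationPlugin.CM_RANKING, Integer.valueOf(100));
// bundleContext.registerService(ConfigurationPlugin.class.getName(), plugin, props);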
/**
* View and possibly modify the a set of configuration properties before
* they are sent to the Managed Service or the Managed Service Factory. The
* Configuration Plugin services are called in increasing order of their
* <code>service.cmRanking</code> property. If this property is undefined
* or is a non- <code>Integer</code> type, 0 is used.
*
* <p>
* This method should not modify the properties unless the
* <code>service.cmRanking</code> of this plugin is in the range
* <code>0 &lt;= service.cmRanking &lt;= 1000</code>.
* <p>
* If this method throws any <code>Exception</code>, the Configuration
* Admin service must catch it and should log it.
*
* @param reference reference to the Managed Service or Managed Service
* Factory
* @param properties The configuration properties. This argument must not
* contain the "service.bundleLocation" property. The value of this
* property may be obtained from the
* <code>Configuration.getBundleLocation</code> method.
*/
public void modifyConfiguration(ServiceReference reference,
Dictionary properties);
}
| apache-2.0 |
lesaint/experimenting-annotation-processing | experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation1/sub1/Class_0781.java | 151 | package fr.javatronic.blog.massive.annotation1.sub1;
import fr.javatronic.blog.processor.Annotation_001;
@Annotation_001
public class Class_0781 {
}
| apache-2.0 |
tendermint/tendermint | crypto/merkle/proof_test.go | 4997 | package merkle
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
const ProofOpDomino = "test:domino"
// Expects given input, produces given output.
// Like the game dominos.
type DominoOp struct {
key string // unexported, may be empty
Input string
Output string
}
func NewDominoOp(key, input, output string) DominoOp {
return DominoOp{
key: key,
Input: input,
Output: output,
}
}
func (dop DominoOp) ProofOp() tmcrypto.ProofOp {
dopb := tmcrypto.DominoOp{
Key: dop.key,
Input: dop.Input,
Output: dop.Output,
}
bz, err := dopb.Marshal()
if err != nil {
panic(err)
}
return tmcrypto.ProofOp{
Type: ProofOpDomino,
Key: []byte(dop.key),
Data: bz,
}
}
func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) {
if len(input) != 1 {
return nil, errors.New("expected input of length 1")
}
if string(input[0]) != dop.Input {
return nil, fmt.Errorf("expected input %v, got %v",
dop.Input, string(input[0]))
}
return [][]byte{[]byte(dop.Output)}, nil
}
func (dop DominoOp) GetKey() []byte {
return []byte(dop.key)
}
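// The ops below form a chain in which each DominoOp's Output feeds the next op's
// Input (INPUT1 -> INPUT2 -> INPUT3 -> INPUT4 -> OUTPUT4); Verify only succeeds when
// the supplied leaf value, key path and expected root value line up with that whole chain.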
//----------------------------------------
func TestProofOperators(t *testing.T) {
var err error
// ProofRuntime setup
// TODO test this somehow.
// ProofOperators setup
op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2")
op2 := NewDominoOp("KEY2", "INPUT2", "INPUT3")
op3 := NewDominoOp("", "INPUT3", "INPUT4")
op4 := NewDominoOp("KEY4", "INPUT4", "OUTPUT4")
// Good
popz := ProofOperators([]ProofOperator{op1, op2, op3, op4})
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.NoError(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1"))
assert.NoError(t, err)
// BAD INPUT
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")})
assert.Error(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG"))
assert.Error(t, err)
// BAD KEY 1
err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD KEY 2
err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD KEY 3
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD KEY 4
err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD KEY 5
err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD OUTPUT 1
err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD OUTPUT 2
err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD POPZ 1
popz = []ProofOperator{op1, op2, op4}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD POPZ 2
popz = []ProofOperator{op4, op3, op2, op1}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
// BAD POPZ 3
popz = []ProofOperator{}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
}
func bz(s string) []byte {
return []byte(s)
}
func TestProofValidateBasic(t *testing.T) {
testCases := []struct {
testName string
malleateProof func(*Proof)
errStr string
}{
{"Good", func(sp *Proof) {}, ""},
{"Negative Total", func(sp *Proof) { sp.Total = -1 }, "negative Total"},
{"Negative Index", func(sp *Proof) { sp.Index = -1 }, "negative Index"},
{"Invalid LeafHash", func(sp *Proof) { sp.LeafHash = make([]byte, 10) },
"expected LeafHash size to be 32, got 10"},
{"Too many Aunts", func(sp *Proof) { sp.Aunts = make([][]byte, MaxAunts+1) },
"expected no more than 100 aunts, got 101"},
{"Invalid Aunt", func(sp *Proof) { sp.Aunts[0] = make([]byte, 10) },
"expected Aunts#0 size to be 32, got 10"},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
_, proofs := ProofsFromByteSlices([][]byte{
[]byte("apple"),
[]byte("watermelon"),
[]byte("kiwi"),
})
tc.malleateProof(proofs[0])
err := proofs[0].ValidateBasic()
if tc.errStr != "" {
assert.Contains(t, err.Error(), tc.errStr)
}
})
}
}
func TestVoteProtobuf(t *testing.T) {
_, proofs := ProofsFromByteSlices([][]byte{
[]byte("apple"),
[]byte("watermelon"),
[]byte("kiwi"),
})
testCases := []struct {
testName string
v1 *Proof
expPass bool
}{
{"empty proof", &Proof{}, false},
{"failure nil", nil, false},
{"success", proofs[0], true},
}
for _, tc := range testCases {
pb := tc.v1.ToProto()
v, err := ProofFromProto(pb)
if tc.expPass {
require.NoError(t, err)
require.Equal(t, tc.v1, v, tc.testName)
} else {
require.Error(t, err)
}
}
}
| apache-2.0 |
lesaint/experimenting-annotation-processing | experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation1/Class_691.java | 145 | package fr.javatronic.blog.massive.annotation1;
import fr.javatronic.blog.processor.Annotation_001;
@Annotation_001
public class Class_691 {
}
| apache-2.0 |
Banno/sbt-plantuml-plugin | src/main/java/net/sourceforge/plantuml/creole/UCreole.java | 1057 | /* ========================================================================
* PlantUML : a free UML diagram generator
* ========================================================================
*
* (C) Copyright 2009-2017, Arnaud Roques
*
* Project Info: http://plantuml.com
*
* This file is part of PlantUML.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* Original Author: Arnaud Roques
*/
package net.sourceforge.plantuml.creole;
import net.sourceforge.plantuml.ugraphic.UShape;
public interface UCreole extends UShape {
}
| apache-2.0 |
deepshig/Financial-Grant-Management-System | include/Access_Grants.php | 3562 | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Access Grants</title>
<!-- CSS -->
<link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Roboto:400,100,300,500">
<link rel="stylesheet" href="assets/bootstrap/css/bootstrap.min.css">
<link rel="stylesheet" href="assets/font-awesome/css/font-awesome.min.css">
<link rel="stylesheet" href="assets/css/form-elements.css">
<link rel="stylesheet" href="assets/css/style.css">
<!-- This is what you need -->
<script src="dist/sweetalert-dev.js"></script>
<link rel="stylesheet" href="dist/sweetalert.css">
<!--.......................-->
<link rel="shortcut icon" href="http://ww2.glance.net/wp-content/uploads/2015/01/glance-for-finance.jpg">
</head>
<body>
<?php
session_start();
if(!isset($_SESSION['role']))
{
?>
<div class="top-content">
<div class="inner-bg">
<div class="container">
<div class="row">
<div class="col-sm-8 col-sm-offset-2 text">
<br><br><br>
<h1>OOPS..!!<p>You need to Login!</p></h1>
<div class="form-box">
<a href="http://localhost/SE/include/Login.php" class="btn btn-info" role="button"><strong><b>Login !</b></strong></a>
</div>
</div>
</div>
</div>
</div>
</div>
<?php
}
else
{
$role = $_SESSION['role'];
if($role != 'Faculty')
{
?>
<div class="top-content">
<div class="inner-bg">
<div class="container">
<div class="row">
<div class="col-sm-8 col-sm-offset-2 text">
<br><br><br>
<h1>OOPS..!!<p>Faculty needs to Login first!</p></h1>
<div class="form-box">
<a href="http://localhost/SE/include/logout.php" class="btn btn-info" role="button"><strong><b>Logout !</b></strong></a>
</div>
</div>
</div>
</div>
</div>
</div>
<?php
}
else
{
require_once 'Db_connect.php';
$db = new Db_connect();
$conn = $db->connect();
if($conn)
{
require_once 'Faculty.php';
$obj = new Faculty($_SESSION['roll']);
$obj->AccessGrants();
}
}
}
?>
<script src="assets/js/jquery-1.11.1.min.js"></script>
<script src="assets/bootstrap/js/bootstrap.min.js"></script>
<script src="assets/js/jquery.backstretch.min.js"></script>
<script src="assets/js/scripts.js"></script>
</div>
</div>
</body>
</html> | apache-2.0 |
RP-Kit/RPKit | bukkit/rpk-locks-bukkit/src/main/java/com/rpkit/locks/bukkit/database/jooq/tables/records/RpkitLockedBlockRecord.java | 4672 | /*
* This file is generated by jOOQ.
*/
package com.rpkit.locks.bukkit.database.jooq.tables.records;
import com.rpkit.locks.bukkit.database.jooq.tables.RpkitLockedBlock;
import org.jooq.Field;
import org.jooq.Record4;
import org.jooq.Row4;
import org.jooq.impl.TableRecordImpl;
/**
* This class is generated by jOOQ.
*/
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class RpkitLockedBlockRecord extends TableRecordImpl<RpkitLockedBlockRecord> implements Record4<String, Integer, Integer, Integer> {
private static final long serialVersionUID = 1L;
/**
* Setter for <code>rpkit_locks.rpkit_locked_block.world</code>.
*/
public void setWorld(String value) {
set(0, value);
}
/**
* Getter for <code>rpkit_locks.rpkit_locked_block.world</code>.
*/
public String getWorld() {
return (String) get(0);
}
/**
* Setter for <code>rpkit_locks.rpkit_locked_block.x</code>.
*/
public void setX(Integer value) {
set(1, value);
}
/**
* Getter for <code>rpkit_locks.rpkit_locked_block.x</code>.
*/
public Integer getX() {
return (Integer) get(1);
}
/**
* Setter for <code>rpkit_locks.rpkit_locked_block.y</code>.
*/
public void setY(Integer value) {
set(2, value);
}
/**
* Getter for <code>rpkit_locks.rpkit_locked_block.y</code>.
*/
public Integer getY() {
return (Integer) get(2);
}
/**
* Setter for <code>rpkit_locks.rpkit_locked_block.z</code>.
*/
public void setZ(Integer value) {
set(3, value);
}
/**
* Getter for <code>rpkit_locks.rpkit_locked_block.z</code>.
*/
public Integer getZ() {
return (Integer) get(3);
}
// -------------------------------------------------------------------------
// Record4 type implementation
// -------------------------------------------------------------------------
@Override
public Row4<String, Integer, Integer, Integer> fieldsRow() {
return (Row4) super.fieldsRow();
}
@Override
public Row4<String, Integer, Integer, Integer> valuesRow() {
return (Row4) super.valuesRow();
}
@Override
public Field<String> field1() {
return RpkitLockedBlock.RPKIT_LOCKED_BLOCK.WORLD;
}
@Override
public Field<Integer> field2() {
return RpkitLockedBlock.RPKIT_LOCKED_BLOCK.X;
}
@Override
public Field<Integer> field3() {
return RpkitLockedBlock.RPKIT_LOCKED_BLOCK.Y;
}
@Override
public Field<Integer> field4() {
return RpkitLockedBlock.RPKIT_LOCKED_BLOCK.Z;
}
@Override
public String component1() {
return getWorld();
}
@Override
public Integer component2() {
return getX();
}
@Override
public Integer component3() {
return getY();
}
@Override
public Integer component4() {
return getZ();
}
@Override
public String value1() {
return getWorld();
}
@Override
public Integer value2() {
return getX();
}
@Override
public Integer value3() {
return getY();
}
@Override
public Integer value4() {
return getZ();
}
@Override
public RpkitLockedBlockRecord value1(String value) {
setWorld(value);
return this;
}
@Override
public RpkitLockedBlockRecord value2(Integer value) {
setX(value);
return this;
}
@Override
public RpkitLockedBlockRecord value3(Integer value) {
setY(value);
return this;
}
@Override
public RpkitLockedBlockRecord value4(Integer value) {
setZ(value);
return this;
}
@Override
public RpkitLockedBlockRecord values(String value1, Integer value2, Integer value3, Integer value4) {
value1(value1);
value2(value2);
value3(value3);
value4(value4);
return this;
}
// -------------------------------------------------------------------------
// Constructors
// -------------------------------------------------------------------------
/**
* Create a detached RpkitLockedBlockRecord
*/
public RpkitLockedBlockRecord() {
super(RpkitLockedBlock.RPKIT_LOCKED_BLOCK);
}
/**
* Create a detached, initialised RpkitLockedBlockRecord
*/
public RpkitLockedBlockRecord(String world, Integer x, Integer y, Integer z) {
super(RpkitLockedBlock.RPKIT_LOCKED_BLOCK);
setWorld(world);
setX(x);
setY(y);
setZ(z);
}
}
| apache-2.0 |
swisscom/cleanerversion | versions/settings.py | 2205 | import importlib
from django import VERSION
from django.conf import settings as django_settings
_cache = {}
class VersionsSettings(object):
"""
Gets a setting from django.conf.settings if set, otherwise from the
defaults defined in this class.
A magic accessor is used instead of just defining module-level variables
because Django doesn't like attributes of the django.conf.settings object
to be accessed in module scope.
"""
defaults = {
'VERSIONED_DELETE_COLLECTOR': 'versions.deletion.VersionedCollector',
'VERSIONS_USE_UUIDFIELD': VERSION[:3] >= (1, 8, 3),
}
def __getattr__(self, name):
try:
return getattr(django_settings, name)
except AttributeError:
try:
return self.defaults[name]
except KeyError:
raise AttributeError(
"{} object has no attribute {}".format(self.__class__,
name))
settings = VersionsSettings()
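# Example: settings.VERSIONED_DELETE_COLLECTOR returns the value from the Django
# settings module when it is defined there, and otherwise falls back to the default
# 'versions.deletion.VersionedCollector' declared in VersionsSettings.defaults.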
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
Based on the method of the same name in Django Rest Framework.
"""
try:
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
raise ImportError("Could not import '{}' for CleanerVersion "
"setting '{}'. {}: {}.".format((val,
setting_name,
e.__class__.__name__,
e)))
def get_versioned_delete_collector_class():
"""
Gets the class to use for deletion collection.
:return: class
"""
key = 'VERSIONED_DELETE_COLLECTOR'
try:
cls = _cache[key]
except KeyError:
collector_class_string = getattr(settings, key)
cls = import_from_string(collector_class_string, key)
_cache[key] = cls
return cls
| apache-2.0 |
4-20ma/cookbook-docker-integration-test | tasks/publish.rb | 1117 | # encoding: utf-8
#
# Task:: publish
#
# Author:: Doc Walker (<[email protected]>)
#
# Copyright 2016-2017, Doc Walker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'rake'
#-------------------------------------------------------------- publish/tagger
# Configure path to stove gem config file (add to ~/.bash_profile):
# export STOVE_CONFIG=$HOME/.chef/stove.json
# Update stove gem config file
# $ stove login --username USERNAME --key ~/.chef/USERNAME.pem
begin
require 'stove/rake_task'
Stove::RakeTask.new
rescue LoadError, NameError
STDOUT.puts '[WARN] Stove::RakeTask not loaded'.yellow
end
| apache-2.0 |
korpling/ANNIS | src/main/java/org/corpus_tools/annis/gui/exporter/CSVExporter.java | 10133 | /*
* Copyright 2016-2017 Referenzkorpus Mittelniederdeutsch/Niederrheinisch (1200-1650).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.corpus_tools.annis.gui.exporter;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.vaadin.ui.UI;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.corpus_tools.annis.gui.Helper;
import org.corpus_tools.annis.gui.objects.AnnisConstants;
import org.corpus_tools.salt.common.SDocumentGraph;
import org.corpus_tools.salt.common.SToken;
import org.corpus_tools.salt.core.SAnnotation;
import org.corpus_tools.salt.core.SMetaAnnotation;
import org.corpus_tools.salt.core.SNode;
import org.springframework.stereotype.Component;
/**
* A csv-exporter that will export the text of the underlying token instead of
* the base text. This is useful for getting text spans where the normal
* csv-exporter doesn't work since there are multiple speakers or
* normalizations.
*
* @author Fabian Barteld
*/
@Component
public class CSVExporter extends BaseMatrixExporter {
/**
*
*/
private static final long serialVersionUID = -8993537374171042122L;
private Set<String> metakeys;
private SortedMap<Integer, TreeSet<String>> annotationsForMatchedNodes;
/**
* Takes a match and stores annotation names to construct the header in
* {@link #outputText(de.hu_berlin.german.korpling.saltnpepper.salt.saltCommon.sDocumentStructure.SDocumentGraph, boolean, int, java.io.Writer) }
*
* @param graph
* @param args
* @param matchNumber
* @param nodeCount
*
* @throws java.io.IOException
*
*/
@Override
public void createAdjacencyMatrix(SDocumentGraph graph, Map<String, String> args, int matchNumber, int nodeCount)
throws IOException, IllegalArgumentException {
// first match
if (matchNumber == 0) {
// get list of metakeys to export
metakeys = new LinkedHashSet<>();
if (args.containsKey("metakeys")) {
metakeys.addAll(Arrays.asList(args.get("metakeys").split(",")));
}
// initialize list of annotations for the matched nodes
annotationsForMatchedNodes = new TreeMap<>();
}
for (SNode node : this.getMatchedNodes(graph)) {
int node_id = node.getFeature(AnnisConstants.ANNIS_NS, AnnisConstants.FEAT_MATCHEDNODE).getValue_SNUMERIC()
.intValue();
if (!annotationsForMatchedNodes.containsKey(node_id))
annotationsForMatchedNodes.put(node_id, new TreeSet<String>());
List<SAnnotation> annots = new ArrayList<>(node.getAnnotations());
Set<String> annoNames = annotationsForMatchedNodes.get(node_id);
for (SAnnotation annot : annots) {
annoNames.add(annot.getNamespace() + "::" + annot.getName());
}
}
}
@Override
public String getFileEnding() {
return "csv";
}
@Override
public String getHelpMessage() {
return "The CSV Exporter exports only the "
+ "values of the elements searched for by the user, ignoring the context " + "around search results. "
+ "The values for all annotations of each of the "
+ "found nodes is given in a comma-separated table (CSV). <br/><br/>" + "Parameters: <br/>"
+ "<em>metakeys</em> - comma seperated list of all meta data to include in the result (e.g. "
+ "<code>metakeys=title,documentname</code>)<br/>"
+ "<em>segmentation</em> - For corpora with multiple segmentation (e.g. dialog corpora) use this segmentation for getting the span of the matched nodes.";
}
/**
* Takes a match and returns the matched nodes.
*
* @param graph
*
* @throws java.io.IOException
*
*/
private Set<SNode> getMatchedNodes(SDocumentGraph graph) {
Set<SNode> matchedNodes = new HashSet<>();
for (SNode node : graph.getNodes()) {
if (node.getFeature(AnnisConstants.ANNIS_NS, AnnisConstants.FEAT_MATCHEDNODE) != null)
matchedNodes.add(node);
}
return matchedNodes;
}
@Override
public void getOrderedMatchNumbers() {
// TODO
}
@Override
public boolean isAlignable() {
return false;
}
@Override
public boolean needsContext() {
return false;
}
private List<String> createHeaderLine() {
List<String> headerLine = new ArrayList<>();
for (Map.Entry<Integer, TreeSet<String>> match : annotationsForMatchedNodes.entrySet()) {
int node_id = match.getKey();
headerLine.add(String.valueOf(node_id) + "_id");
headerLine.add(String.valueOf(node_id) + "_span");
for (String annoName : match.getValue()) {
headerLine.add(String.valueOf(node_id) + "_anno_" + annoName);
}
}
for (String key : metakeys) {
headerLine.add("meta_" + key);
}
return headerLine;
}
private SortedMap<Integer, String> createLineForNodes(SDocumentGraph graph,
Map<String, String> args) {
SortedMap<Integer, String> contentLine = new TreeMap<>();
for (SNode node : this.getMatchedNodes(graph)) {
List<String> nodeLine = new ArrayList<>();
nodeLine.add(node.getId());
// export spanned text
String span = getSpannedText(graph, node, args.get(ExportHelper.SEGMENTATION_KEY));
if (span != null)
nodeLine.add(span);
else
nodeLine.add("");
// export annotations
int node_id = node.getFeature(AnnisConstants.ANNIS_NS, AnnisConstants.FEAT_MATCHEDNODE).getValue_SNUMERIC()
.intValue();
for (String annoName : annotationsForMatchedNodes.get(node_id)) {
SAnnotation anno = node.getAnnotation(annoName);
if (anno != null) {
nodeLine.add(anno.getValue_STEXT());
} else
nodeLine.add("'NULL'");
}
// add everything to line
contentLine.put(node_id, StringUtils.join(nodeLine, "\t"));
}
return contentLine;
}
private void appendMetaLine(SDocumentGraph graph, Writer out, UI ui) throws IOException {
// TODO is this the best way to get the corpus name?
String corpusName = Helper.getCorpusPath(graph.getDocument().getId()).get(0);
// TODO cache the metadata
List<SMetaAnnotation> metaAnnos = Helper.getMetaData(corpusName, Optional.of(graph.getDocument().getName()), ui);
Multimap<String, SMetaAnnotation> metaAnnosByName = Multimaps.index(metaAnnos, SMetaAnnotation::getName);
for (String metaName : metakeys) {
Collection<SMetaAnnotation> annos = metaAnnosByName.get(metaName);
if (annos == null || annos.isEmpty()) {
out.append("\t");
} else
out.append(
"\t" + annos.stream().map(SMetaAnnotation::getValue_STEXT)
.collect(Collectors.joining(", ")));
}
}
/**
* Takes a match and outputs a csv-line
*
* @param graph
* @param args
* @param alignmc
* @param matchNumber
* @param out
*
* @throws java.io.IOException
*
*/
@Override
public void outputText(SDocumentGraph graph, Map<String, String> args, boolean alignmc,
int matchNumber, Writer out, UI ui)
throws IOException, IllegalArgumentException {
// first match
if (matchNumber == 0) {
// output header
out.append(StringUtils.join(createHeaderLine(), "\t"));
out.append("\n");
}
// output nodes in the order of the matches
SortedMap<Integer, String> contentLine = createLineForNodes(graph, args);
out.append(StringUtils.join(contentLine.values(), "\t"));
// export Metadata
if (!metakeys.isEmpty()) {
appendMetaLine(graph, out, ui);
}
out.append("\n");
}
private String getSpannedText(SDocumentGraph graph, SNode node, String segmentation) {
if(segmentation == null || segmentation.isEmpty()) {
return graph.getText(node);
} else {
// Filter out the nodes that cover the same range as our matched node
List<SNode> segmentationNodes = Helper.getSortedSegmentationNodes(segmentation, graph).stream().filter(segNode -> {
if(segNode.equals(node)) {
return true;
} else {
// Get covered token of both nodes and check if they overlap
Set<SToken> coveredToken = new HashSet<>(graph.getOverlappedTokens(node));
return graph.getOverlappedTokens(segNode).parallelStream()
.anyMatch(coveredToken::contains);
}
}).collect(Collectors.toList());
return segmentationNodes.stream().map(n -> n.getFeature("annis::tok"))
.filter(Objects::nonNull)
.map(a -> a.getValue().toString()).collect(Collectors.joining(" "));
}
}
}
| apache-2.0 |
planet42/Laika | pdf/src/main/scala/laika/render/pdf/PDFNavigation.scala | 1828 | /*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render.pdf
import laika.ast._
import laika.io.model.RenderedTreeRoot
/** Prepares a document tree for the PDF rendering step by inserting PDF bookmark elements.
*
* @author Jens Halm
*/
object PDFNavigation {
/** Generates bookmarks for the structure of the DocumentTree.
*
* Individual bookmarks can stem from tree or subtree titles, document titles or document sections,
* depending on which recursion depth is configured.
* The configuration key for setting the recursion depth is `pdf.bookmarks.depth`.
*
* @param result the rendered result tree to generate bookmarks for
* @param depth the recursion depth through trees, documents and sections
* @return a fragment map containing the generated bookmarks
*/
def generateBookmarks[F[_]] (result: RenderedTreeRoot[F], depth: Option[Int]): Map[String, Element] = if (depth.contains(0)) Map() else {
val context = NavigationBuilderContext(
maxLevels = depth.getOrElse(Int.MaxValue),
currentLevel = 0,
itemStyles = Set("bookmark")
)
val toc = result.tree.asNavigationItem(context).content
Map("bookmarks" -> NavigationList(toc, Style.bookmark))
}
}
| apache-2.0 |
fmendezh/checklistbank | checklistbank-ws-client/src/test/java/org/gbif/checklistbank/ws/client/DatasetMetricsWsClientIT.java | 1756 | package org.gbif.checklistbank.ws.client;
import org.gbif.api.model.checklistbank.DatasetMetrics;
import org.gbif.api.service.checklistbank.DatasetMetricsService;
import org.gbif.api.vocabulary.Kingdom;
import org.gbif.api.vocabulary.Language;
import org.gbif.api.vocabulary.Rank;
import java.util.List;
import java.util.UUID;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class DatasetMetricsWsClientIT extends ClientMyBatisITBase<DatasetMetricsService> {
private static final UUID CHECKLIST_KEY = UUID.fromString("109aea14-c252-4a85-96e2-f5f4d5d088f4");
public DatasetMetricsWsClientIT() {
super(DatasetMetricsService.class);
}
@Test
public void testGet() {
DatasetMetrics d = wsClient.get(CHECKLIST_KEY);
assertEquals(CHECKLIST_KEY, d.getDatasetKey());
assertEquals(1000, d.getUsagesCount());
assertEquals(25, d.getColCoveragePct());
assertEquals(250, d.getColMatchingCount());
assertEquals(100, d.getCountByKingdom(Kingdom.ANIMALIA));
assertEquals(700, d.getCountByKingdom(Kingdom.PLANTAE));
assertEquals(0, d.getCountByKingdom(Kingdom.FUNGI));
assertEquals(120, d.getCountByRank(Rank.GENUS));
assertEquals(10, d.getCountByRank(Rank.PHYLUM));
assertEquals(4, d.getCountNamesByLanguage(Language.DANISH));
assertEquals(132, d.getCountNamesByLanguage(Language.GERMAN));
}
@Test
public void testList() {
List<DatasetMetrics> ds = wsClient.list(CHECKLIST_KEY);
assertEquals(3, ds.size());
for (DatasetMetrics d : ds) {
assertEquals(CHECKLIST_KEY, d.getDatasetKey());
}
assertEquals(1000, ds.get(0).getUsagesCount());
assertEquals(200, ds.get(1).getUsagesCount());
assertEquals(100, ds.get(2).getUsagesCount());
}
}
| apache-2.0 |
nhs-ciao/ciao-transport-spine | ciao-transport-spine/src/main/java/uk/nhs/ciao/transport/spine/address/SpineEndpointAddress.java | 3261 | package uk.nhs.ciao.transport.spine.address;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
/**
* Details to identify / address an Accredited System end-point over spine
*/
public class SpineEndpointAddress {
/**
* Identifies the organisation associated with the Accredited System
*/
private String odsCode;
private String service;
private String action;
/**
* Identifies the Accredited System
*/
private String asid;
/**
* Identifies ContractProperties for Party + Interaction
*/
private String cpaId;
/**
* Identifies the message handling service (MHS) responsible for sending
* messages to the Accredited System
*/
private String mhsPartyKey;
public SpineEndpointAddress() {
// NOOP
}
/**
* Copy constructor
*/
public SpineEndpointAddress(final SpineEndpointAddress copy) {
odsCode = copy.odsCode;
service = copy.service;
action = copy.action;
asid = copy.asid;
cpaId = copy.cpaId;
mhsPartyKey = copy.mhsPartyKey;
}
public String getOdsCode() {
return odsCode;
}
public void setOdsCode(final String odsCode) {
this.odsCode = odsCode;
}
public String getService() {
return service;
}
public void setService(final String service) {
this.service = service;
}
public String getAction() {
return action;
}
public void setAction(final String action) {
this.action = action;
}
public String getAsid() {
return asid;
}
public void setAsid(final String asid) {
this.asid = asid;
}
public String getCpaId() {
return cpaId;
}
public void setCpaId(final String cpaId) {
this.cpaId = cpaId;
}
public String getMhsPartyKey() {
return mhsPartyKey;
}
public void setMhsPartyKey(final String mhsPartyKey) {
this.mhsPartyKey = mhsPartyKey;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("odsCode", odsCode)
.add("service", service)
.add("action", action)
.add("asid", asid)
.add("cpaId", cpaId)
.add("mhsPartyKey", mhsPartyKey)
.toString();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((asid == null) ? 0 : asid.hashCode());
result = prime * result + ((cpaId == null) ? 0 : cpaId.hashCode());
result = prime * result + ((service == null) ? 0 : service.hashCode());
result = prime * result + ((action == null) ? 0 : action.hashCode());
result = prime * result + ((mhsPartyKey == null) ? 0 : mhsPartyKey.hashCode());
result = prime * result + ((odsCode == null) ? 0 : odsCode.hashCode());
return result;
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
} else if (obj == null || getClass() != obj.getClass()) {
return false;
}
final SpineEndpointAddress other = (SpineEndpointAddress) obj;
return Objects.equal(action, other.action)
&& Objects.equal(asid, other.asid)
&& Objects.equal(cpaId, other.cpaId)
&& Objects.equal(mhsPartyKey, other.mhsPartyKey)
&& Objects.equal(odsCode, other.odsCode)
&& Objects.equal(service, other.service);
}
}
| apache-2.0 |
kevdoran/nifi-minifi-cpp | extensions/civetweb/processors/ListenHTTP.cpp | 17491 | /**
* @file ListenHTTP.cpp
* ListenHTTP class implementation
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ListenHTTP.h"
namespace org {
namespace apache {
namespace nifi {
namespace minifi {
namespace processors {
core::Property ListenHTTP::BasePath(
core::PropertyBuilder::createProperty("Base Path")
->withDescription("Base path for incoming connections")
->isRequired(false)
->withDefaultValue<std::string>("contentListener")->build());
core::Property ListenHTTP::Port(
core::PropertyBuilder::createProperty("Listening Port")
->withDescription("The Port to listen on for incoming connections. 0 means port is going to be selected randomly.")
->isRequired(true)
->withDefaultValue<int>(80, core::StandardValidators::LISTEN_PORT_VALIDATOR())->build());
core::Property ListenHTTP::AuthorizedDNPattern("Authorized DN Pattern", "A Regular Expression to apply against the Distinguished Name of incoming"
" connections. If the Pattern does not match the DN, the connection will be refused.",
".*");
core::Property ListenHTTP::SSLCertificate("SSL Certificate", "File containing PEM-formatted file including TLS/SSL certificate and key", "");
core::Property ListenHTTP::SSLCertificateAuthority("SSL Certificate Authority", "File containing trusted PEM-formatted certificates", "");
core::Property ListenHTTP::SSLVerifyPeer(
core::PropertyBuilder::createProperty("SSL Verify Peer")
->withDescription("Whether or not to verify the client's certificate (yes/no)")
->isRequired(false)
->withAllowableValues<std::string>({"yes", "no"})
->withDefaultValue("no")->build());
core::Property ListenHTTP::SSLMinimumVersion(
core::PropertyBuilder::createProperty("SSL Minimum Version")
-> withDescription("Minimum TLS/SSL version allowed (SSL2, SSL3, TLS1.0, TLS1.1, TLS1.2)")
->isRequired(false)
->withAllowableValues<std::string>({"SSL2", "SSL3", "TLS1.0", "TLS1.1", "TLS1.2"})
->withDefaultValue("SSL2")->build());
core::Property ListenHTTP::HeadersAsAttributesRegex("HTTP Headers to receive as Attributes (Regex)", "Specifies the Regular Expression that determines the names of HTTP Headers that"
" should be passed along as FlowFile attributes",
"");
core::Relationship ListenHTTP::Success("success", "All files are routed to success");
void ListenHTTP::initialize() {
logger_->log_trace("Initializing ListenHTTP");
// Set the supported properties
std::set<core::Property> properties;
properties.insert(BasePath);
properties.insert(Port);
properties.insert(AuthorizedDNPattern);
properties.insert(SSLCertificate);
properties.insert(SSLCertificateAuthority);
properties.insert(SSLVerifyPeer);
properties.insert(SSLMinimumVersion);
properties.insert(HeadersAsAttributesRegex);
setSupportedProperties(properties);
// Set the supported relationships
std::set<core::Relationship> relationships;
relationships.insert(Success);
setSupportedRelationships(relationships);
}
void ListenHTTP::onSchedule(core::ProcessContext *context, core::ProcessSessionFactory *sessionFactory) {
std::string basePath;
if (!context->getProperty(BasePath.getName(), basePath)) {
logger_->log_info("%s attribute is missing, so default value of %s will be used", BasePath.getName(), BasePath.getValue().to_string());
basePath = BasePath.getValue().to_string();
}
basePath.insert(0, "/");
if (!context->getProperty(Port.getName(), listeningPort)) {
logger_->log_error("%s attribute is missing or invalid", Port.getName());
return;
}
bool randomPort = listeningPort == "0";
std::string authDNPattern;
if (context->getProperty(AuthorizedDNPattern.getName(), authDNPattern) && !authDNPattern.empty()) {
logger_->log_debug("ListenHTTP using %s: %s", AuthorizedDNPattern.getName(), authDNPattern);
}
std::string sslCertFile;
if (context->getProperty(SSLCertificate.getName(), sslCertFile) && !sslCertFile.empty()) {
logger_->log_debug("ListenHTTP using %s: %s", SSLCertificate.getName(), sslCertFile);
}
// Read further TLS/SSL options only if TLS/SSL usage is implied by virtue of certificate value being set
std::string sslCertAuthorityFile;
std::string sslVerifyPeer;
std::string sslMinVer;
if (!sslCertFile.empty()) {
if (context->getProperty(SSLCertificateAuthority.getName(), sslCertAuthorityFile) && !sslCertAuthorityFile.empty()) {
logger_->log_debug("ListenHTTP using %s: %s", SSLCertificateAuthority.getName(), sslCertAuthorityFile);
}
if (context->getProperty(SSLVerifyPeer.getName(), sslVerifyPeer)) {
if (sslVerifyPeer.empty() || sslVerifyPeer.compare("no") == 0) {
logger_->log_debug("ListenHTTP will not verify peers");
} else {
logger_->log_debug("ListenHTTP will verify peers");
}
} else {
logger_->log_debug("ListenHTTP will not verify peers");
}
if (context->getProperty(SSLMinimumVersion.getName(), sslMinVer)) {
logger_->log_debug("ListenHTTP using %s: %s", SSLMinimumVersion.getName(), sslMinVer);
}
}
std::string headersAsAttributesPattern;
if (context->getProperty(HeadersAsAttributesRegex.getName(), headersAsAttributesPattern) && !headersAsAttributesPattern.empty()) {
logger_->log_debug("ListenHTTP using %s: %s", HeadersAsAttributesRegex.getName(), headersAsAttributesPattern);
}
auto numThreads = getMaxConcurrentTasks();
logger_->log_info("ListenHTTP starting HTTP server on port %s and path %s with %d threads", randomPort ? "random" : listeningPort, basePath, numThreads);
// Initialize web server
std::vector<std::string> options;
options.emplace_back("enable_keep_alive");
options.emplace_back("yes");
options.emplace_back("keep_alive_timeout_ms");
options.emplace_back("15000");
options.emplace_back("num_threads");
options.emplace_back(std::to_string(numThreads));
if (sslCertFile.empty()) {
options.emplace_back("listening_ports");
options.emplace_back(listeningPort);
} else {
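    // civetweb treats a listening port suffixed with 's' (e.g. "443s") as an SSL/TLS port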
listeningPort += "s";
options.emplace_back("listening_ports");
options.emplace_back(listeningPort);
options.emplace_back("ssl_certificate");
options.emplace_back(sslCertFile);
if (!sslCertAuthorityFile.empty()) {
options.emplace_back("ssl_ca_file");
options.emplace_back(sslCertAuthorityFile);
}
if (sslVerifyPeer.empty() || sslVerifyPeer == "no") {
options.emplace_back("ssl_verify_peer");
options.emplace_back("no");
} else {
options.emplace_back("ssl_verify_peer");
options.emplace_back("yes");
}
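    // Map the configured minimum TLS/SSL version to civetweb's ssl_protocol_version
    // option (0 = SSL2 and newer, 1 = SSL3+, 2 = TLS1.0+, 3 = TLS1.1+, 4 = TLS1.2+)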
if (sslMinVer == "SSL2") {
options.emplace_back("ssl_protocol_version");
options.emplace_back(std::to_string(0));
} else if (sslMinVer == "SSL3") {
options.emplace_back("ssl_protocol_version");
options.emplace_back(std::to_string(1));
} else if (sslMinVer == "TLS1.0") {
options.emplace_back("ssl_protocol_version");
options.emplace_back(std::to_string(2));
} else if (sslMinVer == "TLS1.1") {
options.emplace_back("ssl_protocol_version");
options.emplace_back(std::to_string(3));
} else {
options.emplace_back("ssl_protocol_version");
options.emplace_back(std::to_string(4));
}
}
server_.reset(new CivetServer(options));
handler_.reset(new Handler(basePath, context, sessionFactory, std::move(authDNPattern), std::move(headersAsAttributesPattern)));
server_->addHandler(basePath, handler_.get());
if (randomPort) {
const auto& vec = server_->getListeningPorts();
if (vec.size() != 1) {
logger_->log_error("Random port is set, but there is no listening port! Server most probably failed to start!");
} else {
listeningPort = std::to_string(vec[0]);
logger_->log_info("Listening on port %s", listeningPort);
}
}
}
ListenHTTP::~ListenHTTP() {
}
void ListenHTTP::onTrigger(core::ProcessContext *context, core::ProcessSession *session) {
std::shared_ptr<FlowFileRecord> flow_file = std::static_pointer_cast<FlowFileRecord>(session->get());
// Do nothing if there are no incoming files
if (!flow_file) {
return;
}
std::string type;
flow_file->getAttribute("http.type", type);
if (type == "response_body") {
if (handler_) {
struct response_body response { "", "", "" };
ResponseBodyReadCallback cb(&response.body);
flow_file->getAttribute("filename", response.uri);
flow_file->getAttribute("mime.type", response.mime_type);
if (response.mime_type.empty()) {
logger_->log_warn("Using default mime type of application/octet-stream for response body file: %s", response.uri);
response.mime_type = "application/octet-stream";
}
session->read(flow_file, &cb);
handler_->set_response_body(std::move(response));
}
}
session->remove(flow_file);
}
ListenHTTP::Handler::Handler(std::string base_uri, core::ProcessContext *context, core::ProcessSessionFactory *session_factory, std::string &&auth_dn_regex, std::string &&header_as_attrs_regex)
: base_uri_(std::move(base_uri)),
auth_dn_regex_(std::move(auth_dn_regex)),
headers_as_attrs_regex_(std::move(header_as_attrs_regex)),
logger_(logging::LoggerFactory<ListenHTTP::Handler>::getLogger()) {
process_context_ = context;
session_factory_ = session_factory;
}
void ListenHTTP::Handler::send_error_response(struct mg_connection *conn) {
mg_printf(conn, "HTTP/1.1 500 Internal Server Error\r\n"
"Content-Type: text/html\r\n"
"Content-Length: 0\r\n\r\n");
}
void ListenHTTP::Handler::set_header_attributes(const mg_request_info *req_info, const std::shared_ptr<FlowFileRecord> &flow_file) const {
// Add filename from "filename" header value (and pattern headers)
for (int i = 0; i < req_info->num_headers; i++) {
auto header = &req_info->http_headers[i];
if (strcmp("filename", header->name) == 0) {
if (!flow_file->updateAttribute("filename", header->value)) {
flow_file->addAttribute("filename", header->value);
}
} else if (std::regex_match(header->name, headers_as_attrs_regex_)) {
if (!flow_file->updateAttribute(header->name, header->value)) {
flow_file->addAttribute(header->name, header->value);
}
}
}
if (req_info->query_string) {
flow_file->addAttribute("http.query", req_info->query_string);
}
}
bool ListenHTTP::Handler::handlePost(CivetServer *server, struct mg_connection *conn) {
auto req_info = mg_get_request_info(conn);
if (!req_info) {
logger_->log_error("ListenHTTP handling POST resulted in a null request");
return false;
}
  logger_->log_debug("ListenHTTP handling POST request of length %lld", req_info->content_length);
if (!auth_request(conn, req_info)) {
return true;
}
// Always send 100 Continue, as allowed per standard to minimize client delay (https://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html)
mg_printf(conn, "HTTP/1.1 100 Continue\r\n\r\n");
auto session = session_factory_->createSession();
ListenHTTP::WriteCallback callback(conn, req_info);
auto flow_file = std::static_pointer_cast<FlowFileRecord>(session->create());
if (!flow_file) {
send_error_response(conn);
return true;
}
try {
session->write(flow_file, &callback);
set_header_attributes(req_info, flow_file);
session->transfer(flow_file, Success);
session->commit();
} catch (std::exception &exception) {
logger_->log_error("ListenHTTP Caught Exception %s", exception.what());
send_error_response(conn);
session->rollback();
throw;
} catch (...) {
logger_->log_error("ListenHTTP Caught Exception Processor::onTrigger");
send_error_response(conn);
session->rollback();
throw;
}
mg_printf(conn, "HTTP/1.1 200 OK\r\n");
write_body(conn, req_info);
return true;
}
bool ListenHTTP::Handler::auth_request(mg_connection *conn, const mg_request_info *req_info) const {
// If this is a two-way TLS connection, authorize the peer against the configured pattern
bool authorized = true;
if (req_info->is_ssl && req_info->client_cert != nullptr) {
if (!std::regex_match(req_info->client_cert->subject, auth_dn_regex_)) {
mg_printf(conn, "HTTP/1.1 403 Forbidden\r\n"
"Content-Type: text/html\r\n"
"Content-Length: 0\r\n\r\n");
logger_->log_warn("ListenHTTP client DN not authorized: %s", req_info->client_cert->subject);
authorized = false;
}
}
return authorized;
}
bool ListenHTTP::Handler::handleGet(CivetServer *server, struct mg_connection *conn) {
auto req_info = mg_get_request_info(conn);
if (!req_info) {
logger_->log_error("ListenHTTP handling GET resulted in a null request");
return false;
}
logger_->log_debug("ListenHTTP handling GET request of URI %s", req_info->request_uri);
if (!auth_request(conn, req_info)) {
return true;
}
auto session = session_factory_->createSession();
auto flow_file = std::static_pointer_cast<FlowFileRecord>(session->create());
if (!flow_file) {
send_error_response(conn);
return true;
}
try {
set_header_attributes(req_info, flow_file);
session->transfer(flow_file, Success);
session->commit();
} catch (std::exception &exception) {
logger_->log_error("ListenHTTP Caught Exception %s", exception.what());
send_error_response(conn);
session->rollback();
throw;
} catch (...) {
logger_->log_error("ListenHTTP Caught Exception Processor::onTrigger");
send_error_response(conn);
session->rollback();
throw;
}
mg_printf(conn, "HTTP/1.1 200 OK\r\n");
write_body(conn, req_info);
return true;
}
void ListenHTTP::Handler::write_body(mg_connection *conn, const mg_request_info *req_info) {
const auto &request_uri_str = std::string(req_info->request_uri);
if (request_uri_str.size() > base_uri_.size() + 1) {
struct response_body response { };
{
// Attempt to minimize time holding mutex (it would be nice to have a lock-free concurrent map here)
std::lock_guard<std::mutex> guard(uri_map_mutex_);
std::string req_uri = request_uri_str.substr(base_uri_.size() + 1);
if (response_uri_map_.count(req_uri)) {
response = response_uri_map_[req_uri];
}
}
if (!response.body.empty()) {
logger_->log_debug("Writing response body of %lu bytes for URI: %s", response.body.size(), req_info->request_uri);
mg_printf(conn, "Content-type: ");
mg_printf(conn, "%s", response.mime_type.c_str());
mg_printf(conn, "\r\n");
mg_printf(conn, "Content-length: ");
mg_printf(conn, "%s", std::to_string(response.body.size()).c_str());
mg_printf(conn, "\r\n\r\n");
mg_printf(conn, "%s", response.body.c_str());
} else {
logger_->log_debug("No response body available for URI: %s", req_info->request_uri);
mg_printf(conn, "Content-length: 0\r\n\r\n");
}
} else {
logger_->log_debug("No response body available for URI: %s", req_info->request_uri);
mg_printf(conn, "Content-length: 0\r\n\r\n");
}
}
ListenHTTP::WriteCallback::WriteCallback(struct mg_connection *conn, const struct mg_request_info *reqInfo)
: logger_(logging::LoggerFactory<ListenHTTP::WriteCallback>::getLogger()) {
conn_ = conn;
req_info_ = reqInfo;
}
int64_t ListenHTTP::WriteCallback::process(std::shared_ptr<io::BaseStream> stream) {
int64_t rlen;
int64_t nlen = 0;
int64_t tlen = req_info_->content_length;
uint8_t buf[16384];
// if we have no content length we should call mg_read until
// there is no data left from the stream to be HTTP/1.1 compliant
while (tlen == -1 || nlen < tlen) {
rlen = tlen == -1 ? sizeof(buf) : tlen - nlen;
if (rlen > (int64_t) sizeof(buf)) {
rlen = (int64_t) sizeof(buf);
}
// Read a buffer of data from client
rlen = mg_read(conn_, &buf[0], (size_t) rlen);
if (rlen <= 0) {
break;
}
// Transfer buffer data to the output stream
stream->write(&buf[0], rlen);
nlen += rlen;
}
return nlen;
}
bool ListenHTTP::isSecure() const {
return (listeningPort.length() > 0) && *listeningPort.rbegin() == 's';
}
std::string ListenHTTP::getPort() const {
if(isSecure()) {
return listeningPort.substr(0, listeningPort.length() -1);
}
return listeningPort;
}
} /* namespace processors */
} /* namespace minifi */
} /* namespace nifi */
} /* namespace apache */
} /* namespace org */
| apache-2.0 |
softelnet/sponge | sponge-api/src/main/java/org/openksavi/sponge/engine/Session.java | 2925 | /*
* Copyright 2016-2017 The Sponge authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openksavi.sponge.engine;
import java.io.Serializable;
import java.util.function.Supplier;
/**
* A session.
*
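 * <p>A minimal, hypothetical usage sketch (variable names are illustrative only):
 *
 * <pre>
 * session.setVariableIfNone("requestCount", () -> 0);
 * int count = session.getVariable(Integer.class, "requestCount", 0);
 * session.setVariable("requestCount", count + 1);
 * </pre>
 *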
*/
public interface Session extends Serializable {
/**
* Sets session variable value.
*
* @param name variable name.
* @param value variable value.
*/
void setVariable(String name, Object value);
/**
* Returns the value of the session variable. Throws exception if not found.
*
* @param name variable name.
* @return variable value.
     * @param <T> the type of the variable value.
*/
<T> T getVariable(String name);
/**
* Returns the value of the session variable. Throws exception if not found.
*
* @param cls variable class.
* @param name variable name.
*
* @return variable value.
     * @param <T> the type of the variable value.
*/
<T> T getVariable(Class<T> cls, String name);
/**
* Returns the value of the session variable or {@code defaultValue} if not found.
*
* @param name variable name.
* @param defaultValue default value.
*
* @return variable value.
     * @param <T> the type of the variable value.
*/
<T> T getVariable(String name, T defaultValue);
/**
* Returns the value of the session variable or {@code defaultValue} if not found.
*
* @param cls variable class.
* @param name variable name.
* @param defaultValue default value.
*
* @return variable value.
     * @param <T> the type of the variable value.
*/
<T> T getVariable(Class<T> cls, String name, T defaultValue);
/**
* Removes a session variable.
*
* @param name variable name.
*/
void removeVariable(String name);
/**
* Returns {@code true} if a session variable named {@code name} is defined.
*
* @param name variable name.
* @return {@code true} if a session variable named {@code name} is defined.
*/
boolean hasVariable(String name);
/**
* Sets the session variable if not set already.
*
* @param name variable name.
* @param supplier variable value supplier.
     * @param <T> the type of the variable value.
*/
<T> void setVariableIfNone(String name, Supplier<T> supplier);
/**
* Updates the session variable.
*
* @param name variable name.
*/
void updateVariable(String name);
}
| apache-2.0 |
icza/sc2gears | src-sc2gearsdb/hu/belicza/andras/sc2gearsdb/apiuser/client/beans/ApiCallStatFilters.java | 1196 | /*
* Project Sc2gears
*
* Copyright (c) 2010 Andras Belicza <[email protected]>
*
* This software is the property of Andras Belicza.
* Copying, modifying, distributing, refactoring without the authors permission
* is prohibited and protected by Law.
*/
package hu.belicza.andras.sc2gearsdb.apiuser.client.beans;
import com.google.gwt.user.client.rpc.IsSerializable;
/**
* API call stat filters.
*
* @author Andras Belicza
*/
public class ApiCallStatFilters implements IsSerializable {
private String fromDay;
private String toDay;
@Override
public String toString() {
final StringBuilder builder = new StringBuilder();
if ( fromDay != null )
builder.append( ", fromDay: " ).append( fromDay );
if ( toDay != null )
builder.append( ", toDay: " ).append( toDay );
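		// Strip the leading ", " separator when at least one filter was appended.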
return builder.length() > 0 ? builder.substring( 2 ) : "";
}
public void setFromDay( String fromDay ) {
this.fromDay = fromDay;
}
public String getFromDay() {
return fromDay;
}
public void setToDay( String toDay ) {
this.toDay = toDay;
}
public String getToDay() {
return toDay;
}
}
| apache-2.0 |
bshp/midPoint | repo/repo-sql-impl/src/main/java/com/evolveum/midpoint/repo/sql/DataSourceFactory.java | 5151 | /*
* Copyright (c) 2010-2015 Evolveum and contributors
*
* This work is dual-licensed under the Apache License 2.0
* and European Union Public License. See LICENSE file for details.
*/
package com.evolveum.midpoint.repo.sql;
import com.evolveum.midpoint.repo.api.RepositoryServiceFactoryException;
import com.evolveum.midpoint.util.logging.Trace;
import com.evolveum.midpoint.util.logging.TraceManager;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.lang.StringUtils;
import org.springframework.jndi.JndiObjectFactoryBean;
import javax.annotation.PreDestroy;
import javax.naming.NamingException;
import javax.sql.DataSource;
import java.io.Closeable;
import java.io.IOException;
/**
* @author Viliam Repan (lazyman)
*/
public class DataSourceFactory {
private static final Trace LOGGER = TraceManager.getTrace(DataSourceFactory.class);
private SqlRepositoryConfiguration configuration;
private DataSource internalDataSource;
private DataSource dataSource;
public SqlRepositoryConfiguration getConfiguration() {
return configuration;
}
public void setConfiguration(SqlRepositoryConfiguration configuration) {
this.configuration = configuration;
}
public DataSource createDataSource() throws RepositoryServiceFactoryException {
LOGGER.info("Loading datasource.");
if (configuration == null) {
throw new RepositoryServiceFactoryException("SQL configuration is null, couldn't create datasource.");
}
try {
if (StringUtils.isNotEmpty(configuration.getDataSource())) {
LOGGER.info("JNDI datasource present in configuration, looking for '{}'.", configuration.getDataSource());
dataSource = createJNDIDataSource();
} else {
LOGGER.info("Constructing default datasource with connection pooling; JDBC URL: {}", configuration.getJdbcUrl());
internalDataSource = createDataSourceInternal();
dataSource = internalDataSource;
}
return dataSource;
} catch (Exception ex) {
throw new RepositoryServiceFactoryException("Couldn't initialize datasource, reason: " + ex.getMessage(), ex);
}
}
public DataSource getDataSource() {
return dataSource;
}
private DataSource createJNDIDataSource() throws IllegalArgumentException, NamingException {
JndiObjectFactoryBean factory = new JndiObjectFactoryBean();
factory.setJndiName(configuration.getDataSource());
factory.afterPropertiesSet();
return (DataSource) factory.getObject();
}
private HikariConfig createConfig() {
HikariConfig config = new HikariConfig();
config.setDriverClassName(configuration.getDriverClassName());
config.setJdbcUrl(configuration.getJdbcUrl());
config.setUsername(configuration.getJdbcUsername());
config.setPassword(configuration.getJdbcPassword());
config.setRegisterMbeans(true);
config.setMinimumIdle(configuration.getMinPoolSize());
config.setMaximumPoolSize(configuration.getMaxPoolSize());
if (configuration.getMaxLifetime() != null) {
config.setMaxLifetime(configuration.getMaxLifetime());
}
if (configuration.getIdleTimeout() != null) {
config.setIdleTimeout(configuration.getIdleTimeout());
}
config.setIsolateInternalQueries(true);
// config.setAutoCommit(false);
TransactionIsolation ti = configuration.getTransactionIsolation();
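        // SNAPSHOT is not a standard JDBC isolation level (java.sql.Connection has no
        // TRANSACTION_SNAPSHOT constant), so it cannot be passed to Hikari here.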
if (ti != null && TransactionIsolation.SNAPSHOT != ti) {
config.setTransactionIsolation("TRANSACTION_" + ti.name());
}
if (configuration.isUsingMySqlCompatible()) {
config.addDataSourceProperty("cachePrepStmts", "true");
config.addDataSourceProperty("prepStmtCacheSize", "250");
config.addDataSourceProperty("prepStmtCacheSqlLimit", "2048");
// config.addDataSourceProperty("useServerPrepStmts", "true");
// config.addDataSourceProperty("useLocalSessionState", "true");
// config.addDataSourceProperty("useLocalTransactionState", "true");
// config.addDataSourceProperty("rewriteBatchedStatements", "true");
// config.addDataSourceProperty("cacheResultSetMetadata", "true");
// config.addDataSourceProperty("cacheServerConfiguration", "true");
// config.addDataSourceProperty("elideSetAutoCommits", "true");
// config.addDataSourceProperty("maintainTimeStats", "false");
}
config.setInitializationFailTimeout(configuration.getInitializationFailTimeout());
return config;
}
private DataSource createDataSourceInternal() {
HikariConfig config = createConfig();
return new HikariDataSource(config);
}
@PreDestroy
public void destroy() throws IOException {
if (internalDataSource instanceof Closeable) {
((Closeable) internalDataSource).close();
}
}
}
| apache-2.0 |
bond-anton/ScientificProjects | tests/test_config.py | 3193 | from __future__ import division, print_function
import unittest
from BDProjects.Config import default_connection_parameters, read_config, write_config
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_file_name = 'tests/config.ini'
self.broken_format_config_file_name = 'tests/config_broken_format.ini'
self.no_section_config_file_name = 'tests/config_no_section.ini'
self.temp_config_file_name = 'tests/config_tmp.ini'
def test_default_config(self):
cp = read_config(file_name=None)
self.assertEqual(cp, default_connection_parameters)
def test_read_config_file(self):
cp = read_config(file_name=self.config_file_name)
connection_parameters = {'db_name': 'tests/data/test.db',
'backend': 'sqlite',
'host': '',
'port': '',
'user': '',
'password': ''
}
self.assertEqual(cp, connection_parameters)
with self.assertRaises(IOError):
read_config(file_name='xxx' + self.config_file_name)
with self.assertRaises(ValueError):
read_config(self.broken_format_config_file_name)
with self.assertRaises(ValueError):
read_config(self.no_section_config_file_name)
def test_write_config_file(self):
connection_parameters = {'db_name': 'data/test.db',
'backend': 'sqlite',
                                 'host': 'localhost',
'port': 0,
'user': 'test_user',
'password': 'secret_password'
}
write_config(connection_parameters, self.temp_config_file_name)
connection_parameters['port'] = ''
cp = read_config(self.temp_config_file_name)
self.assertEqual(cp, connection_parameters)
connection_parameters = {'db_name': 'data/test.db',
'backend': 'sqlite',
                                 'host': 'localhost',
'port': '',
'user': 'test_user',
'password': 'secret_password'
}
write_config(connection_parameters, self.temp_config_file_name)
cp = read_config(self.temp_config_file_name)
self.assertEqual(cp, connection_parameters)
connection_parameters = {'db_name': 'data/test.db',
'backend': 'sqlite',
                                 'host': 'localhost',
'port': 9000,
'user': 'test_user',
'password': 'secret_password'
}
write_config(connection_parameters, self.temp_config_file_name)
connection_parameters['port'] = str(connection_parameters['port'])
cp = read_config(self.temp_config_file_name)
self.assertEqual(cp, connection_parameters)
| apache-2.0 |
shisaq/shisaq.github.io | projects/ygshi2.0/js/ko-research.js | 1987 | // 定义 knockout viewmodel
var ViewModel = function () {
    // Store a reference to this as self
var self = this;
    /* Template section */
    // Toggle the nav list by clicking the hamburger button
    // (shows or hides the navigation bar)
self.navStatus = ko.observable(-1);
self.toggleNav = function () {
if (self.navStatus() <= 0) {
self.navStatus(1);
} else {
self.navStatus(-1);
}
};
    /* Research section */
var Achievement = function (data) {
this.imgSrc = data.imgSrc;
this.vidSrc = data.vidSrc;
this.short = data.short;
this.title = data.title;
this.details = ko.observable(data.details);
};
    // Build the array holding the contents of the three main research sections
self.achievementsList = ko.observableArray([]);
model.achievementsDetail.forEach( function(achievement) {
var newAchievement = new Achievement(achievement);
self.achievementsList.push(newAchievement);
});
    // Define the value of currentAchievement
self.currentAchievement = ko.observable(function () {
        // Get the part of the URL after the question mark (the query string)
var num = window.location.search;
        // Take the last character of that string and assign it to i
var i = num.charAt(num.length - 1);
        // When the query string exists and i is a valid index into achievementsList,
        // currentAchievement should be the i-th element
        if (num && i >= 0 && i < self.achievementsList().length) {
return self.achievementsList()[i];
} else {
            // Otherwise show the first element by default
return self.achievementsList()[0];
}
}());
    // Update currentAchievement according to the clicked title
self.updateAchievement = function (achievement) {
$('.achievement-details').css('opacity', 0);
window.setTimeout(function () {
self.currentAchievement(achievement);
$('.achievement-details').css('opacity', 1);
}, 200);
};
};
| apache-2.0 |
goolei/gool | user/wxreg.php | 3157 | <?php
/**
 * WeChat login
**/
$is_defend=true;
include("../includes/common.php");
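// When the page is opened inside the WeChat built-in browser, fetch the user's OpenId and forward it to the registration page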
if(strpos($_SERVER['HTTP_USER_AGENT'], 'MicroMessenger')!==false){
require_once SYSTEM_ROOT."wxpay/WxPay.Api.php";
require_once SYSTEM_ROOT."wxpay/WxPay.JsApiPay.php";
$tools = new JsApiPay();
$openId = $tools->GetOpenid();
if(!$openId)sysmsg('OpenId获取失败');
header("Location: reg.php?do=wx&openid=".$openId);
exit;
}
$code_url = 'https://w.url.cn/s/Ay0emSP';
?>
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="utf-8" />
<title>微信注册商户 | <?php echo $conf['web_name']?></title>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/bootstrap/dist/css/bootstrap.css" type="text/css" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/animate.css/animate.css" type="text/css" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/font-awesome/css/font-awesome.min.css" type="text/css" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/simple-line-icons/css/simple-line-icons.css" type="text/css" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/html/css/font.css" type="text/css" />
<link rel="stylesheet" href="https://template.down.swap.wang/ui/angulr_2.0.1/html/css/app.css" type="text/css" />
<style>input:-webkit-autofill{-webkit-box-shadow:0 0 0px 1000px white inset;-webkit-text-fill-color:#333;}img.logo{width:14px;height:14px;margin:0 5px 0 3px;}</style>
</head>
<body>
<div class="app app-header-fixed ">
<div class="container w-xxl w-auto-xs" ng-controller="SigninFormController" ng-init="app.settings.container = false;">
<span class="navbar-brand block m-t" id="sitename"><?php echo $conf['web_name']?></span>
<div class="m-b-lg">
<div class="wrapper text-center">
<strong>请用微信扫描以下二维码继续注册商户</strong>
</div>
<form name="form" class="form-validation">
<div class="qr-image text-center" id="qrcode">
</div><br/>
<p>
或复制链接到微信打开:<a href="<?php echo $code_url?>"><?php echo $code_url?></a>
</p>
</div>
<a href="reg.php" ui-sref="access.signup" class="btn btn-lg btn-default btn-block">返回</a>
</form>
</div>
<div class="text-center">
<p>
<small class="text-muted"><?php echo $conf['web_name']?><br>© 2016~2017</small>
</p>
</div>
</div>
</div>
<script src="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/jquery/dist/jquery.min.js"></script>
<script src="https://template.down.swap.wang/ui/angulr_2.0.1/bower_components/bootstrap/dist/js/bootstrap.js"></script>
<script src="../assets/layer/layer.js"></script>
<script src="../assets/js/qrcode.min.js"></script>
<script>
$(document).ready(function(){
var qrcode = new QRCode("qrcode", {
text: "<?php echo $code_url?>",
width: 160,
height: 160,
colorDark: "#000000",
colorLight: "#ffffff",
correctLevel: QRCode.CorrectLevel.H
});
});
</script>
</body>
</html> | apache-2.0 |
google-research/google-research | tunas/search_space_utils.py | 13161 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Common utility functions for basic elements in search space.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from typing import Any, Optional, Dict, List, Sequence, Text, Tuple, TypeVar, Union
import tensorflow.compat.v1 as tf
from tunas import basic_specs
from tunas import schema
# List of possible reward functions to use during a search.
RL_REWARD_MNAS = 'mnas'
RL_REWARD_MNAS_HARD = 'mnas_hard'
RL_REWARD_ABS = 'abs'
RL_REWARDS = (
RL_REWARD_MNAS,
RL_REWARD_MNAS_HARD,
RL_REWARD_ABS
)
def normalize_strides(
strides
):
"""Normalize strides of the same format.
Args:
strides: An integer or pair of integers.
Returns:
A pair of integers.
Raises:
ValueError: Input strides is neither an integer nor a pair of integers.
"""
if isinstance(strides, (list, tuple)) and len(strides) == 2:
return tuple(strides)
elif isinstance(strides, int):
return (strides, strides)
else:
raise ValueError(
'Strides - {} is neither an integer nor a pair of integers.'.format(
strides))
def scale_filters(filters, multiplier, base):
"""Scale `filters` by `factor`and round to the nearest multiple of `base`.
Args:
filters: Positive integer. The original filter size.
multiplier: Positive float. The factor by which to scale the filters.
base: Positive integer. The number of filters will be rounded to a multiple
of this value.
Returns:
Positive integer, the scaled filter size.
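    For example (hypothetical numbers), scale_filters(48, 1.5, base=8) computes
    int(48 * 1.5 / 8 + 0.5) = 9 and so returns 9 * 8 = 72.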
"""
round_half_up = int(filters * multiplier / base + 0.5)
result = int(round_half_up * base)
return max(result, base)
def tf_scale_filters(filters,
multiplier,
base):
"""Similar to `scale_filters`, but with Tensor instead of numeric inputs.
Args:
filters: Scalar int32 Tensor. The original filter size.
multiplier: Scalar float32 Tensor. The factor by which to scale `filters`.
base: Scalar int32 Tensor. The number of filters will be rounded to a
multiple of this value.
Returns:
Scalar int32 Tensor. The scaled filter size.
"""
filters = tf.convert_to_tensor(filters, dtype=tf.int32)
base = tf.convert_to_tensor(base, dtype=filters.dtype)
multiplier = tf.convert_to_tensor(multiplier, dtype=tf.float32)
float_filters = tf.cast(filters, multiplier.dtype)
float_base = tf.cast(base, multiplier.dtype)
round_half_up = tf.cast(
float_filters * multiplier / float_base + 0.5, tf.int32)
round_half_up_float = tf.cast(round_half_up, multiplier.dtype)
result = tf.cast(round_half_up_float * float_base, filters.dtype)
return tf.math.maximum(result, base)
def make_divisible(v, divisor):
"""Alternate filter scaling, compatible with the one used by MobileNet V3."""
new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
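  # For example (hypothetical numbers): make_divisible(30, 8) simply returns 32,
  # while make_divisible(27, 8) first rounds to 24, which is below 0.9 * 27 = 24.3,
  # so a further `divisor` is added and the result is 32.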
if new_v < 0.9 * v:
new_v += divisor
return new_v
def tf_make_divisible(v,
divisor):
"""Analogue of make_divisible() that operates on tf.Tensor objects."""
v = tf.convert_to_tensor(v, preferred_dtype=tf.int32)
divisor = tf.convert_to_tensor(divisor, dtype=v.dtype)
new_v = tf.cast(v, tf.float32) + tf.cast(divisor, tf.float32)/2
new_v = tf.cast(new_v, v.dtype) // divisor * divisor
new_v = tf.maximum(new_v, divisor)
# Condition is equivalent to (new_v * 0.9*v), but works with integer inputs.
new_v = tf.where_v2(10*new_v < 9*v, new_v + divisor, new_v)
return new_v
_T = TypeVar('_T')
def _validate_genotype_dict(model_spec,
genotype):
"""Verify that the tag counts in `genotype` match those in `model_spec`."""
# Count the number of times each tag appears in ConvTowerSpec.
tag_counts = collections.Counter()
def update_tag_counts(oneof):
tag_counts[oneof.tag] += 1
schema.map_oneofs(update_tag_counts, model_spec)
# Report any size mismatches we come across.
bad_tags = set(genotype) - set(tag_counts)
if bad_tags:
raise ValueError(
'Tag(s) appear in genotype but not in model_spec: {:s}'
.format(', '.join(bad_tags)))
for tag in genotype:
if len(genotype[tag]) != tag_counts[tag]:
raise ValueError(
'Tag {:s} appears {:d} times in genotype but {:d} times in '
'model_spec'.format(tag, len(genotype[tag]), tag_counts[tag]))
def _validate_genotype_sequence(model_spec,
genotype):
"""Verify that the number of OneOfs in `genotype` matches `model_spec`."""
# Note: Conceptually, we just need oneof_count to be an integer. But we need
# to be able to update its value from within the update_count() function, and
# storing it inside a dictionary makes that easier.
oneof_count = {'value': 0}
def update_count(oneof):
del oneof # Unused
oneof_count['value'] += 1
schema.map_oneofs(update_count, model_spec)
if len(genotype) != oneof_count['value']:
raise ValueError(
'Genotype contains {:d} oneofs but model_spec contains {:d}'
.format(len(genotype), oneof_count['value']))
def prune_model_spec(model_spec,
genotype,
path_dropout_rate = 0.0,
training = None,
prune_filters_by_value = False):
"""Creates a representation for an architecture with constant ops.
Args:
model_spec: Nested data structure containing schema.OneOf objects.
genotype: A dictionary mapping tags to sequences of integers. Or a sequence
of integers containing the selections for all the OneOf nodes in
model_spec.
path_dropout_rate: Float or scalar float Tensor between 0 and 1. If greater
than zero, we will randomly zero out skippable operations during
training with this probability. Cannot be used with an rl controller.
Should be set to 0 at evaluation time.
training: Boolean. True during training, false during evaluation/inference.
Can be None if path_dropout_rate is zero.
prune_filters_by_value: Boolean. If true, treat genotype[FILTERS_TAG] as a
list of values rather than a list of indices.
Returns:
A pruned version of `model_spec` with all unused options removed.
"""
if path_dropout_rate != 0.0:
if basic_specs.OP_TAG not in genotype:
raise ValueError(
'If path_dropout_rate > 0 then genotype must contain key {:s}.'
.format(basic_specs.OP_TAG))
if training is None:
raise ValueError(
'If path_dropout_rate > 0 then training cannot be None.')
# Create a mutable copy of 'genotype'. This will let us modify the copy
# without updating the original.
genotype_is_dict = isinstance(genotype, dict)
if genotype_is_dict:
genotype = {key: list(value) for (key, value) in genotype.items()}
_validate_genotype_dict(model_spec, genotype)
else: # genotype is a list/tuple of integers
genotype = list(genotype)
_validate_genotype_sequence(model_spec, genotype)
# Everything looks good. Now prune the model.
zero_spec = basic_specs.ZeroSpec()
def update_spec(oneof):
"""Visit a schema.OneOf node in `model_spec`, return an updated value."""
if genotype_is_dict and oneof.tag not in genotype:
return oneof
if genotype_is_dict:
selection = genotype[oneof.tag].pop(0)
if oneof.tag == basic_specs.FILTERS_TAG and prune_filters_by_value:
selection = oneof.choices.index(selection)
else:
selection = genotype.pop(0)
# If an operation is skippable (i.e., it can be replaced with a ZeroSpec)
# then we optionally apply path dropout during stand-alone training.
# This logic, if enabled, will replace a standard RL controller.
mask = None
if (path_dropout_rate != 0.0
and training
and oneof.tag == basic_specs.OP_TAG
and zero_spec in oneof.choices):
keep_prob = 1.0 - path_dropout_rate
# Mask is [1] with probability `keep_prob`, and [0] otherwise.
mask = tf.cast(tf.less(tf.random_uniform([1]), keep_prob), tf.float32)
# Normalize the mask so that the expected value of each element 1.
mask = mask / keep_prob
return schema.OneOf([oneof.choices[selection]], oneof.tag, mask)
return schema.map_oneofs(update_spec, model_spec)
def scale_conv_tower_spec(
model_spec,
multipliers,
base = None):
"""Scale all the filters in `model_spec`, rounding to multiples of `base`.
Args:
model_spec: A ConvTowerSpec namedtuple.
multipliers: float or list/tuple of floats, the possible filter multipliers.
base: Positive integer, all filter sizes must be a multiple of this value.
Returns:
A new basic_specs.ConvTowerSpec.
"""
if base is None:
base = model_spec.filters_base
if isinstance(multipliers, (int, float)):
multipliers = (multipliers,)
def update(oneof):
"""Compute version of `oneof` whose filters have been scaled up/down."""
if oneof.tag != basic_specs.FILTERS_TAG:
return oneof
all_filters = set()
for filters in oneof.choices:
if isinstance(filters, basic_specs.FilterMultiplier):
# Skip scaling because the filter sizes are relative, not absolute.
all_filters.add(filters)
else:
for mult in multipliers:
all_filters.add(scale_filters(filters, mult, base))
return schema.OneOf(sorted(all_filters), basic_specs.FILTERS_TAG)
result = schema.map_oneofs(update, model_spec)
return basic_specs.ConvTowerSpec(result.blocks, base)
def tf_argmax_or_zero(oneof):
"""Returns zero or the index with the largest value across axes of mask.
Args:
    oneof: A schema.OneOf object.
Returns:
A scalar int32 tensor.
"""
if oneof.mask is None:
if len(oneof.choices) != 1:
raise ValueError(
'Expect pruned structure with one choice when mask is None. '
'Got {} number of choices in structure.'.format(len(oneof.choices)))
return tf.constant(0, tf.int32)
else:
return tf.argmax(oneof.mask, output_type=tf.int32)
def tf_indices(model_spec):
"""Extract `indices` from `model_spec` as Tensors.
Args:
model_spec: Nested data structure containing schema.OneOf objects.
Returns:
`indices`, a rank-1 integer Tensor.
"""
indices = []
def visit(oneof):
index = tf_argmax_or_zero(oneof)
indices.append(index)
schema.map_oneofs(visit, model_spec)
return tf.stack(indices)
def parse_list(string, convert_fn):
"""Parse a (possibly empty) colon-separated list of values."""
string = string.strip()
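  # For example (hypothetical values): parse_list("8:16:32", int) returns
  # [8, 16, 32], and parse_list("", int) returns [].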
if string:
return [convert_fn(piece) for piece in string.split(':')]
else:
return []
def reward_for_single_cost_model(
quality,
rl_reward_function,
estimated_cost,
rl_cost_model_target,
rl_cost_model_exponent):
"""Compute reward based on quality and cost of a single cost model.
Args:
quality: quality of the model. For example, validation accuracy.
rl_reward_function: name of the reward function.
estimated_cost: estimated cost value.
rl_cost_model_target: the target value for cost.
rl_cost_model_exponent: a hyperparameter to balance cost and accuracy
in the reward function.
Returns:
A dictionary containing the following keys:
rl_reward: reward value.
rl_cost_ratio: a ratio between estimated cost and cost target.
rl_cost_adjustment: how much reward has been adjusted by cost.
"""
rl_cost_ratio = estimated_cost / rl_cost_model_target
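  # For example (hypothetical values): with the 'mnas' reward, quality=0.75,
  # estimated_cost=84, rl_cost_model_target=80 and rl_cost_model_exponent=-0.07,
  # the reward is 0.75 * (84 / 80) ** -0.07 ~= 0.747.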
if rl_reward_function == RL_REWARD_MNAS:
# reward = accuracy * (T/T0)^beta
rl_cost_adjustment = tf.pow(rl_cost_ratio, rl_cost_model_exponent)
rl_reward = quality * rl_cost_adjustment
elif rl_reward_function == RL_REWARD_MNAS_HARD:
# reward = accuracy * min((T/T0)^beta, 1)
rl_cost_adjustment = tf.pow(rl_cost_ratio, rl_cost_model_exponent)
rl_cost_adjustment = tf.minimum(rl_cost_adjustment, 1.)
rl_reward = quality * rl_cost_adjustment
elif rl_reward_function == RL_REWARD_ABS:
# reward = accuracy + beta * abs(T/T0 - 1)
rl_cost_adjustment = rl_cost_model_exponent * tf.abs(rl_cost_ratio - 1)
rl_reward = quality + rl_cost_adjustment
else:
raise ValueError('Unsupported rl_reward_function: {}'.format(
rl_reward_function))
return {
'rl_reward': rl_reward,
'rl_cost_ratio': rl_cost_ratio,
'rl_cost_adjustment': rl_cost_adjustment
}
| apache-2.0 |
baldwinn860/gapid | gapis/api/transform/early_terminator_test.go | 1448 | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transform
import (
"testing"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/testcmd"
)
func TestEarlyTerminator(t *testing.T) {
ctx := log.Testing(t)
inputs := testcmd.List(
&testcmd.A{ID: 10},
&testcmd.A{ID: 30},
&testcmd.A{ID: 20},
&testcmd.A{ID: 50},
&testcmd.A{ID: 90},
&testcmd.A{ID: 70},
&testcmd.A{ID: 80},
&testcmd.A{ID: 00},
&testcmd.A{ID: 60},
&testcmd.A{ID: 40},
)
expected := testcmd.List(
&testcmd.A{ID: 10},
&testcmd.A{ID: 30},
&testcmd.A{ID: 20},
&testcmd.A{ID: 50},
&testcmd.A{ID: 90},
&testcmd.A{ID: 70},
)
transform := NewEarlyTerminator(api.ID{})
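	// Termination points are requested at command IDs 20, 50 and 70; the transform
	// should let commands through until the last requested ID (70) has been seen
	// and drop everything after it, matching `expected` above.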
transform.Add(ctx, 0, 20, []uint64{0})
transform.Add(ctx, 0, 50, []uint64{})
transform.Add(ctx, 0, 70, []uint64{1})
CheckTransform(ctx, t, transform, inputs, expected)
}
| apache-2.0 |