filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
pkg/network/openshift_sdn.go
|
package network
import (
"log"
"net"
"os"
"path/filepath"
"reflect"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/openshift/cluster-network-operator/pkg/bootstrap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
configv1 "github.com/openshift/api/config/v1"
netv1 "github.com/openshift/api/network/v1"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/render"
)
// renderOpenShiftSDN returns the manifests for the openshift-sdn.
// This creates
// - the ClusterNetwork object
// - the sdn namespace
// - the sdn daemonset
// - the openvswitch daemonset
// and some other small things.
func renderOpenShiftSDN(conf *operv1.NetworkSpec, bootstrapResult *bootstrap.BootstrapResult, manifestDir string) ([]*uns.Unstructured, error) {
c := conf.DefaultNetwork.OpenShiftSDNConfig
objs := []*uns.Unstructured{}
data := render.MakeRenderData()
data.Data["ReleaseVersion"] = os.Getenv("RELEASE_VERSION")
data.Data["SDNImage"] = os.Getenv("SDN_IMAGE")
data.Data["CNIPluginsImage"] = os.Getenv("CNI_PLUGINS_IMAGE")
data.Data["KubeRBACProxyImage"] = os.Getenv("KUBE_RBAC_PROXY_IMAGE")
data.Data["KUBERNETES_SERVICE_HOST"] = bootstrapResult.Infra.APIServers[bootstrap.APIServerDefault].Host
data.Data["KUBERNETES_SERVICE_PORT"] = bootstrapResult.Infra.APIServers[bootstrap.APIServerDefault].Port
data.Data["Mode"] = c.Mode
data.Data["CNIConfDir"] = pluginCNIConfDir(conf)
data.Data["CNIBinDir"] = CNIBinDir
data.Data["PlatformType"] = bootstrapResult.Infra.PlatformType
if bootstrapResult.Infra.PlatformType == configv1.AzurePlatformType {
data.Data["SDNPlatformAzure"] = true
} else {
data.Data["SDNPlatformAzure"] = false
}
data.Data["ExternalControlPlane"] = bootstrapResult.Infra.ExternalControlPlane
data.Data["RoutableMTU"] = nil
data.Data["MTU"] = nil
if conf.Migration != nil && conf.Migration.MTU != nil {
if *conf.Migration.MTU.Network.From > *conf.Migration.MTU.Network.To {
data.Data["MTU"] = conf.Migration.MTU.Network.From
data.Data["RoutableMTU"] = conf.Migration.MTU.Network.To
} else {
data.Data["MTU"] = conf.Migration.MTU.Network.To
data.Data["RoutableMTU"] = conf.Migration.MTU.Network.From
}
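// Example (hypothetical values): migrating the cluster network MTU from 8950
// down to 1400 keeps MTU=8950 (the larger, currently applied value) and sets
// RoutableMTU=1400 for the transition, so traffic stays routable on both old
// and new nodes; migrating upward swaps the two, so MTU is always the larger.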
// c.MTU is used to set the applied network configuration MTU
// MTU migration procedure:
// 1. User sets the MTU they want to migrate to
// 2. CNO sets the MTU as applied
// 3. User can then set the MTU as configured
c.MTU = conf.Migration.MTU.Network.To
}
clusterNetwork, err := clusterNetwork(conf)
if err != nil {
return nil, errors.Wrap(err, "failed to build ClusterNetwork")
}
data.Data["ClusterNetwork"] = clusterNetwork
kpcDefaults := map[string]operv1.ProxyArgumentList{
"metrics-bind-address": {"127.0.0.1"},
"healthz-port": {"10256"},
"proxy-mode": {"iptables"},
"iptables-masquerade-bit": {"0"},
"enable-profiling": {"true"},
}
// For backward compatibility we allow conf to specify `metrics-port: 9101` but
// the daemonset always configures 9101 as the secure metrics port and 29101 as
// the insecure metrics port exposed by kube-proxy itself. So just override
// the value from conf (which we know is either "9101" or unspecified).
kpcOverrides := map[string]operv1.ProxyArgumentList{
"metrics-port": {"29101"},
}
if *c.EnableUnidling {
// We already validated that proxy-mode was either unset or iptables.
kpcOverrides["proxy-mode"] = operv1.ProxyArgumentList{"unidling+iptables"}
} else if *conf.DeployKubeProxy {
kpcOverrides["proxy-mode"] = operv1.ProxyArgumentList{"disabled"}
}
kpc, err := kubeProxyConfiguration(kpcDefaults, conf, kpcOverrides)
if err != nil {
return nil, errors.Wrap(err, "failed to build kube-proxy config")
}
data.Data["KubeProxyConfig"] = kpc
manifests, err := render.RenderDir(filepath.Join(manifestDir, "network/openshift-sdn"), &data)
if err != nil {
return nil, errors.Wrap(err, "failed to render manifests")
}
objs = append(objs, manifests...)
return objs, nil
}
// validateOpenShiftSDN checks that the openshift-sdn specific configuration
// is basically sane.
func validateOpenShiftSDN(conf *operv1.NetworkSpec) []error {
out := []error{}
if len(conf.ClusterNetwork) == 0 {
out = append(out, errors.Errorf("ClusterNetwork cannot be empty"))
}
if len(conf.ServiceNetwork) != 1 {
out = append(out, errors.Errorf("ServiceNetwork must have exactly 1 entry"))
}
sc := conf.DefaultNetwork.OpenShiftSDNConfig
if sc != nil {
if sc.Mode != "" && sdnPluginName(sc.Mode) == "" {
out = append(out, errors.Errorf("invalid openshift-sdn mode %q", sc.Mode))
}
if sc.VXLANPort != nil && (*sc.VXLANPort < 1 || *sc.VXLANPort > 65535) {
out = append(out, errors.Errorf("invalid VXLANPort %d", *sc.VXLANPort))
}
if sc.MTU != nil && (*sc.MTU < MinMTUIPv4 || *sc.MTU > MaxMTU) {
out = append(out, errors.Errorf("invalid MTU %d", *sc.MTU))
}
// the proxy mode must be unset or iptables for unidling to work
if (sc.EnableUnidling == nil || *sc.EnableUnidling) &&
conf.KubeProxyConfig != nil && conf.KubeProxyConfig.ProxyArguments != nil &&
len(conf.KubeProxyConfig.ProxyArguments["proxy-mode"]) > 0 &&
conf.KubeProxyConfig.ProxyArguments["proxy-mode"][0] != "iptables" {
out = append(out, errors.Errorf("invalid proxy-mode - when unidling is enabled, proxy-mode must be \"iptables\""))
}
}
if conf.DeployKubeProxy != nil && *conf.DeployKubeProxy {
// We allow deploying an external kube-proxy with openshift-sdn in very
// limited circumstances, for testing purposes. The error here
// intentionally lies about this.
if sc == nil || sc.EnableUnidling == nil || *sc.EnableUnidling || !noKubeProxyConfig(conf) {
out = append(out, errors.Errorf("openshift-sdn does not support 'deployKubeProxy: true'"))
}
}
return out
}
// isOpenShiftSDNChangeSafe ensures no unsafe changes are applied to the running
// network
// It allows changing only useExternalOpenvswitch and enableUnidling.
// In the future, we may support rolling out MTU or external openvswitch alterations.
// As with all is*ChangeSafe functions, defaults have already been applied.
func isOpenShiftSDNChangeSafe(prev, next *operv1.NetworkSpec) []error {
pn := prev.DefaultNetwork.OpenShiftSDNConfig
nn := next.DefaultNetwork.OpenShiftSDNConfig
errs := []error{}
if reflect.DeepEqual(pn, nn) && reflect.DeepEqual(prev.Migration, next.Migration) {
return errs
}
if pn.Mode != nn.Mode {
errs = append(errs, errors.Errorf("cannot change openshift-sdn mode"))
}
// deepequal is nil-safe
if !reflect.DeepEqual(pn.VXLANPort, nn.VXLANPort) {
errs = append(errs, errors.Errorf("cannot change openshift-sdn vxlanPort"))
}
if next.Migration != nil && next.Migration.MTU != nil {
mtuNet := next.Migration.MTU.Network
mtuMach := next.Migration.MTU.Machine
// For MTU values provided for migration, verify that:
// - The current and target MTUs for the CNI are provided
// - The machine target MTU is provided
// - The current MTU actually matches the MTU known as current
// - The machine target MTU has a valid overhead with the CNI target MTU
sdnOverhead := uint32(50) // 50 byte VXLAN header
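// Example (hypothetical values): with Network.To = 1400, Machine.To must be
// at least 1400 + 50 = 1450 to leave room for the VXLAN encapsulation.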
if mtuNet == nil || mtuMach == nil || mtuNet.From == nil || mtuNet.To == nil || mtuMach.To == nil {
errs = append(errs, errors.Errorf("invalid Migration.MTU, at least one of the required fields is missing"))
} else {
// Only check next.Migration.MTU.Network.From when it changes
checkPrevMTU := prev.Migration == nil || prev.Migration.MTU == nil || prev.Migration.MTU.Network == nil || !reflect.DeepEqual(prev.Migration.MTU.Network.From, next.Migration.MTU.Network.From)
if checkPrevMTU && !reflect.DeepEqual(next.Migration.MTU.Network.From, pn.MTU) {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Network.From(%d) not equal to the currently applied MTU(%d)", *next.Migration.MTU.Network.From, *pn.MTU))
}
if *next.Migration.MTU.Network.To < MinMTUIPv4 || *next.Migration.MTU.Network.To > MaxMTU {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Network.To(%d), has to be in range: %d-%d", *next.Migration.MTU.Network.To, MinMTUIPv4, MaxMTU))
}
if *next.Migration.MTU.Machine.To < MinMTUIPv4 || *next.Migration.MTU.Machine.To > MaxMTU {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Machine.To(%d), has to be in range: %d-%d", *next.Migration.MTU.Machine.To, MinMTUIPv4, MaxMTU))
}
if (*next.Migration.MTU.Network.To + sdnOverhead) > *next.Migration.MTU.Machine.To {
errs = append(errs, errors.Errorf("invalid Migration.MTU.Machine.To(%d), has to be at least %d", *next.Migration.MTU.Machine.To, *next.Migration.MTU.Network.To+sdnOverhead))
}
}
} else if !reflect.DeepEqual(pn.MTU, nn.MTU) {
errs = append(errs, errors.Errorf("cannot change openshift-sdn mtu without migration"))
}
// It is allowed to change useExternalOpenvswitch and enableUnidling
return errs
}
func fillOpenShiftSDNDefaults(conf, previous *operv1.NetworkSpec, hostMTU int) {
// NOTE: If you change any defaults, and it's not a safe change to roll out
// to existing clusters, you MUST use the value from previous instead.
if conf.DeployKubeProxy == nil {
prox := false
conf.DeployKubeProxy = &prox
}
if conf.KubeProxyConfig == nil {
conf.KubeProxyConfig = &operv1.ProxyConfig{}
}
if conf.KubeProxyConfig.BindAddress == "" {
conf.KubeProxyConfig.BindAddress = "0.0.0.0"
}
if conf.KubeProxyConfig.ProxyArguments == nil {
conf.KubeProxyConfig.ProxyArguments = map[string]operv1.ProxyArgumentList{}
}
if conf.DefaultNetwork.OpenShiftSDNConfig == nil {
conf.DefaultNetwork.OpenShiftSDNConfig = &operv1.OpenShiftSDNConfig{}
}
sc := conf.DefaultNetwork.OpenShiftSDNConfig
if sc.VXLANPort == nil {
var port uint32 = 4789
sc.VXLANPort = &port
}
if sc.EnableUnidling == nil {
truth := true
sc.EnableUnidling = &truth
}
// MTU is currently the only field we pull from previous.
// If it's not supplied, we infer it by probing a node's interface via the mtu-prober job.
// However, this can never change, so we always prefer previous.
if sc.MTU == nil {
var mtu uint32
if previous != nil &&
previous.DefaultNetwork.Type == operv1.NetworkTypeOpenShiftSDN &&
previous.DefaultNetwork.OpenShiftSDNConfig != nil &&
previous.DefaultNetwork.OpenShiftSDNConfig.MTU != nil {
mtu = *previous.DefaultNetwork.OpenShiftSDNConfig.MTU
} else {
// utter paranoia
// somehow we didn't probe the MTU in the controller, but we need it.
// This might be wrong in cases where the CNO is not local (e.g. Hypershift).
if hostMTU == 0 {
log.Printf("BUG: Probed MTU wasn't supplied, but was needed. Falling back to host MTU")
hostMTU, _ = GetDefaultMTU()
if hostMTU == 0 { // this is beyond unlikely.
panic("BUG: Probed MTU wasn't supplied, host MTU invalid")
}
}
mtu = uint32(hostMTU) - 50 // 50 byte VXLAN header
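// Example (hypothetical value): a probed host MTU of 1500 yields a default
// cluster network MTU of 1450 after subtracting the VXLAN overhead.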
}
sc.MTU = &mtu
}
if sc.Mode == "" {
sc.Mode = operv1.SDNModeNetworkPolicy
}
}
func sdnPluginName(n operv1.SDNMode) string {
switch n {
case operv1.SDNModeSubnet:
return "redhat/openshift-ovs-subnet"
case operv1.SDNModeMultitenant:
return "redhat/openshift-ovs-multitenant"
case operv1.SDNModeNetworkPolicy:
return "redhat/openshift-ovs-networkpolicy"
}
return ""
}
// clusterNetwork builds the ClusterNetwork used by both the controller and the node
func clusterNetwork(conf *operv1.NetworkSpec) (string, error) {
c := conf.DefaultNetwork.OpenShiftSDNConfig
networks := []netv1.ClusterNetworkEntry{}
for _, entry := range conf.ClusterNetwork {
_, cidr, err := net.ParseCIDR(entry.CIDR) // already validated
if err != nil {
return "", err
}
_, size := cidr.Mask.Size()
hostSubnetLength := uint32(size) - entry.HostPrefix
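// Example (hypothetical values): an IPv4 entry (32-bit addresses) with
// HostPrefix 23 yields a HostSubnetLength of 32 - 23 = 9.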
networks = append(networks, netv1.ClusterNetworkEntry{CIDR: entry.CIDR, HostSubnetLength: hostSubnetLength})
}
cn := netv1.ClusterNetwork{
TypeMeta: metav1.TypeMeta{
APIVersion: "network.openshift.io/v1",
Kind: "ClusterNetwork",
},
ObjectMeta: metav1.ObjectMeta{
Name: netv1.ClusterNetworkDefault,
},
PluginName: sdnPluginName(c.Mode),
Network: networks[0].CIDR,
HostSubnetLength: networks[0].HostSubnetLength,
ClusterNetworks: networks,
ServiceNetwork: conf.ServiceNetwork[0],
VXLANPort: c.VXLANPort,
MTU: c.MTU,
}
cnBuf, err := yaml.Marshal(cn)
if err != nil {
return "", err
}
return string(cnBuf), nil
}
|
["\"RELEASE_VERSION\"", "\"SDN_IMAGE\"", "\"CNI_PLUGINS_IMAGE\"", "\"KUBE_RBAC_PROXY_IMAGE\""] |
[] |
["CNI_PLUGINS_IMAGE", "SDN_IMAGE", "RELEASE_VERSION", "KUBE_RBAC_PROXY_IMAGE"] |
[]
|
["CNI_PLUGINS_IMAGE", "SDN_IMAGE", "RELEASE_VERSION", "KUBE_RBAC_PROXY_IMAGE"]
|
go
| 4 | 0 | |
chia/daemon/server.py
|
import asyncio
import json
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, TextIO, Tuple, cast
from websockets import ConnectionClosedOK, WebSocketException, WebSocketServerProtocol, serve
from chia.cmds.init_funcs import check_keys, chia_init
from chia.cmds.passphrase_funcs import default_passphrase, using_default_passphrase
from chia.daemon.keychain_server import KeychainServer, keychain_commands
from chia.daemon.windows_signal import kill
from chia.plotters.plotters import get_available_plotters
from chia.plotting.util import add_plot_directory
from chia.server.server import ssl_context_for_root, ssl_context_for_server
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.util.chia_logging import initialize_logging
from chia.util.config import load_config
from chia.util.json_util import dict_to_json_str
from chia.util.keychain import (
Keychain,
KeyringCurrentPassphraseIsInvalid,
KeyringRequiresMigration,
passphrase_requirements,
supports_keyring_passphrase,
supports_os_passphrase_storage,
)
from chia.util.path import mkdir
from chia.util.service_groups import validate_service
from chia.util.setproctitle import setproctitle
from chia.util.ws_message import WsRpcMessage, create_payload, format_response
from chia import __version__
io_pool_exc = ThreadPoolExecutor()
try:
from aiohttp import ClientSession, web
except ModuleNotFoundError:
print("Error: Make sure to run . ./activate from the project folder before starting Chia.")
quit()
try:
import fcntl
has_fcntl = True
except ImportError:
has_fcntl = False
log = logging.getLogger(__name__)
service_plotter = "chia_plotter"
async def fetch(url: str):
async with ClientSession() as session:
try:
mozilla_root = get_mozilla_ca_crt()
ssl_context = ssl_context_for_root(mozilla_root, log=log)
response = await session.get(url, ssl=ssl_context)
if not response.ok:
log.warning("Response not OK.")
return None
return await response.text()
except Exception as e:
log.error(f"Exception while fetching {url}, exception: {e}")
return None
class PlotState(str, Enum):
SUBMITTED = "SUBMITTED"
RUNNING = "RUNNING"
REMOVING = "REMOVING"
FINISHED = "FINISHED"
class PlotEvent(str, Enum):
LOG_CHANGED = "log_changed"
STATE_CHANGED = "state_changed"
# determine if application is a script file or frozen exe
if getattr(sys, "frozen", False):
name_map = {
"chia": "chia",
"chia_wallet": "start_wallet",
"chia_full_node": "start_full_node",
"chia_harvester": "start_harvester",
"chia_farmer": "start_farmer",
"chia_introducer": "start_introducer",
"chia_timelord": "start_timelord",
"chia_timelord_launcher": "timelord_launcher",
"chia_full_node_simulator": "start_simulator",
"chia_seeder": "chia_seeder",
"chia_seeder_crawler": "chia_seeder_crawler",
"chia_seeder_dns": "chia_seeder_dns",
}
def executable_for_service(service_name: str) -> str:
application_path = os.path.dirname(sys.executable)
if sys.platform == "win32" or sys.platform == "cygwin":
executable = name_map[service_name]
path = f"{application_path}/{executable}.exe"
return path
else:
path = f"{application_path}/{name_map[service_name]}"
return path
else:
application_path = os.path.dirname(__file__)
def executable_for_service(service_name: str) -> str:
return service_name
async def ping() -> Dict[str, Any]:
response = {"success": True, "value": "pong"}
return response
class WebSocketServer:
def __init__(
self,
root_path: Path,
ca_crt_path: Path,
ca_key_path: Path,
crt_path: Path,
key_path: Path,
run_check_keys_on_unlock: bool = False,
):
self.root_path = root_path
self.log = log
self.services: Dict = dict()
self.plots_queue: List[Dict] = []
self.connections: Dict[str, List[WebSocketServerProtocol]] = dict() # service_name : [WebSocket]
self.remote_address_map: Dict[WebSocketServerProtocol, str] = dict() # socket: service_name
self.ping_job: Optional[asyncio.Task] = None
self.net_config = load_config(root_path, "config.yaml")
self.self_hostname = self.net_config["self_hostname"]
self.daemon_port = self.net_config["daemon_port"]
self.daemon_max_message_size = self.net_config.get("daemon_max_message_size", 50 * 1000 * 1000)
self.websocket_server = None
self.ssl_context = ssl_context_for_server(ca_crt_path, ca_key_path, crt_path, key_path, log=self.log)
self.shut_down = False
self.keychain_server = KeychainServer()
self.run_check_keys_on_unlock = run_check_keys_on_unlock
async def start(self):
self.log.info("Starting Daemon Server")
def master_close_cb():
asyncio.create_task(self.stop())
try:
asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
except NotImplementedError:
self.log.info("Not implemented")
self.websocket_server = await serve(
self.safe_handle,
self.self_hostname,
self.daemon_port,
max_size=self.daemon_max_message_size,
ping_interval=500,
ping_timeout=300,
ssl=self.ssl_context,
)
self.log.info("Waiting Daemon WebSocketServer closure")
def cancel_task_safe(self, task: Optional[asyncio.Task]):
if task is not None:
try:
task.cancel()
except Exception as e:
self.log.error(f"Error while canceling task.{e} {task}")
async def stop(self) -> Dict[str, Any]:
self.shut_down = True
self.cancel_task_safe(self.ping_job)
await self.exit()
if self.websocket_server is not None:
self.websocket_server.close()
return {"success": True}
async def safe_handle(self, websocket: WebSocketServerProtocol, path: str):
service_name = ""
try:
async for message in websocket:
try:
decoded = json.loads(message)
if "data" not in decoded:
decoded["data"] = {}
response, sockets_to_use = await self.handle_message(websocket, decoded)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error while handling message: {tb}")
error = {"success": False, "error": f"{e}"}
response = format_response(decoded, error)
sockets_to_use = []
if len(sockets_to_use) > 0:
for socket in sockets_to_use:
try:
await socket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
self.remove_connection(socket)
await socket.close()
except Exception as e:
tb = traceback.format_exc()
service_name = "Unknown"
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
if isinstance(e, ConnectionClosedOK):
self.log.info(f"ConnectionClosedOk. Closing websocket with {service_name} {e}")
elif isinstance(e, WebSocketException):
self.log.info(f"Websocket exception. Closing websocket with {service_name} {e} {tb}")
else:
self.log.error(f"Unexpected exception in websocket: {e} {tb}")
finally:
self.remove_connection(websocket)
await websocket.close()
def remove_connection(self, websocket: WebSocketServerProtocol):
service_name = None
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
self.remote_address_map.pop(websocket)
if service_name in self.connections:
after_removal = []
for connection in self.connections[service_name]:
if connection == websocket:
continue
else:
after_removal.append(connection)
self.connections[service_name] = after_removal
async def ping_task(self) -> None:
restart = True
await asyncio.sleep(30)
for remote_websocket, service_name in list(self.remote_address_map.items()):
if service_name in self.connections:
sockets = self.connections[service_name]
for socket in sockets:
if socket == remote_websocket:
try:
self.log.info(f"About to ping: {service_name}")
await socket.ping()
except asyncio.CancelledError:
self.log.info("Ping task received Cancel")
restart = False
break
except Exception as e:
self.log.info(f"Ping error: {e}")
self.log.warning("Ping failed, connection closed.")
self.remove_connection(socket)
await socket.close()
if restart is True:
self.ping_job = asyncio.create_task(self.ping_task())
async def handle_message(
self, websocket: WebSocketServerProtocol, message: WsRpcMessage
) -> Tuple[Optional[str], List[Any]]:
"""
This function gets called when new message is received via websocket.
"""
command = message["command"]
destination = message["destination"]
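# Messages addressed to something other than the daemon are relayed verbatim
# to every websocket registered under that destination service (if any).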
if destination != "daemon":
destination = message["destination"]
if destination in self.connections:
sockets = self.connections[destination]
return dict_to_json_str(message), sockets
return None, []
data = message["data"]
commands_with_data = [
"start_service",
"start_plotting",
"stop_plotting",
"stop_service",
"is_running",
"register_service",
]
if len(data) == 0 and command in commands_with_data:
response = {"success": False, "error": f'{command} requires "data"'}
# Keychain commands should be handled by KeychainServer
elif command in keychain_commands and supports_keyring_passphrase():
response = await self.keychain_server.handle_command(command, data)
elif command == "ping":
response = await ping()
elif command == "start_service":
response = await self.start_service(cast(Dict[str, Any], data))
elif command == "start_plotting":
response = await self.start_plotting(cast(Dict[str, Any], data))
elif command == "stop_plotting":
response = await self.stop_plotting(cast(Dict[str, Any], data))
elif command == "stop_service":
response = await self.stop_service(cast(Dict[str, Any], data))
elif command == "is_running":
response = await self.is_running(cast(Dict[str, Any], data))
elif command == "is_keyring_locked":
response = await self.is_keyring_locked()
elif command == "keyring_status":
response = await self.keyring_status()
elif command == "unlock_keyring":
response = await self.unlock_keyring(cast(Dict[str, Any], data))
elif command == "validate_keyring_passphrase":
response = await self.validate_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "migrate_keyring":
response = await self.migrate_keyring(cast(Dict[str, Any], data))
elif command == "set_keyring_passphrase":
response = await self.set_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "remove_keyring_passphrase":
response = await self.remove_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "notify_keyring_migration_completed":
response = await self.notify_keyring_migration_completed(cast(Dict[str, Any], data))
elif command == "exit":
response = await self.stop()
elif command == "register_service":
response = await self.register_service(websocket, cast(Dict[str, Any], data))
elif command == "get_status":
response = self.get_status()
elif command == "get_version":
response = self.get_version()
elif command == "get_plotters":
response = await self.get_plotters()
else:
self.log.error(f"UK>> {message}")
response = {"success": False, "error": f"unknown_command {command}"}
full_response = format_response(message, response)
return full_response, [websocket]
async def is_keyring_locked(self) -> Dict[str, Any]:
locked: bool = Keychain.is_keyring_locked()
response: Dict[str, Any] = {"success": True, "is_keyring_locked": locked}
return response
async def keyring_status(self) -> Dict[str, Any]:
passphrase_support_enabled: bool = supports_keyring_passphrase()
can_save_passphrase: bool = supports_os_passphrase_storage()
user_passphrase_is_set: bool = Keychain.has_master_passphrase() and not using_default_passphrase()
locked: bool = Keychain.is_keyring_locked()
needs_migration: bool = Keychain.needs_migration()
can_remove_legacy_keys: bool = False # Disabling GUI support for removing legacy keys post-migration
can_set_passphrase_hint: bool = True
passphrase_hint: str = Keychain.get_master_passphrase_hint() or ""
requirements: Dict[str, Any] = passphrase_requirements()
response: Dict[str, Any] = {
"success": True,
"is_keyring_locked": locked,
"passphrase_support_enabled": passphrase_support_enabled,
"can_save_passphrase": can_save_passphrase,
"user_passphrase_is_set": user_passphrase_is_set,
"needs_migration": needs_migration,
"can_remove_legacy_keys": can_remove_legacy_keys,
"can_set_passphrase_hint": can_set_passphrase_hint,
"passphrase_hint": passphrase_hint,
"passphrase_requirements": requirements,
}
return response
async def unlock_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
try:
if Keychain.master_passphrase_is_valid(key, force_reload=True):
Keychain.set_cached_master_passphrase(key)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
else:
error = "bad passphrase"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
if success and self.run_check_keys_on_unlock:
try:
self.log.info("Running check_keys now that the keyring is unlocked")
check_keys(self.root_path)
self.run_check_keys_on_unlock = False
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"check_keys failed after unlocking keyring: {e} {tb}")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def validate_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
try:
success = Keychain.master_passphrase_is_valid(key, force_reload=True)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def migrate_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]:
if Keychain.needs_migration() is False:
# If the keyring has already been migrated, we'll raise an error to the client.
# The reason for raising an error is because the migration request has side-
# effects beyond copying keys from the legacy keyring to the new keyring. The
# request may have set a passphrase and indicated that keys should be cleaned
# from the legacy keyring. If we were to return early and indicate success,
# the client and user's expectations may not match reality (were my keys
# deleted from the legacy keyring? was my passphrase set?).
return {"success": False, "error": "migration not needed"}
success: bool = False
error: Optional[str] = None
passphrase: Optional[str] = request.get("passphrase", None)
passphrase_hint: Optional[str] = request.get("passphrase_hint", None)
save_passphrase: bool = request.get("save_passphrase", False)
cleanup_legacy_keyring: bool = request.get("cleanup_legacy_keyring", False)
if passphrase is not None and type(passphrase) is not str:
return {"success": False, "error": 'expected string value for "passphrase"'}
if passphrase_hint is not None and type(passphrase_hint) is not str:
return {"success": False, "error": 'expected string value for "passphrase_hint"'}
if not Keychain.passphrase_meets_requirements(passphrase):
return {"success": False, "error": "passphrase doesn't satisfy requirements"}
if type(cleanup_legacy_keyring) is not bool:
return {"success": False, "error": 'expected bool value for "cleanup_legacy_keyring"'}
try:
Keychain.migrate_legacy_keyring(
passphrase=passphrase,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
cleanup_legacy_keyring=cleanup_legacy_keyring,
)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Legacy keyring migration failed: {e} {tb}")
error = f"keyring migration failed: {e}"
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def set_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
current_passphrase: Optional[str] = None
new_passphrase: Optional[str] = None
passphrase_hint: Optional[str] = request.get("passphrase_hint", None)
save_passphrase: bool = request.get("save_passphrase", False)
if using_default_passphrase():
current_passphrase = default_passphrase()
if Keychain.has_master_passphrase() and not current_passphrase:
current_passphrase = request.get("current_passphrase", None)
if type(current_passphrase) is not str:
return {"success": False, "error": "missing current_passphrase"}
new_passphrase = request.get("new_passphrase", None)
if type(new_passphrase) is not str:
return {"success": False, "error": "missing new_passphrase"}
if not Keychain.passphrase_meets_requirements(new_passphrase):
return {"success": False, "error": "passphrase doesn't satisfy requirements"}
try:
assert new_passphrase is not None # mypy, I love you
Keychain.set_master_passphrase(
current_passphrase,
new_passphrase,
allow_migration=False,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
)
except KeyringRequiresMigration:
error = "keyring requires migration"
except KeyringCurrentPassphraseIsInvalid:
error = "current passphrase is invalid"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Failed to set keyring passphrase: {e} {tb}")
else:
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def remove_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
current_passphrase: Optional[str] = None
if not Keychain.has_master_passphrase():
return {"success": False, "error": "passphrase not set"}
current_passphrase = request.get("current_passphrase", None)
if type(current_passphrase) is not str:
return {"success": False, "error": "missing current_passphrase"}
try:
Keychain.remove_master_passphrase(current_passphrase)
except KeyringCurrentPassphraseIsInvalid:
error = "current passphrase is invalid"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Failed to remove keyring passphrase: {e} {tb}")
else:
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def notify_keyring_migration_completed(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
Keychain.handle_migration_completed()
try:
if Keychain.master_passphrase_is_valid(key, force_reload=True):
Keychain.set_cached_master_passphrase(key)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
else:
error = "bad passphrase"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
response: Dict[str, Any] = {"success": success, "error": error}
return response
def get_status(self) -> Dict[str, Any]:
response = {"success": True, "genesis_initialized": True}
return response
def get_version(self) -> Dict[str, Any]:
response = {"success": True, "version": __version__}
return response
async def get_plotters(self) -> Dict[str, Any]:
plotters: Dict[str, Any] = get_available_plotters(self.root_path)
response: Dict[str, Any] = {"success": True, "plotters": plotters}
return response
async def _keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str):
"""
Attempt to communicate with the GUI to inform it of any keyring status changes
(e.g. keyring becomes unlocked or migration completes)
"""
websockets = self.connections.get("wallet_ui", None)
if websockets is None:
return None
if keyring_status is None:
return None
response = create_payload("keyring_status_changed", keyring_status, "daemon", destination)
for websocket in websockets:
try:
await websocket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
websockets.remove(websocket)
await websocket.close()
def keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str):
asyncio.create_task(self._keyring_status_changed(keyring_status, destination))
def plot_queue_to_payload(self, plot_queue_item, send_full_log: bool) -> Dict[str, Any]:
error = plot_queue_item.get("error")
has_error = error is not None
item = {
"id": plot_queue_item["id"],
"queue": plot_queue_item["queue"],
"size": plot_queue_item["size"],
"parallel": plot_queue_item["parallel"],
"delay": plot_queue_item["delay"],
"state": plot_queue_item["state"],
"error": str(error) if has_error else None,
"deleted": plot_queue_item["deleted"],
"log_new": plot_queue_item.get("log_new"),
}
if send_full_log:
item["log"] = plot_queue_item.get("log")
return item
def prepare_plot_state_message(self, state: PlotEvent, id):
message = {
"state": state,
"queue": self.extract_plot_queue(id),
}
return message
def extract_plot_queue(self, id=None) -> List[Dict]:
send_full_log = id is None
data = []
for item in self.plots_queue:
if id is None or item["id"] == id:
data.append(self.plot_queue_to_payload(item, send_full_log))
return data
async def _state_changed(self, service: str, message: Dict[str, Any]):
"""If id is None, send the whole state queue"""
if service not in self.connections:
return None
websockets = self.connections[service]
if message is None:
return None
response = create_payload("state_changed", message, service, "wallet_ui")
for websocket in websockets:
try:
await websocket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
websockets.remove(websocket)
await websocket.close()
def state_changed(self, service: str, message: Dict[str, Any]):
asyncio.create_task(self._state_changed(service, message))
async def _watch_file_changes(self, config, fp: TextIO, loop: asyncio.AbstractEventLoop):
id: str = config["id"]
plotter: str = config["plotter"]
final_words: List[str] = []
if plotter == "chiapos":
final_words = ["Renamed final file"]
elif plotter == "bladebit":
final_words = ["Finished plotting in"]
elif plotter == "madmax":
temp_dir = config["temp_dir"]
final_dir = config["final_dir"]
if temp_dir == final_dir:
final_words = ["Total plot creation time was"]
else:
# "Renamed final plot" if moving to a final dir on the same volume
# "Copy to <path> finished, took..." if copying to another volume
final_words = ["Renamed final plot", "finished, took"]
while True:
new_data = await loop.run_in_executor(io_pool_exc, fp.readline)
if config["state"] is not PlotState.RUNNING:
return None
if new_data not in (None, ""):
config["log"] = new_data if config["log"] is None else config["log"] + new_data
config["log_new"] = new_data
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.LOG_CHANGED, id))
if new_data:
for word in final_words:
if word in new_data:
return None
else:
await asyncio.sleep(0.5)
async def _track_plotting_progress(self, config, loop: asyncio.AbstractEventLoop):
file_path = config["out_file"]
with open(file_path, "r") as fp:
await self._watch_file_changes(config, fp, loop)
def _common_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
n = 1 if ignoreCount else request["n"] # Plot count
d = request["d"] # Final directory
r = request["r"] # Threads
f = request.get("f") # Farmer pubkey
p = request.get("p") # Pool pubkey
c = request.get("c") # Pool contract address
command_args: List[str] = []
command_args.append(f"-n{n}")
command_args.append(f"-d{d}")
command_args.append(f"-r{r}")
if f is not None:
command_args.append(f"-f{f}")
if p is not None:
command_args.append(f"-p{p}")
if c is not None:
command_args.append(f"-c{c}")
return command_args
def _chiapos_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
k = request["k"] # Plot size
t = request["t"] # Temp directory
t2 = request["t2"] # Temp2 directory
b = request["b"] # Buffer size
u = request["u"] # Buckets
a = request.get("a") # Fingerprint
e = request["e"] # Disable bitfield
x = request["x"] # Exclude final directory
override_k = request["overrideK"] # Force plot sizes < k32
command_args: List[str] = []
command_args.append(f"-k{k}")
command_args.append(f"-t{t}")
command_args.append(f"-2{t2}")
command_args.append(f"-b{b}")
command_args.append(f"-u{u}")
if a is not None:
command_args.append(f"-a{a}")
if e is True:
command_args.append("-e")
if x is True:
command_args.append("-x")
if override_k is True:
command_args.append("--override-k")
return command_args
def _bladebit_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
w = request.get("w", False) # Warm start
m = request.get("m", False) # Disable NUMA
command_args: List[str] = []
if w is True:
command_args.append("-w")
if m is True:
command_args.append("-m")
return command_args
def _madmax_plotting_command_args(self, request: Any, ignoreCount: bool, index: int) -> List[str]:
k = request["k"] # Plot size
t = request["t"] # Temp directory
t2 = request["t2"] # Temp2 directory
u = request["u"] # Buckets
v = request["v"] # Buckets for phase 3 & 4
K = request.get("K", 1) # Thread multiplier for phase 2
G = request.get("G", False) # Alternate tmpdir/tmp2dir
command_args: List[str] = []
command_args.append(f"-k{k}")
command_args.append(f"-u{u}")
command_args.append(f"-v{v}")
command_args.append(f"-K{K}")
# Handle madmax's tmptoggle option ourselves when managing GUI plotting
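# Example (hypothetical request): with G=True and distinct t/t2, plots at even
# indexes use -t<t> -2<t2> while plots at odd indexes use -t<t2> -2<t>,
# spreading temp I/O across both directories.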
if G is True and t != t2 and index % 2:
# Swap tmp and tmp2
command_args.append(f"-t{t2}")
command_args.append(f"-2{t}")
else:
command_args.append(f"-t{t}")
command_args.append(f"-2{t2}")
return command_args
def _build_plotting_command_args(self, request: Any, ignoreCount: bool, index: int) -> List[str]:
plotter: str = request.get("plotter", "chiapos")
command_args: List[str] = ["chia", "plotters", plotter]
command_args.extend(self._common_plotting_command_args(request, ignoreCount))
if plotter == "chiapos":
command_args.extend(self._chiapos_plotting_command_args(request, ignoreCount))
elif plotter == "madmax":
command_args.extend(self._madmax_plotting_command_args(request, ignoreCount, index))
elif plotter == "bladebit":
command_args.extend(self._bladebit_plotting_command_args(request, ignoreCount))
return command_args
def _is_serial_plotting_running(self, queue: str = "default") -> bool:
response = False
for item in self.plots_queue:
if item["queue"] == queue and item["parallel"] is False and item["state"] is PlotState.RUNNING:
response = True
return response
def _get_plots_queue_item(self, id: str):
config = next(item for item in self.plots_queue if item["id"] == id)
return config
def _run_next_serial_plotting(self, loop: asyncio.AbstractEventLoop, queue: str = "default"):
next_plot_id = None
if self._is_serial_plotting_running(queue) is True:
return None
for item in self.plots_queue:
if item["queue"] == queue and item["state"] is PlotState.SUBMITTED and item["parallel"] is False:
next_plot_id = item["id"]
break
if next_plot_id is not None:
loop.create_task(self._start_plotting(next_plot_id, loop, queue))
def _post_process_plotting_job(self, job: Dict[str, Any]):
id: str = job["id"]
final_dir: str = job.get("final_dir", "")
exclude_final_dir: bool = job.get("exclude_final_dir", False)
log.info(f"Post-processing plotter job with ID {id}") # lgtm [py/clear-text-logging-sensitive-data]
if exclude_final_dir is False and len(final_dir) > 0:
resolved_final_dir: str = str(Path(final_dir).resolve())
config = load_config(self.root_path, "config.yaml")
plot_directories_list: str = config["harvester"]["plot_directories"]
if resolved_final_dir not in plot_directories_list:
# Adds the directory to the plot directories if it is not present
log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
add_plot_directory(self.root_path, resolved_final_dir)
async def _start_plotting(self, id: str, loop: asyncio.AbstractEventLoop, queue: str = "default"):
current_process = None
try:
log.info(f"Starting plotting with ID {id}") # lgtm [py/clear-text-logging-sensitive-data]
config = self._get_plots_queue_item(id)
if config is None:
raise Exception(f"Plot queue config with ID {id} does not exist")
state = config["state"]
if state is not PlotState.SUBMITTED:
raise Exception(f"Plot with ID {id} has no state submitted")
id = config["id"]
delay = config["delay"]
await asyncio.sleep(delay)
if config["state"] is not PlotState.SUBMITTED:
return None
service_name = config["service_name"]
command_args = config["command_args"]
# Set the -D/--connect_to_daemon flag to signify that the child should connect
# to the daemon to access the keychain
command_args.append("-D")
self.log.debug(f"command_args before launch_plotter are {command_args}")
self.log.debug(f"self.root_path before launch_plotter is {self.root_path}")
process, pid_path = launch_plotter(self.root_path, service_name, command_args, id)
current_process = process
config["state"] = PlotState.RUNNING
config["out_file"] = plotter_log_path(self.root_path, id).absolute()
config["process"] = process
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
if service_name not in self.services:
self.services[service_name] = []
self.services[service_name].append(process)
await self._track_plotting_progress(config, loop)
self.log.debug("finished tracking plotting progress. setting state to FINISHED")
config["state"] = PlotState.FINISHED
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
self._post_process_plotting_job(config)
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}") # lgtm [py/clear-text-logging-sensitive-data]
error = Exception("Start plotting failed")
config["state"] = PlotState.FINISHED
config["error"] = error
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
raise error
finally:
if current_process is not None:
self.services[service_name].remove(current_process)
current_process.wait() # prevent zombies
self._run_next_serial_plotting(loop, queue)
async def start_plotting(self, request: Dict[str, Any]):
service_name = request["service"]
plotter = request.get("plotter", "chiapos")
delay = int(request.get("delay", 0))
parallel = request.get("parallel", False)
size = request.get("k")
temp_dir = request.get("t")
final_dir = request.get("d")
exclude_final_dir = request.get("x", False)
count = int(request.get("n", 1))
queue = request.get("queue", "default")
if ("p" in request) and ("c" in request):
response = {
"success": False,
"service_name": service_name,
"error": "Choose one of pool_contract_address and pool_public_key",
}
return response
ids: List[str] = []
for k in range(count):
id = str(uuid.uuid4())
ids.append(id)
config = {
"id": id, # lgtm [py/clear-text-logging-sensitive-data]
"size": size,
"queue": queue,
"plotter": plotter,
"service_name": service_name,
"command_args": self._build_plotting_command_args(request, True, k),
"parallel": parallel,
"delay": delay * k if parallel is True else delay,
"state": PlotState.SUBMITTED,
"deleted": False,
"error": None,
"log": None,
"process": None,
"temp_dir": temp_dir,
"final_dir": final_dir,
"exclude_final_dir": exclude_final_dir,
}
self.plots_queue.append(config)
# notify GUI about new plot queue item
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
# only the first item can start when user selected serial plotting
can_start_serial_plotting = k == 0 and self._is_serial_plotting_running(queue) is False
if parallel is True or can_start_serial_plotting:
log.info(f"Plotting will start in {config['delay']} seconds")
loop = asyncio.get_event_loop()
loop.create_task(self._start_plotting(id, loop, queue))
else:
log.info("Plotting will start automatically when previous plotting finish")
response = {
"success": True,
"ids": ids,
"service_name": service_name,
}
return response
async def stop_plotting(self, request: Dict[str, Any]) -> Dict[str, Any]:
id = request["id"]
config = self._get_plots_queue_item(id)
if config is None:
return {"success": False}
id = config["id"]
state = config["state"]
process = config["process"]
queue = config["queue"]
if config["state"] is PlotState.REMOVING:
return {"success": False}
try:
run_next = False
if process is not None and state == PlotState.RUNNING:
run_next = True
config["state"] = PlotState.REMOVING
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
await kill_process(process, self.root_path, service_plotter, id)
config["state"] = PlotState.FINISHED
config["deleted"] = True
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
self.plots_queue.remove(config)
if run_next:
loop = asyncio.get_event_loop()
self._run_next_serial_plotting(loop, queue)
return {"success": True}
except Exception as e:
log.error(f"Error during killing the plot process: {e}")
config["state"] = PlotState.FINISHED
config["error"] = str(e)
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
return {"success": False}
async def start_service(self, request: Dict[str, Any]):
service_command = request["service"]
error = None
success = False
testing = False
if "testing" in request:
testing = request["testing"]
if not validate_service(service_command):
error = "unknown service"
if service_command in self.services:
service = self.services[service_command]
r = service is not None and service.poll() is None
if r is False:
self.services.pop(service_command)
error = None
else:
error = f"Service {service_command} already running"
if error is None:
try:
exe_command = service_command
if testing is True:
exe_command = f"{service_command} --testing=true"
process, pid_path = launch_service(self.root_path, exe_command)
self.services[service_command] = process
success = True
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_command}")
error = "start failed"
response = {"success": success, "service": service_command, "error": error}
return response
async def stop_service(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
result = await kill_service(self.root_path, self.services, service_name)
response = {"success": result, "service_name": service_name}
return response
async def is_running(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
if service_name == service_plotter:
processes = self.services.get(service_name)
is_running = processes is not None and len(processes) > 0
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
else:
process = self.services.get(service_name)
is_running = process is not None and process.poll() is None
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
return response
async def exit(self) -> Dict[str, Any]:
jobs = []
for k in self.services.keys():
jobs.append(kill_service(self.root_path, self.services, k))
if jobs:
await asyncio.wait(jobs)
self.services.clear()
# TODO: fix this hack
asyncio.get_event_loop().call_later(5, lambda *args: sys.exit(0))
log.info("chia daemon exiting in 5 seconds")
response = {"success": True}
return response
async def register_service(self, websocket: WebSocketServerProtocol, request: Dict[str, Any]) -> Dict[str, Any]:
self.log.info(f"Register service {request}")
service = request["service"]
if service not in self.connections:
self.connections[service] = []
self.connections[service].append(websocket)
response: Dict[str, Any] = {"success": True}
if service == service_plotter:
response = {
"success": True,
"service": service,
"queue": self.extract_plot_queue(),
}
else:
self.remote_address_map[websocket] = service
if self.ping_job is None:
self.ping_job = asyncio.create_task(self.ping_task())
self.log.info(f"registered for service {service}")
log.info(f"{response}")
return response
def daemon_launch_lock_path(root_path: Path) -> Path:
"""
A path to a file that is locked while a daemon is launching but not yet started.
This prevents multiple instances from launching.
"""
return root_path / "run" / "start-daemon.launching"
def service_launch_lock_path(root_path: Path, service: str) -> Path:
"""
A path to a file that is locked while a service is running.
"""
service_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{service_name}.lock"
def pid_path_for_service(root_path: Path, service: str, id: str = "") -> Path:
"""
Generate a path for a PID file for the given service name.
"""
pid_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{pid_name}{id}.pid"
def plotter_log_path(root_path: Path, id: str):
return root_path / "plotter" / f"plotter_log_{id}.txt"
def launch_plotter(root_path: Path, service_name: str, service_array: List[str], id: str):
# we need to pass on the possibly altered CHIA_ROOT
os.environ["CHIA_ROOT"] = str(root_path)
service_executable = executable_for_service(service_array[0])
# Swap service name with name of executable
service_array[0] = service_executable
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
# Windows-specific.
# If the current process group is used, CTRL_C_EVENT will kill the parent and everyone in the group!
try:
creationflags: int = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore
except AttributeError: # Not on Windows.
creationflags = 0
plotter_path = plotter_log_path(root_path, id)
if plotter_path.parent.exists():
if plotter_path.exists():
plotter_path.unlink()
else:
mkdir(plotter_path.parent)
outfile = open(plotter_path.resolve(), "w")
log.info(f"Service array: {service_array}") # lgtm [py/clear-text-logging-sensitive-data]
process = subprocess.Popen(
service_array,
shell=False,
stderr=outfile,
stdout=outfile,
startupinfo=startupinfo,
creationflags=creationflags,
)
pid_path = pid_path_for_service(root_path, service_name, id)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
"""
Launch a child process.
"""
# set up CHIA_ROOT
# invoke correct script
# save away PID
# we need to pass on the possibly altered CHIA_ROOT
os.environ["CHIA_ROOT"] = str(root_path)
log.debug(f"Launching service with CHIA_ROOT: {os.environ['CHIA_ROOT']}")
# Insert proper executable for the service
service_array = service_command.split()
service_executable = executable_for_service(service_array[0])
service_array[0] = service_executable
if service_command == "chia_full_node_simulator":
# Set the -D/--connect_to_daemon flag to signify that the child should connect
# to the daemon to access the keychain
service_array.append("-D")
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
# CREATE_NEW_PROCESS_GROUP allows graceful shutdown on windows, by CTRL_BREAK_EVENT signal
if sys.platform == "win32" or sys.platform == "cygwin":
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
else:
creationflags = 0
environ_copy = os.environ.copy()
process = subprocess.Popen(
service_array, shell=False, startupinfo=startupinfo, creationflags=creationflags, env=environ_copy
)
pid_path = pid_path_for_service(root_path, service_command)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
async def kill_process(
process: subprocess.Popen, root_path: Path, service_name: str, id: str, delay_before_kill: int = 15
) -> bool:
pid_path = pid_path_for_service(root_path, service_name, id)
if sys.platform == "win32" or sys.platform == "cygwin":
log.info("sending CTRL_BREAK_EVENT signal to %s", service_name)
# pylint: disable=E1101
kill(process.pid, signal.SIGBREAK)
else:
log.info("sending term signal to %s", service_name)
process.terminate()
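# Poll for up to delay_before_kill seconds; the while-loop's else branch runs
# only if the loop was never broken, i.e. the process never exited, in which
# case we escalate to a hard kill.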
count: float = 0
while count < delay_before_kill:
if process.poll() is not None:
break
await asyncio.sleep(0.5)
count += 0.5
else:
process.kill()
log.info("sending kill signal to %s", service_name)
r = process.wait()
log.info("process %s returned %d", service_name, r)
try:
pid_path_killed = pid_path.with_suffix(".pid-killed")
if pid_path_killed.exists():
pid_path_killed.unlink()
os.rename(pid_path, pid_path_killed)
except Exception:
pass
return True
async def kill_service(
root_path: Path, services: Dict[str, subprocess.Popen], service_name: str, delay_before_kill: int = 15
) -> bool:
process = services.get(service_name)
if process is None:
return False
del services[service_name]
result = await kill_process(process, root_path, service_name, "", delay_before_kill)
return result
def is_running(services: Dict[str, subprocess.Popen], service_name: str) -> bool:
process = services.get(service_name)
return process is not None and process.poll() is None
def create_server_for_daemon(root_path: Path):
routes = web.RouteTableDef()
services: Dict = dict()
@routes.get("/daemon/ping/")
async def ping(request: web.Request) -> web.Response:
return web.Response(text="pong")
@routes.get("/daemon/service/start/")
async def start_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None or not validate_service(service_name):
r = f"{service_name} unknown service"
return web.Response(text=str(r))
if is_running(services, service_name):
r = f"{service_name} already running"
return web.Response(text=str(r))
try:
process, pid_path = launch_service(root_path, service_name)
services[service_name] = process
r = f"{service_name} started"
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}")
r = f"{service_name} start failed"
return web.Response(text=str(r))
@routes.get("/daemon/service/stop/")
async def stop_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(await kill_service(root_path, services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/service/is_running/")
async def is_running_handler(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(is_running(services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/exit/")
async def exit(request: web.Request):
jobs = []
for k in services.keys():
jobs.append(kill_service(root_path, services, k))
if jobs:
await asyncio.wait(jobs)
services.clear()
# we can't await `site.stop()` here because that will cause a deadlock, waiting for this
# request to exit
def singleton(lockfile: Path, text: str = "semaphore") -> Optional[TextIO]:
"""
Open a lockfile exclusively.
"""
if not lockfile.parent.exists():
mkdir(lockfile.parent)
try:
if has_fcntl:
f = open(lockfile, "w")
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
else:
if lockfile.exists():
lockfile.unlink()
fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
f = open(fd, "w")
f.write(text)
except IOError:
return None
return f
async def async_run_daemon(root_path: Path, wait_for_unlock: bool = False) -> int:
# When wait_for_unlock is true, we want to skip the check_keys() call in chia_init
# since it might be necessary to wait for the GUI to unlock the keyring first.
chia_init(root_path, should_check_keys=(not wait_for_unlock))
config = load_config(root_path, "config.yaml")
setproctitle("chia_daemon")
initialize_logging("daemon", config["logging"], root_path)
lockfile = singleton(daemon_launch_lock_path(root_path))
crt_path = root_path / config["daemon_ssl"]["private_crt"]
key_path = root_path / config["daemon_ssl"]["private_key"]
ca_crt_path = root_path / config["private_ssl_ca"]["crt"]
ca_key_path = root_path / config["private_ssl_ca"]["key"]
sys.stdout.flush()
json_msg = dict_to_json_str(
{
"message": "cert_path",
"success": True,
"cert": f"{crt_path}",
"key": f"{key_path}",
"ca_crt": f"{ca_crt_path}",
}
)
sys.stdout.write("\n" + json_msg + "\n")
sys.stdout.flush()
if lockfile is None:
print("daemon: already launching")
return 2
# TODO: clean this up, ensuring lockfile isn't removed until the listen port is open
create_server_for_daemon(root_path)
ws_server = WebSocketServer(
root_path, ca_crt_path, ca_key_path, crt_path, key_path, run_check_keys_on_unlock=wait_for_unlock
)
await ws_server.start()
assert ws_server.websocket_server is not None
await ws_server.websocket_server.wait_closed()
log.info("Daemon WebSocketServer closed")
# sys.stdout.close()
return 0
def run_daemon(root_path: Path, wait_for_unlock: bool = False) -> int:
result = asyncio.get_event_loop().run_until_complete(async_run_daemon(root_path, wait_for_unlock))
return result
def main(argv) -> int:
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.keychain import Keychain
wait_for_unlock = "--wait-for-unlock" in argv and Keychain.is_keyring_locked()
return run_daemon(DEFAULT_ROOT_PATH, wait_for_unlock)
if __name__ == "__main__":
main(sys.argv[1:])
|
[] |
[] |
[
"CHIA_ROOT"
] |
[]
|
["CHIA_ROOT"]
|
python
| 1 | 0 | |
supports/pyload/src/pyload/core/network/cookie_jar.py
|
# -*- coding: utf-8 -*-
# AUTHOR: mkaay, RaNaN
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if name in self.cookies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
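# Illustrative sketch (not from the original source): cookies are stored as
# tab-separated, Netscape-style lines keyed by cookie name, so a set/get
# round trip looks like:
#
#     cj = CookieJar("someplugin")          # plugin name is arbitrary here
#     cj.set_cookie("example.com", "session", "abc123")
#     cj.get_cookie("session")              # -> "abc123" (7th tab field)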
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
vendor/github.com/fatih/color/color.go
|
package color
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
NoColor = os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
Output = colorable.NewColorableStdout()
// Error defines a color supporting writer for os.Stderr.
Error = colorable.NewColorableStderr()
// colorsCache is used to reduce the count of created Color objects and
// allows reuse of already created objects with the required Attribute.
colorsCache = make(map[Attribute]*Color)
colorsCacheMu sync.Mutex // protects colorsCache
)
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
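// Illustrative sketch (not part of the original package): Set and Unset are
// meant to be paired, typically via defer, so everything printed in between
// is colorized:
//
//	color.Set(color.FgYellow, color.Bold)
//	defer color.Unset()
//	fmt.Println("printed in bold yellow until Unset runs")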
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
func (c *Color) setWriter(w io.Writer) *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(w, c.format())
return c
}
func (c *Color) unsetWriter(w io.Writer) {
if c.isNoColorSet() {
return
}
if NoColor {
return
}
fmt.Fprintf(w, "%s[%dm", escape, Reset)
}
// Add is used to chain SGR parameters. Use as many parameters as needed to
// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprint(w, a...)
}
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintf(w, format, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintln(w, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprint(w, a...)
}
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Print(a...)
}
}
// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
return func(w io.Writer, format string, a ...interface{}) {
c.Fprintf(w, format, a...)
}
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
c.Printf(format, a...)
}
}
// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprintln(w, a...)
}
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Println(a...)
}
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the string s with the color's attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful when you want to keep existing
// code unchanged but still control whether output is colorized, e.g. behind flags
// like "--no-color". To enable it again, use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
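// Illustrative sketch (not from the original source): colorization can be
// suppressed globally via NoColor or per Color value, e.g. behind an assumed
// --no-color flag:
//
//	c := color.New(color.FgCyan)
//	if *noColorFlag { // hypothetical flag, for illustration only
//		c.DisableColor() // or color.NoColor = true for the global switch
//	}
//	c.Println("colorized unless disabled")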
func (c *Color) isNoColorSet() bool {
// check first whether a per-color setting was made by the user
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
func colorPrint(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
} else {
c.Printf(format, a...)
}
}
func colorString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return colorString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
// newline is appended to format by default.
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
// newline is appended to format by default.
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
// newline is appended to format by default.
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
// A newline is appended to format by default.
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
// newline is appended to format by default.
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
// A newline is appended to format by default.
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
// newline is appended to format by default.
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
// newline is appended to format by default.
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
// HiBlackString is a convenient helper function to return a string with hi-intensity black
// foreground.
func HiBlackString(format string, a ...interface{}) string {
return colorString(format, FgHiBlack, a...)
}
// HiRedString is a convenient helper function to return a string with hi-intensity red
// foreground.
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
// HiGreenString is a convenient helper function to return a string with hi-intensity green
// foreground.
func HiGreenString(format string, a ...interface{}) string {
return colorString(format, FgHiGreen, a...)
}
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
// foreground.
func HiYellowString(format string, a ...interface{}) string {
return colorString(format, FgHiYellow, a...)
}
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
// foreground.
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
// foreground.
func HiMagentaString(format string, a ...interface{}) string {
return colorString(format, FgHiMagenta, a...)
}
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
// foreground.
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
// foreground.
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
vendor/golang.org/x/net/http2/http2.go
|
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package http2 implements the HTTP/2 protocol.
//
// This package is low-level and intended to be used directly by very
// few people. Most users will use it indirectly through the automatic
// use by the net/http package (from Go 1.6 and later).
// For use in earlier Go versions see ConfigureServer. (Transport support
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
//
package http2
import (
"bufio"
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
"sort"
"strconv"
"strings"
"sync"
"golang.org/x/net/http/httpguts"
)
var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") {
VerboseLogs = true
}
if strings.Contains(e, "http2debug=2") {
VerboseLogs = true
logFrameWrites = true
logFrameReads = true
}
}
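// Illustrative note (not part of the original file): the switches above are
// driven entirely by the GODEBUG environment variable at process start, e.g.
//
//	GODEBUG=http2debug=1 ./yourserver   // verbose HTTP/2 logs
//	GODEBUG=http2debug=2 ./yourserver   // verbose logs plus frame reads/writes
//
// where ./yourserver stands in for any program importing this package.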
const (
// ClientPreface is the string that must be sent by new
// connections from clients.
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
// SETTINGS_MAX_FRAME_SIZE default
// http://http2.github.io/http2-spec/#rfc.section.6.5.2
initialMaxFrameSize = 16384
// NextProtoTLS is the NPN/ALPN protocol negotiated during
// HTTP/2's TLS setup.
NextProtoTLS = "h2"
// http://http2.github.io/http2-spec/#SettingValues
initialHeaderTableSize = 4096
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
defaultMaxReadFrameSize = 1 << 20
)
var (
clientPreface = []byte(ClientPreface)
)
type streamState int
// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
stateIdle streamState = iota
stateOpen
stateHalfClosedLocal
stateHalfClosedRemote
stateClosed
)
var stateName = [...]string{
stateIdle: "Idle",
stateOpen: "Open",
stateHalfClosedLocal: "HalfClosedLocal",
stateHalfClosedRemote: "HalfClosedRemote",
stateClosed: "Closed",
}
func (st streamState) String() string {
return stateName[st]
}
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
// ID is which setting is being set.
// See http://http2.github.io/http2-spec/#SettingValues
ID SettingID
// Val is the value.
Val uint32
}
func (s Setting) String() string {
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
// Valid reports whether the setting is valid.
func (s Setting) Valid() error {
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
switch s.ID {
case SettingEnablePush:
if s.Val != 1 && s.Val != 0 {
return ConnectionError(ErrCodeProtocol)
}
case SettingInitialWindowSize:
if s.Val > 1<<31-1 {
return ConnectionError(ErrCodeFlowControl)
}
case SettingMaxFrameSize:
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
}
return nil
}
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16
const (
SettingHeaderTableSize SettingID = 0x1
SettingEnablePush SettingID = 0x2
SettingMaxConcurrentStreams SettingID = 0x3
SettingInitialWindowSize SettingID = 0x4
SettingMaxFrameSize SettingID = 0x5
SettingMaxHeaderListSize SettingID = 0x6
)
var settingName = map[SettingID]string{
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
SettingEnablePush: "ENABLE_PUSH",
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
SettingMaxFrameSize: "MAX_FRAME_SIZE",
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
}
func (s SettingID) String() string {
if v, ok := settingName[s]; ok {
return v
}
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
func validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
if !httpguts.IsTokenRune(r) {
return false
}
if 'A' <= r && r <= 'Z' {
return false
}
}
return true
}
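// For example (illustrative only): validWireHeaderFieldName("content-type")
// returns true, while "Content-Type" (contains uppercase) and "" (empty)
// return false.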
func httpCodeString(code int) string {
switch code {
case 200:
return "200"
case 404:
return "404"
}
return strconv.Itoa(code)
}
// from pkg io
type stringWriter interface {
WriteString(s string) (n int, err error)
}
// A gate lets two goroutines coordinate their activities.
type gate chan struct{}
func (g gate) Done() { g <- struct{}{} }
func (g gate) Wait() { <-g }
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}
// Init makes a closeWaiter usable.
// It exists so that a closeWaiter value can be placed inside a
// larger struct and have its memory live in the same allocation.
func (cw *closeWaiter) Init() {
*cw = make(chan struct{})
}
// Close marks the closeWaiter as closed and unblocks any waiters.
func (cw closeWaiter) Close() {
close(cw)
}
// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
<-cw
}
// bufferedWriter is a buffered writer that writes to w.
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10
var bufWriterPool = sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
},
}
func (w *bufferedWriter) Available() int {
if w.bw == nil {
return bufWriterPoolBufferSize
}
return w.bw.Available()
}
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
bw.Reset(w.w)
w.bw = bw
}
return w.bw.Write(p)
}
func (w *bufferedWriter) Flush() error {
bw := w.bw
if bw == nil {
return nil
}
err := bw.Flush()
bw.Reset(nil)
bufWriterPool.Put(bw)
w.bw = nil
return err
}
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
}
return uint32(v)
}
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == 204:
return false
case status == 304:
return false
}
return true
}
type httpError struct {
_ incomparable
msg string
timeout bool
}
func (e *httpError) Error() string { return e.msg }
func (e *httpError) Timeout() bool { return e.timeout }
func (e *httpError) Temporary() bool { return true }
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
type connectionStater interface {
ConnectionState() tls.ConnectionState
}
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
type sorter struct {
v []string // owned by sorter
}
func (s *sorter) Len() int { return len(s.v) }
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s is used again or returned to
// its pool.
func (s *sorter) Keys(h http.Header) []string {
keys := s.v[:0]
for k := range h {
keys = append(keys, k)
}
s.v = keys
sort.Sort(s)
return keys
}
func (s *sorter) SortStrings(ss []string) {
// Our sorter works on s.v, which sorter owns, so
// stash it away while we sort the user's buffer.
save := s.v
s.v = ss
sort.Sort(s)
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
internal/extsvc/bitbucketserver/client_test.go
|
package bitbucketserver
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"reflect"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sergi/go-diff/diffmatchpatch"
)
var update = flag.Bool("update", false, "update testdata")
func TestParseQueryStrings(t *testing.T) {
for _, tc := range []struct {
name string
qs []string
vals url.Values
err string
}{
{
name: "ignores query separator",
qs: []string{"?foo=bar&baz=boo"},
vals: url.Values{"foo": {"bar"}, "baz": {"boo"}},
},
{
name: "ignores query separator by itself",
qs: []string{"?"},
vals: url.Values{},
},
{
name: "preserves multiple values",
qs: []string{"?foo=bar&foo=baz", "foo=boo"},
vals: url.Values{"foo": {"bar", "baz", "boo"}},
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.err == "" {
tc.err = "<nil>"
}
vals, err := parseQueryStrings(tc.qs...)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if have, want := vals, tc.vals; !reflect.DeepEqual(have, want) {
t.Error(cmp.Diff(have, want))
}
})
}
}
func TestUserFilters(t *testing.T) {
for _, tc := range []struct {
name string
fs UserFilters
qry url.Values
}{
{
name: "last one wins",
fs: UserFilters{
{Filter: "admin"},
{Filter: "tomas"}, // Last one wins
},
qry: url.Values{"filter": []string{"tomas"}},
},
{
name: "filters can be combined",
fs: UserFilters{
{Filter: "admin"},
{Group: "admins"},
},
qry: url.Values{
"filter": []string{"admin"},
"group": []string{"admins"},
},
},
{
name: "permissions",
fs: UserFilters{
{
Permission: PermissionFilter{
Root: PermProjectAdmin,
ProjectKey: "ORG",
},
},
{
Permission: PermissionFilter{
Root: PermRepoWrite,
ProjectKey: "ORG",
RepositorySlug: "foo",
},
},
},
qry: url.Values{
"permission.1": []string{"PROJECT_ADMIN"},
"permission.1.projectKey": []string{"ORG"},
"permission.2": []string{"REPO_WRITE"},
"permission.2.projectKey": []string{"ORG"},
"permission.2.repositorySlug": []string{"foo"},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
have := make(url.Values)
tc.fs.EncodeTo(have)
if want := tc.qry; !reflect.DeepEqual(have, want) {
t.Error(cmp.Diff(have, want))
}
})
}
}
func TestClient_Users(t *testing.T) {
cli, save := NewTestClient(t, "Users", *update)
defer save()
timeout, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
defer cancel()
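// Illustrative note (not in the original test): the deadline above is deliberately
// one second in the past, so any request made with this context fails immediately
// with "context deadline exceeded", which the "timeout" cases below assert on.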
users := map[string]*User{
"admin": {
Name: "admin",
EmailAddress: "[email protected]",
ID: 1,
DisplayName: "admin",
Active: true,
Slug: "admin",
Type: "NORMAL",
},
"john": {
Name: "john",
EmailAddress: "[email protected]",
ID: 52,
DisplayName: "John Doe",
Active: true,
Slug: "john",
Type: "NORMAL",
},
}
for _, tc := range []struct {
name string
ctx context.Context
page *PageToken
filters []UserFilter
users []*User
next *PageToken
err string
}{
{
name: "timeout",
ctx: timeout,
err: "context deadline exceeded",
},
{
name: "pagination: first page",
page: &PageToken{Limit: 1},
users: []*User{users["admin"]},
next: &PageToken{
Size: 1,
Limit: 1,
NextPageStart: 1,
},
},
{
name: "pagination: last page",
page: &PageToken{
Size: 1,
Limit: 1,
NextPageStart: 1,
},
users: []*User{users["john"]},
next: &PageToken{
Size: 1,
Start: 1,
Limit: 1,
IsLastPage: true,
},
},
{
name: "filter by substring match in username, name and email address",
page: &PageToken{Limit: 1000},
filters: []UserFilter{{Filter: "Doe"}}, // matches "John Doe" in name
users: []*User{users["john"]},
next: &PageToken{
Size: 1,
Limit: 1000,
IsLastPage: true,
},
},
{
name: "filter by group",
page: &PageToken{Limit: 1000},
filters: []UserFilter{{Group: "admins"}},
users: []*User{users["admin"]},
next: &PageToken{
Size: 1,
Limit: 1000,
IsLastPage: true,
},
},
{
name: "filter by multiple ANDed permissions",
page: &PageToken{Limit: 1000},
filters: []UserFilter{
{
Permission: PermissionFilter{
Root: PermSysAdmin,
},
},
{
Permission: PermissionFilter{
Root: PermRepoRead,
ProjectKey: "ORG",
RepositorySlug: "foo",
},
},
},
users: []*User{users["admin"]},
next: &PageToken{
Size: 1,
Limit: 1000,
IsLastPage: true,
},
},
{
name: "multiple filters are ANDed",
page: &PageToken{Limit: 1000},
filters: []UserFilter{
{
Filter: "admin",
},
{
Permission: PermissionFilter{
Root: PermRepoRead,
ProjectKey: "ORG",
RepositorySlug: "foo",
},
},
},
users: []*User{users["admin"]},
next: &PageToken{
Size: 1,
Limit: 1000,
IsLastPage: true,
},
},
{
name: "maximum 50 permission filters",
page: &PageToken{Limit: 1000},
filters: func() (fs UserFilters) {
for i := 0; i < 51; i++ {
fs = append(fs, UserFilter{
Permission: PermissionFilter{
Root: PermSysAdmin,
},
})
}
return fs
}(),
err: ErrUserFiltersLimit.Error(),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
users, next, err := cli.Users(tc.ctx, tc.page, tc.filters...)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Errorf("error:\nhave: %q\nwant: %q", have, want)
}
if have, want := next, tc.next; !reflect.DeepEqual(have, want) {
t.Error(cmp.Diff(have, want))
}
if have, want := users, tc.users; !reflect.DeepEqual(have, want) {
t.Error(cmp.Diff(have, want))
}
})
}
}
func TestClient_LoadPullRequest(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
instanceURL = "http://127.0.0.1:7990"
}
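// Illustrative note (not part of the original test): ${INSTANCEURL} in the
// expected error strings below is substituted with this value, so pointing
// BITBUCKET_SERVER_URL at the instance used when the fixtures were recorded
// keeps the golden files consistent.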
cli, save := NewTestClient(t, "PullRequests", *update)
defer save()
timeout, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
defer cancel()
pr := &PullRequest{ID: 2}
pr.ToRef.Repository.Slug = "vegeta"
pr.ToRef.Repository.Project.Key = "SOUR"
for _, tc := range []struct {
name string
ctx context.Context
pr func() *PullRequest
err string
}{
{
name: "timeout",
pr: func() *PullRequest { return pr },
ctx: timeout,
err: "context deadline exceeded",
},
{
name: "repo not set",
pr: func() *PullRequest { return &PullRequest{ID: 2} },
err: "repository slug empty",
},
{
name: "project not set",
pr: func() *PullRequest {
pr := &PullRequest{ID: 2}
pr.ToRef.Repository.Slug = "vegeta"
return pr
},
err: "project key empty",
},
{
name: "non existing pr",
pr: func() *PullRequest {
pr := &PullRequest{ID: 9999}
pr.ToRef.Repository.Slug = "vegeta"
pr.ToRef.Repository.Project.Key = "SOUR"
return pr
},
err: "Bitbucket API HTTP error: code=404 url=\"${INSTANCEURL}/rest/api/1.0/projects/SOUR/repos/vegeta/pull-requests/9999\" body=\"{\\\"errors\\\":[{\\\"context\\\":null,\\\"message\\\":\\\"Pull request 9999 does not exist in SOUR/vegeta.\\\",\\\"exceptionName\\\":\\\"com.atlassian.bitbucket.pull.NoSuchPullRequestException\\\"}]}\"",
},
{
name: "non existing repo",
pr: func() *PullRequest {
pr := &PullRequest{ID: 9999}
pr.ToRef.Repository.Slug = "invalidslug"
pr.ToRef.Repository.Project.Key = "SOUR"
return pr
},
err: "Bitbucket API HTTP error: code=404 url=\"${INSTANCEURL}/rest/api/1.0/projects/SOUR/repos/invalidslug/pull-requests/9999\" body=\"{\\\"errors\\\":[{\\\"context\\\":null,\\\"message\\\":\\\"Repository SOUR/invalidslug does not exist.\\\",\\\"exceptionName\\\":\\\"com.atlassian.bitbucket.repository.NoSuchRepositoryException\\\"}]}\"",
},
{
name: "success",
pr: func() *PullRequest { return pr },
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
pr := tc.pr()
err := cli.LoadPullRequest(tc.ctx, pr)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Fatalf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil || tc.err != "<nil>" {
return
}
data, err := json.MarshalIndent(pr, " ", " ")
if err != nil {
t.Fatal(err)
}
path := "testdata/golden/LoadPullRequest-" + strings.Replace(tc.name, " ", "-", -1)
if *update {
if err = ioutil.WriteFile(path, data, 0640); err != nil {
t.Fatalf("failed to update golden file %q: %s", path, err)
}
}
golden, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("failed to read golden file %q: %s", path, err)
}
if have, want := string(data), string(golden); have != want {
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(have, want, false)
t.Error(dmp.DiffPrettyText(diffs))
}
})
}
}
func TestClient_CreatePullRequest(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
instanceURL = "http://127.0.0.1:7990"
}
timeout, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
defer cancel()
pr := &PullRequest{}
pr.Title = "This is a test PR"
pr.Description = "This is a test PR. Feel free to ignore."
pr.ToRef.Repository.Slug = "automation-testing"
pr.ToRef.Repository.Project.Key = "SOUR"
pr.ToRef.ID = "refs/heads/master"
pr.FromRef.Repository.Slug = "automation-testing"
pr.FromRef.Repository.Project.Key = "SOUR"
pr.FromRef.ID = "refs/heads/test-pr-bbs-1"
for _, tc := range []struct {
name string
ctx context.Context
pr func() *PullRequest
err string
}{
{
name: "timeout",
pr: func() *PullRequest { return pr },
ctx: timeout,
err: "context deadline exceeded",
},
{
name: "ToRef repo not set",
pr: func() *PullRequest {
pr := *pr
pr.ToRef.Repository.Slug = ""
return &pr
},
err: "ToRef repository slug empty",
},
{
name: "ToRef project not set",
pr: func() *PullRequest {
pr := *pr
pr.ToRef.Repository.Project.Key = ""
return &pr
},
err: "ToRef project key empty",
},
{
name: "ToRef ID not set",
pr: func() *PullRequest {
pr := *pr
pr.ToRef.ID = ""
return &pr
},
err: "ToRef id empty",
},
{
name: "FromRef repo not set",
pr: func() *PullRequest {
pr := *pr
pr.FromRef.Repository.Slug = ""
return &pr
},
err: "FromRef repository slug empty",
},
{
name: "FromRef project not set",
pr: func() *PullRequest {
pr := *pr
pr.FromRef.Repository.Project.Key = ""
return &pr
},
err: "FromRef project key empty",
},
{
name: "FromRef ID not set",
pr: func() *PullRequest {
pr := *pr
pr.FromRef.ID = ""
return &pr
},
err: "FromRef id empty",
},
{
name: "success",
pr: func() *PullRequest {
pr := *pr
pr.FromRef.ID = "refs/heads/test-pr-bbs-3"
return &pr
},
},
{
name: "pull request already exists",
pr: func() *PullRequest {
pr := *pr
pr.FromRef.ID = "refs/heads/always-open-pr-bbs"
return &pr
},
err: ErrAlreadyExists.Error(),
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
name := "CreatePullRequest-" + strings.Replace(tc.name, " ", "-", -1)
cli, save := NewTestClient(t, name, *update)
defer save()
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
pr := tc.pr()
err := cli.CreatePullRequest(tc.ctx, pr)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Fatalf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil || tc.err != "<nil>" {
return
}
data, err := json.MarshalIndent(pr, " ", " ")
if err != nil {
t.Fatal(err)
}
path := "testdata/golden/" + name
if *update {
if err = ioutil.WriteFile(path, data, 0640); err != nil {
t.Fatalf("failed to update golden file %q: %s", path, err)
}
}
golden, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("failed to read golden file %q: %s", path, err)
}
if have, want := string(data), string(golden); have != want {
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(have, want, false)
t.Error(dmp.DiffPrettyText(diffs))
}
})
}
}
func TestClient_LoadPullRequestActivities(t *testing.T) {
instanceURL := os.Getenv("BITBUCKET_SERVER_URL")
if instanceURL == "" {
instanceURL = "http://127.0.0.1:7990"
}
cli, save := NewTestClient(t, "PullRequestActivities", *update)
defer save()
timeout, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
defer cancel()
pr := &PullRequest{ID: 2}
pr.ToRef.Repository.Slug = "vegeta"
pr.ToRef.Repository.Project.Key = "SOUR"
for _, tc := range []struct {
name string
ctx context.Context
pr func() *PullRequest
err string
}{
{
name: "timeout",
pr: func() *PullRequest { return pr },
ctx: timeout,
err: "context deadline exceeded",
},
{
name: "repo not set",
pr: func() *PullRequest { return &PullRequest{ID: 2} },
err: "repository slug empty",
},
{
name: "project not set",
pr: func() *PullRequest {
pr := &PullRequest{ID: 2}
pr.ToRef.Repository.Slug = "vegeta"
return pr
},
err: "project key empty",
},
{
name: "success",
pr: func() *PullRequest { return pr },
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
if tc.ctx == nil {
tc.ctx = context.Background()
}
if tc.err == "" {
tc.err = "<nil>"
}
tc.err = strings.ReplaceAll(tc.err, "${INSTANCEURL}", instanceURL)
pr := tc.pr()
err := cli.LoadPullRequestActivities(tc.ctx, pr)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Fatalf("error:\nhave: %q\nwant: %q", have, want)
}
if err != nil || tc.err != "<nil>" {
return
}
data, err := json.MarshalIndent(pr, " ", " ")
if err != nil {
t.Fatal(err)
}
path := "testdata/golden/LoadPullRequestActivities-" + strings.Replace(tc.name, " ", "-", -1)
if *update {
if err = ioutil.WriteFile(path, data, 0640); err != nil {
t.Fatalf("failed to update golden file %q: %s", path, err)
}
}
golden, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("failed to read golden file %q: %s", path, err)
}
if have, want := string(data), string(golden); have != want {
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(have, want, false)
t.Error(dmp.DiffPrettyText(diffs))
}
})
}
}
|
[
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_URL\""
] |
[] |
[
"BITBUCKET_SERVER_URL"
] |
[]
|
["BITBUCKET_SERVER_URL"]
|
go
| 1 | 0 | |
cmd/tink-cli/cmd/template.go
|
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/tinkerbell/tink/cmd/tink-cli/cmd/get"
"github.com/tinkerbell/tink/cmd/tink-cli/cmd/template"
)
func NewTemplateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "template",
Short: "tink template client",
Example: "tink template [command]",
Args: func(c *cobra.Command, args []string) error {
if len(args) == 0 {
return fmt.Errorf("%v requires arguments", c.UseLine())
}
return nil
},
}
cmd.AddCommand(template.NewCreateCommand())
cmd.AddCommand(template.NewDeleteCommand())
cmd.AddCommand(template.NewListCommand())
cmd.AddCommand(template.NewUpdateCommand())
// Use the new get command unless the TINK_CLI_VERSION environment variable is set
// to 0.0.0; setting it to 0.0.0 keeps the old get command for retro-compatibility.
getCmd := template.GetCmd
if v := os.Getenv("TINK_CLI_VERSION"); v != "0.0.0" {
getCmd = get.NewGetCommand(template.NewGetOptions())
}
cmd.AddCommand(getCmd)
return cmd
}
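// Illustrative note (not from the original source): with the logic above the new
// get implementation is the default, and exporting TINK_CLI_VERSION=0.0.0 before
// running `tink template get ...` keeps the legacy GetCmd instead.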
|
[
"\"TINK_CLI_VERSION\""
] |
[] |
[
"TINK_CLI_VERSION"
] |
[]
|
["TINK_CLI_VERSION"]
|
go
| 1 | 0 | |
vendor/github.com/hashicorp/terraform/vendor/github.com/terraform-providers/terraform-provider-aws/aws/resource_aws_opsworks_stack.go
|
package aws
import (
"fmt"
"log"
"os"
"strings"
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/opsworks"
)
func resourceAwsOpsworksStack() *schema.Resource {
return &schema.Resource{
Create: resourceAwsOpsworksStackCreate,
Read: resourceAwsOpsworksStackRead,
Update: resourceAwsOpsworksStackUpdate,
Delete: resourceAwsOpsworksStackDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"agent_version": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"id": {
Type: schema.TypeString,
Computed: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"region": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"service_role_arn": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"default_instance_profile_arn": {
Type: schema.TypeString,
Required: true,
},
"color": {
Type: schema.TypeString,
Optional: true,
},
"configuration_manager_name": {
Type: schema.TypeString,
Optional: true,
Default: "Chef",
},
"configuration_manager_version": {
Type: schema.TypeString,
Optional: true,
Default: "11.10",
},
"manage_berkshelf": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"berkshelf_version": {
Type: schema.TypeString,
Optional: true,
Default: "3.2.0",
},
"custom_cookbooks_source": {
Type: schema.TypeList,
Optional: true,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"type": {
Type: schema.TypeString,
Required: true,
},
"url": {
Type: schema.TypeString,
Required: true,
},
"username": {
Type: schema.TypeString,
Optional: true,
},
"password": {
Type: schema.TypeString,
Optional: true,
Sensitive: true,
},
"revision": {
Type: schema.TypeString,
Optional: true,
},
"ssh_key": {
Type: schema.TypeString,
Optional: true,
},
},
},
},
"custom_json": {
Type: schema.TypeString,
Optional: true,
},
"default_availability_zone": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"default_os": {
Type: schema.TypeString,
Optional: true,
Default: "Ubuntu 12.04 LTS",
},
"default_root_device_type": {
Type: schema.TypeString,
Optional: true,
Default: "instance-store",
},
"default_ssh_key_name": {
Type: schema.TypeString,
Optional: true,
},
"default_subnet_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"hostname_theme": {
Type: schema.TypeString,
Optional: true,
Default: "Layer_Dependent",
},
"tags": tagsSchema(),
"use_custom_cookbooks": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"use_opsworks_security_groups": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"vpc_id": {
Type: schema.TypeString,
ForceNew: true,
Computed: true,
Optional: true,
},
"stack_endpoint": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func resourceAwsOpsworksStackValidate(d *schema.ResourceData) error {
cookbooksSourceCount := d.Get("custom_cookbooks_source.#").(int)
if cookbooksSourceCount > 1 {
return fmt.Errorf("Only one custom_cookbooks_source is permitted")
}
vpcId := d.Get("vpc_id").(string)
if vpcId != "" {
if d.Get("default_subnet_id").(string) == "" {
return fmt.Errorf("default_subnet_id must be set if vpc_id is set")
}
} else {
if d.Get("default_availability_zone").(string) == "" {
return fmt.Errorf("either vpc_id or default_availability_zone must be set")
}
}
return nil
}
func resourceAwsOpsworksStackCustomCookbooksSource(d *schema.ResourceData) *opsworks.Source {
count := d.Get("custom_cookbooks_source.#").(int)
if count == 0 {
return nil
}
return &opsworks.Source{
Type: aws.String(d.Get("custom_cookbooks_source.0.type").(string)),
Url: aws.String(d.Get("custom_cookbooks_source.0.url").(string)),
Username: aws.String(d.Get("custom_cookbooks_source.0.username").(string)),
Password: aws.String(d.Get("custom_cookbooks_source.0.password").(string)),
Revision: aws.String(d.Get("custom_cookbooks_source.0.revision").(string)),
SshKey: aws.String(d.Get("custom_cookbooks_source.0.ssh_key").(string)),
}
}
func resourceAwsOpsworksSetStackCustomCookbooksSource(d *schema.ResourceData, v *opsworks.Source) {
nv := make([]interface{}, 0, 1)
if v != nil && v.Type != nil && *v.Type != "" {
m := make(map[string]interface{})
if v.Type != nil {
m["type"] = *v.Type
}
if v.Url != nil {
m["url"] = *v.Url
}
if v.Username != nil {
m["username"] = *v.Username
}
if v.Revision != nil {
m["revision"] = *v.Revision
}
// v.Password will, on read, contain the placeholder string
// "*****FILTERED*****", so we ignore it on read and let the value
// already in the state persist.
nv = append(nv, m)
}
err := d.Set("custom_cookbooks_source", nv)
if err != nil {
// should never happen
panic(err)
}
}
func resourceAwsOpsworksStackRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
var conErr error
if v := d.Get("stack_endpoint").(string); v != "" {
client, conErr = opsworksConnForRegion(v, meta)
if conErr != nil {
return conErr
}
}
req := &opsworks.DescribeStacksInput{
StackIds: []*string{
aws.String(d.Id()),
},
}
log.Printf("[DEBUG] Reading OpsWorks stack: %s", d.Id())
// notFound represents the number of times we've called DescribeStacks looking
// for this Stack. If it's not found in the default region we're in, we
// check us-east-1 in the event this stack was created with Terraform before
// version 0.9
// See https://github.com/hashicorp/terraform/issues/12842
var notFound int
var resp *opsworks.DescribeStacksOutput
var dErr error
for {
resp, dErr = client.DescribeStacks(req)
if dErr != nil {
if awserr, ok := dErr.(awserr.Error); ok {
if awserr.Code() == "ResourceNotFoundException" {
if notFound < 1 {
// If we haven't already, try us-east-1, legacy connection
notFound++
var connErr error
client, connErr = opsworksConnForRegion("us-east-1", meta)
if connErr != nil {
return connErr
}
// start again from the top of the FOR loop, but with a client
// configured to talk to us-east-1
continue
}
// We've tried both the original and us-east-1 endpoint, and the stack
// is still not found
log.Printf("[DEBUG] OpsWorks stack (%s) not found", d.Id())
d.SetId("")
return nil
}
// not ResourceNotFoundException, fall through to returning the error
}
return dErr
}
// If the stack was found, set the stack_endpoint
if client.Config.Region != nil && *client.Config.Region != "" {
log.Printf("[DEBUG] Setting stack_endpoint for (%s) to (%s)", d.Id(), *client.Config.Region)
if err := d.Set("stack_endpoint", *client.Config.Region); err != nil {
log.Printf("[WARN] Error setting stack_endpoint: %s", err)
}
}
log.Printf("[DEBUG] Breaking stack endpoint search, found stack for (%s)", d.Id())
// Break the FOR loop
break
}
stack := resp.Stacks[0]
d.Set("arn", stack.Arn)
d.Set("agent_version", stack.AgentVersion)
d.Set("name", stack.Name)
d.Set("region", stack.Region)
d.Set("default_instance_profile_arn", stack.DefaultInstanceProfileArn)
d.Set("service_role_arn", stack.ServiceRoleArn)
d.Set("default_availability_zone", stack.DefaultAvailabilityZone)
d.Set("default_os", stack.DefaultOs)
d.Set("default_root_device_type", stack.DefaultRootDeviceType)
d.Set("default_ssh_key_name", stack.DefaultSshKeyName)
d.Set("default_subnet_id", stack.DefaultSubnetId)
d.Set("hostname_theme", stack.HostnameTheme)
d.Set("use_custom_cookbooks", stack.UseCustomCookbooks)
if stack.CustomJson != nil {
d.Set("custom_json", stack.CustomJson)
}
d.Set("use_opsworks_security_groups", stack.UseOpsworksSecurityGroups)
d.Set("vpc_id", stack.VpcId)
if color, ok := stack.Attributes["Color"]; ok {
d.Set("color", color)
}
if stack.ConfigurationManager != nil {
d.Set("configuration_manager_name", stack.ConfigurationManager.Name)
d.Set("configuration_manager_version", stack.ConfigurationManager.Version)
}
if stack.ChefConfiguration != nil {
d.Set("berkshelf_version", stack.ChefConfiguration.BerkshelfVersion)
d.Set("manage_berkshelf", stack.ChefConfiguration.ManageBerkshelf)
}
resourceAwsOpsworksSetStackCustomCookbooksSource(d, stack.CustomCookbooksSource)
return nil
}
// opsworksConn will return a connection for the stack_endpoint in the
// configuration. Stacks can only be accessed or managed within the endpoint
// in which they are created, so we allow users to specify an original endpoint
// for Stacks created before multiple endpoints were offered (Terraform v0.9.0).
// See:
// - https://github.com/hashicorp/terraform/pull/12688
// - https://github.com/hashicorp/terraform/issues/12842
func opsworksConnForRegion(region string, meta interface{}) (*opsworks.OpsWorks, error) {
originalConn := meta.(*AWSClient).opsworksconn
// Regions are the same, no need to reconfigure
if originalConn.Config.Region != nil && *originalConn.Config.Region == region {
return originalConn, nil
}
// Set up base session
sess, err := session.NewSession(&originalConn.Config)
if err != nil {
return nil, errwrap.Wrapf("Error creating AWS session: {{err}}", err)
}
sess.Handlers.Build.PushBackNamed(addTerraformVersionToUserAgent)
if extraDebug := os.Getenv("TERRAFORM_AWS_AUTHFAILURE_DEBUG"); extraDebug != "" {
sess.Handlers.UnmarshalError.PushFrontNamed(debugAuthFailure)
}
newSession := sess.Copy(&aws.Config{Region: aws.String(region)})
newOpsworksconn := opsworks.New(newSession)
log.Printf("[DEBUG] Returning new OpsWorks client")
return newOpsworksconn, nil
}
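// Illustrative note (not part of the provider source): setting the
// TERRAFORM_AWS_AUTHFAILURE_DEBUG environment variable to any non-empty value
// makes opsworksConnForRegion attach the extra debugAuthFailure handler to the
// session it builds, e.g.
//
//	TERRAFORM_AWS_AUTHFAILURE_DEBUG=1 terraform plan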
func resourceAwsOpsworksStackCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
err := resourceAwsOpsworksStackValidate(d)
if err != nil {
return err
}
req := &opsworks.CreateStackInput{
DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
Name: aws.String(d.Get("name").(string)),
Region: aws.String(d.Get("region").(string)),
ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
DefaultOs: aws.String(d.Get("default_os").(string)),
UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)),
}
req.ConfigurationManager = &opsworks.StackConfigurationManager{
Name: aws.String(d.Get("configuration_manager_name").(string)),
Version: aws.String(d.Get("configuration_manager_version").(string)),
}
inVpc := false
if vpcId, ok := d.GetOk("vpc_id"); ok {
req.VpcId = aws.String(vpcId.(string))
inVpc = true
}
if defaultSubnetId, ok := d.GetOk("default_subnet_id"); ok {
req.DefaultSubnetId = aws.String(defaultSubnetId.(string))
}
if defaultAvailabilityZone, ok := d.GetOk("default_availability_zone"); ok {
req.DefaultAvailabilityZone = aws.String(defaultAvailabilityZone.(string))
}
if defaultRootDeviceType, ok := d.GetOk("default_root_device_type"); ok {
req.DefaultRootDeviceType = aws.String(defaultRootDeviceType.(string))
}
log.Printf("[DEBUG] Creating OpsWorks stack: %s", req)
var resp *opsworks.CreateStackOutput
err = resource.Retry(20*time.Minute, func() *resource.RetryError {
var cerr error
resp, cerr = client.CreateStack(req)
if cerr != nil {
if opserr, ok := cerr.(awserr.Error); ok {
// If Terraform is also managing the service IAM role,
// it may have just been created and not yet be
// propagated.
// AWS doesn't provide a machine-readable code for this
// specific error, so we're forced to do fragile message
// matching.
// The full error we're looking for looks something like
// the following:
// Service Role Arn: [...] is not yet propagated, please try again in a couple of minutes
propErr := "not yet propagated"
trustErr := "not the necessary trust relationship"
validateErr := "validate IAM role permission"
if opserr.Code() == "ValidationException" && (strings.Contains(opserr.Message(), trustErr) || strings.Contains(opserr.Message(), propErr) || strings.Contains(opserr.Message(), validateErr)) {
log.Printf("[INFO] Waiting for service IAM role to propagate")
return resource.RetryableError(cerr)
}
}
return resource.NonRetryableError(cerr)
}
return nil
})
if err != nil {
return err
}
stackId := *resp.StackId
d.SetId(stackId)
d.Set("id", stackId)
if inVpc && *req.UseOpsworksSecurityGroups {
// For VPC-based stacks, OpsWorks asynchronously creates some default
// security groups which must exist before layers can be created.
// Unfortunately it doesn't tell us what the ids of these are, so
// we can't actually check for them. Instead, we just wait a nominal
// amount of time for their creation to complete.
log.Print("[INFO] Waiting for OpsWorks built-in security groups to be created")
time.Sleep(30 * time.Second)
}
return resourceAwsOpsworksStackUpdate(d, meta)
}
func resourceAwsOpsworksStackUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
var conErr error
if v := d.Get("stack_endpoint").(string); v != "" {
client, conErr = opsworksConnForRegion(v, meta)
if conErr != nil {
return conErr
}
}
err := resourceAwsOpsworksStackValidate(d)
if err != nil {
return err
}
req := &opsworks.UpdateStackInput{
CustomJson: aws.String(d.Get("custom_json").(string)),
DefaultInstanceProfileArn: aws.String(d.Get("default_instance_profile_arn").(string)),
DefaultRootDeviceType: aws.String(d.Get("default_root_device_type").(string)),
DefaultSshKeyName: aws.String(d.Get("default_ssh_key_name").(string)),
Name: aws.String(d.Get("name").(string)),
ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
StackId: aws.String(d.Id()),
UseCustomCookbooks: aws.Bool(d.Get("use_custom_cookbooks").(bool)),
UseOpsworksSecurityGroups: aws.Bool(d.Get("use_opsworks_security_groups").(bool)),
Attributes: make(map[string]*string),
CustomCookbooksSource: resourceAwsOpsworksStackCustomCookbooksSource(d),
}
if v, ok := d.GetOk("agent_version"); ok {
req.AgentVersion = aws.String(v.(string))
}
if v, ok := d.GetOk("default_os"); ok {
req.DefaultOs = aws.String(v.(string))
}
if v, ok := d.GetOk("default_subnet_id"); ok {
req.DefaultSubnetId = aws.String(v.(string))
}
if v, ok := d.GetOk("default_availability_zone"); ok {
req.DefaultAvailabilityZone = aws.String(v.(string))
}
if v, ok := d.GetOk("hostname_theme"); ok {
req.HostnameTheme = aws.String(v.(string))
}
if v, ok := d.GetOk("color"); ok {
req.Attributes["Color"] = aws.String(v.(string))
}
arn := arn.ARN{
Partition: meta.(*AWSClient).partition,
Region: meta.(*AWSClient).region,
Service: "opsworks",
AccountID: meta.(*AWSClient).accountid,
Resource: fmt.Sprintf("stack/%s/", d.Id()),
}
if tagErr := setTagsOpsworks(client, d, arn.String()); tagErr != nil {
return tagErr
}
req.ChefConfiguration = &opsworks.ChefConfiguration{
BerkshelfVersion: aws.String(d.Get("berkshelf_version").(string)),
ManageBerkshelf: aws.Bool(d.Get("manage_berkshelf").(bool)),
}
req.ConfigurationManager = &opsworks.StackConfigurationManager{
Name: aws.String(d.Get("configuration_manager_name").(string)),
Version: aws.String(d.Get("configuration_manager_version").(string)),
}
log.Printf("[DEBUG] Updating OpsWorks stack: %s", req)
_, err = client.UpdateStack(req)
if err != nil {
return err
}
return resourceAwsOpsworksStackRead(d, meta)
}
func resourceAwsOpsworksStackDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*AWSClient).opsworksconn
var conErr error
if v := d.Get("stack_endpoint").(string); v != "" {
client, conErr = opsworksConnForRegion(v, meta)
if conErr != nil {
return conErr
}
}
req := &opsworks.DeleteStackInput{
StackId: aws.String(d.Id()),
}
log.Printf("[DEBUG] Deleting OpsWorks stack: %s", d.Id())
_, err := client.DeleteStack(req)
if err != nil {
return err
}
// For a stack in a VPC, OpsWorks has created some default security groups
// in the VPC, which it will now delete.
// Unfortunately, the security groups are deleted asynchronously and there
// is no robust way for us to determine when it is done. The VPC itself
// isn't deletable until the security groups are cleaned up, so this could
// make 'terraform destroy' fail if the VPC is also managed and we don't
// wait for the security groups to be deleted.
// There is no robust way to check for this, so we'll just wait a
// nominal amount of time.
_, inVpc := d.GetOk("vpc_id")
	_, useOpsworksDefaultSg := d.GetOk("use_opsworks_security_groups")
if inVpc && useOpsworksDefaultSg {
log.Print("[INFO] Waiting for Opsworks built-in security groups to be deleted")
time.Sleep(30 * time.Second)
}
return nil
}
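
// isRetryableOpsworksIAMError is an illustrative sketch, not part of the
// original provider code: it isolates the fragile message matching done in
// resourceAwsOpsworksStackCreate above, reporting whether a CreateStack error
// merely means the service IAM role has not propagated yet.
func isRetryableOpsworksIAMError(err error) bool {
	opserr, ok := err.(awserr.Error)
	if !ok || opserr.Code() != "ValidationException" {
		return false
	}
	msg := opserr.Message()
	return strings.Contains(msg, "not yet propagated") ||
		strings.Contains(msg, "not the necessary trust relationship") ||
		strings.Contains(msg, "validate IAM role permission")
}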
|
[
"\"TERRAFORM_AWS_AUTHFAILURE_DEBUG\""
] |
[] |
[
"TERRAFORM_AWS_AUTHFAILURE_DEBUG"
] |
[]
|
["TERRAFORM_AWS_AUTHFAILURE_DEBUG"]
|
go
| 1 | 0 | |
processors/scrape_special_entities.py
|
import os
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import csv
def prepare(driver):
driver.get("http://www.misim.gov.il/mm_lelorasham/firstPage.aspx")
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "RadioBakasha1"))
)
element.click()
slugs = {u" \u05ea\u05d0\u05d2\u05d9\u05d3\u05d9 \u05d4\u05d0\u05d6\u05d5\u05e8 (\u05d9\u05d5''\u05e9 )": 'west_bank_corporation',
u'\u05d0\u05d9\u05d2\u05d5\u05d3\u05d9 \u05e2\u05e8\u05d9\u05dd': 'conurbation',
u'\u05d0\u05d9\u05d2\u05d5\u05d3\u05d9\u05dd \u05de\u05e7\u05e6\u05d5\u05e2\u05d9\u05d9\u05dd': 'professional_association',
u'\u05d2\u05d5\u05e4\u05d9\u05dd \u05e2"\u05e4 \u05d3\u05d9\u05df': 'law_mandated_organization',
u'\u05d4\u05e7\u05d3\u05e9 \u05d1\u05d9\u05ea \u05d3\u05d9\u05df \u05d3\u05ea\u05d9': 'religious_court_sacred_property',
u'\u05d5\u05d5\u05e2\u05d3\u05d9\u05dd \u05de\u05e7\u05d5\u05de\u05d9\u05d9\u05dd \u05d1\u05d9\u05e9\u05d5\u05d1\u05d9\u05dd': 'local_community_committee',
u'\u05d5\u05e2\u05d3\u05d5\u05ea \u05de\u05e7\u05d5\u05de\u05d9\u05d5\u05ea \u05dc\u05ea\u05db\u05e0\u05d5\u05df': 'local_planning_committee',
u'\u05d5\u05e2\u05d3\u05d9 \u05d1\u05ea\u05d9\u05dd': 'house_committee',
u'\u05d7\u05d1\u05e8\u05d5\u05ea \u05d7\u05d5\u05e5 \u05dc\u05d0 \u05e8\u05e9\u05d5\u05de\u05d5\u05ea': 'foreign_company',
u'\u05de\u05e9\u05e8\u05d3\u05d9 \u05de\u05de\u05e9\u05dc\u05d4': 'government_office',
u'\u05e0\u05e6\u05d9\u05d2\u05d5\u05d9\u05d5\u05ea \u05d6\u05e8\u05d5\u05ea': 'foreign_representative',
u'\u05e7\u05d5\u05e4\u05d5\u05ea \u05d2\u05de\u05dc': 'provident_fund',
u'\u05e8\u05d5\u05d1\u05e2\u05d9\u05dd \u05e2\u05d9\u05e8\u05d5\u05e0\u05d9\u05d9\u05dd': 'municipal_precinct',
u'\u05e8\u05e9\u05d5\u05d9\u05d5\u05ea \u05de\u05e7\u05d5\u05de\u05d9\u05d5\u05ea': 'municipality',
u'\u05e8\u05e9\u05d5\u05d9\u05d5\u05ea \u05e0\u05d9\u05e7\u05d5\u05d6': 'drainage_authority',
u'\u05e8\u05e9\u05d9\u05de\u05d5\u05ea \u05dc\u05e8\u05e9\u05d5\u05d9\u05d5\u05ea \u05d4\u05de\u05e7\u05d5\u05de\u05d9\u05d5\u05ea': 'municipal_parties',
u'\u05e9\u05d9\u05e8\u05d5\u05ea\u05d9 \u05d1\u05e8\u05d9\u05d0\u05d5\u05ea': 'health_service',
u'\u05e9\u05d9\u05e8\u05d5\u05ea\u05d9 \u05d3\u05ea': 'religion_service'}
if __name__=="__main__":
driver = webdriver.PhantomJS(service_args=['--ignore-ssl-errors=true'],executable_path=os.environ.get('PHANTOMJS_PATH'))
driver.set_window_size(1200, 800)
prepare(driver)
select = driver.find_element_by_id("DropdownlistSugYeshut")
options = {}
for option in select.find_elements_by_tag_name('option')[1:]:
options[option.get_attribute('value')] = option.text
out = csv.writer(file('out.csv','w'))
for selection in options.keys():
prepare(driver)
driver.find_element_by_css_selector('option[value="%s"]' % selection).click()
driver.find_element_by_id('btnHipus').click()
data = []
while True:
print slugs[options[selection]]
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "#dgReshima tr.row1 "))
)
rows = driver.find_elements_by_css_selector('#dgReshima tr.row1, #dgReshima tr.row2')
for row in rows:
if row.get_attribute('class') in ('row1','row2'):
datum = [slugs[options[selection]]]
datum.extend([x.text.encode('utf8') for x in row.find_elements_by_tag_name('td')])
data.append(datum)
try:
nextButton = driver.find_element_by_id('btnHaba')
nextButton.click()
time.sleep(1)
except:
out.writerows(data)
break
|
[] |
[] |
[
"PHANTOMJS_PATH"
] |
[]
|
["PHANTOMJS_PATH"]
|
python
| 1 | 0 | |
mutiple_thread_sentry.py
|
import time
import os
import sys
import traceback
from datetime import datetime
from gevent import monkey
from io import StringIO
from psycogreen.gevent import patch_psycopg
import logging
from threading import Thread
from django.core.management.base import BaseCommand
from common.helpers import decstr, getLogger, dec
logger = getLogger(__name__)
def t2(args):
print('t2 args==', args)
pass
def t1(args):
print('t1 args==', args)
pass
def exception_hook(_type, _value, tb):
from common.helpers import getLogger, CustomJsonFormatter
print('hookkkkkkk')
logger = getLogger('exception_hook')
f = StringIO()
traceback.print_tb(tb, file=f)
for handler in logger.handlers:
handler.setFormatter(CustomJsonFormatter)
logger.exception(f.getvalue())
def thread_function1(name):
    # These had no effect here:
    #os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exchange_broker.settings")
    #sys.excepthook = exception_hook
    print("Thread %s: starting" % name)
    time.sleep(2)
    # Try to catch the exception manually
    try:
        1/0
    except Exception as e:
        print('catch exception======================')
        from raven.contrib.django.raven_compat.models import client
        client.captureException()
    finally:
        print('finally')
        #sys.excepthook(*sys.exc_info())
    print("Thread %s: finishing" % name)
def thread_function2(name):
    print("Thread %s: starting" % name)
    time.sleep(2)
    print("Thread %s: finishing" % name)
def test_thread():
    # Two options: 1. daemon=False  2. use a longer join timeout
    # Q1: does daemon=False block the main thread?
    # Q2: does a very long join timeout block the main thread?
    # A: it depends on how long the task runs; if the task loops forever (while True), the main thread blocks forever
t1 = Thread(target=thread_function1, args=(1,), daemon=True)
t2 = Thread(target=thread_function2, args=(2,), daemon=True)
t1.start()
t2.start()
print('end1')
    # An exception raised here does reach Sentry
# raise Exception('main thread')
t1.join(0.1)
#t2.join(1)
print('end2')
class Command(BaseCommand):
    """Monitoring for pending orders, merge_order and sub_order."""

    def handle(self, *args, **options):
        """Tally the pending orders."""
        test_thread()
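
# Illustrative sketch, not part of the original file: since Python 3.8 a
# process-wide threading.excepthook can forward uncaught worker-thread
# exceptions to one place (for example a Sentry client), instead of wrapping
# every thread function in try/except as thread_function1 does above.
import threading

def _report_thread_exception(args):
    # args carries exc_type, exc_value, exc_traceback and the thread object
    print('uncaught exception in %s: %r' % (args.thread.name, args.exc_value))

# Installing the hook (assumes Python >= 3.8):
threading.excepthook = _report_thread_exception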
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
gerrit/out.go
|
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"golang.org/x/build/gerrit"
"github.com/google/concourse-resources/internal/resource"
)
type outParams struct {
Repository string `json:"repository"`
Message string `json:"message"`
MessageFile string `json:"message_file"`
Labels map[string]int `json:"labels"`
}
func init() {
resource.RegisterOutFunc(out)
}
func out(req resource.OutRequest) error {
var src Source
var params outParams
err := req.Decode(&src, ¶ms)
if err != nil {
return err
}
authMan := newAuthManager(src)
defer authMan.cleanup()
// Read gerrit_version.json
var ver Version
if params.Repository == "" {
return errors.New("param repository required")
}
gerritVersionPath := filepath.Join(
req.TargetDir(), params.Repository, gerritVersionFilename)
err = ver.ReadFromFile(gerritVersionPath)
if err != nil {
return fmt.Errorf("error reading %q: %v", gerritVersionPath, err)
}
req.SetResponseVersion(ver)
// Build comment message
message := params.Message
if messageFile := params.MessageFile; messageFile != "" {
var messageBytes []byte
messageBytes, err = ioutil.ReadFile(filepath.Join(req.TargetDir(), messageFile))
if err == nil {
message = string(messageBytes)
} else {
log.Printf("error reading message file %q: %v", messageFile, err)
if message == "" {
return errors.New("no fallback message; failing")
} else {
log.Printf("using fallback message %q", message)
}
}
}
// Replace environment variables in message
var variableTokens = map[string]string{
"${BUILD_ID}": os.Getenv("BUILD_ID"),
"${BUILD_NAME}": os.Getenv("BUILD_NAME"),
"${BUILD_JOB_NAME}": os.Getenv("BUILD_JOB_NAME"),
"${BUILD_PIPELINE_NAME}": os.Getenv("BUILD_PIPELINE_NAME"),
"${BUILD_TEAM_NAME}": os.Getenv("BUILD_TEAM_NAME"),
"${ATC_EXTERNAL_URL}": os.Getenv("ATC_EXTERNAL_URL"),
}
for k, v := range variableTokens {
message = strings.Replace(message, k, v, -1)
}
// Send review
c, err := gerritClient(src, authMan)
if err != nil {
return fmt.Errorf("error setting up gerrit client: %v", err)
}
ctx := context.Background()
err = c.SetReview(ctx, ver.ChangeId, ver.Revision, gerrit.ReviewInput{
Message: message,
Labels: params.Labels,
})
if err != nil {
return fmt.Errorf("error sending review: %v", err)
}
return nil
}
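
// An illustrative sketch, not part of the original file: the out request
// decoded into outParams above might carry parameters like the following
// (all values are placeholders); the ${...} tokens are expanded from the
// Concourse build metadata environment variables listed in variableTokens.
//
//	{
//	  "repository": "my-repo",
//	  "message": "build ${BUILD_ID} of ${BUILD_PIPELINE_NAME} finished: ${ATC_EXTERNAL_URL}",
//	  "labels": {"Verified": 1}
//	}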
|
[
"\"BUILD_ID\"",
"\"BUILD_NAME\"",
"\"BUILD_JOB_NAME\"",
"\"BUILD_PIPELINE_NAME\"",
"\"BUILD_TEAM_NAME\"",
"\"ATC_EXTERNAL_URL\""
] |
[] |
[
"BUILD_NAME",
"BUILD_JOB_NAME",
"BUILD_TEAM_NAME",
"ATC_EXTERNAL_URL",
"BUILD_ID",
"BUILD_PIPELINE_NAME"
] |
[]
|
["BUILD_NAME", "BUILD_JOB_NAME", "BUILD_TEAM_NAME", "ATC_EXTERNAL_URL", "BUILD_ID", "BUILD_PIPELINE_NAME"]
|
go
| 6 | 0 | |
pkg/utils/home.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bytes"
"errors"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
)
// Home returns the home directory for the executing user.
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Home() (string, error) {
user, err := user.Current()
if nil == err {
return user.HomeDir, nil
}
// cross compile support
if "windows" == runtime.GOOS {
return homeWindows()
}
// Unix-like system, so just assume Unix
return homeUnix()
}
func homeUnix() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// If that fails, try the shell
var stdout bytes.Buffer
cmd := exec.Command("sh", "-c", "eval echo ~$USER")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func homeWindows() (string, error) {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
if home == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
}
return home, nil
}
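
// exampleHomeConfigDir is an illustrative sketch, not part of the original
// file: it shows how Home() might typically be used to derive a per-user
// directory; the ".myapp" name is a hypothetical placeholder.
func exampleHomeConfigDir() (string, error) {
	home, err := Home()
	if err != nil {
		return "", err
	}
	return home + string(os.PathSeparator) + ".myapp", nil
}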
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
backend/chatapp/chatapp/asgi.py
|
"""
ASGI config for chatapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application
import room.routing
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chatapp.settings')
application = ProtocolTypeRouter({
"http": get_asgi_application(),
"websocket": AuthMiddlewareStack(
URLRouter(
room.routing.websocket_urlpatterns
)
)
})
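
# Illustrative sketch, not part of this file: room/routing.py is assumed to
# expose websocket_urlpatterns roughly as below; the URL pattern and consumer
# name are placeholders.
#
#   from django.urls import re_path
#   from . import consumers
#
#   websocket_urlpatterns = [
#       re_path(r'ws/chat/(?P<room_name>\w+)/$', consumers.ChatConsumer.as_asgi()),
#   ]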
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
database/pgsql/pgsql_test.go
|
// Copyright 2016 clair authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pgsql
import (
"database/sql"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/pborman/uuid"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
yaml "gopkg.in/yaml.v2"
"github.com/coreos/clair/database"
"github.com/coreos/clair/pkg/pagination"
)
var (
withFixtureName, withoutFixtureName string
)
var testPaginationKey = pagination.Must(pagination.NewKey())
func genTemplateDatabase(name string, loadFixture bool) (sourceURL string, dbName string) {
config := generateTestConfig(name, loadFixture, false)
source := config.Options["source"].(string)
name, url, err := parseConnectionString(source)
if err != nil {
panic(err)
}
fixturePath := config.Options["fixturepath"].(string)
if err := createDatabase(url, name); err != nil {
panic(err)
}
// migration and fixture
db, err := sql.Open("postgres", source)
if err != nil {
panic(err)
}
// Verify database state.
if err := db.Ping(); err != nil {
panic(err)
}
// Run migrations.
if err := migrateDatabase(db); err != nil {
panic(err)
}
if loadFixture {
log.Info("pgsql: loading fixtures")
d, err := ioutil.ReadFile(fixturePath)
if err != nil {
panic(err)
}
_, err = db.Exec(string(d))
if err != nil {
panic(err)
}
}
db.Exec("UPDATE pg_database SET datistemplate=True WHERE datname=$1", name)
db.Close()
log.Info("Generated Template database ", name)
return url, name
}
func dropTemplateDatabase(url string, name string) {
db, err := sql.Open("postgres", url)
if err != nil {
panic(err)
}
if _, err := db.Exec("UPDATE pg_database SET datistemplate=False WHERE datname=$1", name); err != nil {
panic(err)
}
if err := db.Close(); err != nil {
panic(err)
}
if err := dropDatabase(url, name); err != nil {
panic(err)
}
}
func TestMain(m *testing.M) {
fURL, fName := genTemplateDatabase("fixture", true)
nfURL, nfName := genTemplateDatabase("nonfixture", false)
withFixtureName = fName
withoutFixtureName = nfName
m.Run()
dropTemplateDatabase(fURL, fName)
dropTemplateDatabase(nfURL, nfName)
}
func openCopiedDatabase(testConfig database.RegistrableComponentConfig, fixture bool) (database.Datastore, error) {
var fixtureName string
if fixture {
fixtureName = withFixtureName
} else {
fixtureName = withoutFixtureName
}
// copy the database into new database
var pg pgSQL
// Parse configuration.
pg.config = Config{
CacheSize: 16384,
}
bytes, err := yaml.Marshal(testConfig.Options)
if err != nil {
return nil, fmt.Errorf("pgsql: could not load configuration: %v", err)
}
err = yaml.Unmarshal(bytes, &pg.config)
if err != nil {
return nil, fmt.Errorf("pgsql: could not load configuration: %v", err)
}
dbName, pgSourceURL, err := parseConnectionString(pg.config.Source)
if err != nil {
return nil, err
}
// Create database.
if pg.config.ManageDatabaseLifecycle {
if err = copyDatabase(pgSourceURL, dbName, fixtureName); err != nil {
return nil, err
}
}
// Open database.
pg.DB, err = sql.Open("postgres", pg.config.Source)
fmt.Println("database", pg.config.Source)
if err != nil {
pg.Close()
return nil, fmt.Errorf("pgsql: could not open database: %v", err)
}
return &pg, nil
}
// copyDatabase creates a new database called name as a copy of the template database templateName.
func copyDatabase(url, name string, templateName string) error {
// Open database.
db, err := sql.Open("postgres", url)
if err != nil {
return fmt.Errorf("pgsql: could not open 'postgres' database for creation: %v", err)
}
defer db.Close()
// Create database with copy
_, err = db.Exec("CREATE DATABASE " + name + " WITH TEMPLATE " + templateName)
if err != nil {
return fmt.Errorf("pgsql: could not create database: %v", err)
}
return nil
}
func openDatabaseForTest(testName string, loadFixture bool) (*pgSQL, error) {
var (
db database.Datastore
err error
testConfig = generateTestConfig(testName, loadFixture, true)
)
db, err = openCopiedDatabase(testConfig, loadFixture)
if err != nil {
return nil, err
}
datastore := db.(*pgSQL)
return datastore, nil
}
func generateTestConfig(testName string, loadFixture bool, manageLife bool) database.RegistrableComponentConfig {
dbName := "test_" + strings.ToLower(testName) + "_" + strings.Replace(uuid.New(), "-", "_", -1)
var fixturePath string
if loadFixture {
_, filename, _, _ := runtime.Caller(0)
fixturePath = filepath.Join(filepath.Dir(filename)) + "/testdata/data.sql"
}
	source := fmt.Sprintf("postgresql://postgres@127.0.0.1:5432/%s?sslmode=disable", dbName)
if sourceEnv := os.Getenv("CLAIR_TEST_PGSQL"); sourceEnv != "" {
source = fmt.Sprintf(sourceEnv, dbName)
}
log.Infof("pagination key for current test: %s", testPaginationKey.String())
return database.RegistrableComponentConfig{
Options: map[string]interface{}{
"source": source,
"cachesize": 0,
"managedatabaselifecycle": manageLife,
"fixturepath": fixturePath,
"paginationkey": testPaginationKey.String(),
},
}
}
func closeTest(t *testing.T, store database.Datastore, session database.Session) {
err := session.Rollback()
if err != nil {
t.Error(err)
t.FailNow()
}
store.Close()
}
func openSessionForTest(t *testing.T, name string, loadFixture bool) (*pgSQL, *pgSession) {
store, err := openDatabaseForTest(name, loadFixture)
if err != nil {
t.Error(err)
t.FailNow()
}
tx, err := store.Begin()
if err != nil {
t.Error(err)
t.FailNow()
}
log.Infof("transaction pagination key: '%s'", tx.(*pgSession).key.String())
return store, tx.(*pgSession)
}
func restartSession(t *testing.T, datastore *pgSQL, tx *pgSession, commit bool) *pgSession {
var err error
if !commit {
err = tx.Rollback()
} else {
err = tx.Commit()
}
if assert.Nil(t, err) {
session, err := datastore.Begin()
if assert.Nil(t, err) {
return session.(*pgSession)
}
}
t.FailNow()
return nil
}
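
// testSessionSketch is an illustrative sketch, not part of the original file:
// it shows how the helpers above are meant to be combined in a test, opening
// a store and session against a fixture-loaded template copy and rolling the
// session back on exit. The test name "Sketch" is a placeholder.
func testSessionSketch(t *testing.T) {
	store, tx := openSessionForTest(t, "Sketch", true)
	defer closeTest(t, store, tx)
	// queries against tx would go here
}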
|
[
"\"CLAIR_TEST_PGSQL\""
] |
[] |
[
"CLAIR_TEST_PGSQL"
] |
[]
|
["CLAIR_TEST_PGSQL"]
|
go
| 1 | 0 | |
harambe.py
|
#! python3
# harambe.py - groupme bot to get weather, espn fantasy football scores,
# post random pictures, and troll friends
import json
import requests
import sys
import bs4
import pprint
import re
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import time
from random import randint
HOST_NAME = '0.0.0.0'
PORT_NUMBER = int(os.environ.get('PORT', 9000))
baseUrl = 'https://api.groupme.com/v3'
accessToken = '' # your access Token
tokenUrl = '?token=' + accessToken
bot_id = '' # insert your bot id
# Send HTTP POST request to post to group.
def post_group(content, pic_url):
postdo_post = '/bots/post'
resUrl = baseUrl + postdo_post
params = {'bot_id' : bot_id, 'text' : content, 'picture_url' : pic_url}
res = requests.post(resUrl, params)
res.raise_for_status()
def get_weather(city):
# uses google geocoding api to find a latitude and longitude for the city supplied
# uses the latitude and longitude in Dark Sky's weather api to get weather for the specific location
GOOGLEAPIKEY = '' # your key for Google's geocoding API
DARKSKYAPIKEY = '' # your key for Dark Sky's weather data API
city = city.replace(' ', '+') # replaces the space if state is also given e.g. 'gainesville, fl'
googlebaseURL = 'https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s' % (city, GOOGLEAPIKEY) # URL for googles geocoding api
res = requests.get(googlebaseURL)
res.raise_for_status()
geocodeData = json.loads(res.text)
geocode = geocodeData['results'][0]['geometry']['location']
latitude = geocode['lat']
longitude = geocode['lng']
darkskybaseURL = 'https://api.darksky.net/forecast/%s/%s,%s' % (DARKSKYAPIKEY, latitude, longitude)
res = requests.get(darkskybaseURL)
res.raise_for_status()
weatherData = json.loads(res.text)
degree_sign= u'\N{DEGREE SIGN}' # degree unicode character
post_group(weatherData['currently']['summary'] + ', ' + str(weatherData['currently']['apparentTemperature']) + degree_sign + 'F. ' + weatherData['hourly']['summary'] + '\n\n' + weatherData['daily']['summary'], None)
def all_league_scores():
# Posts all league scores for your ESPN fantasy football league
leagueId = '' # insert your ESPN leagueId
seasonId = '' # insert season year
scoreboardUrl = 'http://games.espn.com/ffl/scoreboard?leagueId=%s&seasonId=%s' % (leagueId, seasonId)
res = requests.get(scoreboardUrl)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
tag = soup.find_all(class_=['score', 'name', 'owners'])
message = tag[0].get_text()+': '+tag[2].get_text()+'\n'+tag[3].get_text()+': '+tag[5].get_text()+'\n\n'+tag[6].get_text()+': '+tag[8].get_text()+'\n'+tag[9].get_text()+': '+tag[11].get_text()+'\n\n'+tag[12].get_text()+': '+tag[14].get_text()+'\n'+tag[15].get_text()+': '+tag[17].get_text()+'\n\n'+tag[18].get_text()+': '+tag[20].get_text()+'\n'+tag[21].get_text()+': '+tag[23].get_text()+'\n\n'+tag[24].get_text()+': '+tag[26].get_text()+'\n'+tag[27].get_text()+': '+tag[29].get_text()+'\n\n'+tag[30].get_text()+': '+tag[32].get_text()+'\n'+tag[33].get_text()+': '+tag[35].get_text()
post_group(message, None)
def get_matchup_score(user_id):
# posts the matchup score from ESPN for the user who asks
groupMembers = {}
""" ^ dictionary with key equal to groupme userID (from API)
and value equal to members name e.g {'000000':'Walter'} """
leagueId = '' # insert your ESPN leagueId
seasonId = '' # insert season year
scoreboardUrl = 'http://games.espn.com/ffl/scoreboard?leagueId=%s&seasonId=%s' % (leagueId, seasonId)
res = requests.get(scoreboardUrl)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
scores_tag = soup.find_all(class_='score')
names_tag = soup.find_all(class_='name')
owners_tag = soup.find_all(class_='owners')
score_content_line1 = None
score_content_line2 = None
for i in range(0, 12):
if owners_tag[i].get_text().lower().split(' ')[0] == groupMembers[user_id].lower().split(' ')[0]:
score_content_line1 = names_tag[i].get_text() + ': ' + scores_tag[i].get_text()
if i in range(1, 12, 2):
score_content_line2 = names_tag[i-1].get_text() + ': ' + scores_tag[i-1].get_text()
else:
score_content_line2 = names_tag[i+1].get_text() + ': ' + scores_tag[i+1].get_text()
post_group(str(score_content_line1) + '\n' + str(score_content_line2), None)
i += 1
def get_last_message():
if 'rip harambe' in message_lower:
rip_harambe_list = ['https://img.ifcdn.com/images/ccb85b3923314524e7203fe0e4284bad6e1b01e42eda8550b9b8b7988cf6de5b_1.jpg', 'https://i.redd.it/33d6a5it8eix.png', 'http://i1.kym-cdn.com/photos/images/original/001/155/744/c2f.jpg', 'https://static1.squarespace.com/static/570a0f1f4c2f85652de746c9/570a10085559863dc7612dc9/57c8eb429de4bb1598ee2b40/1472873504170/HARAMBE+2.0+(CLEAN).jpg?format=1500w', 'https://getonfleek.com/pub/media/catalog/product/d/i/dicks_out_for_harambe_crewneck.png', 'http://i2.kym-cdn.com/photos/images/original/001/155/662/8c5.jpg', 'https://pics.onsizzle.com/trending-stephen-hawking-renowned-physicist-makes-1st-facebook-post-since-3030807.png', 'https://img.ifcdn.com/images/159f2467d9d557ab49311de6462365a2bd21804ad6ea135ca56aaa8b06599280_1.jpg', 'http://i.imgur.com/y5WoTDN.jpg']
rip_harambe_length = len(rip_harambe_list)-1
i = randint(0, rip_harambe_length)
post_group(None, rip_harambe_list[i])
elif 'harambe' in message_lower and 'my fantasy' in message_lower:
get_matchup_score(my_id)
elif 'harambe' in message_lower and 'league scores' in message_lower:
all_league_scores()
elif 'harambe' in message_lower and 'resurrect' in message_lower:
post_group(None, 'https://i.groupme.com/1200x799.jpeg.1c2ae1fd84214f9681cccfa65650bd42')
elif my_id == '': # insert a friends user_id for the bot to troll every so often
i = randint(1, 50)
if i == 25:
post_group("", None) # insert the message for the bot to post
elif 'harambe' in message_lower and 'weather' in message_lower:
message_lower_index = message_lower.index('in')
target = message_lower[message_lower_index + 3:].strip('?')
get_weather(target)
elif 'harambe' in message_lower and my_id != '13439387':
post_group(sender_name + ', come in my cage you neanderthal and see what happens.', None)
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(s):
# Respond to a post request
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes("Response success", "utf8"))
content = int(s.headers.get_all('Content-Length')[0])
post_body_bytes = s.rfile.read(content)
post_body = post_body_bytes.decode('ascii')
recentdata = json.loads(post_body)
global original_message, message_lower, sender_name_lower, my_id, sender_name
original_message = recentdata['text']
message_lower = original_message.lower()
sender_name = recentdata['name']
print(sender_name)
print(original_message)
sender_name_lower = sender_name.lower()
my_id = str(recentdata['user_id'])
print(my_id)
get_last_message()
if __name__ == '__main__':
server_class = HTTPServer
handler_class = BaseHTTPRequestHandler
server_address = (HOST_NAME, PORT_NUMBER)
httpd = server_class(server_address, RequestHandler)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
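
# Illustrative sketch, not part of the original file: instead of hard-coding
# accessToken, bot_id and the API keys above, they could be read from the
# environment the same way PORT already is; the variable names below are
# placeholders.
#
#   accessToken = os.environ.get('GROUPME_ACCESS_TOKEN', '')
#   bot_id = os.environ.get('GROUPME_BOT_ID', '')
#   GOOGLEAPIKEY = os.environ.get('GOOGLE_API_KEY', '')
#   DARKSKYAPIKEY = os.environ.get('DARKSKY_API_KEY', '')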
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
src/main/go/src/pacman/game_test.go
|
package main
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"testing"
"time"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
)
var ANSIcodes = map[string]string{}
var gameLevel *levelStruct
var testDisplay Display
var game Game
var outputStream *bytes.Buffer
func TestMain(m *testing.M) {
tags := os.Getenv("BDD")
status := godog.RunWithOptions("godogs", func(s *godog.Suite) {
FeatureContext(s)
}, godog.Options{
Format: "pretty",
Paths: []string{"features/"},
Tags: tags,
Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
})
if st := m.Run(); st > status {
status = st
}
os.Exit(status)
}
/** Givens ******************************************************/
// Given
func theGameFieldOfX(columns, rows int) error {
field := new(playField).New(rows, columns)
theGame.SetPlayfield(field)
return nil
}
//Given
func aPacmanAtFacing(x, y int, facing string) error {
var dir Direction
switch facing {
case "LEFT":
dir = LEFT
case "RIGHT":
dir = RIGHT
case "UP":
dir = UP
case "DOWN":
dir = DOWN
}
pacman := NewPacman(theGame, pacmanDirs[dir][0], Location{x, y})
theGame.SetPacman(pacman)
return nil
}
//Given
func wallsAtTheFollowingPlaces(wallSpec *gherkin.DataTable) error {
for _, row := range wallSpec.Rows {
icon := []rune(row.Cells[0].Value)
x, _ := strconv.Atoi(row.Cells[1].Value)
y, _ := strconv.Atoi(row.Cells[2].Value)
wall := NewWall(theGame, icon[0], Location{x, y})
theGame.AddWall(wall)
}
return nil
}
// Given
func theGameStateIs(input *gherkin.DocString) error {
theGame.SetInput(input.Content)
return nil
}
// Given
func theScoreIs(score int) error {
theGame.SetScore(score)
return nil
}
// Given
func theLivesAre(lives int) error {
theGame.SetLives(lives)
return nil
}
// Given
func aColourDisplay() error {
return godog.ErrPending
}
// Given
func theANSISequenceIs(sequence, hex string) error {
ANSIcodes[sequence] = hex
return nil
}
// Given
func aDisplay() error {
testDisplay = new(terminal).New(theGame)
theGame.SetDisplay(testDisplay)
return nil
}
// Given
func aGameWithLevels(levels int, levelMaps *gherkin.DocString) error {
theGame.SetInput(levelMaps.Content)
return nil
}
// Given
func thisIsLevel(level int) error {
theGame.SetLevel(level)
return nil
}
// Given
func theMaxLevelIs(maxLevel int) error {
theGame.SetMaxLevel(maxLevel)
return nil
}
// Given
func theGameUsesAnimation() error {
theGame.UseAnimation()
return nil
}
// Given
func thisIsTheLastLevel() error {
theGame.SetMaxLevel(1)
return nil
}
/** Whens ******************************************************/
// When
func weParseTheState() error {
theGame.Parse()
return nil
}
// When
func weRenderTheGame() error {
theGame.Render()
return nil
}
// When
func wePlayTurns(turns int) error {
for i := 0; i < turns; i++ {
theGame.Tick()
}
return nil
}
// When
func weRefreshTheDisplayWithTheBuffer(buffer string) error {
hexstring := fmt.Sprintf("%X", buffer)
ANSIcodes[buffer] = hexstring
testDisplay.Refresh(buffer, nil)
return nil
}
// When
func thePlayerPresses(key string) error {
theGame.KeyPress(key)
return nil
}
// When
func initializeTheDisplay() error {
testDisplay.Init(io.Writer(outputStream))
return nil
}
/** Thens ******************************************************/
// Then
func theGameFieldShouldBeX(x, y int) error {
cols, rows := theGame.Dimensions()
if (cols != x) || (rows != y) {
return fmt.Errorf("expected dimensions to be %v,%v but it is %v,%v", x, y, cols, rows)
}
return nil
}
// Then
func thePlayerHasLives(lives int) error {
if theGame.Lives() != lives {
return fmt.Errorf("expected lives to be %v, but it is %v", lives, theGame.Lives())
}
return nil
}
// Then
func thePlayerScoreIs(score int) error {
if theGame.Score() != score {
return fmt.Errorf("expected score to be %v, but it is %v", score, theGame.Score())
}
return nil
}
// Then
func pacmanIsAt(x, y int) error {
pacman := theGame.GetPacman()
loc := pacman.Location()
	if (loc.x != x) || (loc.y != y) {
return fmt.Errorf("expected pacman to be at %v,%v but it is at %v,%v", x, y, loc.x, loc.y)
}
return nil
}
// Then
func pacmanIsFacing(direction string) error {
pacman := theGame.GetPacman()
if !pacman.Direction().Equals(direction) {
return fmt.Errorf("expected pacman to be facing %v but is %v", direction, pacman.Direction())
}
return nil
}
// Then
func ghostIsAt(x, y int) error {
for _, ghost := range theGame.GetGhosts() {
loc := ghost.Location()
if loc.x == x && loc.y == y {
return nil
}
}
return fmt.Errorf("expected ghost at %v,%v but didn't find one", x, y)
}
// Then
func thenPacmanGoes(direction string) error {
pacman := theGame.GetPacman()
if !pacman.Direction().Equals(direction) {
return fmt.Errorf("expected pacman to be facing %v but is %v", direction, pacman.Direction())
}
return nil
}
// Then
func thereIsAPointPillAt(points, x, y int) error {
for _, pill := range theGame.GetPills() {
loc := pill.Location()
if loc.x == x && loc.y == y {
return nil
}
}
return fmt.Errorf("expected pill at %v,%v but didn't find one", x, y)
}
// Then
func thereIsAWallAt(x, y int) error {
for _, wall := range theGame.GetWalls() {
loc := wall.Location()
if loc.x == x && loc.y == y {
return nil
}
}
return fmt.Errorf("expected wall at %v,%v but didn't find one", x, y)
}
// Then
func thereIsAForceFieldAt(x, y int) error {
for _, wall := range theGame.GetWalls() {
loc := wall.Location()
if loc.x == x && loc.y == y && wall.IsForceField() {
return nil
}
}
return fmt.Errorf("expected force field at %v,%v but didn't find one", x, y)
}
// Then
func thereIsAGateAt(x, y int) error {
gate := theGame.GetGate()
loc := gate.Location()
if loc.x == x && loc.y == y {
return nil
}
return fmt.Errorf("expected gate at %v,%v but didn't find one", x, y)
}
// Then
func theGameScreenIs(expected *gherkin.DocString) error {
output, _ := theGame.GetOutput()
if output == expected.Content {
return nil
}
return fmt.Errorf("expected screen to be:\n======\n%v\n but was\n%v\n======", expected.Content, output)
}
// Then
func theDisplayByteStreamShouldBe(bytestream *gherkin.DataTable) error {
var bytes bytes.Buffer
for _, row := range bytestream.Rows {
for _, cell := range row.Cells {
fmt.Fprintf(&bytes, "%s", ANSIcodes[cell.Value])
}
}
expected := bytes.String()
received := fmt.Sprintf("%X", outputStream.String())
if expected == received {
return nil
}
return fmt.Errorf("\nExpected:%s\nReceived:%X", expected, received)
}
// Then
func theGameLivesShouldBe(lives int) error {
if theGame.Lives() == lives {
return nil
}
return fmt.Errorf("expected lives to be %v but was %v", lives, theGame.Lives())
}
// Then
func theGameScoreShouldBe(score int) error {
if theGame.Score() != score {
return fmt.Errorf("expected score to be %v, but it is %v", score, theGame.Score())
}
return nil
}
// Then
func pacmanIsDead() error {
pacman := theGame.GetPacman()
if pacman != nil {
if pacman.(Pacman).Alive() != false {
return fmt.Errorf("expected pacman to be dead")
}
}
return nil
}
// Then
func pacmanIsAlive() error {
pacman := theGame.GetPacman()
if pacman != nil {
if pacman.(Pacman).Alive() == false {
return fmt.Errorf("expected pacman to be alive")
}
}
return nil
}
// Then
func theGameDimensionsShouldEqualTheDisplayDimensions() error {
gX, gY := theGame.Dimensions()
dX, dY := testDisplay.Dimensions()
	if dX != gX || dY != gY {
return fmt.Errorf("expected display to be %v,%v, but it is %v,%v", gX, gY, dX, dY)
}
return nil
}
// Then
func ghostAtShouldBeCalm(x, y int) error {
for _, ghost := range theGame.GetGhosts() {
loc := ghost.Location()
if loc.x == x && loc.y == y {
if !ghost.(Ghost).IsPanicked() {
return nil
}
}
}
return fmt.Errorf("expected ghost at %v,%v to be calm", x, y)
}
// Then
func ghostAtShouldBePanicked(x, y int) error {
for _, ghost := range theGame.GetGhosts() {
loc := ghost.Location()
if loc.x == x && loc.y == y {
if ghost.(Ghost).IsPanicked() {
return nil
}
}
}
return fmt.Errorf("expected ghost at %v,%v to be calm", x, y)
}
// Feature matchers
func FeatureContext(s *godog.Suite) {
s.Step(`^the game state is$`, theGameStateIs)
s.Step(`^we parse the state$`, weParseTheState)
s.Step(`^there is a gate at (\d+) , (\d+)$`, thereIsAGateAt)
s.Step(`^pacman is at (\d+) , (\d+)$`, pacmanIsAt)
s.Step(`^the player has (\d+) lives$`, thePlayerHasLives)
s.Step(`^the player score is (\d+)$`, thePlayerScoreIs)
s.Step(`^there is a (\d+) point pill at (\d+) , (\d+)$`, thereIsAPointPillAt)
s.Step(`^pacman is facing "([^"]*)"$`, pacmanIsFacing)
s.Step(`^ghost is at (\d+) , (\d+)$`, ghostIsAt)
s.Step(`^there is a wall at (\d+) , (\d+)$`, thereIsAWallAt)
s.Step(`^the game lives should be (\d+)$`, theGameLivesShouldBe)
s.Step(`^the game score should be (\d+)$`, theGameScoreShouldBe)
s.Step(`^there is a force field at (\d+) , (\d+)$`, thereIsAForceFieldAt)
s.Step(`^the game field should be (\d+) x (\d+)$`, theGameFieldShouldBeX)
s.Step(`^the score is (\d+)$`, theScoreIs)
s.Step(`^the lives are (\d+)$`, theLivesAre)
s.Step(`^we play (\d+) turn(.*)$`, wePlayTurns)
s.Step(`^we render the game$`, weRenderTheGame)
s.Step(`^the game screen is$`, theGameScreenIs)
s.Step(`^a colour display$`, aColourDisplay)
s.Step(`^the ANSI "([^"]*)" sequence is "([^"]*)"$`, theANSISequenceIs)
s.Step(`^the display byte stream should be$`, theDisplayByteStreamShouldBe)
s.Step(`^we refresh the display with the buffer "([^"]*)"$`, weRefreshTheDisplayWithTheBuffer)
s.Step(`^a display$`, aDisplay)
s.Step(`^a game with (\d+) levels$`, aGameWithLevels)
s.Step(`^this is level (\d+)$`, thisIsLevel)
s.Step(`^the max level is (\d+)$`, theMaxLevelIs)
s.Step(`^the game uses animation$`, theGameUsesAnimation)
s.Step(`^the player presses "([^"]*)"$`, thePlayerPresses)
s.Step(`^then pacman goes "([^"]*)"$`, thenPacmanGoes)
s.Step(`^this is the last level$`, thisIsTheLastLevel)
s.Step(`^pacman is dead$`, pacmanIsDead)
s.Step(`^initialize the display$`, initializeTheDisplay)
s.Step(`^the game dimensions should equal the display dimensions$`, theGameDimensionsShouldEqualTheDisplayDimensions)
s.Step(`^the game field of (\d+) x (\d+)$`, theGameFieldOfX)
s.Step(`^a pacman at (\d+) , (\d+) facing "([^"]*)"$`, aPacmanAtFacing)
s.Step(`^walls at the following places:$`, wallsAtTheFollowingPlaces)
s.Step(`^pacman is alive$`, pacmanIsAlive)
s.Step(`^ghost at (\d+) , (\d+) should be calm$`, ghostAtShouldBeCalm)
s.Step(`^ghost at (\d+) , (\d+) should be panicked$`, ghostAtShouldBePanicked)
s.BeforeScenario(func(interface{}) {
outputStream = new(bytes.Buffer)
theGame = new(gameState).New() // clean the state before every scenario
testDisplay = new(terminal).New(nil)
theGame.SetDisplay(testDisplay)
})
s.AfterScenario(func(interface{}, error) {
return
})
}
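
// An illustrative sketch, not part of the original file: a scenario in
// features/*.feature that the step definitions above could drive might read
// as follows (coordinates and the expected outcome are placeholders).
//
//	Scenario: pacman turns when a key is pressed
//	  Given the game field of 10 x 10
//	  And a pacman at 3 , 3 facing "LEFT"
//	  When the player presses "UP"
//	  Then pacman is facing "UP"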
|
[
"\"BDD\""
] |
[] |
[
"BDD"
] |
[]
|
["BDD"]
|
go
| 1 | 0 | |
client.go
|
// Package ftp implements an FTP client.
package ftp4go
import (
"bufio"
"golang.org/x/net/proxy"
"errors"
"fmt"
"io"
"log"
"net"
"net/textproto"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// The default constants
const (
DefaultFtpPort = 21
DefaultTimeoutInMsec = 1000
CRLF = "\r\n"
BLOCK_SIZE = 8192
)
// FTP command strings
type FtpCmd int
const (
NONE_FTP_CMD FtpCmd = 0
USER_FTP_CMD FtpCmd = 1
PASSWORD_FTP_CMD FtpCmd = 2
ACCT_FTP_CMD FtpCmd = 3
ABORT_FTP_CMD FtpCmd = 4
PORT_FTP_CMD FtpCmd = 5
PASV_FTP_CMD FtpCmd = 6
TYPE_A_FTP_CMD FtpCmd = 7
NLST_FTP_CMD FtpCmd = 8
LIST_FTP_CMD FtpCmd = 9
FEAT_FTP_CMD FtpCmd = 10
OPTS_FTP_CMD FtpCmd = 11
RETR_FTP_CMD FtpCmd = 12
TYPE_I_FTP_CMD FtpCmd = 13
STORE_FTP_CMD FtpCmd = 14
RENAMEFROM_FTP_CMD FtpCmd = 15
RENAMETO_FTP_CMD FtpCmd = 16
DELETE_FTP_CMD FtpCmd = 17
CWD_FTP_CMD FtpCmd = 18
SIZE_FTP_CMD FtpCmd = 19
MKDIR_FTP_CMD FtpCmd = 20
RMDIR_FTP_CMD FtpCmd = 21
PWDIR_FTP_CMD FtpCmd = 22
CDUP_FTP_CMD FtpCmd = 23
QUIT_FTP_CMD FtpCmd = 24
MLSD_FTP_CMD FtpCmd = 25
REST_FTP_CMD FtpCmd = 26
)
const MSG_OOB = 0x1 //Process data out of band
var ftpCmdStrings = map[FtpCmd]string{
NONE_FTP_CMD: "",
USER_FTP_CMD: "USER",
PASSWORD_FTP_CMD: "PASS",
ACCT_FTP_CMD: "ACCT",
ABORT_FTP_CMD: "ABOR",
PORT_FTP_CMD: "PORT",
PASV_FTP_CMD: "PASV",
TYPE_A_FTP_CMD: "TYPE A",
NLST_FTP_CMD: "NLST",
LIST_FTP_CMD: "LIST",
MLSD_FTP_CMD: "MLSD",
FEAT_FTP_CMD: "FEAT",
OPTS_FTP_CMD: "OPTS",
RETR_FTP_CMD: "RETR",
TYPE_I_FTP_CMD: "TYPE I",
STORE_FTP_CMD: "STOR",
RENAMEFROM_FTP_CMD: "RNFR",
RENAMETO_FTP_CMD: "RNTO",
DELETE_FTP_CMD: "DELE",
CWD_FTP_CMD: "CWD",
SIZE_FTP_CMD: "SIZE",
MKDIR_FTP_CMD: "MKD",
RMDIR_FTP_CMD: "RMD",
PWDIR_FTP_CMD: "PWD",
CDUP_FTP_CMD: "CDUP",
QUIT_FTP_CMD: "QUIT",
REST_FTP_CMD: "REST",
}
// The FTP client structure, holding the connection settings:
// - host, port, timeout, debug level and proxy dialer
type FTP struct {
debugging int
Host string
Port int
file string
welcome string
passiveserver bool
logger *log.Logger
TimeoutInMsec int
textprotoConn *textproto.Conn
dialer proxy.Dialer
conn net.Conn
encoding string
}
type NameFactsLine struct {
Name string
Facts map[string]string
}
func getTimeoutInMsec(msec int) time.Time {
return time.Now().Add(time.Duration(msec) * time.Millisecond)
}
func (i FtpCmd) String() string {
if cmd, ok := ftpCmdStrings[i]; ok {
return cmd
}
panic("No cmd found")
}
func (i FtpCmd) AppendParameters(pars ...string) string {
allPars := make([]string, len(pars)+1)
allPars[0] = i.String()
var k int = 1
for _, par := range pars {
if p := strings.TrimSpace(par); len(p) > 0 {
allPars[k] = p
k++
}
}
return strings.Join(allPars[:k], " ")
}
func (ftp *FTP) writeInfo(params ...interface{}) {
if ftp.debugging >= 1 {
log.Println(params...)
}
}
// NewFTP creates a new FTP client using a debug level, default is 0, which is disabled.
// The FTP server uses the passive transfer mode by default.
//
// Debuglevel:
// 0 -> disabled
// 1 -> information
// 2 -> verbose
//
func NewFTP(debuglevel int) *FTP {
logger := log.New(os.Stdout, "", log.LstdFlags) //syslog.NewLogger(syslog.LOG_ERR, 999)
ftp := &FTP{
debugging: debuglevel,
Port: DefaultFtpPort,
logger: logger,
TimeoutInMsec: DefaultTimeoutInMsec,
passiveserver: true,
}
return ftp
}
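
// An illustrative usage sketch, not part of the original file: a typical
// session with this client; the host, credentials and file names are
// placeholders and error handling is elided.
//
//	ftp := NewFTP(0)
//	ftp.Connect("ftp.example.com", DefaultFtpPort, "")
//	ftp.Login("anonymous", "", "")
//	defer ftp.Quit()
//	ftp.DownloadFile("remote.txt", "/tmp/remote.txt", false)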
// Connect connects to the host by using the specified port or the default one if the value is <=0.
func (ftp *FTP) Connect(host string, port int, socks5ProxyUrl string) (resp *Response, err error) {
if len(host) == 0 {
return nil, errors.New("The host must be specified")
}
ftp.Host = host
	if port <= 0 {
		port = DefaultFtpPort
	}
	ftp.Port = port // store the port that will actually be dialled
	addr := fmt.Sprintf("%s:%d", ftp.Host, ftp.Port)
	// use the system proxy if empty
if socks5ProxyUrl == "" {
ftp.writeInfo("using environment proxy, url: ", os.Getenv("all_proxy"))
ftp.dialer = proxy.FromEnvironment()
} else {
ftp.dialer = proxy.Direct
if u, err1 := url.Parse(socks5ProxyUrl); err1 == nil {
p, err2 := proxy.FromURL(u, proxy.Direct)
if err2 == nil {
ftp.dialer = p
}
}
}
err = ftp.NewConn(addr)
if err != nil {
return
}
ftp.writeInfo("host:", ftp.Host, " port:", strconv.Itoa(ftp.Port), " proxy enabled:", ftp.dialer != proxy.Direct)
// NOTE: this is an absolute time that needs refreshing after each READ/WRITE net operation
//ftp.conn.conn.SetDeadline(getTimeoutInMsec(ftp.timeoutInMsec))
if resp, err = ftp.Read(NONE_FTP_CMD); err != nil {
return
}
ftp.welcome = resp.Message
ftp.writeInfo("Successfully connected on local address:", ftp.conn.LocalAddr())
return
}
// SetPassive sets the mode to passive or active for data transfers.
// With a false statement use the normal PORT mode.
// With a true statement use the PASV command.
func (ftp *FTP) SetPassive(ispassive bool) {
ftp.passiveserver = ispassive
}
// Login logs on to the server.
func (ftp *FTP) Login(username, password string, acct string) (response *Response, err error) {
//Login, default anonymous.
if len(username) == 0 {
username = "anonymous"
}
if len(password) == 0 {
password = ""
}
if username == "anonymous" && len(password) == 0 {
// If there is no anonymous ftp password specified
// then we'll just use anonymous@
		// We don't send anything else because:
		// - We want to remain anonymous
		// - We want to stop SPAM
		// - We don't want to let ftp sites discriminate by user,
		//   host or country.
password = password + "anonymous@"
}
ftp.writeInfo("username:", username)
tempResponse, err := ftp.SendAndRead(USER_FTP_CMD, username)
if err != nil {
return
}
if tempResponse.getFirstChar() == "3" {
tempResponse, err = ftp.SendAndRead(PASSWORD_FTP_CMD, password)
if err != nil {
return
}
}
if tempResponse.getFirstChar() == "3" {
tempResponse, err = ftp.SendAndRead(ACCT_FTP_CMD, acct)
if err != nil {
return
}
}
if tempResponse.getFirstChar() != "2" {
err = NewErrReply(errors.New(tempResponse.Message))
return
}
return tempResponse, err
}
// Abort interrupts a file transfer, which uses out-of-band data.
// This does not follow the procedure from the RFC to send Telnet IP and Synch;
// that does not seem to work with all servers. Instead just send the ABOR command as OOB data.
func (ftp *FTP) Abort() (response *Response, err error) {
return ftp.SendAndRead(ABORT_FTP_CMD)
}
// SendPort sends a PORT command with the current host and given port number
func (ftp *FTP) SendPort(host string, port int) (response *Response, err error) {
hbytes := strings.Split(host, ".") // return all substrings
pbytes := []string{strconv.Itoa(port / 256), strconv.Itoa(port % 256)}
bytes := strings.Join(append(hbytes, pbytes...), ",")
return ftp.SendAndRead(PORT_FTP_CMD, bytes)
}
// makePasv sends a PASV command and returns the host and port number to be used for the data transfer connection.
func (ftp *FTP) makePasv() (host string, port int, err error) {
var resp *Response
resp, err = ftp.SendAndRead(PASV_FTP_CMD)
if err != nil {
return
}
return parse227(resp)
}
// Acct sends an ACCT command.
func (ftp *FTP) Acct() (response *Response, err error) {
return ftp.SendAndRead(ACCT_FTP_CMD)
}
// Mlsd lists a directory in a standardized format by using MLSD
// command (RFC-3659). If path is omitted the current directory
// is assumed. "facts" is a list of strings representing the type
// of information desired (e.g. ["type", "size", "perm"]).
// Return a generator object yielding a tuple of two elements
// for every file found in path.
// First element is the file name, the second one is a dictionary
// including a variable number of "facts" depending on the server
// and whether "facts" argument has been provided.
func (ftp *FTP) Mlsd(path string, facts []string) (ls []*NameFactsLine, err error) {
if len(facts) > 0 {
if _, err = ftp.Opts("MLST", strings.Join(facts, ";")+";"); err != nil {
return nil, err
}
}
sw := &stringSliceWriter{make([]string, 0, 50)}
if err = ftp.GetLines(MLSD_FTP_CMD, sw, path); err != nil {
return nil, err
}
	ls = make([]*NameFactsLine, 0, len(sw.s)) // capacity only; entries are appended below
for _, l := range sw.s {
tkns := strings.Split(strings.TrimSpace(l), " ")
name := tkns[0]
facts := strings.Split(tkns[1], ";")
ftp.writeInfo("Found facts:", facts)
vals := make(map[string]string, len(facts)-1)
for i := 0; i < len(facts)-1; i++ {
fpair := strings.Split(facts[i], "=")
vals[fpair[0]] = fpair[1]
}
ls = append(ls, &NameFactsLine{strings.ToLower(name), vals})
}
return
}
// Feat lists all new FTP features that the server supports beyond those described in RFC 959.
func (ftp *FTP) Feat(params ...string) (fts []string, err error) {
var r *Response
if r, err = ftp.SendAndRead(FEAT_FTP_CMD); err != nil {
return
}
return parse211(r)
}
// Nlst returns a list of files in a directory, by default the current one.
func (ftp *FTP) Nlst(params ...string) (filelist []string, err error) {
return ftp.getList(NLST_FTP_CMD, params...)
}
// Dir returns a list of files in a directory in long form, by default the current one.
func (ftp *FTP) Dir(params ...string) (filelist []string, err error) {
return ftp.getList(LIST_FTP_CMD, params...)
}
func (ftp *FTP) getList(cmd FtpCmd, params ...string) (filelist []string, err error) {
files := make([]string, 0, 50)
sw := &stringSliceWriter{files}
if err = ftp.GetLines(cmd, sw, params...); err != nil {
return nil, err
}
return sw.s, nil
}
// Rename renames a file.
func (ftp *FTP) Rename(fromname string, toname string) (response *Response, err error) {
tempResponse, err := ftp.SendAndRead(RENAMEFROM_FTP_CMD, fromname)
if err != nil {
return nil, err
}
if tempResponse.getFirstChar() != "3" {
err = NewErrReply(errors.New(tempResponse.Message))
return nil, err
}
return ftp.SendAndRead(RENAMETO_FTP_CMD, toname)
}
// Delete deletes a file.
func (ftp *FTP) Delete(filename string) (response *Response, err error) {
tempResponse, err := ftp.SendAndRead(DELETE_FTP_CMD, filename)
if err != nil {
return nil, err
}
	if c := tempResponse.Code; c == 250 || c == 200 {
		return tempResponse, nil
	}
	return nil, NewErrReply(errors.New(tempResponse.Message))
}
// Cwd changes to current directory.
func (ftp *FTP) Cwd(dirname string) (response *Response, err error) {
if dirname == ".." {
return ftp.SendAndRead(CDUP_FTP_CMD)
} else if dirname == "" {
dirname = "."
}
return ftp.SendAndRead(CWD_FTP_CMD, dirname)
}
// Size retrieves the size of a file.
func (ftp *FTP) Size(filename string) (size int, err error) {
	response, err := ftp.SendAndRead(SIZE_FTP_CMD, filename)
	if err != nil {
		// avoid dereferencing a nil response when the command fails
		return
	}
	if response.Code == 213 {
		size, _ = strconv.Atoi(strings.TrimSpace(response.Message))
		return size, err
	}
return
}
// Mkd creates a directory and returns its full pathname.
func (ftp *FTP) Mkd(dirname string) (dname string, err error) {
var response *Response
response, err = ftp.SendAndRead(MKDIR_FTP_CMD, dirname)
if err != nil {
return
}
// fix around non-compliant implementations such as IIS shipped
// with Windows server 2003
if response.Code != 257 {
return "", nil
}
return parse257(response)
}
// Rmd removes a directory.
func (ftp *FTP) Rmd(dirname string) (response *Response, err error) {
return ftp.SendAndRead(RMDIR_FTP_CMD, dirname)
}
// Pwd returns the current working directory.
func (ftp *FTP) Pwd() (dirname string, err error) {
response, err := ftp.SendAndRead(PWDIR_FTP_CMD)
// fix around non-compliant implementations such as IIS shipped
// with Windows server 2003
if err != nil {
return "", err
}
if response.Code != 257 {
return "", nil
}
return parse257(response)
}
// Quit sends a QUIT command and closes the connection.
func (ftp *FTP) Quit() (response *Response, err error) {
response, err = ftp.SendAndRead(QUIT_FTP_CMD)
ftp.conn.Close()
return
}
// DownloadFile downloads a file and stores it locally.
// There are two modes:
// - binary, useLineMode = false
// - line by line (text), useLineMode = true
func (ftp *FTP) DownloadFile(remotename string, localpath string, useLineMode bool) (err error) {
// remove local file
os.Remove(localpath)
var f *os.File
f, err = os.OpenFile(localpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
defer f.Close()
if err != nil {
return
}
if useLineMode {
w := newTextFileWriter(f)
defer w.bw.Flush() // remember to flush
if err = ftp.GetLines(RETR_FTP_CMD, w, remotename); err != nil {
return err
}
} else {
if err = ftp.GetBytes(RETR_FTP_CMD, f, BLOCK_SIZE, remotename); err != nil {
return err
}
}
return err
}
// UploadFile uploads a file from a local path to the current folder (see Cwd too) on the FTP server.
// A remotename needs to be specified.
// There are two modes set via the useLineMode flag:
// - binary, useLineMode = false
// - line by line (text), useLineMode = true
func (ftp *FTP) UploadFile(remotename string, localpath string, useLineMode bool, callback Callback) (err error) {
var f *os.File
f, err = os.Open(localpath)
defer f.Close()
if err != nil {
return
}
if useLineMode {
if err = ftp.StoreLines(STORE_FTP_CMD, f, remotename, localpath, callback); err != nil {
return err
}
} else {
if err = ftp.StoreBytes(STORE_FTP_CMD, f, BLOCK_SIZE, remotename, localpath, callback); err != nil {
return err
}
}
return err
}
// Opts sends an OPTS command with the given parameters, for example to select the facts returned by MLSD/MLST.
func (ftp *FTP) Opts(params ...string) (response *Response, err error) {
return ftp.SendAndRead(OPTS_FTP_CMD, params...)
}
// GetLines retrieves data in line mode.
// Args:
// cmd: A RETR, LIST, NLST, or MLSD command.
// writer: of interface type io.Writer that is called for each line with the trailing CRLF stripped.
//
// returns:
// The response code.
func (ftp *FTP) GetLines(cmd FtpCmd, writer io.Writer, params ...string) (err error) {
var conn net.Conn
if _, err = ftp.SendAndRead(TYPE_A_FTP_CMD); err != nil {
return
}
// wrap this code up to guarantee the connection disposal via a defer
separateCall := func() error {
if conn, _, err = ftp.transferCmd(cmd, params...); err != nil {
return err
}
defer conn.Close() // close the connection on exit
ftpReader := textproto.NewConn(conn)
ftp.writeInfo("Try and get lines via connection for remote address:", conn.RemoteAddr().String())
for {
line, err := ftpReader.ReadLineBytes()
if err != nil {
if err == io.EOF {
ftp.writeInfo("Reached end of buffer with line:", line)
break
}
return err
}
if _, err1 := writer.Write(line); err1 != nil {
return err1
}
}
return nil
}
if err := separateCall(); err != nil {
return err
}
ftp.writeInfo("Reading final empty line")
_, err = ftp.Read(cmd)
return
}
// GetBytes retrieves data in binary mode.
// Args:
// cmd: A RETR command.
// callback: A single parameter callable to be called on each
// block of data read.
// blocksize: The maximum number of bytes to read from the
// socket at one time. [default: 8192]
//
//Returns:
// The response code.
func (ftp *FTP) GetBytes(cmd FtpCmd, writer io.Writer, blocksize int, params ...string) (err error) {
var conn net.Conn
if _, err = ftp.SendAndRead(TYPE_I_FTP_CMD); err != nil {
return
}
// wrap this code up to guarantee the connection disposal via a defer
separateCall := func() error {
if conn, _, err = ftp.transferCmd(cmd, params...); err != nil {
return err
}
defer conn.Close() // close the connection on exit
bufReader := bufio.NewReaderSize(conn, blocksize)
ftp.writeInfo("Try and get bytes via connection for remote address:", conn.RemoteAddr().String())
s := make([]byte, blocksize)
var n int
for {
n, err = bufReader.Read(s)
ftp.writeInfo("GETBYTES: Number of bytes read:", n)
if _, err1 := writer.Write(s[:n]); err1 != nil {
return err1
}
if err != nil {
if err == io.EOF {
break
}
return err
}
}
return nil
}
if err := separateCall(); err != nil {
return err
}
_, err = ftp.Read(cmd)
return
}
// GetBytesProgress retrieves data in binary mode with progress channel
// Args:
// cmd: A RETR command.
// blocksize: The maximum number of bytes to read from the
// socket at one time. [default: 8192]
//
//Returns:
// The response code.
func (ftp *FTP) GetBytesProgress(cmd FtpCmd, writer io.Writer, blocksize int, progresschan chan int, params ...string) (err error) {
var conn net.Conn
if _, err = ftp.SendAndRead(TYPE_I_FTP_CMD); err != nil {
return
}
// wrap this code up to guarantee the connection disposal via a defer
separateCall := func() error {
if conn, _, err = ftp.transferCmd(cmd, params...); err != nil {
return err
}
defer conn.Close() // close the connection on exit
bufReader := bufio.NewReaderSize(conn, blocksize)
ftp.writeInfo("Try and get bytes via connection for remote address:", conn.RemoteAddr().String())
s := make([]byte, blocksize)
var n int
		defer conn.SetDeadline(time.Time{})
		for {
			conn.SetDeadline(getTimeoutInMsec(ftp.TimeoutInMsec))
n, err = bufReader.Read(s)
ftp.writeInfo("GETBYTES: Number of bytes read:", n)
if _, err1 := writer.Write(s[:n]); err1 != nil {
return err1
}
progresschan <- n
if err != nil {
if err == io.EOF {
break
}
return err
}
}
return nil
}
if err := separateCall(); err != nil {
return err
}
_, err = ftp.Read(cmd)
return
}
// StoreLines stores a file in line mode.
//
// Args:
// cmd: A STOR command.
// reader: A reader object with a ReadLine() method.
// callback: An optional single parameter callable that is called on
// on each line after it is sent. [default: None]
//
// Returns:
// The response code.
func (ftp *FTP) StoreLines(cmd FtpCmd, reader io.Reader, remotename string, filename string, callback Callback) (err error) {
var conn net.Conn
if _, err = ftp.SendAndRead(TYPE_A_FTP_CMD); err != nil {
return
}
// wrap this code up to guarantee the connection disposal via a defer
separateCall := func() error {
if conn, _, err = ftp.transferCmd(cmd, remotename); err != nil {
return err
}
defer conn.Close() // close the connection on exit
ftp.writeInfo("Try and write lines via connection for remote address:", conn.RemoteAddr().String())
lineReader := bufio.NewReader(reader)
var tot int64
for {
var n int
var eof bool
line, _, err := lineReader.ReadLine()
if err != nil {
eof = err == io.EOF
if !eof {
return err
}
}
// !Remember to convert to string (UTF-8 encoding)
if !eof {
n, err = fmt.Fprintln(conn, string(line))
if err != nil {
return err
}
}
if callback != nil {
tot += int64(n)
callback(&CallbackInfo{remotename, filename, tot, eof})
}
if eof {
break
}
}
return nil
}
if err := separateCall(); err != nil {
return err
}
ftp.writeInfo("Reading final empty line")
_, err = ftp.Read(cmd)
return
}
// StoreBytes uploads bytes in chunks defined by the blocksize parameter.
// It uses an io.Reader to read the input data.
func (ftp *FTP) StoreBytes(cmd FtpCmd, reader io.Reader, blocksize int, remotename string, filename string, callback Callback) (err error) {
var conn net.Conn
if _, err = ftp.SendAndRead(TYPE_I_FTP_CMD); err != nil {
return
}
// wrap this code up to guarantee the connection disposal via a defer
separateCall := func() error {
if conn, _, err = ftp.transferCmd(cmd, remotename); err != nil {
return err
}
defer conn.Close() // close the connection on exit
bufReader := bufio.NewReaderSize(reader, blocksize)
ftp.writeInfo("Try and store bytes via connection for remote address:", conn.RemoteAddr().String())
s := make([]byte, blocksize)
var tot int64
for {
var nr, nw int
var eof bool
nr, err = bufReader.Read(s)
eof = err == io.EOF
if err != nil && !eof {
return err
}
if nw, err = conn.Write(s[:nr]); err != nil {
return err
}
if callback != nil {
tot += int64(nw)
callback(&CallbackInfo{remotename, filename, tot, eof})
}
if eof {
break
}
}
return nil
}
if err := separateCall(); err != nil {
return err
}
_, err = ftp.Read(cmd)
return
}
// Binary sets the transfer type to binary mode (TYPE I).
func (ftp *FTP) Binary() error {
if _, err := ftp.SendAndRead(TYPE_I_FTP_CMD); err != nil {
return err
}
return nil
}
// ResumeDownload resumes a previously interrupted download. It does not check whether the remote file has changed in the meantime.
func (ftp *FTP) ResumeDownload(remotename string, localpath string) (progresschan chan int, err error) {
if err = ftp.Binary(); err != nil {
return
}
var f *os.File
f, err = os.OpenFile(localpath, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return
}
// seek to the end of the local file to find out how many bytes were already downloaded
ret, err := f.Seek(0, 2)
if err != nil {
f.Close()
return
}
if ret > 0 {
if _, err = ftp.SendAndRead(REST_FTP_CMD, fmt.Sprintf("%d", ret)); err != nil {
f.Close()
return
}
ftp.writeInfo("Resuming download from", ret)
}
progresschan = make(chan int, 10)
if ret > 0 {
progresschan <- int(ret)
}
go func(f *os.File) {
defer f.Close()
// use a local error variable: assigning to the outer named return from this
// goroutine would race with ResumeDownload having already returned
if gerr := ftp.GetBytesProgress(RETR_FTP_CMD, f, BLOCK_SIZE, progresschan, remotename); gerr != nil {
fmt.Println(gerr)
progresschan <- -1
} else {
progresschan <- -2
}
close(progresschan)
}(f)
return
}
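// exampleResumeDownload is a minimal usage sketch, not part of the original
// library: it assumes an already connected and logged-in *FTP value, and the
// remote/local file names are hypothetical. It shows how a caller can drain
// the progress channel returned by ResumeDownload, where -1 signals failure
// and -2 signals completion, as documented above.
func exampleResumeDownload(ftp *FTP) (total int, err error) {
progress, err := ftp.ResumeDownload("remote.bin", "/tmp/local.bin")
if err != nil {
return 0, err
}
for n := range progress {
switch n {
case -1:
return total, errors.New("download failed, see the logged error")
case -2:
return total, nil // download completed
default:
total += n // bytes transferred so far, including any previously downloaded portion
}
}
return total, nil
}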
// transferCmd initializes a transfer over the data connection.
//
// If the transfer is active, send a port command and the transfer command,
// then accept the connection. If the server is passive, send a pasv command, connect to it
// and start the transfer command. Either way return the connection and the expected size of the transfer.
// The expected size may be 0 if it could not be determined.
func (ftp *FTP) transferCmd(cmd FtpCmd, params ...string) (conn net.Conn, size int, err error) {
var listener net.Listener
ftp.writeInfo("Server is passive:", ftp.passiveserver)
if ftp.passiveserver {
host, port, pasvErr := ftp.makePasv()
if pasvErr != nil {
return nil, -1, pasvErr
}
// if the PASV reply advertises a different host than the one we connected to
// (e.g. a server behind NAT reporting an internal address), keep the original host
if host != ftp.Host {
ftp.writeInfo("The remote server answered with a different host address, which is", host, ", using the original host instead:", ftp.Host)
host = ftp.Host
}
addr := fmt.Sprintf("%s:%d", host, port)
if conn, err = ftp.dialer.Dial("tcp", addr); err != nil {
ftp.writeInfo("Dial error, address:", addr, "error:", err, "proxy enabled:", ftp.dialer != proxy.Direct)
return
}
} else {
if listener, err = ftp.makePort(); err != nil {
return
}
ftp.writeInfo("Listener created for non-passive mode")
}
var resp *Response
if resp, err = ftp.SendAndRead(cmd, params...); err != nil {
resp = nil
return
}
// Some servers apparently send a 200 reply to
// a LIST or STOR command, before the 150 reply
// (and way before the 226 reply). This seems to
// be in violation of the protocol (which only allows
// 1xx or error messages for LIST), so we just discard
// this response.
if resp.getFirstChar() == "2" {
if resp, err = ftp.Read(cmd); err != nil {
return
}
}
if resp.getFirstChar() != "1" {
err = NewErrReply(errors.New(resp.Message))
return
}
// active (non-passive) mode: accept the incoming data connection, then close the listener
if listener != nil {
ftp.writeInfo("Preparing to listen for non-passive mode.")
if conn, err = listener.Accept(); err != nil {
conn = nil
return
}
ftp.writeInfo("Trying to communicate with local host: ", conn.LocalAddr())
defer listener.Close() // close after getting the connection
}
if resp.Code == 150 {
// this is conditional in case we received a 125
ftp.writeInfo("Parsing return code 150")
size, err = parse150ForSize(resp)
}
return conn, size, err
}
// makePort opens a new local port for active-mode transfers, announces it with a PORT command and returns its listener.
func (ftp *FTP) makePort() (listener net.Listener, err error) {
tcpAddr := ftp.conn.LocalAddr()
network := tcpAddr.Network()
var la *net.TCPAddr
if la, err = net.ResolveTCPAddr(network, tcpAddr.String()); err != nil {
return
}
// get the new address
newad := la.IP.String() + ":0" // any available port
ftp.writeInfo("The new local address in makePort is:", newad)
listening := runServer(newad, network)
list := <-listening // wait for server to start and accept
if list == nil {
return nil, errors.New("Unable to create listener")
}
la, _ = net.ResolveTCPAddr(list.Addr().Network(), list.Addr().String())
ftp.writeInfo("Trying to listen locally at: ", la.IP.String(), " on new port:", la.Port)
_, err = ftp.SendPort(la.IP.String(), la.Port)
return list, err
}
func runServer(laddr string, network string) chan net.Listener {
listening := make(chan net.Listener)
go func() {
l, err := net.Listen(network, laddr)
if err != nil {
// log the failure and report it through the channel instead of exiting,
// so makePort can return a proper error
log.Printf("net.Listen(%q, %q) = _, %v", network, laddr, err)
listening <- nil
return
}
listening <- l
}()
return listening
}
|
[
"\"all_proxy\""
] |
[] |
[
"all_proxy"
] |
[]
|
["all_proxy"]
|
go
| 1 | 0 | |
code/kegg_populator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Clinton Cario 9/11/2015
# 02/08/2016
# Rewritten based on SSM_populator for memsql, no dependency on peewee and cleaner more efficient tables
# 02/09/2016
#     Fixed bug where single quotes ruin syntax by incorporating a strip_invalid function that sanitizes syntax inputs
#     Creates the following tables:
#
# KEGG_genes KEGG_gene_alias KEGG_pathway KEGG_gene_path
# ---------- --------------- ------------ --------------
# ensembl_id ensembl_id pathway_id ensembl_id
# gene_name gene_name pathway_name pathway_id
# is_cancer
import os, sys, re, argparse
from restkit import Resource
from memsql.common import database
from random import random # To generate a fake Ensembl ID if none are found
from urlparse import urlparse
from time import sleep
## Get the command line arguments
parser = argparse.ArgumentParser(description='This script populates gene pathway membership tables in the specified database using information from KEGG (http://www.genome.jp/kegg/)')
parser.add_argument('-x', '--connection', action='store', dest='db_uri', help='A database URI connection string (e.g. mysql://user:pass@host:port/DB) if $DATABASE is not defined')
parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='Whether to be verbose and display status on the command line')
options = parser.parse_args()
# Define database parameters
db_uri = options.db_uri or os.environ.get('DATABASE')
db_info = urlparse(db_uri)
# Define the connection interface
def get_connection(host=db_info.hostname, port=db_info.port, user=db_info.username, password=db_info.password, db=db_info.path.strip('/'), verbose=options.verbose):
for attempt in xrange(1,21):
try:
return database.connect(host=host, port=port, user=user, password=password, database=db)
except:
if verbose: print "\tTrouble establishing a database connection, retrying... (attempt: %d/20)" % attempt
sleep(attempt*2)
continue
def run_sql(sql, verbose=options.verbose):
for attempt in xrange(1,21):
try:
with get_connection() as db:
return db.execute(sql)
except:
if verbose: print "\tTrouble running a query, retrying... (attempt: %d/20)" % attempt
sleep(attempt*2)
continue
def try_api(url, api, verbose=options.verbose):
for attempt in xrange(1,21):
try:
return api.get(url).body_string()
except:
if verbose: print "\tTrouble with the rest api, retrying... (attempt: %d/20)" % attempt
sleep(attempt*2)
continue
def strip_invalid(instr):
return re.sub('[^0-9a-zA-Z ]+', '', instr)
# =============================
# Create the tables
# ---------
if options.verbose: print "Verbose mode on.\n(Re)creating tables..."
syntax = """
CREATE TABLE IF NOT EXISTS `kegg_gene` (
kegg_gene_id INT unsigned NOT NULL AUTO_INCREMENT,
ensembl_id CHAR(16) DEFAULT NULL,
gene_name CHAR(63) NOT NULL,
PRIMARY KEY (kegg_gene_id),
KEY (ensembl_id)
);
"""
run_sql(syntax)
syntax = """
CREATE TABLE IF NOT EXISTS `kegg_gene_alias` (
kegg_gene_alias_id INT unsigned NOT NULL AUTO_INCREMENT,
ensembl_id CHAR(16) DEFAULT NULL,
gene_alias CHAR(63) NOT NULL,
PRIMARY KEY (kegg_gene_alias_id),
KEY (ensembl_id)
);
"""
run_sql(syntax)
syntax = """
CREATE TABLE IF NOT EXISTS `kegg_pathway` (
kegg_pathway_id CHAR(16) DEFAULT NULL,
pathway_name CHAR(128) DEFAULT NULL,
PRIMARY KEY (kegg_pathway_id)
);
"""
run_sql(syntax)
syntax = """
CREATE TABLE IF NOT EXISTS `kegg_gene_pathway` (
kegg_gene_pathway_id INT unsigned NOT NULL AUTO_INCREMENT,
kegg_pathway_id CHAR(16) DEFAULT NULL,
ensembl_id CHAR(16) DEFAULT NULL,
is_cancer BOOL NOT NULL,
PRIMARY KEY (kegg_gene_pathway_id),
KEY (ensembl_id),
KEY (kegg_pathway_id)
);
"""
run_sql(syntax)
if options.verbose: print "Querying KEGG REST API, please wait..."
api = Resource('http://rest.kegg.jp')
kegg_cancers = {
'hsa05200': 'Pathways in cancer [PATH:ko05200]',
'hsa05230': 'Central carbon metabolism in cancer [PATH:ko05230]',
'hsa05231': 'Choline metabolism in cancer [PATH:ko05231]',
'hsa05202': 'Transcriptional misregulation in cancers [PATH:ko05202]',
'hsa05206': 'MicroRNAs in cancer [PATH:ko05206]',
'hsa05205': 'Proteoglycans in cancer [PATH:ko05205]',
'hsa05204': 'Chemical carcinogenesis [PATH:ko05204]',
'hsa05203': 'Viral carcinogenesis [PATH:ko05203]',
'hsa05210': 'Colorectal cancer [PATH:ko05210]',
'hsa05212': 'Pancreatic cancer [PATH:ko05212]',
'hsa05214': 'Glioma [PATH:ko05214]',
'hsa05216': 'Thyroid cancer [PATH:ko05216]',
'hsa05221': 'Acute myeloid leukemia [PATH:ko05221]',
'hsa05220': 'Chronic myeloid leukemia [PATH:ko05220]',
'hsa05217': 'Basal cell carcinoma [PATH:ko05217]',
'hsa05218': 'Melanoma [PATH:ko05218]',
'hsa05211': 'Renal cell carcinoma [PATH:ko05211]',
'hsa05219': 'Bladder cancer [PATH:ko05219]',
'hsa05215': 'Prostate cancer [PATH:ko05215]',
'hsa05213': 'Endometrial cancer [PATH:ko05213]',
'hsa05222': 'Small cell lung cancer [PATH:ko05222]',
'hsa05223': 'Non-small cell lung cancer [PATH:ko05223]',
}
entry = 0
# Get all human pathways
results = try_api('/link/pathway/hsa', api).split('\n')
for result in results:
entry = entry + 1
if result == '': continue # Skip the final blank entry
# =============================
# Get the gene and pathway information for this result
# ---------
gene, pathway = result.split('\t')
pathway = pathway.replace('path:','')
if options.verbose: print "Pathway: %s\nGene: %s" % (pathway, gene)
gene_info = try_api('/get/'+gene, api) #.split('\n')
#if options.verbose: print gene_info
#if options.verbose: print gene_info[1].split(' ')[1:]
# =============================
# Get the pathway name
# ---------
path_name = "NOPATH" + str(entry)
path_info = try_api('/get/'+pathway, api) #.split('\n')
m = re.search("NAME (.*)\n", path_info)
if m:
path_name = m.groups()[0].lstrip(' ')
path_name = path_name.replace(' - Homo sapiens (human)','')
if options.verbose: print "Pathway Name: %s" % (path_name)
is_cancer = pathway in kegg_cancers
# =============================
# Get the gene name, aliases, and ensembl ID
# ---------
gene_names = ['NOGENE']
ensembl_id = None
m = re.search("NAME (.*)\n", gene_info)
if m:
gene_names = m.groups()[0].replace(' ','').split(',')
if options.verbose: print "Gene Names: %s" % (gene_names)
# And ensembl ID
m = re.search("Ensembl: (.*)\n", gene_info)
if m:
ensembl_id = m.groups()[0].split(' ')[0]
if options.verbose: print "Ensembl ID: %s" % (ensembl_id)
gene_name = gene_names[0]
# Attempt to fix bad ensembl ids by creating a dummy ID
if ensembl_id is None:
ensembl_id = "NOID_" + gene_name
# =============================
# Try to save this entry
# ---------
#try:
# Save gene name and ensembl ID
syntax = "INSERT IGNORE INTO `kegg_gene` (ensembl_id, gene_name) VALUE ('%s', '%s');" % (strip_invalid(ensembl_id), strip_invalid(gene_names[0]))
run_sql(syntax)
# Save aliases (if any)
if len(gene_names)>1:
for alias in gene_names[1:]:
syntax = "INSERT IGNORE INTO `kegg_gene_alias` (ensembl_id, gene_alias) VALUE ('%s', '%s');" % (strip_invalid(ensembl_id), strip_invalid(alias))
run_sql(syntax)
# Create pathway if it doesn't exist
syntax = "INSERT IGNORE INTO `kegg_pathway` (kegg_pathway_id, pathway_name) VALUE ('%s', '%s');" % (strip_invalid(pathway), strip_invalid(path_name))
#syntax = re.sub('^\s+','',syntax, flags=re.MULTILINE).replace('\n','')
run_sql(syntax)
# Link the gene to the pathway
syntax = "INSERT IGNORE INTO `kegg_gene_pathway` (ensembl_id, kegg_pathway_id, is_cancer) VALUE ('%s', '%s', %d);" % (strip_invalid(ensembl_id), strip_invalid(pathway), 1 if is_cancer else 0)
#syntax = re.sub('^\s+','',syntax, flags=re.MULTILINE).replace('\n','')
run_sql(syntax)
if options.verbose: print "ENTRY SAVED"
if options.verbose: print "============================================"
|
[] |
[] |
[
"DATABASE"
] |
[]
|
["DATABASE"]
|
python
| 1 | 0 | |
src/__init__.py
|
import os
import logging
from pathlib import Path
from dotenv import load_dotenv
from hypothepy.v1.api import HypoApi
load_dotenv(dotenv_path='./.env')
HYPOTHESIS_USER=os.getenv("HYPOTHESIS_USER")
HYPOTHESIS_API_KEY=os.getenv("HYPOTHESIS_API_KEY")
HYPO = HypoApi(HYPOTHESIS_API_KEY, HYPOTHESIS_USER)
logger = logging.getLogger('traxiv logger')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', "%y-%m-%d %H:%M:%S")
log_dir = Path('./log')
log_file = Path('traxiv.log')
if not log_dir.exists():
log_dir.mkdir()
log_path = log_dir / log_file
fh = logging.FileHandler(log_path)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
|
[] |
[] |
[
"HYPOTHESIS_USER",
"HYPOTHESIS_API_KEY"
] |
[]
|
["HYPOTHESIS_USER", "HYPOTHESIS_API_KEY"]
|
python
| 2 | 0 | |
test/e2e/libpod_suite_test.go
|
// +build !remoteclient
package integration
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/inspect"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
func SkipIfRemote() {}
// Podman is the exec call to podman on the filesystem
func (p *PodmanTestIntegration) Podman(args []string) *PodmanSessionIntegration {
podmanSession := p.PodmanBase(args)
return &PodmanSessionIntegration{podmanSession}
}
// PodmanAsUser is the exec call to podman on the filesystem with the specified uid/gid and environment
func (p *PodmanTestIntegration) PodmanAsUser(args []string, uid, gid uint32, cwd string, env []string) *PodmanSessionIntegration {
podmanSession := p.PodmanAsUserBase(args, uid, gid, cwd, env)
return &PodmanSessionIntegration{podmanSession}
}
// PodmanPID execs podman and returns its PID
func (p *PodmanTestIntegration) PodmanPID(args []string) (*PodmanSessionIntegration, int) {
podmanOptions := p.MakeOptions(args)
fmt.Printf("Running: %s %s\n", p.PodmanBinary, strings.Join(podmanOptions, " "))
command := exec.Command(p.PodmanBinary, podmanOptions...)
session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
if err != nil {
Fail(fmt.Sprintf("unable to run podman command: %s", strings.Join(podmanOptions, " ")))
}
podmanSession := &PodmanSession{session}
return &PodmanSessionIntegration{podmanSession}, command.Process.Pid
}
// Cleanup cleans up the temporary store
func (p *PodmanTestIntegration) Cleanup() {
// Remove all containers
stopall := p.Podman([]string{"stop", "-a", "--timeout", "0"})
stopall.WaitWithDefaultTimeout()
session := p.Podman([]string{"rm", "-fa"})
session.Wait(90)
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
}
// Clean up the registries configuration file ENV variable set in Create
resetRegistriesConfigEnv()
}
// CleanupPod cleans up the temporary store
func (p *PodmanTestIntegration) CleanupPod() {
// Remove all containers
session := p.Podman([]string{"pod", "rm", "-fa"})
session.Wait(90)
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
}
}
// CleanupVolume cleans up the temporary store
func (p *PodmanTestIntegration) CleanupVolume() {
// Remove all containers
session := p.Podman([]string{"volume", "rm", "-fa"})
session.Wait(90)
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
}
}
// PullImages pulls multiple images
func (p *PodmanTestIntegration) PullImages(images []string) error {
for _, i := range images {
p.PullImage(i)
}
return nil
}
// PullImage pulls a single image
// TODO should the timeout be configurable?
func (p *PodmanTestIntegration) PullImage(image string) error {
session := p.Podman([]string{"pull", image})
session.Wait(60)
Expect(session.ExitCode()).To(Equal(0))
return nil
}
// InspectContainerToJSON takes the session output of an inspect
// container and returns json
func (s *PodmanSessionIntegration) InspectContainerToJSON() []inspect.ContainerData {
var i []inspect.ContainerData
err := json.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// InspectPodToJSON takes the sessions output from a pod inspect and returns json
func (s *PodmanSessionIntegration) InspectPodToJSON() libpod.PodInspect {
var i libpod.PodInspect
err := json.Unmarshal(s.Out.Contents(), &i)
Expect(err).To(BeNil())
return i
}
// CreatePod creates a pod with no infra container
// it optionally takes a pod name
func (p *PodmanTestIntegration) CreatePod(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"pod", "create", "--infra=false", "--share", ""}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
return session, session.ExitCode(), session.OutputToString()
}
// RunTopContainer runs a simple container in the background that runs top.
// If name is non-empty, the container is given that name.
func (p *PodmanTestIntegration) RunTopContainer(name string) *PodmanSessionIntegration {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "top")
return p.Podman(podmanArgs)
}
func (p *PodmanTestIntegration) RunTopContainerInPod(name, pod string) *PodmanSessionIntegration {
var podmanArgs = []string{"run", "--pod", pod}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "top")
return p.Podman(podmanArgs)
}
// RunLsContainer runs a simple container in the background that simply runs ls.
// If name is non-empty, the container is given that name.
func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run"}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
return session, session.ExitCode(), session.OutputToString()
}
func (p *PodmanTestIntegration) RunLsContainerInPod(name, pod string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run", "--pod", pod}
if name != "" {
podmanArgs = append(podmanArgs, "--name", name)
}
podmanArgs = append(podmanArgs, "-d", ALPINE, "ls")
session := p.Podman(podmanArgs)
session.WaitWithDefaultTimeout()
return session, session.ExitCode(), session.OutputToString()
}
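// examplePodWithTop is a minimal usage sketch, not part of the original test
// helpers: it assumes an initialized *PodmanTestIntegration (as produced by
// PodmanTestCreate) and shows how CreatePod and RunTopContainerInPod are
// typically combined; the pod and container names are hypothetical.
func examplePodWithTop(p *PodmanTestIntegration) {
_, ec, _ := p.CreatePod("example-pod")
Expect(ec).To(Equal(0))
top := p.RunTopContainerInPod("example-top", "example-pod")
top.WaitWithDefaultTimeout()
Expect(top.ExitCode()).To(Equal(0))
}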
// BuildImage uses podman build and buildah to build an image
// called imageName based on a string dockerfile
func (p *PodmanTestIntegration) BuildImage(dockerfile, imageName string, layers string) {
dockerfilePath := filepath.Join(p.TempDir, "Dockerfile")
err := ioutil.WriteFile(dockerfilePath, []byte(dockerfile), 0755)
Expect(err).To(BeNil())
session := p.Podman([]string{"build", "--layers=" + layers, "-t", imageName, "--file", dockerfilePath, p.TempDir})
session.Wait(120)
Expect(session.ExitCode()).To(Equal(0))
}
func (p *PodmanTestIntegration) setDefaultRegistriesConfigEnv() {
defaultFile := filepath.Join(INTEGRATION_ROOT, "test/registries.conf")
os.Setenv("REGISTRIES_CONFIG_PATH", defaultFile)
}
func (p *PodmanTestIntegration) setRegistriesConfigEnv(b []byte) {
outfile := filepath.Join(p.TempDir, "registries.conf")
os.Setenv("REGISTRIES_CONFIG_PATH", outfile)
ioutil.WriteFile(outfile, b, 0644)
}
func resetRegistriesConfigEnv() {
os.Setenv("REGISTRIES_CONFIG_PATH", "")
}
func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
return PodmanTestCreateUtil(tempDir, false)
}
// makeOptions assembles all the podman main options
func (p *PodmanTestIntegration) makeOptions(args []string) []string {
podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s",
p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager), " ")
if os.Getenv("HOOK_OPTION") != "" {
podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
}
podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
podmanOptions = append(podmanOptions, args...)
return podmanOptions
}
// RestoreArtifact puts the cached image into our test store
func (p *PodmanTestIntegration) RestoreArtifact(image string) error {
fmt.Printf("Restoring %s...\n", image)
dest := strings.Split(image, "/")
destName := fmt.Sprintf("/tmp/%s.tar", strings.Replace(strings.Join(strings.Split(dest[len(dest)-1], "/"), ""), ":", "-", -1))
restore := p.Podman([]string{"load", "-q", "-i", destName})
restore.Wait(90)
return nil
}
|
[
"\"HOOK_OPTION\"",
"\"HOOK_OPTION\""
] |
[] |
[
"HOOK_OPTION"
] |
[]
|
["HOOK_OPTION"]
|
go
| 1 | 0 | |
pkg/redisx/config_test.go
|
package redisx
import (
"context"
"os"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
func newViper(t *testing.T) *viper.Viper {
if os.Getenv("REDIS_TEST") == "" {
t.Skip("skip redis test")
}
v := viper.New()
v.SetDefault("redis", map[string]interface{}{
"name": "redis",
"addr": "127.0.0.1:6379",
"password": "123456",
})
return v
}
func Test_config(t *testing.T) {
v := newViper(t)
Config(v)
assert.NotNil(t, Get("redis"))
assert.NotNil(t, Locker)
}
func Test_wrapper(t *testing.T) {
v := newViper(t)
Config(v)
w := Get("redis")
r := w.Ping(context.Background())
assert.NoError(t, r.Err())
ret, err := r.Result()
assert.NoError(t, err)
t.Log("result:", ret)
}
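// exampleUse is a minimal usage sketch, not part of the original tests: it
// reuses the same viper keys as newViper above and shows how Config and Get
// would typically be wired up outside of a test; the connection details are
// hypothetical.
func exampleUse() error {
v := viper.New()
v.SetDefault("redis", map[string]interface{}{
"name": "redis",
"addr": "127.0.0.1:6379",
"password": "123456",
})
Config(v)
// ping the configured instance to verify the wrapper is usable
return Get("redis").Ping(context.Background()).Err()
}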
|
[
"\"REDIS_TEST\""
] |
[] |
[
"REDIS_TEST"
] |
[]
|
["REDIS_TEST"]
|
go
| 1 | 0 | |
template/template/scripts/launcher.py
|
#!/usr/bin/env python
#@PydevCodeAnalysisIgnore
# Copyright 2004-2012 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import warnings
# Functions to be customized by distributors. ################################
# Given the Ren'Py base directory (usually the directory containing
# this file), this is expected to return the path to the common directory.
def path_to_common(renpy_base):
return renpy_base + "/common"
# Given a directory holding a Ren'Py game, this is expected to return
# the path to a directory that will hold save files.
def path_to_saves(gamedir):
import renpy #@UnresolvedImport
if not renpy.config.save_directory:
return gamedir + "/saves"
# Search the path above Ren'Py for a directory named "Ren'Py Data".
# If it exists, then use that for our save directory.
path = renpy.config.renpy_base
while True:
if os.path.isdir(path + "/Ren'Py Data"):
return path + "/Ren'Py Data/" + renpy.config.save_directory
newpath = os.path.dirname(path)
if path == newpath:
break
path = newpath
# Otherwise, put the saves in a platform-specific location.
if renpy.android:
return gamedir + "/saves"
elif renpy.macintosh:
rv = "~/Library/RenPy/" + renpy.config.save_directory
return os.path.expanduser(rv)
elif renpy.windows:
if 'APPDATA' in os.environ:
return os.environ['APPDATA'] + "/RenPy/" + renpy.config.save_directory
else:
rv = "~/RenPy/" + renpy.config.save_directory
return os.path.expanduser(rv)
else:
rv = "~/.renpy/" + renpy.config.save_directory
return os.path.expanduser(rv)
# Returns the path to the Ren'Py base directory (containing common and
# the launcher, usually.)
def path_to_renpy_base():
renpy_base = os.path.dirname(sys.argv[0])
renpy_base = os.environ.get('RENPY_BASE', renpy_base)
renpy_base = os.path.abspath(renpy_base)
return renpy_base
##############################################################################
# The version of the Mac Launcher and py4renpy that we require.
macos_version = (6, 14, 0)
linux_version = (6, 14, 0)
# Doing the version check this way also doubles as an import of ast,
# which helps py2exe et al.
try:
import ast; ast
except:
print "Ren'Py requires at least python 2.6."
sys.exit(0)
android = ("ANDROID_PRIVATE" in os.environ)
# Android requires us to add code to the main module, and to command some
# renderers.
if android:
__main__ = sys.modules["__main__"]
__main__.path_to_renpy_base = path_to_renpy_base
__main__.path_to_common = path_to_common
__main__.path_to_saves = path_to_saves
os.environ["RENPY_RENDERER"] = "gl"
os.environ["RENPY_GL_ENVIRON"] = "limited"
#print "Ren'iOS: forcing renderer settings"
#os.environ["RENPY_RENDERER"] = "gl"
#os.environ["RENPY_GL_ENVIRON"] = "shader_es"
def main():
renpy_base = path_to_renpy_base()
# Add paths.
if os.path.exists(renpy_base + "/module"):
sys.path.append(renpy_base + "/module")
sys.path.append(renpy_base)
# This is looked for by the mac launcher.
if os.path.exists(renpy_base + "/renpy.zip"):
sys.path.append(renpy_base + "/renpy.zip")
# Ignore warnings that happen.
warnings.simplefilter("ignore", DeprecationWarning)
# Start Ren'Py proper.
try:
import renpy.bootstrap
except ImportError:
print >>sys.stderr, "Could not import renpy.bootstrap. Please ensure you decompressed Ren'Py"
print >>sys.stderr, "correctly, preserving the directory structure."
raise
if android:
renpy.linux = False
renpy.android = True
renpy.bootstrap.bootstrap(renpy_base)
#import profile
#profile.run('main()')
#print "Test STDOUT"
#
#import trace
#tracer = trace.Trace(
# ignoredirs=[sys.prefix, sys.exec_prefix],
# trace=1)
#tracer.run('main()')
if __name__ == "__main__":
main()
|
[] |
[] |
[
"APPDATA",
"RENPY_BASE",
"RENPY_RENDERER",
"RENPY_GL_ENVIRON"
] |
[]
|
["APPDATA", "RENPY_BASE", "RENPY_RENDERER", "RENPY_GL_ENVIRON"]
|
python
| 4 | 0 | |
readthedocs/settings/docker_compose.py
|
import os
from .dev import CommunityDevSettings
class DockerBaseSettings(CommunityDevSettings):
"""Settings for local development with Docker"""
DOCKER_ENABLE = True
RTD_DOCKER_COMPOSE = True
RTD_DOCKER_COMPOSE_VOLUME = 'readthedocsorg_build-user-builds'
RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'
DOCKER_LIMITS = {'memory': '1g', 'time': 900}
USE_SUBDOMAIN = True
STATIC_URL = 'http://community.dev.readthedocs.io/devstoreaccount1/static/'
PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'
PUBLIC_DOMAIN = 'community.dev.readthedocs.io'
PUBLIC_API_URL = 'http://community.dev.readthedocs.io'
RTD_PROXIED_API_URL = PUBLIC_API_URL
SLUMBER_API_HOST = 'http://web:8000'
MULTIPLE_APP_SERVERS = ['web']
MULTIPLE_BUILD_SERVERS = ['build']
# Enable auto syncing elasticsearch documents
ELASTICSEARCH_DSL_AUTOSYNC = True
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'search:9200',
},
}
RTD_CLEAN_AFTER_BUILD = True
@property
def LOGGING(self):
logging = super().LOGGING
logging['loggers'].update({
# Disable azurite logging
'azure.storage.common.storageclient': {
'handlers': ['null'],
'propagate': False,
},
})
return logging
@property
def DATABASES(self): # noqa
return {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "docs_db",
"USER": os.environ.get("DB_USER", "docs_user"),
"PASSWORD": os.environ.get("DB_PWD", "docs_pwd"),
"HOST": os.environ.get("DB_HOST", "database"),
"PORT": "",
}
}
ACCOUNT_EMAIL_VERIFICATION = "none"
SESSION_COOKIE_DOMAIN = None
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'cache:6379',
}
}
BROKER_URL = "redis://cache:6379/0"
CELERY_RESULT_BACKEND = "redis://cache:6379/0"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ALWAYS_EAGER = False
CELERY_TASK_IGNORE_RESULT = False
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Avoid syncing to the web servers
FILE_SYNCER = "readthedocs.builds.syncers.NullSyncer"
# https://github.com/Azure/Azurite/blob/master/README.md#default-storage-account
AZURE_ACCOUNT_NAME = 'devstoreaccount1'
AZURE_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
AZURE_CONTAINER = 'static'
AZURE_STATIC_STORAGE_CONTAINER = AZURE_CONTAINER
AZURE_MEDIA_STORAGE_HOSTNAME = 'community.dev.readthedocs.io'
# We want to replace files for the same version built
AZURE_OVERWRITE_FILES = True
# Storage backend for build media artifacts (PDF, HTML, ePub, etc.)
RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.azure_storage.AzureBuildMediaStorage'
AZURE_STATIC_STORAGE_HOSTNAME = 'community.dev.readthedocs.io'
# Storage for static files (those collected with `collectstatic`)
STATICFILES_STORAGE = 'readthedocs.storage.azure_storage.AzureStaticStorage'
STATICFILES_DIRS = [
os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),
os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),
]
AZURE_BUILD_STORAGE_CONTAINER = 'builds'
BUILD_COLD_STORAGE_URL = 'http://storage:10000/builds'
EXTERNAL_VERSION_URL = 'http://external-builds.community.dev.readthedocs.io'
AZURE_EMULATED_MODE = True
AZURE_CUSTOM_DOMAIN = 'storage:10000'
AZURE_SSL = False
|
[] |
[] |
[
"DB_PWD",
"DB_USER",
"DB_HOST"
] |
[]
|
["DB_PWD", "DB_USER", "DB_HOST"]
|
python
| 3 | 0 | |
SparkSecure/src/main/java/edu/escuelaing/AREP/sparkWebSecure/SecureSparkServicesApp.java
|
package edu.escuelaing.AREP.sparkWebSecure;
import static spark.Spark.*;
import edu.escuelaing.AREP.URLReader.UrlReader;
import spark.Filter;
import spark.Response;
import spark.Request;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
public class SecureSparkServicesApp {
private static Map<String, String> userPasswords = new HashMap<>();
private static boolean firstLogin = true;
/**
* Main method that starts a REST service with Spark.
* @param args command line arguments of the Java main method
*/
public static void main(String[] args) {
port(getPort());
secure("SparkSecure/keystores/ecikeystore.p12", "areplab7", "SparkSecure/keystores/myTrustStore", "areplab7");
get("/hello", (req, res) -> "Hello Heroku");
userPasswords.put("[email protected]", "areplab7");
userPasswords.put("[email protected]", "admin");
get("/", (req, res) -> LoginPage(req, res));
before("/home", new Filter() {
@Override
public void handle(Request request, Response response) {
String user = request.queryParams("user");
String password = request.queryParams("password");
String dbPassword = userPasswords.get(user);
if (!(password != null && password.equals(dbPassword))) {
firstLogin = false;
response.redirect("/");
}
}
});
post("/home", (req, res) -> HomePage(req, res));
}
/**
* Returns the login page as a string.
* @param req request made by the user
* @param res response returned to the user
* @return the HTML page as a string
*/
private static String LoginPage(Request req, Response res) {
String pageContent
= "<!DOCTYPE html>"
+ "<html>"
+ "<script>\n"
+ "function boton(){ if(!"
+ firstLogin
+"){alert(\"Autenticacion fallida intente de nuevo\");}}"
+ "</script>\n"
+ "<body onload=\"boton()\">"
+ "<Center>"
+ "</br>"
+ "</br>"
+ "<h2>Login</h2>"
+ "</br>"
+ "</br>"
+ "<form method=\"post\" action=\"/home\">"
+ " User:<br>"
+ " <input type=\"text\" name=\"user\">"
+ " </br>"
+ "</br>"
+ " password:<br>"
+ " <input type=\"password\" name=\"password\">"
+ "</br>"
+ "</br>"
+ " <input type=\"submit\" value=\"Submit\">"
+ "</form>"
+ "</Center>"
+ "</body>"
+ "</html>";
return pageContent;
}
/**
* Makes the request to the second service hosted on the second AWS virtual machine.
* @param req request made by the user
* @param res response returned to the user
* @return a string with the page served by the second service
*/
private static String HomePage(Request req, Response res) {
String home = "";
try {
URL url = new URL("https://ec2-34-224-66-113.compute-1.amazonaws.com:17000/home");
home = UrlReader.urlprueba(url.toString());
}catch (MalformedURLException e){
e.printStackTrace();
}catch (IOException e){
e.printStackTrace();
}
return home;
}
/**
* Returns the port the application is running on.
*
* @return an integer referring to the port
*/
static int getPort() {
if (System.getenv("PORT") != null) {
return Integer.parseInt(System.getenv("PORT"));
}
return 6000;
}
}
|
[
"\"PORT\"",
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
java
| 1 | 0 | |
train_code/train_ctpn/ctpn_train.py
|
#-*- coding:utf-8 -*-
#'''
# Created on 18-12-27 at 10:31 AM
#
#'''
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import torch
from torch.utils.data import DataLoader
from torch import optim
import torchvision
import numpy as np
import argparse
import config
from ctpn_model import CTPN_Model, RPN_CLS_Loss, RPN_REGR_Loss
from data.dataset import ICDARDataset
import cv2
from tensorboardX import SummaryWriter
writer = SummaryWriter('runs/exp-1')
random_seed = 2019
torch.random.manual_seed(random_seed)
np.random.seed(random_seed)
epochs = 20
lr = 1e-3
resume_epoch = 0
def save_checkpoint(state, epoch, loss_cls, loss_regr, loss, ext='pth'):
check_path = os.path.join(config.checkpoints_dir,
f'v3_ctpn_ep{epoch:02d}_'
f'best.{ext}')
try:
torch.save(state, check_path)
except BaseException as e:
print(e)
print('fail to save to {}'.format(check_path))
print('saving to {}'.format(check_path))
# The usual way to initialize weights is to call the helpers in torch.nn.init:
#   constant(tensor, val)
#   normal(tensor, mean=0, std=1)
#   xavier_uniform(tensor, gain)
# Here we define a custom weight initialization instead
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
if __name__ == '__main__':
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("device:")
print(torch.cuda.is_available())
checkpoints_weight = config.pretrained_weights
print('exist pretrained ',os.path.exists(checkpoints_weight))
if os.path.exists(checkpoints_weight):
pretrained = False
dataset = ICDARDataset(config.icdar19_mlt_img_dir, config.icdar19_mlt_gt_dir)
dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=config.num_workers)
model = CTPN_Model()
model.to(device)
if os.path.exists(checkpoints_weight):
print('using pretrained weight: {}'.format(checkpoints_weight))
cc = torch.load(checkpoints_weight, map_location=device)
model.load_state_dict(cc['model_state_dict'])
resume_epoch = cc['epoch']
else:
model.apply(weights_init)  # Module.apply(fn) recursively walks every submodule of the network and applies fn to each of them.
params_to_update = model.parameters()
optimizer = optim.SGD(params_to_update, lr=lr, momentum=0.9)
critetion_cls = RPN_CLS_Loss(device)
critetion_regr = RPN_REGR_Loss(device)
best_loss_cls = 100
best_loss_regr = 100
best_loss = 100
best_model = None
epochs += resume_epoch
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
# image, clsss, regrss = next(iter(dataloader))
# image = image.to(device)
# print(image.shape)
# print(image.device)
# print(next(model.parameters()).device)
# with writer:
# writer.add_images('images', image)
# writer.add_graph(model,image)
for epoch in range(resume_epoch+1, epochs):
print(f'Epoch {epoch}/{epochs}')
print('#'*50)
epoch_size = len(dataset) // 1
model.train()
epoch_loss_cls = 0
epoch_loss_regr = 0
epoch_loss = 0
scheduler.step(epoch)
for batch_i, (imgs, clss, regrs) in enumerate(dataloader):
print(imgs.shape)
imgs = imgs.to(device)
clss = clss.to(device)
regrs = regrs.to(device)
optimizer.zero_grad()
out_cls, out_regr = model(imgs)
#with writer:
# writer.add_graph(model,imgs)
loss_cls = critetion_cls(out_cls, clss)
loss_regr = critetion_regr(out_regr, regrs)
loss = loss_cls + loss_regr # total loss
loss.backward()
optimizer.step()
epoch_loss_cls += loss_cls.item()
epoch_loss_regr += loss_regr.item()
epoch_loss += loss.item()
mmp = batch_i+1
print(f'Ep:{epoch}/{epochs-1}--'
f'Batch:{batch_i}/{epoch_size}\n'
f'batch: loss_cls:{loss_cls.item():.4f}--loss_regr:{loss_regr.item():.4f}--loss:{loss.item():.4f}\n'
f'Epoch: loss_cls:{epoch_loss_cls/mmp:.4f}--loss_regr:{epoch_loss_regr/mmp:.4f}--'
f'loss:{epoch_loss/mmp:.4f}\n')
#if epoch == 1 and batch_i == 0:
# writer.add_graph(model,imgs)
# print("writing graph to tensorboardx \n")
# print(imgs.device)
# print(next(model.parameters()).is_cuda)
epoch_loss_cls /= epoch_size
epoch_loss_regr /= epoch_size
epoch_loss /= epoch_size
writer.add_scalar('loss_cls', epoch_loss_cls, epoch)
writer.add_scalar('loss_regs', epoch_loss_regr, epoch)
print(f'Epoch:{epoch}--{epoch_loss_cls:.4f}--{epoch_loss_regr:.4f}--{epoch_loss:.4f}')
if best_loss_cls > epoch_loss_cls or best_loss_regr > epoch_loss_regr or best_loss > epoch_loss:
best_loss = epoch_loss
best_loss_regr = epoch_loss_regr
best_loss_cls = epoch_loss_cls
best_model = model
save_checkpoint({'model_state_dict': best_model.state_dict(),
'epoch': epoch},
epoch,
best_loss_cls,
best_loss_regr,
best_loss)
if torch.cuda.is_available():
torch.cuda.empty_cache()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
enterprise/cmd/worker/internal/codeintel/indexing/dependency_indexing_scheduler.go
|
package indexing
import (
"context"
"fmt"
"os"
"strconv"
"time"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/autoindex/enqueuer"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
"github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/log"
)
const requeueBackoff = time.Second * 30
// Defaults to false, i.e. the dependency index scheduler is enabled.
var disableIndexScheduler, _ = strconv.ParseBool(os.Getenv("CODEINTEL_DEPENDENCY_INDEX_SCHEDULER_DISABLED"))
// NewDependencyIndexingScheduler returns a new worker instance that processes
// records from lsif_dependency_indexing_jobs.
func NewDependencyIndexingScheduler(
dbStore DBStore,
workerStore dbworkerstore.Store,
externalServiceStore ExternalServiceStore,
repoUpdaterClient RepoUpdaterClient,
gitserverClient GitserverClient,
enqueuer IndexEnqueuer,
pollInterval time.Duration,
numProcessorRoutines int,
workerMetrics workerutil.WorkerMetrics,
) *workerutil.Worker {
rootContext := actor.WithActor(context.Background(), &actor.Actor{Internal: true})
handler := &dependencyIndexingSchedulerHandler{
dbStore: dbStore,
extsvcStore: externalServiceStore,
indexEnqueuer: enqueuer,
workerStore: workerStore,
repoUpdater: repoUpdaterClient,
gitserver: gitserverClient,
}
return dbworker.NewWorker(rootContext, workerStore, handler, workerutil.WorkerOptions{
Name: "precise_code_intel_dependency_indexing_scheduler_worker",
NumHandlers: numProcessorRoutines,
Interval: pollInterval,
Metrics: workerMetrics,
HeartbeatInterval: 1 * time.Second,
})
}
type dependencyIndexingSchedulerHandler struct {
dbStore DBStore
indexEnqueuer IndexEnqueuer
extsvcStore ExternalServiceStore
workerStore dbworkerstore.Store
repoUpdater RepoUpdaterClient
gitserver GitserverClient
}
var _ workerutil.Handler = &dependencyIndexingSchedulerHandler{}
// Handle iterates all import monikers associated with a given upload that has
// recently completed processing. Each moniker is interpreted according to its
// scheme to determine the dependent repository and commit. A set of indexing
// jobs are enqueued for each repository and commit pair.
func (h *dependencyIndexingSchedulerHandler) Handle(ctx context.Context, logger log.Logger, record workerutil.Record) error {
if !autoIndexingEnabled() || disableIndexScheduler {
return nil
}
job := record.(dbstore.DependencyIndexingJob)
if job.ExternalServiceKind != "" {
externalServices, err := h.extsvcStore.List(ctx, database.ExternalServicesListOptions{
Kinds: []string{job.ExternalServiceKind},
})
if err != nil {
return errors.Wrap(err, "extsvcStore.List")
}
outdatedServices := make(map[int64]time.Duration, len(externalServices))
for _, externalService := range externalServices {
if externalService.LastSyncAt.Before(job.ExternalServiceSync) {
outdatedServices[externalService.ID] = job.ExternalServiceSync.Sub(externalService.LastSyncAt)
}
}
if len(outdatedServices) > 0 {
if err := h.workerStore.Requeue(ctx, job.ID, time.Now().Add(requeueBackoff)); err != nil {
return errors.Wrap(err, "store.Requeue")
}
entries := make([]log.Field, 0, len(outdatedServices))
for id, d := range outdatedServices {
entries = append(entries, log.Duration(fmt.Sprintf("%d", id), d))
}
logger.Warn("Requeued dependency indexing job (external services not yet updated)",
log.Object("outdated_services", entries...))
return nil
}
}
var errs []error
scanner, err := h.dbStore.ReferencesForUpload(ctx, job.UploadID)
if err != nil {
return errors.Wrap(err, "dbstore.ReferencesForUpload")
}
defer func() {
if closeErr := scanner.Close(); closeErr != nil {
err = errors.Append(err, errors.Wrap(closeErr, "dbstore.ReferencesForUpload.Close"))
}
}()
repoToPackages := make(map[api.RepoName][]precise.Package)
var repoNames []api.RepoName
for {
packageReference, exists, err := scanner.Next()
if err != nil {
return errors.Wrap(err, "dbstore.ReferencesForUpload.Next")
}
if !exists {
break
}
pkg := precise.Package{
Scheme: packageReference.Package.Scheme,
Name: packageReference.Package.Name,
Version: packageReference.Package.Version,
}
repoName, _, ok := enqueuer.InferRepositoryAndRevision(pkg)
if !ok {
continue
}
repoToPackages[repoName] = append(repoToPackages[repoName], pkg)
repoNames = append(repoNames, repoName)
}
// if this job is not associated with an external service kind that was just synced, then we need to guarantee
// that the repos are visible to the Sourcegraph instance, else skip them
if job.ExternalServiceKind == "" {
for _, repo := range repoNames {
if _, err := h.repoUpdater.RepoLookup(ctx, repo); errcode.IsNotFound(err) {
delete(repoToPackages, repo)
} else if err != nil {
return errors.Wrapf(err, "repoUpdater.RepoLookup", "repo", repo)
}
}
}
results, err := h.gitserver.RepoInfo(ctx, repoNames...)
if err != nil {
return errors.Wrap(err, "gitserver.RepoInfo")
}
for repo, info := range results {
if !info.Cloned && !info.CloneInProgress { // if the repository doesn't exist
delete(repoToPackages, repo)
} else if info.CloneInProgress { // we can't enqueue if still cloning
return h.workerStore.Requeue(ctx, job.ID, time.Now().Add(requeueBackoff))
}
}
for _, pkgs := range repoToPackages {
for _, pkg := range pkgs {
if err := h.indexEnqueuer.QueueIndexesForPackage(ctx, pkg); err != nil {
errs = append(errs, errors.Wrap(err, "enqueuer.QueueIndexesForPackage"))
}
}
}
if len(errs) == 0 {
return nil
}
if len(errs) == 1 {
return errs[0]
}
return errors.Append(nil, errs...)
}
|
[
"\"CODEINTEL_DEPENDENCY_INDEX_SCHEDULER_DISABLED\""
] |
[] |
[
"CODEINTEL_DEPENDENCY_INDEX_SCHEDULER_DISABLED"
] |
[]
|
["CODEINTEL_DEPENDENCY_INDEX_SCHEDULER_DISABLED"]
|
go
| 1 | 0 | |
noxfile.py
|
from distutils.command.clean import clean
import nox
import os
# Use system installed Python packages
PYT_PATH='/opt/conda/lib/python3.8/site-packages' if not 'PYT_PATH' in os.environ else os.environ["PYT_PATH"]
# Set the root directory to the directory of the noxfile unless the user
# overrides it with the TOP_DIR environment variable
TOP_DIR=os.path.dirname(os.path.realpath(__file__)) if not 'TOP_DIR' in os.environ else os.environ["TOP_DIR"]
nox.options.sessions = ["l0_api_tests-3"]
def install_deps(session):
print("Installing deps")
session.install("-r", os.path.join(TOP_DIR, "py", "requirements.txt"))
session.install("-r", os.path.join(TOP_DIR, "tests", "py", "requirements.txt"))
def download_models(session, use_host_env=False):
print("Downloading test models")
session.install('timm')
print(TOP_DIR)
session.chdir(os.path.join(TOP_DIR, "tests", "modules"))
if use_host_env:
session.run_always('python', 'hub.py', env={'PYTHONPATH': PYT_PATH})
else:
session.run_always('python', 'hub.py')
def install_torch_trt(session):
print("Installing latest torch-tensorrt build")
session.chdir(os.path.join(TOP_DIR, "py"))
session.run("python", "setup.py", "develop")
def download_datasets(session):
print("Downloading dataset to path", os.path.join(TOP_DIR, 'examples/int8/training/vgg16'))
session.chdir(os.path.join(TOP_DIR, 'examples/int8/training/vgg16'))
session.run_always('wget', 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz', external=True)
session.run_always('tar', '-xvzf', 'cifar-10-binary.tar.gz', external=True)
session.run_always('mkdir', '-p',
os.path.join(TOP_DIR, 'tests/accuracy/datasets/data'),
external=True)
session.run_always('cp', '-rpf',
os.path.join(TOP_DIR, 'examples/int8/training/vgg16/cifar-10-batches-bin'),
os.path.join(TOP_DIR, 'tests/accuracy/datasets/data/cidar-10-batches-bin'),
external=True)
def train_model(session, use_host_env=False):
session.chdir(os.path.join(TOP_DIR, 'examples/int8/training/vgg16'))
if use_host_env:
session.run_always('python',
'main.py',
'--lr', '0.01',
'--batch-size', '128',
'--drop-ratio', '0.15',
'--ckpt-dir', 'vgg16_ckpts',
'--epochs', '25',
env={'PYTHONPATH': PYT_PATH})
session.run_always('python',
'export_ckpt.py',
'vgg16_ckpts/ckpt_epoch25.pth',
env={'PYTHONPATH': PYT_PATH})
else:
session.run_always('python',
'main.py',
'--lr', '0.01',
'--batch-size', '128',
'--drop-ratio', '0.15',
'--ckpt-dir', 'vgg16_ckpts',
'--epochs', '25')
session.run_always('python',
'export_ckpt.py',
'vgg16_ckpts/ckpt_epoch25.pth')
def finetune_model(session, use_host_env=False):
# Install pytorch-quantization dependency
session.install('pytorch-quantization', '--extra-index-url', 'https://pypi.ngc.nvidia.com')
session.chdir(os.path.join(TOP_DIR, 'examples/int8/training/vgg16'))
if use_host_env:
session.run_always('python',
'finetune_qat.py',
'--lr', '0.01',
'--batch-size', '128',
'--drop-ratio', '0.15',
'--ckpt-dir', 'vgg16_ckpts',
'--start-from', '25',
'--epochs', '26',
env={'PYTHONPATH': PYT_PATH})
# Export model
session.run_always('python',
'export_qat.py',
'vgg16_ckpts/ckpt_epoch26.pth',
env={'PYTHONPATH': PYT_PATH})
else:
session.run_always('python',
'finetune_qat.py',
'--lr', '0.01',
'--batch-size', '128',
'--drop-ratio', '0.15',
'--ckpt-dir', 'vgg16_ckpts',
'--start-from', '25',
'--epochs', '26')
# Export model
session.run_always('python',
'export_qat.py',
'vgg16_ckpts/ckpt_epoch26.pth')
def cleanup(session):
target = [
'examples/int8/training/vgg16/*.jit.pt',
'examples/int8/training/vgg16/vgg16_ckpts',
'examples/int8/training/vgg16/cifar-10-*',
'examples/int8/training/vgg16/data',
'tests/modules/*.jit.pt',
'tests/py/*.jit.pt'
]
target = ' '.join(x for x in [os.path.join(TOP_DIR, i) for i in target])
session.run_always('bash', '-c',
str('rm -rf ') + target,
external=True)
def run_base_tests(session, use_host_env=False):
print("Running basic tests")
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = [
"test_api.py",
"test_to_backend_api.py"
]
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def run_accuracy_tests(session, use_host_env=False):
print("Running accuracy tests")
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = []
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def copy_model(session):
model_files = [ 'trained_vgg16.jit.pt',
'trained_vgg16_qat.jit.pt']
for file_name in model_files:
src_file = os.path.join(TOP_DIR, str('examples/int8/training/vgg16/') + file_name)
if os.path.exists(src_file):
session.run_always('cp',
'-rpf',
os.path.join(TOP_DIR, src_file),
os.path.join(TOP_DIR, str('tests/py/') + file_name),
external=True)
def run_int8_accuracy_tests(session, use_host_env=False):
print("Running accuracy tests")
copy_model(session)
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = [
"test_ptq_dataloader_calibrator.py",
"test_ptq_to_backend.py",
"test_qat_trt_accuracy.py",
]
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def run_trt_compatibility_tests(session, use_host_env=False):
print("Running TensorRT compatibility tests")
copy_model(session)
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = [
"test_trt_intercompatability.py",
"test_ptq_trt_calibrator.py",
]
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def run_dla_tests(session, use_host_env=False):
print("Running DLA tests")
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = [
"test_api_dla.py",
]
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def run_multi_gpu_tests(session, use_host_env=False):
print("Running multi GPU tests")
session.chdir(os.path.join(TOP_DIR, 'tests/py'))
tests = [
"test_multi_gpu.py",
]
for test in tests:
if use_host_env:
session.run_always('python', test, env={'PYTHONPATH': PYT_PATH})
else:
session.run_always("python", test)
def run_l0_api_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
run_base_tests(session, use_host_env)
cleanup(session)
def run_l0_dla_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
run_base_tests(session, use_host_env)
cleanup(session)
def run_l1_accuracy_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
download_datasets(session)
train_model(session, use_host_env)
run_accuracy_tests(session, use_host_env)
cleanup(session)
def run_l1_int8_accuracy_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
download_datasets(session)
train_model(session, use_host_env)
finetune_model(session, use_host_env)
run_int8_accuracy_tests(session, use_host_env)
cleanup(session)
def run_l2_trt_compatibility_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
download_datasets(session)
train_model(session, use_host_env)
run_trt_compatibility_tests(session, use_host_env)
cleanup(session)
def run_l2_multi_gpu_tests(session, use_host_env=False):
if not use_host_env:
install_deps(session)
install_torch_trt(session)
download_models(session, use_host_env)
run_multi_gpu_tests(session, use_host_env)
cleanup(session)
@nox.session(python=["3"], reuse_venv=True)
def l0_api_tests(session):
"""When a developer needs to check correctness for a PR or something"""
run_l0_api_tests(session, use_host_env=False)
@nox.session(python=["3"], reuse_venv=True)
def l0_api_tests_host_deps(session):
"""When a developer needs to check basic api functionality using host dependencies"""
run_l0_api_tests(session, use_host_env=True)
@nox.session(python=["3"], reuse_venv=True)
def l0_dla_tests_host_deps(session):
"""When a developer needs to check basic api functionality using host dependencies"""
run_l0_dla_tests(session, use_host_env=True)
@nox.session(python=["3"], reuse_venv=True)
def l1_accuracy_tests(session):
"""Checking accuracy performance on various usecases"""
run_l1_accuracy_tests(session, use_host_env=False)
@nox.session(python=["3"], reuse_venv=True)
def l1_accuracy_tests_host_deps(session):
"""Checking accuracy performance on various usecases using host dependencies"""
run_l1_accuracy_tests(session, use_host_env=True)
@nox.session(python=["3"], reuse_venv=True)
def l1_int8_accuracy_tests(session):
"""Checking accuracy performance on various usecases"""
run_l1_int8_accuracy_tests(session, use_host_env=False)
@nox.session(python=["3"], reuse_venv=True)
def l1_int8_accuracy_tests_host_deps(session):
"""Checking accuracy performance on various usecases using host dependencies"""
run_l1_int8_accuracy_tests(session, use_host_env=True)
@nox.session(python=["3"], reuse_venv=True)
def l2_trt_compatibility_tests(session):
"""Makes sure that TensorRT Python and Torch-TensorRT can work together"""
run_l2_trt_compatibility_tests(session, use_host_env=False)
@nox.session(python=["3"], reuse_venv=True)
def l2_trt_compatibility_tests_host_deps(session):
"""Makes sure that TensorRT Python and Torch-TensorRT can work together using host dependencies"""
run_l2_trt_compatibility_tests(session, use_host_env=True)
@nox.session(python=["3"], reuse_venv=True)
def l2_multi_gpu_tests(session):
"""Makes sure that Torch-TensorRT can operate on multi-gpu systems"""
run_l2_multi_gpu_tests(session, use_host_env=False)
@nox.session(python=["3"], reuse_venv=True)
def l2_multi_gpu_tests_host_deps(session):
"""Makes sure that Torch-TensorRT can operate on multi-gpu systems using host dependencies"""
run_l2_multi_gpu_tests(session, use_host_env=True)
|
[] |
[] |
[
"TOP_DIR",
"PYT_PATH"
] |
[]
|
["TOP_DIR", "PYT_PATH"]
|
python
| 2 | 0 | |
vendor/github.com/heketi/heketi/pkg/kubernetes/backupdb.go
|
//
// Copyright (c) 2017 The heketi Authors
//
// This file is licensed to you under your choice of the GNU Lesser
// General Public License, version 3 or any later version (LGPLv3 or
// later), or the GNU General Public License, version 2 (GPLv2), in all
// cases as published by the Free Software Foundation.
//
package kubernetes
import (
"bytes"
"compress/gzip"
"fmt"
"os"
"github.com/boltdb/bolt"
wdb "github.com/heketi/heketi/pkg/db"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
)
var (
inClusterConfig = restclient.InClusterConfig
newForConfig = func(c *restclient.Config) (clientset.Interface, error) {
return clientset.NewForConfig(c)
}
getNamespace = GetNamespace
dbSecretName = "heketi-db-backup"
)
func KubeBackupDbToSecret(db wdb.RODB) error {
// Check if we should use another name for the heketi backup secret
env := os.Getenv("HEKETI_KUBE_DB_SECRET_NAME")
if len(env) != 0 {
dbSecretName = env
}
// Get Kubernetes configuration
kubeConfig, err := inClusterConfig()
if err != nil {
return fmt.Errorf("Unable to get kubernetes configuration: %v", err)
}
// Get clientset
c, err := newForConfig(kubeConfig)
if err != nil {
return fmt.Errorf("Unable to get kubernetes clientset: %v", err)
}
// Get namespace
ns, err := getNamespace()
if err != nil {
return fmt.Errorf("Unable to get namespace: %v", err)
}
// Create client for secrets (the Secrets accessor itself never returns an error;
// any earlier error has already been handled above)
secrets := c.CoreV1().Secrets(ns)
// Get a backup
err = db.View(func(tx *bolt.Tx) error {
var backup bytes.Buffer
gz := gzip.NewWriter(&backup)
_, err := tx.WriteTo(gz)
if err != nil {
return fmt.Errorf("Unable to access database: %v", err)
}
if err := gz.Close(); err != nil {
return fmt.Errorf("Unable to close gzipped database: %v", err)
}
// Create a secret with backup
secret := &v1.Secret{}
secret.Kind = "Secret"
secret.Namespace = ns
secret.APIVersion = "v1"
secret.ObjectMeta.Name = dbSecretName
secret.Data = map[string][]byte{
"heketi.db.gz": backup.Bytes(),
}
// Submit secret
_, err = secrets.Create(secret)
if apierrors.IsAlreadyExists(err) {
// It already exists, so just update it instead
_, err = secrets.Update(secret)
if err != nil {
return fmt.Errorf("Unable to update database to secret: %v", err)
}
} else if err != nil {
return fmt.Errorf("Unable to create database secret: %v", err)
}
return nil
})
if err != nil {
return fmt.Errorf("Unable to backup database to kubernetes secret: %v", err)
}
return nil
}
|
[
"\"HEKETI_KUBE_DB_SECRET_NAME\""
] |
[] |
[
"HEKETI_KUBE_DB_SECRET_NAME"
] |
[]
|
["HEKETI_KUBE_DB_SECRET_NAME"]
|
go
| 1 | 0 | |
cmd/kubelet/kubelet.go
|
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The kubelet binary is responsible for maintaining a set of containers on a particular host VM.
// It syncs data from both configuration file(s) as well as from a quorum of etcd servers.
// It then queries Docker to see what is currently running. It synchronizes the configuration data,
// with the running set of containers by starting or stopping Docker containers.
package main
import (
"flag"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/health"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/healthz"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
kconfig "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag"
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/google/cadvisor/client"
)
const defaultRootDir = "/var/lib/kubelet"
var (
config = flag.String("config", "", "Path to the config file or directory of files")
syncFrequency = flag.Duration("sync_frequency", 10*time.Second, "Max period between synchronizing running containers and config")
fileCheckFrequency = flag.Duration("file_check_frequency", 20*time.Second, "Duration between checking config files for new data")
httpCheckFrequency = flag.Duration("http_check_frequency", 20*time.Second, "Duration between checking http for new data")
manifestURL = flag.String("manifest_url", "", "URL for accessing the container manifest")
enableServer = flag.Bool("enable_server", true, "Enable the info server")
address = util.IP(net.ParseIP("127.0.0.1"))
port = flag.Uint("port", ports.KubeletPort, "The port for the info server to serve on")
hostnameOverride = flag.String("hostname_override", "", "If non-empty, will use this string as identification instead of the actual hostname.")
networkContainerImage = flag.String("network_container_image", kubelet.NetworkContainerImage, "The image that network containers in each pod will use.")
dockerEndpoint = flag.String("docker_endpoint", "", "If non-empty, use this for the docker endpoint to communicate with")
etcdServerList util.StringList
etcdConfigFile = flag.String("etcd_config", "", "The config file for the etcd client. Mutually exclusive with -etcd_servers")
rootDirectory = flag.String("root_dir", defaultRootDir, "Directory path for managing kubelet files (volume mounts,etc).")
allowPrivileged = flag.Bool("allow_privileged", false, "If true, allow containers to request privileged mode. [default=false]")
registryPullQPS = flag.Float64("registry_qps", 0.0, "If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=0.0]")
registryBurst = flag.Int("registry_burst", 10, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry_qps. Only used if --registry_qps > 0")
runonce = flag.Bool("runonce", false, "If true, exit after spawning pods from local manifests or remote urls. Exclusive with --etcd_servers and --enable-server")
enableDebuggingHandlers = flag.Bool("enable_debugging_handlers", true, "Enables server endpoints for log collection and local running of containers and commands")
minimumGCAge = flag.Duration("minimum_container_ttl_duration", 0, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'")
maxContainerCount = flag.Int("maximum_dead_containers_per_container", 5, "Maximum number of old instances of a container to retain per container. Each container takes up some disk space. Default: 5.")
)
func init() {
flag.Var(&etcdServerList, "etcd_servers", "List of etcd servers to watch (http://ip:port), comma separated. Mutually exclusive with -etcd_config")
flag.Var(&address, "address", "The IP address for the info server to serve on (set to 0.0.0.0 for all interfaces)")
}
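// getDockerEndpoint resolves the Docker endpoint in priority order: the --docker_endpoint flag, the DOCKER_HOST environment variable, then the default unix socket.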
func getDockerEndpoint() string {
var endpoint string
if len(*dockerEndpoint) > 0 {
endpoint = *dockerEndpoint
} else if len(os.Getenv("DOCKER_HOST")) > 0 {
endpoint = os.Getenv("DOCKER_HOST")
} else {
endpoint = "unix:///var/run/docker.sock"
}
glog.Infof("Connecting to docker on %s", endpoint)
return endpoint
}
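// getHostname returns --hostname_override if set, otherwise the FQDN reported by `hostname -f`.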
func getHostname() string {
hostname := []byte(*hostnameOverride)
if string(hostname) == "" {
// Note: We use exec here instead of os.Hostname() because we
// want the FQDN, and this is the easiest way to get it.
fqdn, err := exec.Command("hostname", "-f").Output()
if err != nil {
glog.Fatalf("Couldn't determine hostname: %v", err)
}
hostname = fqdn
}
return strings.TrimSpace(string(hostname))
}
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
rand.Seed(time.Now().UTC().UnixNano())
verflag.PrintAndExitIfRequested()
if *runonce {
exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
if len(etcdServerList) > 0 {
glog.Fatalf(exclusiveFlag, "--etcd_servers")
}
if *enableServer {
glog.Infof("--runonce is set, disabling server")
*enableServer = false
}
}
etcd.SetLogger(util.NewLogger("etcd "))
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: *allowPrivileged,
})
dockerClient, err := docker.NewClient(getDockerEndpoint())
if err != nil {
glog.Fatal("Couldn't connect to docker.")
}
hostname := getHostname()
if *rootDirectory == "" {
glog.Fatal("Invalid root directory path.")
}
*rootDirectory = path.Clean(*rootDirectory)
if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
glog.Warningf("Error creating root directory: %v", err)
}
// source of all configuration
cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)
// define file config source
if *config != "" {
kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
}
// define url config source
if *manifestURL != "" {
kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
}
// define etcd config source and initialize etcd client
var etcdClient *etcd.Client
if len(etcdServerList) > 0 {
etcdClient = etcd.NewClient(etcdServerList)
} else if *etcdConfigFile != "" {
var err error
etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
if err != nil {
glog.Fatalf("Error with etcd config file: %v", err)
}
}
if etcdClient != nil {
glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
}
// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
// up into "per source" synchronizations
k := kubelet.NewMainKubelet(
getHostname(),
dockerClient,
etcdClient,
*rootDirectory,
*networkContainerImage,
*syncFrequency,
float32(*registryPullQPS),
*registryBurst,
*minimumGCAge,
*maxContainerCount)
go func() {
util.Forever(func() {
err := k.GarbageCollectContainers()
if err != nil {
glog.Errorf("Garbage collect failed: %v", err)
}
}, time.Minute*1)
}()
go func() {
defer util.HandleCrash()
// TODO: Monitor this connection, reconnect if needed?
glog.V(1).Infof("Trying to create cadvisor client.")
cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
if err != nil {
glog.Errorf("Error on creating cadvisor client: %v", err)
return
}
glog.V(1).Infof("Successfully created cadvisor client.")
k.SetCadvisorClient(cadvisorClient)
}()
// TODO: These should probably become more plugin-ish: register a factory func
// in each checker's init(), iterate those here.
health.AddHealthChecker(health.NewExecHealthChecker(k))
health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
health.AddHealthChecker(&health.TCPHealthChecker{})
// process pods and exit.
if *runonce {
if _, err := k.RunOnce(cfg.Updates()); err != nil {
glog.Fatalf("--runonce failed: %v", err)
}
return
}
// start the kubelet
go util.Forever(func() { k.Run(cfg.Updates()) }, 0)
// start the kubelet server
if *enableServer {
go util.Forever(func() {
kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
}, 0)
}
// runs forever
select {}
}
|
[
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\""
] |
[] |
[
"DOCKER_HOST"
] |
[]
|
["DOCKER_HOST"]
|
go
| 1 | 0 | |
examples/03_linreg_starter.py
|
#encoding=utf-8
""" Starter code for simple linear regression example using placeholders
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import utils
DATA_FILE = 'data/birth_life_2010.txt'
# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(DATA_FILE)
# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
# Remember both X and Y are scalars with type float
X, Y = None, None
#############################
########## TO DO ############
X = tf.placeholder(tf.float32, shape=(None), name="x")
Y = tf.placeholder(tf.float32, shape=(None), name="y")
#############################
# Step 3: create weight and bias, initialized to 0.0
# Make sure to use tf.get_variable
w, b = None, None
#############################
########## TO DO ############
#w = tf.get_variable('w', shape=(1), initializer=tf.zeros_initializer())
#b= tf.get_variable('b', shape=(1), initializer=tf.zeros_initializer())
w = tf.get_variable('w', initializer=tf.constant(0.0))
b = tf.get_variable('b', initializer=tf.constant(0.0))
#############################
# Step 4: build model to predict Y
# e.g. how would you derive at Y_predicted given X, w, and b
Y_predicted = None
#############################
########## TO DO ############
Y_predicted = tf.add( tf.multiply( X, w ), b )
#############################
# Step 5: use the square error as the loss function
loss = None
#############################
########## TO DO ############
loss = tf.losses.mean_squared_error( Y, Y_predicted )
#############################
# Step 6: using gradient descent with learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
start = time.time()
# Create a filewriter to write the model's graph to TensorBoard
#############################
########## TO DO ############
writer = tf.summary.FileWriter('graphs/linear_regression', tf.get_default_graph())
#############################
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
#############################
########## TO DO ############
init=tf.global_variables_initializer()
sess.run(init)
#############################
# Step 8: train the model for 100 epochs
for i in range(100):
total_loss = 0
for x, y in data:
# Execute train_op and get the value of loss.
# Don't forget to feed in data for placeholders
# Do not name the returned loss value "loss" again, because that would overwrite the loss tensor defined above:
# loss would then hold a plain float, which is fine on the first run, but on the
# next iteration we would be asking sess.run to execute a float instead of a tensor.
_, loss_1 = sess.run([optimizer, loss] ,feed_dict={X:x, Y:y}) ########## TO DO ############
total_loss += loss_1
print('Epoch {0}: {1}'.format(i, total_loss/n_samples))
# close the writer when you're done using it
#############################
########## TO DO ############
#############################
writer.close()
# Step 9: output the values of w and b
w_out, b_out = None, None
#############################
########## TO DO ############
w_out = sess.run(w)
b_out = sess.run(b)
print(w_out)
print(b_out)
#############################
print('Took: %f seconds' %(time.time() - start))
# uncomment the following lines to see the plot
plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data')
plt.legend()
plt.show()
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
delay-server.go
|
package main
import (
"fmt"
"math/rand"
"net/http"
"os"
"strconv"
"time"
)
func setupResponse(w *http.ResponseWriter, req *http.Request) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
(*w).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
(*w).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
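// handler sleeps for a random duration between the "min" and "max" query parameters (milliseconds) and, when "failure" is set, returns a mock 500 error with probability 1/failure.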
func handler(w http.ResponseWriter, r *http.Request) {
setupResponse(&w, r)
if (*r).Method == "OPTIONS" {
return
}
maxMs, parseError := strconv.ParseInt(r.URL.Query().Get("max"), 0, 32)
if parseError != nil {
maxMs = 1
}
minMs, parseError := strconv.ParseInt(r.URL.Query().Get("min"), 0, 32)
if parseError != nil {
minMs = 0
}
failureChance, parseError := strconv.ParseInt(r.URL.Query().Get("failure"), 0, 32)
if parseError != nil {
failureChance = 0
}
if maxMs < 0 || maxMs > 30000 {
http.Error(w, "invalid 'maxMs' query param. must be >= 0 and <= 30000", http.StatusBadRequest)
return
}
if minMs < 0 {
http.Error(w, "invalid 'minMs' query param. must be >= 0", http.StatusBadRequest)
return
}
if maxMs < minMs {
http.Error(w, "invalid 'maxMs' & 'minMs' query params. maxMs must be greater than or equal to minMs", http.StatusBadRequest)
return
}
// rand.Intn panics on a non-positive argument, so only randomize when max > min.
delayMs := int(minMs)
if maxMs > minMs {
delayMs += rand.Intn(int(maxMs - minMs))
}
time.Sleep(time.Duration(delayMs) * time.Millisecond)
if failureChance > 0 && rand.Intn(int(failureChance)) == 0 {
http.Error(w, "Mock error", http.StatusInternalServerError)
} else {
fmt.Fprintf(w, "<h1>welcome to the go delay server</h1>"+
"<h2> supported query params</h2>"+
"<ul>"+
"<li>max : max delay in milliseconds. defaults to 1</li>"+
"<li>min : min delay in milliseconds. defaults to 0</li>"+
"<li>failure : 1 in X failure chance. defaults to 0 (off)</li>"+
"</ul>")
}
}
func main() {
http.HandleFunc("/", handler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.ListenAndServe(":"+port, nil)
fmt.Println("listing on " + port)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
multinet-server/views.py
|
import os
import json
import logging
logger = logging.getLogger(__name__)
from aiohttp import web
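# Every handler below connects to ArangoDB as the root user, reading the password from the MULTINET_ROOT_PASSWORD environment variable.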
async def index(request):
logger.debug('Accessing index')
client = request.app['arango']
sys_db = client.db('_system', username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
dbs = sys_db.databases()
logger.info('Response: %s' % dbs)
return web.Response(text=json.dumps(dbs, indent=4))
async def addDB(request):
logger.debug('Adding DB')
client = request.app['arango']
sys_db = client.db('_system', username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
name = request.match_info['name']
if not sys_db.has_database(name):
sys_db.create_database(name)
else:
logger.info('Request to add db {} is a no-op because database is already present'.format(name))
return web.Response(text=name)
async def getDB(request):
logger.debug('Getting DB')
client = request.app['arango']
db = client.db(request.match_info['name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graphs = [coll for coll in db.graphs() if not coll['name'].startswith('_')]
return web.Response(text=json.dumps(graphs, indent=4))
async def getGraph(request):
logger.debug('Getting Graph')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['name'])
vertex_collections = graph.vertex_collections()
edge_definitions = graph.edge_definitions()
return web.Response(text=json.dumps(
{
"vertex_collections": vertex_collections,
"edge_definitions": edge_definitions
},
indent=4
))
async def addGraph(request):
logger.debug('Adding Graph')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
name = request.match_info['name']
graph = db.graph(name) if db.has_graph(name) else db.create_graph(name)
return web.Response(text=graph.name)
async def addVertices(request):
logger.debug('Adding Vertices')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
name = request.match_info['name']
collection = graph.vertex_collection(name) if graph.has_vertex_collection(name) else graph.create_vertex_collection(name)
reader = await request.multipart()
import_file = await reader.next()
logger.info(import_file.filename)
filedata = await import_file.text()
fileschema = [key.strip('"') for key in filedata.splitlines()[0].split(',')]
logger.info(fileschema)
filelines = filedata.splitlines()[1:]
for line in filelines:
values = [value.strip('"') for value in line.split(',')]
doc = {key:value for key, value in zip(fileschema, values)}
try:
collection.insert(doc)
except Exception as e:
logger.info(e)
return web.Response(text=collection.name)
async def getVertices(request):
logger.debug('Getting Vertices')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
collection = db.collection(request.match_info['name'])
cursor = collection.all()
documents = [doc for doc in cursor]
return web.Response(text=json.dumps(documents[0:5], indent=4))
async def addEdges(request):
logger.debug('Adding Edges')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
name = request.match_info['name']
reader = await request.multipart()
field = await reader.next()
text = await field.text()
from_collections = text.split(',')
field = await reader.next()
text = await field.text()
to_collections = text.split(',')
if graph.has_edge_definition(name):
collection = graph.edge_collection(name)
else:
collection = graph.create_edge_definition(
edge_collection=name,
from_vertex_collections=from_collections,
to_vertex_collections=to_collections)
import_file = await reader.next()
filedata = await import_file.text()
fileschema = [key.strip('"') for key in filedata.splitlines()[0].split(',')]
filelines = filedata.splitlines()[1:]
for line in filelines:
values = [value.strip('"') for value in line.split(',')]
doc = {key:value for key, value in zip(fileschema, values)}
try:
collection.insert(doc)
except Exception as e:
logger.info(e)
return web.Response(text=collection.name)
async def getEdges(request):
logger.debug('Getting Edges')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
collection = graph.edge_collection(request.match_info['name'])
cursor = collection.all()
documents = [doc for doc in cursor]
return web.Response(text=json.dumps(documents[0:5], indent=4))
|
[] |
[] |
[
"MULTINET_ROOT_PASSWORD"
] |
[]
|
["MULTINET_ROOT_PASSWORD"]
|
python
| 1 | 0 | |
cni/network/network_windows.go
|
package network
import (
"encoding/json"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/Azure/azure-container-networking/cni"
"github.com/Azure/azure-container-networking/cns"
"github.com/Azure/azure-container-networking/log"
"github.com/Azure/azure-container-networking/network"
"github.com/Azure/azure-container-networking/network/policy"
"github.com/Microsoft/hcsshim"
"golang.org/x/sys/windows/registry"
cniSkel "github.com/containernetworking/cni/pkg/skel"
cniTypes "github.com/containernetworking/cni/pkg/types"
cniTypesCurr "github.com/containernetworking/cni/pkg/types/current"
)
var (
snatConfigFileName = filepath.FromSlash(os.Getenv("TEMP")) + "\\snatConfig"
// windows build for version 1903
win1903Version = 18362
)
/* handleConsecutiveAdd handles consecutive add calls for infrastructure containers on Windows platform.
* This is a temporary work around for issue #57253 of Kubernetes.
* We can delete this if statement once they fix it.
* Issue link: https://github.com/kubernetes/kubernetes/issues/57253
*/
func handleConsecutiveAdd(args *cniSkel.CmdArgs, endpointId string, nwInfo network.NetworkInfo, epInfo *network.EndpointInfo, nwCfg *cni.NetworkConfig) (*cniTypesCurr.Result, error) {
// Return in case of HNSv2 as consecutive add call doesn't need to be handled
if useHnsV2, err := network.UseHnsV2(args.Netns); useHnsV2 {
return nil, err
}
hnsEndpoint, err := hcsshim.GetHNSEndpointByName(endpointId)
if hnsEndpoint != nil {
log.Printf("[net] Found existing endpoint through hcsshim: %+v", hnsEndpoint)
endpoint, _ := hcsshim.GetHNSEndpointByID(hnsEndpoint.Id)
isAttached, _ := endpoint.IsAttached(args.ContainerID)
// Attach endpoint if it's not attached yet.
if !isAttached {
log.Printf("[net] Attaching ep %v to container %v", hnsEndpoint.Id, args.ContainerID)
err := hcsshim.HotAttachEndpoint(args.ContainerID, hnsEndpoint.Id)
if err != nil {
log.Printf("[cni-net] Failed to hot attach shared endpoint[%v] to container [%v], err:%v.", hnsEndpoint.Id, args.ContainerID, err)
return nil, err
}
}
// Populate result.
address := nwInfo.Subnets[0].Prefix
address.IP = hnsEndpoint.IPAddress
result := &cniTypesCurr.Result{
IPs: []*cniTypesCurr.IPConfig{
{
Version: "4",
Address: address,
Gateway: net.ParseIP(hnsEndpoint.GatewayAddress),
},
},
Routes: []*cniTypes.Route{
{
Dst: net.IPNet{IP: net.IPv4zero, Mask: net.IPv4Mask(0, 0, 0, 0)},
GW: net.ParseIP(hnsEndpoint.GatewayAddress),
},
},
}
if nwCfg.IPV6Mode != "" && len(epInfo.IPAddresses) > 1 {
ipv6Config := &cniTypesCurr.IPConfig{
Version: "6",
Address: epInfo.IPAddresses[1],
}
if len(nwInfo.Subnets) > 1 {
ipv6Config.Gateway = nwInfo.Subnets[1].Gateway
}
result.IPs = append(result.IPs, ipv6Config)
}
// Populate DNS servers.
result.DNS.Nameservers = nwCfg.DNS.Nameservers
return result, nil
}
err = fmt.Errorf("GetHNSEndpointByName for %v returned nil with err %v", endpointId, err)
return nil, err
}
func addDefaultRoute(gwIPString string, epInfo *network.EndpointInfo, result *cniTypesCurr.Result) {
}
func addSnatForDNS(gwIPString string, epInfo *network.EndpointInfo, result *cniTypesCurr.Result) {
}
func addInfraRoutes(azIpamResult *cniTypesCurr.Result, result *cniTypesCurr.Result, epInfo *network.EndpointInfo) {
}
func setNetworkOptions(cnsNwConfig *cns.GetNetworkContainerResponse, nwInfo *network.NetworkInfo) {
if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 {
log.Printf("Setting Network Options")
vlanMap := make(map[string]interface{})
vlanMap[network.VlanIDKey] = strconv.Itoa(cnsNwConfig.MultiTenancyInfo.ID)
nwInfo.Options[dockerNetworkOption] = vlanMap
}
}
func setEndpointOptions(cnsNwConfig *cns.GetNetworkContainerResponse, epInfo *network.EndpointInfo, vethName string) {
if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 {
log.Printf("Setting Endpoint Options")
var cnetAddressMap []string
for _, ipSubnet := range cnsNwConfig.CnetAddressSpace {
cnetAddressMap = append(cnetAddressMap, ipSubnet.IPAddress+"/"+strconv.Itoa(int(ipSubnet.PrefixLength)))
}
epInfo.Data[network.CnetAddressSpace] = cnetAddressMap
epInfo.AllowInboundFromHostToNC = cnsNwConfig.AllowHostToNCCommunication
epInfo.AllowInboundFromNCToHost = cnsNwConfig.AllowNCToHostCommunication
epInfo.NetworkContainerID = cnsNwConfig.NetworkContainerID
}
}
func addSnatInterface(nwCfg *cni.NetworkConfig, result *cniTypesCurr.Result) {
}
func updateSubnetPrefix(cnsNwConfig *cns.GetNetworkContainerResponse, subnetPrefix *net.IPNet) error {
if cnsNwConfig != nil && cnsNwConfig.MultiTenancyInfo.ID != 0 {
ipconfig := cnsNwConfig.IPConfiguration
ipAddr := net.ParseIP(ipconfig.IPSubnet.IPAddress)
if ipAddr.To4() != nil {
*subnetPrefix = net.IPNet{Mask: net.CIDRMask(int(ipconfig.IPSubnet.PrefixLength), 32)}
} else if ipAddr.To16() != nil {
*subnetPrefix = net.IPNet{Mask: net.CIDRMask(int(ipconfig.IPSubnet.PrefixLength), 128)}
} else {
return fmt.Errorf("[cni-net] Failed to get mask from CNS network configuration")
}
subnetPrefix.IP = ipAddr.Mask(subnetPrefix.Mask)
log.Printf("Updated subnetPrefix: %s", subnetPrefix.String())
}
return nil
}
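// getNetworkName returns the HNS network name; for multi-tenant configs it derives a per-VLAN name (e.g. azure-vlan1-172-28-1-0_24) from the CNS subnet prefix.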
func getNetworkName(podName, podNs, ifName string, nwCfg *cni.NetworkConfig) (networkName string, err error) {
networkName = nwCfg.Name
err = nil
if nwCfg.MultiTenancy {
determineWinVer()
if len(strings.TrimSpace(podName)) == 0 || len(strings.TrimSpace(podNs)) == 0 {
err = fmt.Errorf("POD info cannot be empty. PodName: %s, PodNamespace: %s", podName, podNs)
return
}
_, cnsNetworkConfig, _, err := getContainerNetworkConfiguration(nwCfg, podName, podNs, ifName)
if err != nil {
log.Printf("GetContainerNetworkConfiguration failed for podname %v namespace %v with error %v", podName, podNs, err)
} else {
var subnet net.IPNet
if err = updateSubnetPrefix(cnsNetworkConfig, &subnet); err == nil {
// networkName will look like ~ azure-vlan1-172-28-1-0_24
networkName = strings.Replace(subnet.String(), ".", "-", -1)
networkName = strings.Replace(networkName, "/", "_", -1)
networkName = fmt.Sprintf("%s-vlan%v-%v", nwCfg.Name, cnsNetworkConfig.MultiTenancyInfo.ID, networkName)
}
}
}
return
}
func setupInfraVnetRoutingForMultitenancy(
nwCfg *cni.NetworkConfig,
azIpamResult *cniTypesCurr.Result,
epInfo *network.EndpointInfo,
result *cniTypesCurr.Result) {
}
func getNetworkDNSSettings(nwCfg *cni.NetworkConfig, result *cniTypesCurr.Result, namespace string) (network.DNSInfo, error) {
var nwDNS network.DNSInfo
// use custom dns if present
nwDNS = getCustomDNS(nwCfg)
if len(nwDNS.Servers) > 0 || nwDNS.Suffix != "" {
return nwDNS, nil
}
if (len(nwCfg.DNS.Search) == 0) != (len(nwCfg.DNS.Nameservers) == 0) {
err := fmt.Errorf("Wrong DNS configuration: %+v", nwCfg.DNS)
return nwDNS, err
}
nwDNS = network.DNSInfo{
Servers: nwCfg.DNS.Nameservers,
}
return nwDNS, nil
}
func getEndpointDNSSettings(nwCfg *cni.NetworkConfig, result *cniTypesCurr.Result, namespace string) (network.DNSInfo, error) {
var epDNS network.DNSInfo
// use custom dns if present
epDNS = getCustomDNS(nwCfg)
if len(epDNS.Servers) > 0 || epDNS.Suffix != "" {
return epDNS, nil
}
if (len(nwCfg.DNS.Search) == 0) != (len(nwCfg.DNS.Nameservers) == 0) {
err := fmt.Errorf("Wrong DNS configuration: %+v", nwCfg.DNS)
return epDNS, err
}
if len(nwCfg.DNS.Search) > 0 {
epDNS = network.DNSInfo{
Servers: nwCfg.DNS.Nameservers,
Suffix: namespace + "." + strings.Join(nwCfg.DNS.Search, ","),
Options: nwCfg.DNS.Options,
}
} else {
epDNS = network.DNSInfo{
Servers: result.DNS.Nameservers,
Suffix: result.DNS.Domain,
Options: nwCfg.DNS.Options,
}
}
return epDNS, nil
}
// getPoliciesFromRuntimeCfg returns network policies from network config.
func getPoliciesFromRuntimeCfg(nwCfg *cni.NetworkConfig) []policy.Policy {
log.Printf("[net] RuntimeConfigs: %+v", nwCfg.RuntimeConfig)
var policies []policy.Policy
for _, mapping := range nwCfg.RuntimeConfig.PortMappings {
rawPolicy, _ := json.Marshal(&hcsshim.NatPolicy{
Type: "NAT",
ExternalPort: uint16(mapping.HostPort),
InternalPort: uint16(mapping.ContainerPort),
Protocol: mapping.Protocol,
})
policy := policy.Policy{
Type: policy.EndpointPolicy,
Data: rawPolicy,
}
log.Printf("[net] Creating port mapping policy: %+v", policy)
policies = append(policies, policy)
}
return policies
}
func addIPV6EndpointPolicy(nwInfo network.NetworkInfo) (policy.Policy, error) {
var (
eppolicy policy.Policy
)
if len(nwInfo.Subnets) < 2 {
return eppolicy, fmt.Errorf("network state doesn't have ipv6 subnet")
}
// Everything should be snat'd except podcidr
exceptionList := []string{nwInfo.Subnets[1].Prefix.String()}
rawPolicy, _ := json.Marshal(&hcsshim.OutboundNatPolicy{
Policy: hcsshim.Policy{Type: hcsshim.OutboundNat},
Exceptions: exceptionList,
})
eppolicy = policy.Policy{
Type: policy.EndpointPolicy,
Data: rawPolicy,
}
log.Printf("[net] ipv6 outboundnat policy: %+v", eppolicy)
return eppolicy, nil
}
func getCustomDNS(nwCfg *cni.NetworkConfig) network.DNSInfo {
var search string
if len(nwCfg.RuntimeConfig.DNS.Searches) > 0 {
search = strings.Join(nwCfg.RuntimeConfig.DNS.Searches, ",")
}
return network.DNSInfo{
Servers: nwCfg.RuntimeConfig.DNS.Servers,
Suffix: search,
Options: nwCfg.RuntimeConfig.DNS.Options,
}
}
func determineWinVer() {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
if err == nil {
defer k.Close()
cb, _, err := k.GetStringValue("CurrentBuild")
if err == nil {
winVer, err := strconv.Atoi(cb)
if err == nil {
policy.ValidWinVerForDnsNat = winVer >= win1903Version
}
}
}
if err != nil {
log.Errorf(err.Error())
}
}
|
[
"\"TEMP\""
] |
[] |
[
"TEMP"
] |
[]
|
["TEMP"]
|
go
| 1 | 0 | |
pkg/util/coverage/coverage.go
|
// +build coverage
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package coverage provides tools for coverage-instrumented binaries to collect and
// flush coverage information.
package coverage
import (
"flag"
"fmt"
"os"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
var coverageFile string
// tempCoveragePath returns a temporary file to write coverage information to.
// The file is in the same directory as the destination, ensuring os.Rename will work.
func tempCoveragePath() string {
return coverageFile + ".tmp"
}
// InitCoverage is called from the dummy unit test to prepare Go's coverage framework.
// Clients should never need to call it.
func InitCoverage(name string) {
// We read the coverage destination in from the KUBE_COVERAGE_FILE env var,
// or if it's empty we just use a default in /tmp
coverageFile = os.Getenv("KUBE_COVERAGE_FILE")
if coverageFile == "" {
coverageFile = "/tmp/k8s-" + name + ".cov"
}
fmt.Println("Dumping coverage information to " + coverageFile)
flushInterval := 5 * time.Second
requestedInterval := os.Getenv("KUBE_COVERAGE_FLUSH_INTERVAL")
if requestedInterval != "" {
if duration, err := time.ParseDuration(requestedInterval); err == nil {
flushInterval = duration
} else {
panic("Invalid KUBE_COVERAGE_FLUSH_INTERVAL value; try something like '30s'.")
}
}
// Set up the unit test framework with the required arguments to activate test coverage.
flag.CommandLine.Parse([]string{"-test.coverprofile", tempCoveragePath()})
// Begin periodic logging
go wait.Forever(FlushCoverage, flushInterval)
}
// FlushCoverage flushes collected coverage information to disk.
// The destination file is configured at startup and cannot be changed.
// Calling this function also sends a line like "coverage: 5% of statements" to stdout.
func FlushCoverage() {
// We're not actually going to run any tests, but we need Go to think we did so it writes
// coverage information to disk. To achieve this, we create a bunch of empty test suites and
// have it "run" them.
tests := []testing.InternalTest{}
benchmarks := []testing.InternalBenchmark{}
examples := []testing.InternalExample{}
var deps fakeTestDeps
dummyRun := testing.MainStart(deps, tests, benchmarks, examples)
dummyRun.Run()
// Once it writes to the temporary path, we move it to the intended path.
// This gets us atomic updates from the perspective of another process trying to access
// the file.
if err := os.Rename(tempCoveragePath(), coverageFile); err != nil {
klog.Errorf("Couldn't move coverage file from %s to %s", coverageFile, tempCoveragePath())
}
}
|
[
"\"KUBE_COVERAGE_FILE\"",
"\"KUBE_COVERAGE_FLUSH_INTERVAL\""
] |
[] |
[
"KUBE_COVERAGE_FILE",
"KUBE_COVERAGE_FLUSH_INTERVAL"
] |
[]
|
["KUBE_COVERAGE_FILE", "KUBE_COVERAGE_FLUSH_INTERVAL"]
|
go
| 2 | 0 | |
restdemo/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restdemo.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
devel/garbage_collection.py
|
#!/usr/bin/env python
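"""Move files under kachery-storage whose SHA-1 hashes are not in the keep-set into a parallel kachery-storage-recycling tree, preserving the nested hash-prefix directory layout."""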
import os
def garbage_collection(*,
sha1_hashes_to_keep,
nwb_datajoint_base_dir,
kachery_storage_dir,
relative_path
):
# we should be very careful with these checks
assert isinstance(nwb_datajoint_base_dir, str)
assert nwb_datajoint_base_dir != ''
assert kachery_storage_dir == nwb_datajoint_base_dir + '/kachery-storage'
assert os.path.exists(kachery_storage_dir)
kachery_storage_recycling_dir = nwb_datajoint_base_dir + '/kachery-storage-recycling'
if not os.path.exists(kachery_storage_recycling_dir):
os.mkdir(kachery_storage_recycling_dir)
path = os.path.join(kachery_storage_dir, relative_path)
assert os.path.exists(path)
path_recycling = os.path.join(kachery_storage_recycling_dir, relative_path)
if not os.path.exists(path_recycling):
os.mkdir(path_recycling)
for fname in os.listdir(path):
filepath = path + '/' + fname
filepath_recycling = path_recycling + '/' + fname
if os.path.isfile(filepath):
if len(fname) == 40:
hash0 = fname
if filepath.endswith(f'{hash0[0]}{hash0[1]}/{hash0[2]}{hash0[3]}/{hash0[4]}{hash0[5]}/{hash0}'):
if hash0 not in sha1_hashes_to_keep:
print(f'Recycling file: {hash0}')
os.rename(filepath, filepath_recycling)
elif os.path.isdir(filepath):
relp = os.path.join(relative_path, fname)
garbage_collection(
sha1_hashes_to_keep=sha1_hashes_to_keep,
nwb_datajoint_base_dir=nwb_datajoint_base_dir,
kachery_storage_dir=kachery_storage_dir,
relative_path=relp
)
def main():
sha1_hashes_to_keep = set()
# Here is where we need to populate the sha1_hashes_to_keep set.
# sha1_hashes_to_keep.add('6253ee04e09a5145eae0ced9c26ce73b91876de4')
garbage_collection(
sha1_hashes_to_keep=sha1_hashes_to_keep,
nwb_datajoint_base_dir=os.environ['NWB_DATAJOINT_BASE_DIR'],
kachery_storage_dir=os.environ['KACHERY_STORAGE_DIR'],
relative_path='sha1'
)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"NWB_DATAJOINT_BASE_DIR",
"KACHERY_STORAGE_DIR"
] |
[]
|
["NWB_DATAJOINT_BASE_DIR", "KACHERY_STORAGE_DIR"]
|
python
| 2 | 0 | |
main.py
|
# a discord bot for registration in the server
# | IMPORT
import discord
import os
from datetime import datetime, timedelta
from dotenv import load_dotenv
from typing import Union
from googleModule.main import gmail_management, init_creds, sheet_management
from googleModule.parseSheet import parseSheet
from utils import log
from utils.command import command_parse
from utils.OTP import gen_otp, read_otp_file, write_otp_file
# | GLOBAL EXECUTIONS & GLOBAL VARIABLES
load_dotenv()
OTP_FILE = os.getenv("OTP_FILE")
SENDER_MAIL = os.getenv("SENDER_MAIL")
CLIENT = discord.Client(intents=discord.Intents.all())
LOGGER = log.logger(log_dir="logs")
COMMAND_PREFIX = os.getenv("COMMAND_PREFIX")
HELP_MSG = f"""
──────────────
ℹ️ **รายะเอียด** ℹ️
──────────────
**น้องแมวลงทะเบียนเข้าเซิฟเวอร์ของดิสคอร์ด**
_ถ้าอยากจะส่งข้อความที่มีวรรคอยู่ (เช่น Test Name), ต้องใช้เครื่องหมายคำพูด `"` (เช่น "Test Name")_
⚙️ **คำสั่ง** ⚙️
──────────────
**`{COMMAND_PREFIX}help`**
**__Description__**: ขอความช่วยเหลือให้น้องบอกวิธีใช้งานตัวเอง
**`{COMMAND_PREFIX}regis [ชื่อเซิฟเวอร์] [ชื่อ]`**
**__Description__**: เริ่มต้นการลงทะเบียนกับน้องทองหยอด
**__Parameter__**:
**`server_name`**: ชื่อของเซิฟเวอร์ที่ต้องการลงทะเบียน
**`name`**: ชื่อของคุณที่ต้องใช้ในการลงทะเบียน
**`{COMMAND_PREFIX}otp [OTP]`**
**__Description__**: ยืนยันตัวตนกับน้องทองหยอดด้วย OTP
**__Parameter__**:
**`OTP`**: One-Time Password ที่ส่งไปทางอีเมล
"""
# | FUNCTIONS
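# get_channel_from_id returns the guild channel a message was sent in, or opens a DM channel for direct messages.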
async def get_channel_from_id(msg: discord.Message) -> discord.channel:
global CLIENT
if msg.guild is not None:
ch = CLIENT.get_channel(msg.channel.id)
else:
ch = await msg.author.create_dm()
return ch
def get_server_from_name(server_name: str) -> Union[discord.Guild, None]:
global CLIENT
for s in CLIENT.guilds:
if s.name == server_name:
return s
return None
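# set_role grants the author every role listed in the comma-separated raw_role string on the named server.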
async def set_role(author: discord.User, server_name: str, raw_role: str):
_server = get_server_from_name(server_name)
for m in _server.members:
if m.id == author.id:
_member = m
role_input = [r for r in raw_role.strip().split(",") if r != ""]
for r in _server.roles:
if r.name in role_input:
await _member.add_roles(r)
@CLIENT.event
async def on_connect() -> None:
global LOGGER
LOGGER.print_log("Connected!", log_level=log.INFO)
@CLIENT.event
async def on_ready() -> None:
global LOGGER
LOGGER.print_log("Ready!", log_level=log.INFO)
await CLIENT.change_presence(status=discord.Status.online, activity=discord.Game("with น้องฝอยทอง"))
@CLIENT.event
async def on_message(msg: discord.Message) -> None:
global CLIENT
global GMAIL
global HELP_MSG
global LOGGER
global OTP_FILE
global OTP_WAIT_LST
global SENDER_MAIL
global SHEET
author = msg.author
author_id = author.id
client_id = CLIENT.user.id
if msg.guild is None:
if author_id != client_id:
ch = await get_channel_from_id(msg)
async with ch.typing():
parsed_cmd = command_parse(msg.content, COMMAND_PREFIX)
if parsed_cmd is not None:
cmd = parsed_cmd["command"]
if cmd == "help":
LOGGER.print_log(
f"{author} use {COMMAND_PREFIX+cmd} command", log.INFO
)
help_msg = discord.Embed(
title="❓วิธีให้น้องทองหยอดช่วย❓",
description=HELP_MSG,
color=discord.Color.random(),
)
await ch.send(embed=help_msg)
elif cmd == "regis":
LOGGER.print_log(
f"{author} use {COMMAND_PREFIX+cmd} command", log.INFO
)
_server_name = parsed_cmd["param"]["server_name"]
_name = parsed_cmd["param"]["name"]
_server = get_server_from_name(_server_name)
# print(_server)
if _server is None:
LOGGER.print_log(
f"{_server_name} is used in {COMMAND_PREFIX+cmd} as a param but can not access",
log.INFO,
)
await ch.send(f"ไปที่ `{_server_name}` ไม่ได้ฮะ! 🙀")
return
else:
sheet_data = parseSheet(
SHEET.read_sheet_by_range(f"{_server_name}!A:D")[
"values"
]
)
user_data = None
user_row = -1
for line in range(len(sheet_data)):
if sheet_data[line]["ชื่อ"] == _name:
user_data = sheet_data[line]
user_row = line + 2
break
if user_data is None:
LOGGER.print_log(
f"{author} try to use regis command but not in the sheet",
log.INFO,
)
await ch.send(
f"ใครอ่ะ! ไม่เห็นรู้จักเลยย 😾"
)
return
elif user_data["สถานะ"] == "รอส่ง OTP":
await ch.send(
f"ไม่ฮะ รอ OTP อยู่นะ... 😿"
)
return
elif user_data["สถานะ"] == "เสร็จสิ้นการลงทะเบียน":
await ch.send(f"ไม่ต้องลงทะเบียนใหม่หรอกฮะ 😸")
return
elif user_data["สถานะ"] == "ยังไม่ได้ลงทะเบียน":
otp, ref = gen_otp(
OTP_FILE, str(author), _server_name, 6
)
SHEET.write_sheet_by_range(
f"D{user_row}", [["รอส่ง OTP"]]
)
OTP_WAIT_LST.append({author: [user_data, user_row]})
LOGGER.print_log(
f"gmail response: {GMAIL.send_mail(to=user_data['อีเมล'], subject='TEDxKasetsartU Discord Server Registration OTP', otp=otp, ref=ref)}",
log.INFO,
)
await ch.send(
f"น้องทองหยอดส่งเมลไปแล้ว! 📨 OTP มีรหัสอ้างอิงอันนี้ฮะ `{ref}`. 🐱"
)
return
elif cmd == "otp":
LOGGER.print_log(
f"{author} use {COMMAND_PREFIX+cmd} command", log.INFO
)
_otp = parsed_cmd["param"]["otp"]
wait_data = None
for d in OTP_WAIT_LST:
if author in d.keys():
wait_data = d
break
if wait_data is not None:
data = read_otp_file(OTP_FILE)
try:
otp_data = data[_otp]
except KeyError:
LOGGER.print_log(
f"{author} has sent an invalid OTP", log.INFO
)
await ch.send(
f"อันนี้ไม่ใช่ของเราฮะ! ไปเอามาจากไหนอ่ะะ? 😾\nเช็ครหัสอ้างอิงดี ๆ น้าาา! 😺"
)
return
old_wait_data = wait_data
user_row = wait_data[author][1]
sheet_data = parseSheet(
SHEET.read_sheet_by_range(f"{otp_data['server']}!A:D")[
"values"
]
)
user_data = None
for line in range(len(sheet_data)):
if line == user_row - 2:
user_data = sheet_data[line]
break
wait_data = {author: [user_data, user_row]}
if str(author) == otp_data["for"]:
if datetime.now() - otp_data["create_at"] <= timedelta(
minutes=5
):
OTP_WAIT_LST.remove(old_wait_data)
write_otp_file(
OTP_FILE, _otp, "", "", "", mode="remove"
)
SHEET.write_sheet_by_range(
f"D{wait_data[author][1]}",
[["เสร็จสิ้นการลงทะเบียน"]],
)
await set_role(
author,
otp_data["server"],
wait_data[author][0]["ฝ่าย"],
)
await ch.send(f"เสร็จแล้วเหมียววว 😸🎉")
return
else:
OTP_WAIT_LST.remove(old_wait_data)
write_otp_file(
OTP_FILE, _otp, "", "", "", mode="remove"
)
SHEET.write_sheet_by_range(
f"D{wait_data[author][1]}",
[["ยังไม่ได้ลงทะเบียน"]],
)
LOGGER.print_log(
f"{author} has sent an expire OTP", log.INFO
)
await ch.send(
f"ทำไมนานจังอ่ะะ รหัสหมดอายุแล้วว 🙀\nไปใช้ `$regis` อีกทีน้าา!"
)
return
else:
LOGGER.print_log(
f"{author} has sent the other OTP", log.INFO
)
await ch.send(
f"อันนั้นไม่ใช่ OTP ของคุณนะ ไปเอามาจากไหนอ่ะะ 😾\nเช็ครหัสอ้างอิงดี ๆ น้าาา! 😺"
)
return
else:
LOGGER.print_log(
f"{author} try to use otp before regis", log.INFO
)
await ch.send(
f"ส่งมาทำไมอ่ะ? 😾"
)
return
else:
LOGGER.print_log(
f"{author} sent a unknown/incomplete command [{msg.content}]",
log.INFO,
)
await ch.send(
f"คือไรอ่ะ ไม่เข้าใจอ่ะ 🙀\nใช้ `{COMMAND_PREFIX}help` นะถ้าไม่รู้จะใช้ไงอ่ะะ"
)
# | MAIN
if __name__ == "__main__":
GMAIL_CREDS, SHEET_CREDS = init_creds(
os.path.join("googleModule", "credentials.json"),
os.path.join("googleModule", "key.json")
)
GMAIL = gmail_management(creds=GMAIL_CREDS)
SHEET = sheet_management(
sheet_id="1oxoO8yNdXKibjf3KFy-Iqwd4ON_O9Y8AUV42_KEVky4", creds=SHEET_CREDS
)
OTP_WAIT_LST = []
CLIENT.run(os.getenv("DISCORD_BOT_TOKEN"))
# https://discord.com/api/oauth2/authorize?client_id=951658220844384288&permissions=8&scope=bot
|
[] |
[] |
[
"DISCORD_BOT_TOKEN",
"SENDER_MAIL",
"OTP_FILE",
"COMMAND_PREFIX"
] |
[]
|
["DISCORD_BOT_TOKEN", "SENDER_MAIL", "OTP_FILE", "COMMAND_PREFIX"]
|
python
| 4 | 0 | |
scripts/send_sms.py
|
"""
This script can be called from terminal with two arguments: phone number and message body.
It sends the message from our Twilio number to the argument phone number.
Modified from send_sms_message.py in bear-as-a-service
"""
import re
import os
import click
from twilio.rest import Client
account_sid = os.getenv('TWILIO_ACCOUNT_SID')
auth_token = os.getenv('TWILIO_AUTH_TOKEN')
from_number = os.getenv('TWILIO_PHONE_NUMBER')
PHONE_NUMBER_RE = re.compile(r'^\+1\d{10}$')
PHONE_NUMBER_EXAMPLE = '+16172335101'
@click.command()
@click.argument('to_number')
@click.argument('message_body', default='hello')
def send_sms_message(to_number, message_body):
assert PHONE_NUMBER_RE.match(to_number), 'Phone number must match ' + PHONE_NUMBER_EXAMPLE
client = Client(account_sid, auth_token)
client.api.account.messages.create(
to=to_number,
from_=from_number,
body=message_body)
if __name__ == '__main__':
send_sms_message()
|
[] |
[] |
[
"TWILIO_PHONE_NUMBER",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_PHONE_NUMBER", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
python
| 3 | 0 | |
learn-django-web/blather/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blather.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
kubernetes/show/health-status/src/main.go
|
package main
import (
"kubernetes/core/pkg/health"
"os"
)
func main() {
loadInputs().Run()
}
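// loadInputs builds the health check inputs from the NAMESPACE, POD_PART_NAME and KUBECONFIG environment variables.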
func loadInputs() CommandHandler {
return health.Inputs{
Namespace: os.Getenv("NAMESPACE"),
PodPartName: os.Getenv("POD_PART_NAME"),
Kubeconfig: os.Getenv("KUBECONFIG"),
}
}
type CommandHandler interface {
Run()
}
|
[
"\"NAMESPACE\"",
"\"POD_PART_NAME\"",
"\"KUBECONFIG\""
] |
[] |
[
"POD_PART_NAME",
"NAMESPACE",
"KUBECONFIG"
] |
[]
|
["POD_PART_NAME", "NAMESPACE", "KUBECONFIG"]
|
go
| 3 | 0 | |
oneflow/compatible_single_client_python/test/ops/test_categorical_ordinal_encoder.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
import typing
import unittest
import os
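# _test_categorical_ordinal_encoder checks that the encoder assigns a unique, consistent ordinal to every distinct token by verifying that the token/ordinal mappings stay one-to-one across iterations.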
def _test_categorical_ordinal_encoder(
test_case, device_tag, dtype, size, capacity, num_tokens, num_iters
):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def test_job(
x: oft.Numpy.Placeholder(shape=(size,), dtype=dtype)
) -> typing.Tuple[oft.Numpy, oft.Numpy]:
with flow.scope.placement(device_tag, "0:0"):
y = flow.layers.categorical_ordinal_encoder(x, capacity=capacity)
z = flow.layers.categorical_ordinal_encoder(
x, capacity=capacity, name="encode1"
)
# z = flow.layers.categorical_ordinal_encoder(x, capacity=320)
return y, z
tokens = np.random.randint(-sys.maxsize, sys.maxsize, size=[num_tokens]).astype(
flow.convert_oneflow_dtype_to_numpy_dtype(dtype)
)
k_set = set()
v_set = set()
kv_set = set()
vk_set = set()
for i in range(num_iters):
x = tokens[np.random.randint(0, num_tokens, (size,))]
y, z = test_job(x)
test_case.assertEqual(x.shape, y.shape)
if device_tag == "cpu":
test_case.assertTrue(
np.array_equal(y, z),
"\ny: {}\n{}\nz: {}\n{}".format(y.shape, y, z.shape, z),
)
for k, v in zip(x, y):
k_set.add(k)
v_set.add(v)
kv_set.add((k, v))
vk_set.add((v, k))
unique_size = len(k_set)
test_case.assertEqual(len(v_set), unique_size)
test_case.assertEqual(len(kv_set), unique_size)
test_case.assertEqual(len(vk_set), unique_size)
@flow.unittest.skip_unless_1n1d()
class TestCategoricalOrdinalEncoder(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_categorical_ordinal_encoder_gpu_large(test_case):
_test_categorical_ordinal_encoder(
test_case=test_case,
device_tag="gpu",
dtype=flow.int64,
size=10000,
capacity=320000,
num_tokens=200000,
num_iters=256,
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_categorical_ordinal_encoder_gpu_small(test_case):
_test_categorical_ordinal_encoder(
test_case=test_case,
device_tag="gpu",
dtype=flow.int32,
size=10,
capacity=250,
num_tokens=200,
num_iters=4,
)
def test_categorical_ordinal_encoder_cpu_large(test_case):
_test_categorical_ordinal_encoder(
test_case=test_case,
device_tag="cpu",
dtype=flow.int64,
size=20000,
capacity=220000,
num_tokens=200000,
num_iters=100,
)
def test_categorical_ordinal_encoder_cpu_very_large(test_case):
_test_categorical_ordinal_encoder(
test_case=test_case,
device_tag="cpu",
dtype=flow.int64,
size=50000,
capacity=1000000,
num_tokens=500000,
num_iters=100,
)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[
"ONEFLOW_TEST_CPU_ONLY"
] |
[]
|
["ONEFLOW_TEST_CPU_ONLY"]
|
python
| 1 | 0 | |
config.go
|
package main
import (
"io/ioutil"
"strings"
"path"
"os"
"os/exec"
"gopkg.in/yaml.v2"
)
type Pane struct {
Root string `yaml:"root"`
Type string `yaml:"type"`
Commands []string `yaml:"commands"`
}
type Window struct {
Name string `yaml:"name"`
Root string `yaml:"root"`
BeforeStart []string `yaml:"before_start"`
Panes []Pane `yaml:"panes"`
Commands []string `yaml:"commands"`
Layout string `yaml:"layout"`
Manual bool `yaml:"manual"`
}
type Config struct {
Session string `yaml:"session"`
Root string `yaml:"root"`
BeforeStart []string `yaml:"before_start"`
Stop []string `yaml:"stop"`
Windows []Window `yaml:"windows"`
}
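// EditConfig opens the config file at path in the user's $EDITOR (falling back to vim) and waits for the editor to exit.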
func EditConfig(path string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
editor = "vim"
}
cmd := exec.Command(editor, path)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func GetConfig(path string) (Config, error) {
f, err := ioutil.ReadFile(path)
if err != nil {
return Config{}, err
}
return ParseConfig(string(f))
}
func ParseConfig(data string) (Config, error) {
c := Config{}
err := yaml.Unmarshal([]byte(data), &c)
if err != nil {
return Config{}, err
}
return c, nil
}
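// ListConfigs returns the names of the config files in dir with their file extensions stripped.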
func ListConfigs(dir string) ([]string, error) {
var result []string
files, err := os.ReadDir(dir)
if err != nil {
return result, err
}
for _, file := range files {
result = append(result, strings.TrimSuffix(file.Name(), path.Ext(file.Name())))
}
return result, nil
}
|
[
"\"EDITOR\""
] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
go
| 1 | 0 | |
cmd/ddltest/ddl_serial_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddltest
import (
"database/sql"
"database/sql/driver"
"flag"
"fmt"
"math/rand"
"os"
"os/exec"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
zaplog "github.com/pingcap/log"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/store"
tidbdriver "github.com/pingcap/tidb/store/driver"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/logutil"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
goctx "golang.org/x/net/context"
)
var (
etcd = flag.String("etcd", "127.0.0.1:2379", "etcd path")
tidbIP = flag.String("tidb_ip", "127.0.0.1", "tidb-server ip address")
tikvPath = flag.String("tikv_path", "", "tikv path")
lease = flag.Int("lease", 1, "DDL schema lease time, seconds")
serverNum = flag.Int("server_num", 3, "Maximum running tidb server")
startPort = flag.Int("start_port", 5000, "First tidb-server listening port")
statusPort = flag.Int("status_port", 8000, "First tidb-server status port")
logLevel = flag.String("L", "error", "log level")
ddlServerLogLevel = flag.String("ddl_log_level", "fatal", "DDL server log level")
dataNum = flag.Int("n", 100, "minimal test dataset for a table")
enableRestart = flag.Bool("enable_restart", true, "whether random restart servers for tests")
)
type server struct {
*exec.Cmd
logFP *os.File
db *sql.DB
addr string
}
type ddlSuite struct {
store kv.Storage
dom *domain.Domain
s session.Session
ctx sessionctx.Context
m sync.Mutex
procs []*server
wg sync.WaitGroup
quit chan struct{}
retryCount int
}
func createDDLSuite(t *testing.T) (s *ddlSuite) {
s = new(ddlSuite)
err := logutil.InitLogger(&logutil.LogConfig{Config: zaplog.Config{Level: *logLevel}})
require.NoError(t, err)
s.quit = make(chan struct{})
s.store, err = store.New(fmt.Sprintf("tikv://%s%s", *etcd, *tikvPath))
require.NoError(t, err)
// Make sure the schema lease of this session is equal to other TiDB servers'.
session.SetSchemaLease(time.Duration(*lease) * time.Second)
s.dom, err = session.BootstrapSession(s.store)
require.NoError(t, err)
s.s, err = session.CreateSession(s.store)
require.NoError(t, err)
s.ctx = s.s.(sessionctx.Context)
goCtx := goctx.Background()
_, err = s.s.Execute(goCtx, "create database if not exists test_ddl")
require.NoError(t, err)
s.Bootstrap(t)
// Stop current DDL worker, so that we can't be the owner now.
err = domain.GetDomain(s.ctx).DDL().Stop()
require.NoError(t, err)
ddl.RunWorker = false
session.ResetStoreForWithTiKVTest(s.store)
s.s, err = session.CreateSession(s.store)
require.NoError(t, err)
s.dom, err = session.BootstrapSession(s.store)
require.NoError(t, err)
s.ctx = s.s.(sessionctx.Context)
_, err = s.s.Execute(goCtx, "use test_ddl")
require.NoError(t, err)
addEnvPath("..")
// Start multi tidb servers
s.procs = make([]*server, *serverNum)
// Set server restart retry count.
s.retryCount = 20
createLogFiles(t, *serverNum)
err = s.startServers()
require.NoError(t, err)
s.wg.Add(1)
go s.restartServerRegularly()
return
}
// restartServerRegularly restarts a tidb server regularly.
func (s *ddlSuite) restartServerRegularly() {
defer s.wg.Done()
var err error
after := *lease * (6 + randomIntn(6))
for {
select {
case <-time.After(time.Duration(after) * time.Second):
if *enableRestart {
err = s.restartServerRand()
if err != nil {
log.Fatal("restartServerRand failed", zap.Error(err))
}
}
case <-s.quit:
return
}
}
}
func (s *ddlSuite) teardown(t *testing.T) {
close(s.quit)
s.wg.Wait()
s.dom.Close()
// TODO: Remove these logs after testing.
quitCh := make(chan struct{})
go func() {
select {
case <-time.After(100 * time.Second):
buf := make([]byte, 2<<20)
size := runtime.Stack(buf, true)
log.Error("testing timeout", zap.ByteString("buf", buf[:size]))
case <-quitCh:
}
}()
err := s.store.Close()
require.NoError(t, err)
close(quitCh)
err = s.stopServers()
require.NoError(t, err)
}
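// startServers launches any tidb-server processes that are not currently running, reusing the per-index log files.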
func (s *ddlSuite) startServers() (err error) {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < len(s.procs); i++ {
if s.procs[i] != nil {
continue
}
// Open log file.
logFP, err := os.OpenFile(fmt.Sprintf("%s%d", logFilePrefix, i), os.O_RDWR, 0766)
if err != nil {
return errors.Trace(err)
}
s.procs[i], err = s.startServer(i, logFP)
if err != nil {
return errors.Trace(err)
}
}
return nil
}
func (s *ddlSuite) killServer(proc *os.Process) error {
// Make sure this tidb-server is killed, so that the next server reusing the same port can start quickly.
err := proc.Kill()
if err != nil {
log.Error("kill server failed", zap.Error(err))
return errors.Trace(err)
}
_, err = proc.Wait()
if err != nil {
log.Error("kill server, wait failed", zap.Error(err))
return errors.Trace(err)
}
time.Sleep(1 * time.Second)
return nil
}
func (s *ddlSuite) stopServers() error {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < len(s.procs); i++ {
if s.procs[i] != nil {
err := s.killServer(s.procs[i].Process)
if err != nil {
return errors.Trace(err)
}
s.procs[i] = nil
}
}
return nil
}
var logFilePrefix = "tidb_log_file_"
func createLogFiles(t *testing.T, length int) {
for i := 0; i < length; i++ {
fp, err := os.Create(fmt.Sprintf("%s%d", logFilePrefix, i))
require.NoError(t, err)
require.NoError(t, fp.Close())
}
}
func (s *ddlSuite) startServer(i int, fp *os.File) (*server, error) {
cmd := exec.Command("ddltest_tidb-server",
"--store=tikv",
fmt.Sprintf("-L=%s", *ddlServerLogLevel),
fmt.Sprintf("--path=%s%s", *etcd, *tikvPath),
fmt.Sprintf("-P=%d", *startPort+i),
fmt.Sprintf("--status=%d", *statusPort+i),
fmt.Sprintf("--lease=%d", *lease))
cmd.Stderr = fp
cmd.Stdout = fp
err := cmd.Start()
if err != nil {
return nil, errors.Trace(err)
}
time.Sleep(500 * time.Millisecond)
// Make sure tidb server process is started.
ps := fmt.Sprintf("ps -aux|grep ddltest_tidb|grep %d", *startPort+i)
output, _ := exec.Command("sh", "-c", ps).Output()
if !strings.Contains(string(output), "ddltest_tidb-server") {
time.Sleep(1 * time.Second)
}
// Open database.
var db *sql.DB
addr := fmt.Sprintf("%s:%d", *tidbIP, *startPort+i)
sleepTime := time.Millisecond * 250
startTime := time.Now()
for i := 0; i < s.retryCount; i++ {
db, err = sql.Open("mysql", fmt.Sprintf("root@(%s)/test_ddl", addr))
if err != nil {
log.Warn("open addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))
continue
}
err = db.Ping()
if err == nil {
break
}
log.Warn("ping addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))
err = db.Close()
if err != nil {
log.Warn("close db failed", zap.Int("retry count", i), zap.Error(err))
break
}
time.Sleep(sleepTime)
sleepTime += sleepTime
}
if err != nil {
log.Error("restart server addr failed",
zap.String("addr", addr),
zap.Duration("take time", time.Since(startTime)),
zap.Error(err),
)
return nil, errors.Trace(err)
}
db.SetMaxOpenConns(10)
_, err = db.Exec("use test_ddl")
if err != nil {
return nil, errors.Trace(err)
}
log.Info("start server ok", zap.String("addr", addr), zap.Error(err))
return &server{
Cmd: cmd,
db: db,
addr: addr,
logFP: fp,
}, nil
}
func (s *ddlSuite) restartServerRand() error {
i := rand.Intn(*serverNum)
s.m.Lock()
defer s.m.Unlock()
if s.procs[i] == nil {
return nil
}
server := s.procs[i]
s.procs[i] = nil
log.Warn("begin to restart", zap.String("addr", server.addr))
err := s.killServer(server.Process)
if err != nil {
return errors.Trace(err)
}
s.procs[i], err = s.startServer(i, server.logFP)
return errors.Trace(err)
}
func isRetryError(err error) bool {
if err == nil {
return false
}
if terror.ErrorEqual(err, driver.ErrBadConn) ||
strings.Contains(err.Error(), "connection refused") ||
strings.Contains(err.Error(), "getsockopt: connection reset by peer") ||
strings.Contains(err.Error(), "KV error safe to retry") ||
strings.Contains(err.Error(), "try again later") ||
strings.Contains(err.Error(), "invalid connection") {
return true
}
// TODO: Check the specific number of columns.
if strings.Contains(err.Error(), "Column count doesn't match value count at row") {
log.Warn("err", zap.Error(err))
return false
}
log.Error("can not retry", zap.Error(err))
return false
}
func (s *ddlSuite) exec(query string, args ...interface{}) (sql.Result, error) {
for {
server := s.getServer()
r, err := server.db.Exec(query, args...)
if isRetryError(err) {
log.Error("exec in server, retry",
zap.String("query", query),
zap.String("addr", server.addr),
zap.Error(err),
)
continue
}
return r, err
}
}
func (s *ddlSuite) mustExec(query string, args ...interface{}) sql.Result {
r, err := s.exec(query, args...)
if err != nil {
log.Fatal("[mustExec fail]query",
zap.String("query", query),
zap.Any("args", args),
zap.Error(err),
)
}
return r
}
func (s *ddlSuite) execInsert(query string, args ...interface{}) sql.Result {
for {
r, err := s.exec(query, args...)
if err == nil {
return r
}
if *enableRestart {
// If random server restarts are enabled, ignore duplicate-key errors caused by re-executed inserts.
if strings.Contains(err.Error(), "Duplicate entry") &&
strings.Contains(err.Error(), "for key") {
return r
}
}
log.Fatal("[execInsert fail]query",
zap.String("query", query),
zap.Any("args", args),
zap.Error(err),
)
}
}
func (s *ddlSuite) query(query string, args ...interface{}) (*sql.Rows, error) {
for {
server := s.getServer()
r, err := server.db.Query(query, args...)
if isRetryError(err) {
log.Error("query in server, retry",
zap.String("query", query),
zap.String("addr", server.addr),
zap.Error(err),
)
continue
}
return r, err
}
}
func (s *ddlSuite) getServer() *server {
s.m.Lock()
defer s.m.Unlock()
for i := 0; i < 20; i++ {
i := rand.Intn(*serverNum)
if s.procs[i] != nil {
return s.procs[i]
}
}
log.Fatal("try to get server too many times")
return nil
}
// runDDL executes the DDL query and returns a channel that can be used to wait for the DDL to finish.
func (s *ddlSuite) runDDL(sql string) chan error {
done := make(chan error, 1)
go func() {
_, err := s.s.Execute(goctx.Background(), sql)
// We must wait 2 * lease time to guarantee all servers update the schema.
if err == nil {
time.Sleep(time.Duration(*lease) * time.Second * 2)
}
done <- err
}()
return done
}
func (s *ddlSuite) getTable(t *testing.T, name string) table.Table {
tbl, err := domain.GetDomain(s.ctx).InfoSchema().TableByName(model.NewCIStr("test_ddl"), model.NewCIStr(name))
require.NoError(t, err)
return tbl
}
func dumpRows(t *testing.T, rows *sql.Rows) [][]interface{} {
cols, err := rows.Columns()
require.NoError(t, err)
var ay [][]interface{}
for rows.Next() {
v := make([]interface{}, len(cols))
for i := range v {
v[i] = new(interface{})
}
err = rows.Scan(v...)
require.NoError(t, err)
for i := range v {
v[i] = *(v[i].(*interface{}))
}
ay = append(ay, v)
}
require.NoError(t, rows.Close())
require.NoErrorf(t, rows.Err(), "%v", ay)
return ay
}
func matchRows(t *testing.T, rows *sql.Rows, expected [][]interface{}) {
ay := dumpRows(t, rows)
require.Equalf(t, len(expected), len(ay), "%v", expected)
for i := range ay {
match(t, ay[i], expected[i]...)
}
}
func match(t *testing.T, row []interface{}, expected ...interface{}) {
require.Equal(t, len(expected), len(row))
for i := range row {
if row[i] == nil {
require.Nil(t, expected[i])
continue
}
got, err := types.ToString(row[i])
require.NoError(t, err)
need, err := types.ToString(expected[i])
require.NoError(t, err)
require.Equal(t, need, got)
}
}
func (s *ddlSuite) Bootstrap(t *testing.T) {
tk := testkit.NewTestKit(t, s.store)
tk.MustExec("use test_ddl")
tk.MustExec("drop table if exists test_index, test_column, test_insert, test_conflict_insert, " +
"test_update, test_conflict_update, test_delete, test_conflict_delete, test_mixed, test_inc")
tk.MustExec("create table test_index (c int, c1 bigint, c2 double, c3 varchar(256), primary key(c))")
tk.MustExec("create table test_column (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_insert (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_conflict_insert (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_update (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_conflict_update (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_delete (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_conflict_delete (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_mixed (c1 int, c2 int, primary key(c1))")
tk.MustExec("create table test_inc (c1 int, c2 int, primary key(c1))")
tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn
tk.MustExec("drop table if exists test_insert_common, test_conflict_insert_common, " +
"test_update_common, test_conflict_update_common, test_delete_common, test_conflict_delete_common, " +
"test_mixed_common, test_inc_common")
tk.MustExec("create table test_insert_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_conflict_insert_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_update_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_conflict_update_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_delete_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_conflict_delete_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_mixed_common (c1 int, c2 int, primary key(c1, c2))")
tk.MustExec("create table test_inc_common (c1 int, c2 int, primary key(c1, c2))")
tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly
}
func TestSimple(t *testing.T) {
s := createDDLSuite(t)
defer s.teardown(t)
t.Run("Basic", func(t *testing.T) {
done := s.runDDL("create table if not exists test_simple (c1 int, c2 int, c3 int)")
err := <-done
require.NoError(t, err)
_, err = s.exec("insert into test_simple values (1, 1, 1)")
require.NoError(t, err)
rows, err := s.query("select c1 from test_simple limit 1")
require.NoError(t, err)
matchRows(t, rows, [][]interface{}{{1}})
done = s.runDDL("drop table if exists test_simple")
err = <-done
require.NoError(t, err)
})
t.Run("Mixed", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_mixed"},
{"test_mixed_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleMixed][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
rowID := int64(rowCount)
defaultValue := int64(-1)
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
key := atomic.AddInt64(&rowID, 1)
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, key, key))
key = int64(randomNum(rowCount))
s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, defaultValue, key))
key = int64(randomNum(rowCount))
s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, key))
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleMixed][Mixed][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
updateCount := int64(0)
insertCount := int64(0)
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) {
insertCount++
} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
updateCount++
} else {
log.Fatal("[TestSimpleMixed fail]invalid row", zap.Any("row", data))
}
return true, nil
})
require.NoError(t, err)
deleteCount := atomic.LoadInt64(&rowID) - insertCount - updateCount
require.Greater(t, insertCount, int64(0))
require.Greater(t, updateCount, int64(0))
require.Greater(t, deleteCount, int64(0))
})
}
})
t.Run("Inc", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_inc"},
{"test_inc_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
workerNum := 10
rowCount := 1000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleInc][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
s.mustExec(fmt.Sprintf("update %s set c2 = c2 + 1 where c1 = 0", tblName))
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleInc][Update][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, "test_inc")
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
if reflect.DeepEqual(data[0].GetValue(), int64(0)) {
if *enableRestart {
require.GreaterOrEqual(t, data[1].GetValue(), int64(rowCount))
} else {
require.Equal(t, int64(rowCount), data[1].GetValue())
}
} else {
require.Equal(t, data[1].GetValue(), data[0].GetValue())
}
return true, nil
})
require.NoError(t, err)
})
}
})
}
func TestSimpleInsert(t *testing.T) {
s := createDDLSuite(t)
defer s.teardown(t)
t.Run("Basic", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_insert"},
{"test_insert_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, "test_insert")
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
require.Equal(t, data[1].GetValue(), data[0].GetValue())
return true, nil
})
require.NoError(t, err)
require.Equal(t, rowCount, handles.Len())
})
}
})
t.Run("Conflict", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_conflict_insert"},
{"test_conflict_insert_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
_, _ = s.exec(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}()
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictInsert][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
require.Contains(t, keysMap, data[0].GetValue())
require.Equal(t, data[1].GetValue(), data[0].GetValue())
return true, nil
})
require.NoError(t, err)
require.Len(t, keysMap, handles.Len())
})
}
})
}
func TestSimpleUpdate(t *testing.T) {
s := createDDLSuite(t)
defer s.teardown(t)
t.Run("Basic", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_update"},
{"test_update_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
v := randomNum(rowCount)
s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, v, k))
mu.Lock()
keysMap[int64(k)] = int64(v)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleUpdate][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
key := data[0].GetInt64()
require.Equal(t, keysMap[key], data[1].GetValue())
return true, nil
})
require.NoError(t, err)
require.Equal(t, rowCount, handles.Len())
})
}
})
t.Run("Conflict", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_conflict_update"},
{"test_conflict_update_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 10000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictUpdate][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
defaultValue := int64(-1)
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func() {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.mustExec(fmt.Sprintf("update %s set c2 = %d where c1 = %d", tblName, defaultValue, k))
mu.Lock()
keysMap[int64(k)] = defaultValue
mu.Unlock()
}
}()
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleConflictUpdate][Update][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
require.Contains(t, keysMap, data[0].GetValue())
if !reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) && !reflect.DeepEqual(data[1].GetValue(), defaultValue) {
log.Fatal("[TestSimpleConflictUpdate fail]Bad row", zap.Any("row", data))
}
return true, nil
})
require.NoError(t, err)
require.Equal(t, rowCount, handles.Len())
})
}
})
}
func TestSimpleDelete(t *testing.T) {
s := createDDLSuite(t)
defer s.teardown(t)
t.Run("Basic", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_delete"},
{"test_delete_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
workerNum := 10
rowCount := 1000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, k))
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleDelete][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
return true, nil
})
require.NoError(t, err)
require.Equal(t, 0, handles.Len())
})
}
})
t.Run("Conflict", func(t *testing.T) {
tests := []struct {
name string
}{
{"test_conflict_delete"},
{"test_conflict_delete_common"},
}
for _, test := range tests {
tblName := test.name
t.Run(test.name, func(t *testing.T) {
var mu sync.Mutex
keysMap := make(map[int64]int64)
workerNum := 10
rowCount := 1000
batch := rowCount / workerNum
start := time.Now()
var wg sync.WaitGroup
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := batch*i + j
s.execInsert(fmt.Sprintf("insert into %s values (%d, %d)", tblName, k, k))
mu.Lock()
keysMap[int64(k)] = int64(k)
mu.Unlock()
}
}(i)
}
wg.Wait()
end := time.Now()
fmt.Printf("[TestSimpleConflictDelete][Insert][Time Cost]%v\n", end.Sub(start))
start = time.Now()
wg.Add(workerNum)
for i := 0; i < workerNum; i++ {
go func(i int) {
defer wg.Done()
for j := 0; j < batch; j++ {
k := randomNum(rowCount)
s.mustExec(fmt.Sprintf("delete from %s where c1 = %d", tblName, k))
mu.Lock()
delete(keysMap, int64(k))
mu.Unlock()
}
}(i)
}
wg.Wait()
end = time.Now()
fmt.Printf("[TestSimpleConflictDelete][Delete][Time Cost]%v\n", end.Sub(start))
ctx := s.ctx
err := ctx.NewTxn(goctx.Background())
require.NoError(t, err)
tbl := s.getTable(t, tblName)
handles := kv.NewHandleMap()
err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(h kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) {
handles.Set(h, struct{}{})
require.Contains(t, keysMap, data[0].GetValue())
return true, nil
})
require.NoError(t, err)
require.Len(t, keysMap, handles.Len())
})
}
})
}
// addEnvPath appends newPath to $PATH.
func addEnvPath(newPath string) {
_ = os.Setenv("PATH", fmt.Sprintf("%s%c%s", os.Getenv("PATH"), os.PathListSeparator, newPath))
}
func init() {
rand.Seed(time.Now().UnixNano())
_ = store.Register("tikv", tidbdriver.TiKVDriver{})
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
gir_ecommerce/settings.py
|
"""
Django settings for gir_ecommerce project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', '123')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#libs
'widget_tweaks',
#apps
'core',
'accounts',
'catalog',
'checkout',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'checkout.middleware.cart_item_middleware',
]
ROOT_URLCONF = 'gir_ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#app
'catalog.context_processors.categories',
],
},
},
]
WSGI_APPLICATION = 'gir_ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
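# dj_database_url reads the DATABASE_URL environment variable when present, e.g. (illustrative value only):
# DATABASE_URL=postgres://user:password@localhost:5432/gir_ecommerce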
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['*']
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# E-mail: test with Gmail settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
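# Illustrative Gmail SMTP values (placeholders, not project defaults):
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_PORT = 587
# EMAIL_USE_TLS = True
# EMAIL_HOST_USER = '[email protected]'
# EMAIL_HOST_PASSWORD = '<app password>'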
# auth
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'index'
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.ModelBackend',
)
# Messages
from django.contrib.messages import constants as messages_constants
MESSAGE_TAGS = {
messages_constants.DEBUG: 'debug',
messages_constants.INFO: 'info',
messages_constants.SUCCESS: 'success',
messages_constants.WARNING: 'warning',
messages_constants.ERROR: 'danger',
}
# PagSeguro token for the SANDBOX environment
PAGSEGURO_TOKEN = ''
PAGSEGURO_EMAIL = '[email protected]'
PAGSEGURO_SANDBOX = True
try:
from .local_settings import *
except ImportError:
pass
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
storage/local.go
|
package storage
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/minio/sio"
"golang.org/x/crypto/hkdf"
)
type localStore struct {
prefix string
masterKey [32]byte
}
// Local is the package-level instance of the local, encrypted filesystem store.
var Local localStore
func (store localStore) Init() {
dir := os.Getenv("LOCAL_DIR")
if dir == "" {
dir = "files"
}
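// LOCAL_ENCRYPTION_KEY is expected to be a 64-character hex string (a 32-byte key).
// Note that a missing or malformed key is silently ignored below, leaving the
// master key all-zero, so every derived encryption key becomes predictable.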
key := os.Getenv("LOCAL_ENCRYPTION_KEY")
eKey, err := hex.DecodeString(key)
var keyBytes [32]byte
if err == nil {
copy(keyBytes[:], eKey)
Local.masterKey = keyBytes
}
Local.prefix = dir
}
func (store localStore) GetReader(path string, nonce [32]byte) (rc io.ReadCloser, err error) {
// fmt.Println(store.prefix + "/" + path)
f, err := os.Open(store.prefix + "/" + path)
if err != nil {
return nil, err
}
defer f.Close()
buf := new(bytes.Buffer)
if err := store.decryptReader(buf, f, nonce); err != nil {
return nil, err
}
return ioutil.NopCloser(buf), nil
}
func (store localStore) Delete(path string) error {
var err = os.Remove(store.prefix + "/" + path)
if err != nil {
return err
}
fmt.Println("==> done deleting file")
return nil
}
func (store localStore) PostReader(path string, buf *bytes.Buffer, length int64, contentType string, nonce [32]byte) error {
file, err := os.Create(store.prefix + "/" + path)
if err != nil {
fmt.Println(err)
return err
}
defer file.Close()
if err := store.encryptReader(file, buf, nonce); err != nil {
fmt.Println(err)
return err
}
return file.Sync()
}
// UNIMPLEMENTED
func (store localStore) List(path string) ([]string, error) {
return []string{}, nil
}
func (store localStore) encryptReader(dest io.Writer, src io.Reader, nonce [32]byte) error {
// derive an encryption key from the master key and the nonce
var key [32]byte
kdf := hkdf.New(sha256.New, store.masterKey[:], nonce[:], nil)
if _, err := io.ReadFull(kdf, key[:]); err != nil {
fmt.Printf("Failed to derive encryption key: %v", err) // add error handling
return err
}
encrypted, err := sio.EncryptReader(src, sio.Config{Key: key[:]})
if err != nil {
fmt.Printf("Failed to encrypted reader: %v", err) // add error handling
return err
}
// the encrypted io.Reader can be used like every other reader - e.g. for copying
if _, err := io.Copy(dest, encrypted); err != nil {
fmt.Printf("Failed to copy data: %v", err) // add error handling
return err
}
return nil
}
func (store localStore) decryptReader(dest io.Writer, src io.Reader, nonce [32]byte) error {
// derive the encryption key from the master key and the nonce
var key [32]byte
kdf := hkdf.New(sha256.New, store.masterKey[:], nonce[:], nil)
if _, err := io.ReadFull(kdf, key[:]); err != nil {
fmt.Printf("Failed to derive encryption key: %v", err) // add error handling
return err
}
if _, err := sio.Decrypt(dest, src, sio.Config{Key: key[:]}); err != nil {
if _, ok := err.(sio.Error); ok {
fmt.Printf("Malformed encrypted data: %v", err) // add error handling - here we know that the data is malformed/not authentic.
return err
}
fmt.Printf("Failed to decrypt data: %v", err) // add error handling
return err
}
return nil
}
func (store localStore) GenNonce() ([32]byte, error) {
var nonce [32]byte
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
fmt.Printf("Failed to read random data: %v", err) // add error handling
return nonce, err
}
return nonce, nil
}
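// The sketch below illustrates how a caller might round-trip a payload through this
// store. It is an assumption-laden example, not part of the storage interface: the
// function name, the "demo.bin" path, and the content type are hypothetical, and it
// assumes Init has been pointed at an existing LOCAL_DIR directory.
func localRoundTripSketch() error {
Local.Init()
nonce, err := Local.GenNonce()
if err != nil {
return err
}
payload := []byte("hello, encrypted world")
buf := bytes.NewBuffer(payload)
// Encrypt the payload with a key derived from the master key and nonce, then write it.
if err := Local.PostReader("demo.bin", buf, int64(len(payload)), "application/octet-stream", nonce); err != nil {
return err
}
// Read it back; the same nonce must be supplied to derive the same key.
rc, err := Local.GetReader("demo.bin", nonce)
if err != nil {
return err
}
defer rc.Close()
plain, err := ioutil.ReadAll(rc)
if err != nil {
return err
}
fmt.Printf("decrypted %d bytes\n", len(plain))
return nil
}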
|
[
"\"LOCAL_DIR\"",
"\"LOCAL_ENCRYPTION_KEY\""
] |
[] |
[
"LOCAL_DIR",
"LOCAL_ENCRYPTION_KEY"
] |
[]
|
["LOCAL_DIR", "LOCAL_ENCRYPTION_KEY"]
|
go
| 2 | 0 | |
src/test/java/com/thoughtworks/cruise/RuntimePath.java
|
/*************************GO-LICENSE-START*********************************
* Copyright 2015 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*************************GO-LICENSE-END***********************************/
package com.thoughtworks.cruise;
import java.io.File;
import static com.thoughtworks.cruise.util.CruiseConstants.CURRENT_REVISION;
public class RuntimePath {
private static final String SERVER_ROOT = "target/go-server-" + CURRENT_REVISION;
private static final String SERVER_CONFIG_PATH = "target/go-server-" + CURRENT_REVISION + "/config"; // use this path if you want to run tests against Development Twist Server "../../cruise/server/config";
private static final String AGENT_ROOT = "target/go-agent-" + CURRENT_REVISION;
public static String pathFor(String path) {
return path;
}
public static String absolutePathFor(String path){
return new File(pathFor(path)).getAbsolutePath();
}
public static String getServerRoot() {
//System.getenv("TWIST_CRUISE_PORT") != null ? System.getenv("TWIST_CRUISE_PORT") : "8253";
String serverPath = System.getenv("TWIST_SERVER_PATH") != null ? System.getenv("TWIST_SERVER_PATH") : pathFor(SERVER_ROOT);
return serverPath;
}
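// Illustrative override (hypothetical path): exporting TWIST_SERVER_PATH=/opt/go-server
// before the run makes getServerRoot() return that directory instead of the
// per-revision default under target/.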
public static String getArtifactPath(String artifactsDir) {
return new File(RuntimePath.getServerRoot(), artifactsDir).getAbsolutePath();
}
public static String getServerConfigPath() {
String serverConfigPath = System.getenv("TWIST_SERVER_CONFIG_PATH") != null ? System.getenv("TWIST_SERVER_CONFIG_PATH") : pathFor(SERVER_CONFIG_PATH);
return serverConfigPath;
}
public static String getAgentRoot() {
String agentPath = System.getenv("TWIST_AGENT_PATH") != null ? System.getenv("TWIST_AGENT_PATH") : pathFor(AGENT_ROOT);
return agentPath;
}
public static String getTwistAgentPath() {
String agentPath = System.getenv("TWIST_AGENT_PATH") != null ? System.getenv("TWIST_AGENT_PATH") : pathFor("target/twist-agents");
return agentPath;
}
public static String getOldAgentPath() {
String agentPath = pathFor(SetupAgents.AGENT_2_4);
return agentPath;
}
}
|
[
"\"TWIST_CRUISE_PORT\"",
"\"TWIST_CRUISE_PORT\"",
"\"TWIST_SERVER_PATH\"",
"\"TWIST_SERVER_PATH\"",
"\"TWIST_SERVER_CONFIG_PATH\"",
"\"TWIST_SERVER_CONFIG_PATH\"",
"\"TWIST_AGENT_PATH\"",
"\"TWIST_AGENT_PATH\"",
"\"TWIST_AGENT_PATH\"",
"\"TWIST_AGENT_PATH\""
] |
[] |
[
"TWIST_SERVER_PATH",
"TWIST_AGENT_PATH",
"TWIST_CRUISE_PORT",
"TWIST_SERVER_CONFIG_PATH"
] |
[]
|
["TWIST_SERVER_PATH", "TWIST_AGENT_PATH", "TWIST_CRUISE_PORT", "TWIST_SERVER_CONFIG_PATH"]
|
java
| 4 | 0 | |
driver/options.go
|
/* Firecracker-task-driver is a task driver for Hashicorp's nomad that allows
* to create microvms using AWS Firecracker vmm
* Copyright (C) 2019 Carlos Neira [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
package firevm
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"path/filepath"
"strconv"
"strings"
firecracker "github.com/firecracker-microvm/firecracker-go-sdk"
models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
func newOptions() *options {
return &options{
createFifoFileLogs: createFifoFileLogs,
}
}
func genmacaddr() (string, error) {
buf := make([]byte, 6)
if _, err := rand.Read(buf); err != nil {
return "", fmt.Errorf("Fail to generate mac address: %w", err)
}
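// Set the locally administered bit on the first octet so the generated address
// cannot collide with a vendor-assigned (universally administered) MAC.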
buf[0] |= 2
return strings.ToUpper(fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5])), nil
}
func RandomVethName() (string, error) {
entropy := make([]byte, 4)
_, err := rand.Read(entropy)
if err != nil {
return "", fmt.Errorf("failed to generate random veth name: %v", err)
}
// NetworkManager (recent versions) will ignore veth devices that start with "veth"
return fmt.Sprintf("veth%x", entropy), nil
}
type options struct {
FcBinary string `long:"firecracker-binary" description:"Path to firecracker binary"`
FcKernelImage string `long:"kernel" description:"Path to the kernel image" default:"./vmlinux"`
FcKernelCmdLine string `long:"kernel-opts" description:"Kernel commandline" default:"ro console=ttyS0 noapic reboot=k panic=1 pci=off nomodules"`
FcRootDrivePath string `long:"root-drive" description:"Path to root disk image"`
FcRootPartUUID string `long:"root-partition" description:"Root partition UUID"`
FcAdditionalDrives []string `long:"add-drive" description:"Path to additional drive, suffixed with :ro or :rw, can be specified multiple times"`
FcNetworkName string `long:"Network-name" description:"Network name configured by CNI"`
FcNicConfig Nic `long:"Nic-config" description:"Nic configuration from tap device"`
FcVsockDevices []string `long:"vsock-device" description:"Vsock interface, specified as PATH:CID. Multiple OK"`
FcLogFifo string `long:"vmm-log-fifo" description:"FIFO for firecracker logs"`
FcLogLevel string `long:"log-level" description:"vmm log level" default:"Debug"`
FcMetricsFifo string `long:"metrics-fifo" description:"FIFO for firecracker metrics"`
FcDisableHt bool `long:"disable-hyperthreading" short:"t" description:"Disable CPU Hyperthreading"`
FcCPUCount int64 `long:"ncpus" short:"c" description:"Number of CPUs" default:"1"`
FcCPUTemplate string `long:"cpu-template" description:"Firecracker CPU Template (C3 or T2)"`
FcMemSz int64 `long:"memory" short:"m" description:"VM memory, in MiB" default:"512"`
FcMetadata string `long:"metadata" description:"Firecracker Metadata for MMDS (json)"`
FcFifoLogFile string `long:"firecracker-log" short:"l" description:"pipes the fifo contents to the specified file"`
FcSocketPath string `long:"socket-path" short:"s" description:"path to use for firecracker socket, defaults to a unique file in the first existing directory from {$HOME, $TMPDIR, or /tmp}"`
Debug bool `long:"debug" short:"d" description:"Enable debug output"`
Version bool `long:"version" description:"Outputs the version of the application"`
closers []func() error
validMetadata interface{}
createFifoFileLogs func(fifoPath string) (*os.File, error)
}
// Converts options to a usable firecracker config
func (opts *options) getFirecrackerConfig(AllocId string) (firecracker.Config, error) {
// validate metadata json
if opts.FcMetadata != "" {
if err := json.Unmarshal([]byte(opts.FcMetadata), &opts.validMetadata); err != nil {
return firecracker.Config{},
errors.Wrap(err, errInvalidMetadata.Error())
}
}
//setup NICs
NICs, err := opts.getNetwork(AllocId)
if err != nil {
return firecracker.Config{}, err
}
// BlockDevices
blockDevices, err := opts.getBlockDevices()
if err != nil {
return firecracker.Config{}, err
}
// vsocks
vsocks, err := parseVsocks(opts.FcVsockDevices)
if err != nil {
return firecracker.Config{}, err
}
//fifos
fifo, err := opts.handleFifos()
if err != nil {
return firecracker.Config{}, err
}
var socketPath string
if opts.FcSocketPath != "" {
socketPath = opts.FcSocketPath
} else {
socketPath = getSocketPath()
}
htEnabled := !opts.FcDisableHt
return firecracker.Config{
SocketPath: socketPath,
LogFifo: opts.FcLogFifo,
LogLevel: opts.FcLogLevel,
MetricsFifo: opts.FcMetricsFifo,
FifoLogWriter: fifo,
KernelImagePath: opts.FcKernelImage,
KernelArgs: opts.FcKernelCmdLine,
Drives: blockDevices,
NetworkInterfaces: NICs,
VsockDevices: vsocks,
MachineCfg: models.MachineConfiguration{
VcpuCount: firecracker.Int64(opts.FcCPUCount),
CPUTemplate: models.CPUTemplate(opts.FcCPUTemplate),
HtEnabled: firecracker.Bool(htEnabled),
MemSizeMib: firecracker.Int64(opts.FcMemSz),
},
}, nil
}
func (opts *options) getNetwork(AllocId string) ([]firecracker.NetworkInterface, error) {
var NICs []firecracker.NetworkInterface
if len(opts.FcNetworkName) > 0 && len(opts.FcNicConfig.Ip) > 0 {
return nil, errConflictingNetworkOpts
}
if len(opts.FcNetworkName) > 0 {
veth, err := RandomVethName()
if err != nil {
return nil, err
}
nic := firecracker.NetworkInterface{
CNIConfiguration: &firecracker.CNIConfiguration{
NetworkName: opts.FcNetworkName,
IfName: veth,
},
}
NICs = append(NICs, nic)
}
if len(opts.FcNicConfig.Ip) > 0 {
_, Net, err := net.ParseCIDR(opts.FcNicConfig.Ip)
if err != nil {
return nil, fmt.Errorf("Fail to parse CIDR address: %v", err)
}
mockMacAddrString, err := genmacaddr()
if err != nil {
return nil, err
}
nic := firecracker.NetworkInterface{
StaticConfiguration: &firecracker.StaticNetworkConfiguration{
MacAddress: mockMacAddrString,
HostDevName: opts.FcNicConfig.Interface,
IPConfiguration: &firecracker.IPConfiguration{
IPAddr: net.IPNet{
IP: Net.IP,
Mask: Net.Mask,
},
Gateway: net.ParseIP(opts.FcNicConfig.Gateway),
Nameservers: opts.FcNicConfig.Nameservers,
},
},
}
NICs = append(NICs, nic)
}
return NICs, nil
}
// constructs a list of drives from the options config
func (opts *options) getBlockDevices() ([]models.Drive, error) {
blockDevices, err := parseBlockDevices(opts.FcAdditionalDrives)
if err != nil {
return nil, err
}
rootDrive := models.Drive{
DriveID: firecracker.String("1"),
PathOnHost: &opts.FcRootDrivePath,
IsRootDevice: firecracker.Bool(true),
IsReadOnly: firecracker.Bool(false),
Partuuid: opts.FcRootPartUUID,
}
blockDevices = append(blockDevices, rootDrive)
return blockDevices, nil
}
// handleFifos will see if any fifos need to be generated and if a fifo log
// file should be created.
func (opts *options) handleFifos() (io.Writer, error) {
// these booleans are used to check whether or not the fifo queue or metrics
// fifo queue needs to be generated. If any which need to be generated, then
// we know we need to create a temporary directory. Otherwise, a temporary
// directory does not need to be created.
generateFifoFilename := false
generateMetricFifoFilename := false
var err error
var fifo io.WriteCloser
if len(opts.FcFifoLogFile) > 0 {
if len(opts.FcLogFifo) > 0 {
return nil, errConflictingLogOpts
}
generateFifoFilename = true
// if a fifo log file was specified via the CLI then we need to check if
// metric fifo was also specified. If not, we will then generate that fifo
if len(opts.FcMetricsFifo) == 0 {
generateMetricFifoFilename = true
}
if fifo, err = opts.createFifoFileLogs(opts.FcFifoLogFile); err != nil {
return nil, errors.Wrap(err, errUnableToCreateFifoLogFile.Error())
}
opts.addCloser(func() error {
return fifo.Close()
})
} else if len(opts.FcLogFifo) > 0 || len(opts.FcMetricsFifo) > 0 {
// this checks to see if either one of the fifos was set. If at least one
// has been set we check to see if any of the others were not set. If one
// isn't set, we will generate the proper file path.
if len(opts.FcLogFifo) == 0 {
generateFifoFilename = true
}
if len(opts.FcMetricsFifo) == 0 {
generateMetricFifoFilename = true
}
}
if generateFifoFilename || generateMetricFifoFilename {
dir, err := ioutil.TempDir(os.TempDir(), "fcfifo")
if err != nil {
return fifo, fmt.Errorf("Fail to create temporary directory: %v", err)
}
opts.addCloser(func() error {
return os.RemoveAll(dir)
})
if generateFifoFilename {
opts.FcLogFifo = filepath.Join(dir, "fc_fifo")
}
if generateMetricFifoFilename {
opts.FcMetricsFifo = filepath.Join(dir, "fc_metrics_fifo")
}
}
return fifo, nil
}
func (opts *options) addCloser(c func() error) {
opts.closers = append(opts.closers, c)
}
func (opts *options) Close() {
for _, closer := range opts.closers {
err := closer()
if err != nil {
log.Error(err)
}
}
}
// parseBlockDevices converts a []string of entries in the form path:suffix into []models.Drive.
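// Example (hypothetical values): []string{"/data/cache.img:rw", "/data/seed.img:ro"}
// yields drives with IDs "2" and "3", read-write and read-only respectively
// (ID "1" is reserved for the root drive).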
func parseBlockDevices(entries []string) ([]models.Drive, error) {
devices := []models.Drive{}
for i, entry := range entries {
path := ""
readOnly := true
if strings.HasSuffix(entry, ":rw") {
readOnly = false
path = strings.TrimSuffix(entry, ":rw")
} else if strings.HasSuffix(entry, ":ro") {
path = strings.TrimSuffix(entry, ":ro")
} else {
return nil, errInvalidDriveSpecificationNoSuffix
}
if path == "" {
return nil, errInvalidDriveSpecificationNoPath
}
if _, err := os.Stat(path); err != nil {
return nil, err
}
e := models.Drive{
// i + 2 represents the drive ID. We will reserve 1 for root.
DriveID: firecracker.String(strconv.Itoa(i + 2)),
PathOnHost: firecracker.String(path),
IsReadOnly: firecracker.Bool(readOnly),
IsRootDevice: firecracker.Bool(false),
}
devices = append(devices, e)
}
return devices, nil
}
// Given a string of the form DEVICE/MACADDR, return the device name and the mac address, or an error
func parseNicConfig(cfg string) (string, string, error) {
fields := strings.Split(cfg, "/")
if len(fields) != 2 || len(fields[0]) == 0 || len(fields[1]) == 0 {
return "", "", errInvalidNicConfig
}
return fields[0], fields[1], nil
}
// Given a list of string representations of vsock devices,
// return a corresponding slice of machine.VsockDevice objects
func parseVsocks(devices []string) ([]firecracker.VsockDevice, error) {
var result []firecracker.VsockDevice
for _, entry := range devices {
fields := strings.Split(entry, ":")
if len(fields) != 2 || len(fields[0]) == 0 || len(fields[1]) == 0 {
return []firecracker.VsockDevice{}, errUnableToParseVsockDevices
}
CID, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
return []firecracker.VsockDevice{}, errUnableToParseVsockCID
}
dev := firecracker.VsockDevice{
Path: fields[0],
CID: uint32(CID),
}
result = append(result, dev)
}
return result, nil
}
func createFifoFileLogs(fifoPath string) (*os.File, error) {
return os.OpenFile(fifoPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
}
// getSocketPath provides a randomized socket path by building a unique filename
// and checking for the existence of the directories {$HOME, os.TempDir()}, returning
// the first existing directory joined with the unique filename. If no suitable
// directory can be found, it panics.
func getSocketPath() string {
filename := strings.Join([]string{
".firecracker.sock",
strconv.Itoa(os.Getpid()),
strconv.Itoa(rand.Intn(1000))},
"-",
)
var dir string
if d := os.Getenv("HOME"); checkExistsAndDir(d) {
dir = d
} else if checkExistsAndDir(os.TempDir()) {
dir = os.TempDir()
} else {
panic("Unable to find a location for firecracker socket. 'It's not going to do any good to land on mars if we're stupid.' --Ray Bradbury")
}
return filepath.Join(dir, filename)
}
// checkExistsAndDir returns true if path exists and is a Dir
func checkExistsAndDir(path string) bool {
// empty
if path == "" {
return false
}
// does it exist?
if info, err := os.Stat(path); err == nil {
// is it a directory?
return info.IsDir()
}
return false
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
tests/conftest.py
|
import datetime
import locale
import logging
import os
import random
import shutil
import sys
import warnings
from typing import Dict, List, Optional
import numpy as np
import pandas as pd
import pytest
from freezegun import freeze_time
from ruamel.yaml import YAML
import great_expectations as ge
from great_expectations import DataContext
from great_expectations.core import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.core.usage_statistics.usage_statistics import (
UsageStatisticsHandler,
)
from great_expectations.core.util import get_or_create_spark_application
from great_expectations.data_context.store.profiler_store import ProfilerStore
from great_expectations.data_context.types.base import (
AnonymizedUsageStatisticsConfig,
CheckpointConfig,
DataContextConfig,
GeCloudConfig,
)
from great_expectations.data_context.types.resource_identifiers import (
ConfigurationIdentifier,
ExpectationSuiteIdentifier,
GeCloudIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
instantiate_class_from_config,
)
from great_expectations.dataset.pandas_dataset import PandasDataset
from great_expectations.datasource import SqlAlchemyDatasource
from great_expectations.datasource.data_connector.util import (
get_filesystem_one_level_directory_glob_path_list,
)
from great_expectations.datasource.new_datasource import BaseDatasource, Datasource
from great_expectations.rule_based_profiler.config import RuleBasedProfilerConfig
from great_expectations.rule_based_profiler.config.base import (
ruleBasedProfilerConfigSchema,
)
from great_expectations.rule_based_profiler.parameter_builder.simple_date_format_string_parameter_builder import (
DEFAULT_CANDIDATE_STRINGS,
)
from great_expectations.self_check.util import (
build_test_backends_list as build_test_backends_list_v3,
)
from great_expectations.self_check.util import (
expectationSuiteValidationResultSchema,
get_dataset,
)
from great_expectations.util import is_library_loadable
RULE_BASED_PROFILER_MIN_PYTHON_VERSION: tuple = (3, 7)
yaml = YAML()
###
#
# NOTE: THESE TESTS ARE WRITTEN WITH THE en_US.UTF-8 LOCALE AS DEFAULT FOR STRING FORMATTING
#
###
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
logger = logging.getLogger(__name__)
def skip_if_python_below_minimum_version():
"""
All test fixtures for Rule-Based Profiler must execute this method; for example:
```
skip_if_python_below_minimum_version()
```
for as long as the support for Python versions less than 3.7 is provided. In particular, Python-3.6 support for
"dataclasses.asdict()" does not handle None values as well as the more recent versions of Python do.
"""
if sys.version_info < RULE_BASED_PROFILER_MIN_PYTHON_VERSION:
pytest.skip(
"skipping fixture because Python version 3.7 (or greater) is required"
)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"smoketest: mark test as smoketest--it does not have useful assertions but may produce side effects "
"that require manual inspection.",
)
config.addinivalue_line(
"markers",
"rendered_output: produces rendered output that should be manually reviewed.",
)
config.addinivalue_line(
"markers",
"aws_integration: runs aws integration test that may be very slow and requires credentials",
)
def pytest_addoption(parser):
# note: --no-spark will be deprecated in favor of --spark
parser.addoption(
"--no-spark",
action="store_true",
help="If set, suppress tests against the spark test suite",
)
parser.addoption(
"--spark",
action="store_true",
help="If set, execute tests against the spark test suite",
)
parser.addoption(
"--no-sqlalchemy",
action="store_true",
help="If set, suppress all tests using sqlalchemy",
)
parser.addoption(
"--postgresql",
action="store_true",
help="If set, execute tests against postgresql",
)
# note: --no-postgresql will be deprecated in favor of --postgresql
parser.addoption(
"--no-postgresql",
action="store_true",
help="If set, supress tests against postgresql",
)
parser.addoption(
"--mysql",
action="store_true",
help="If set, execute tests against mysql",
)
parser.addoption(
"--mssql",
action="store_true",
help="If set, execute tests against mssql",
)
parser.addoption(
"--bigquery",
action="store_true",
help="If set, execute tests against bigquery",
)
parser.addoption(
"--aws",
action="store_true",
help="If set, execute tests against AWS resources like S3, RedShift and Athena",
)
parser.addoption(
"--aws-integration",
action="store_true",
help="If set, run aws integration tests for usage_statistics",
)
parser.addoption(
"--docs-tests",
action="store_true",
help="If set, run integration tests for docs",
)
parser.addoption(
"--performance-tests",
action="store_true",
help="If set, run performance tests (which might also require additional arguments like --bigquery)",
)
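# Illustrative invocation (hypothetical paths): running against the Spark and PostgreSQL
# backends would look like `pytest --spark --postgresql tests/`.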
def build_test_backends_list(metafunc):
test_backend_names: List[str] = build_test_backends_list_cfe(metafunc)
backend_name_class_name_map: Dict[str, str] = {
"pandas": "PandasDataset",
"spark": "SparkDFDataset",
}
backend_name: str
return [
(backend_name_class_name_map.get(backend_name) or backend_name)
for backend_name in test_backend_names
]
def build_test_backends_list_cfe(metafunc):
# adding deprecation warnings
if metafunc.config.getoption("--no-postgresql"):
warnings.warn(
"--no-sqlalchemy is deprecated as of v0.14 in favor of the --postgresql flag. It will be removed in v0.16. Please adjust your tests accordingly",
DeprecationWarning,
)
if metafunc.config.getoption("--no-spark"):
warnings.warn(
"--no-spark is deprecated as of v0.14 in favor of the --spark flag. It will be removed in v0.16. Please adjust your tests accordingly.",
DeprecationWarning,
)
include_pandas: bool = True
include_spark: bool = metafunc.config.getoption("--spark")
include_sqlalchemy: bool = not metafunc.config.getoption("--no-sqlalchemy")
include_postgresql: bool = metafunc.config.getoption("--postgresql")
include_mysql: bool = metafunc.config.getoption("--mysql")
include_mssql: bool = metafunc.config.getoption("--mssql")
include_bigquery: bool = metafunc.config.getoption("--bigquery")
include_aws: bool = metafunc.config.getoption("--aws")
test_backend_names: List[str] = build_test_backends_list_v3(
include_pandas=include_pandas,
include_spark=include_spark,
include_sqlalchemy=include_sqlalchemy,
include_postgresql=include_postgresql,
include_mysql=include_mysql,
include_mssql=include_mssql,
include_bigquery=include_bigquery,
include_aws=include_aws,
)
return test_backend_names
def pytest_generate_tests(metafunc):
test_backends = build_test_backends_list(metafunc)
if "test_backend" in metafunc.fixturenames:
metafunc.parametrize("test_backend", test_backends, scope="module")
if "test_backends" in metafunc.fixturenames:
metafunc.parametrize("test_backends", [test_backends], scope="module")
def pytest_collection_modifyitems(config, items):
if config.getoption("--aws-integration"):
# --aws-integration given in cli: do not skip aws-integration tests
return
if config.getoption("--docs-tests"):
# --docs-tests given in cli: do not skip documentation integration tests
return
skip_aws_integration = pytest.mark.skip(
reason="need --aws-integration option to run"
)
skip_docs_integration = pytest.mark.skip(reason="need --docs-tests option to run")
for item in items:
if "aws_integration" in item.keywords:
item.add_marker(skip_aws_integration)
if "docs" in item.keywords:
item.add_marker(skip_docs_integration)
@pytest.fixture(autouse=True)
def no_usage_stats(monkeypatch):
# Do not generate usage stats from test runs
monkeypatch.setenv("GE_USAGE_STATS", "False")
@pytest.fixture(scope="module")
def sa(test_backends):
if not any(
[dbms in test_backends for dbms in ["postgresql", "sqlite", "mysql", "mssql"]]
):
pytest.skip("No recognized sqlalchemy backend selected.")
else:
try:
import sqlalchemy as sa
return sa
except ImportError:
raise ValueError("SQL Database tests require sqlalchemy to be installed.")
@pytest.mark.order(index=2)
@pytest.fixture
def spark_session(test_backends):
if "SparkDFDataset" not in test_backends:
pytest.skip("No spark backend selected.")
try:
import pyspark
from pyspark.sql import SparkSession
return get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
except ImportError:
raise ValueError("spark tests are requested, but pyspark is not installed")
@pytest.fixture
def basic_spark_df_execution_engine(spark_session):
from great_expectations.execution_engine import SparkDFExecutionEngine
conf: List[tuple] = spark_session.sparkContext.getConf().getAll()
spark_config: Dict[str, str] = dict(conf)
execution_engine: SparkDFExecutionEngine = SparkDFExecutionEngine(
spark_config=spark_config,
)
return execution_engine
@pytest.mark.order(index=3)
@pytest.fixture
def spark_session_v012(test_backends):
if "SparkDFDataset" not in test_backends:
pytest.skip("No spark backend selected.")
try:
import pyspark
from pyspark.sql import SparkSession
return get_or_create_spark_application(
spark_config={
"spark.sql.catalogImplementation": "hive",
"spark.executor.memory": "450m",
# "spark.driver.allowMultipleContexts": "true", # This directive does not appear to have any effect.
}
)
except ImportError:
raise ValueError("spark tests are requested, but pyspark is not installed")
@pytest.fixture
def basic_expectation_suite(empty_data_context_stats_enabled):
context: DataContext = empty_data_context_stats_enabled
expectation_suite = ExpectationSuite(
expectation_suite_name="default",
meta={},
expectations=[
ExpectationConfiguration(
expectation_type="expect_column_to_exist",
kwargs={"column": "infinities"},
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "nulls"}
),
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "naturals"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_unique",
kwargs={"column": "naturals"},
),
],
data_context=context,
)
return expectation_suite
@pytest.fixture
def numeric_high_card_dict():
# fmt: off
data = {
"norm_0_1": [
0.7225866251125405, -0.5951819764073379, -0.2679313226299394, -0.22503289285616823, 0.1432092195399402, 1.1874676802669433, 1.2766412196640815, 0.15197071140718296, -0.08787273509474242, -0.14524643717509128, -1.236408169492396, -0.1595432263317598, 1.0856768114741797, 0.5082788229519655, 0.26419244684748955, -0.2532308428977167, -0.6362679196021943, -3.134120304969242, -1.8990888524318292, 0.15701781863102648,
-0.775788419966582, -0.7400872167978756, -0.10578357492485335, 0.30287010067847436, -1.2127058770179304, -0.6750567678010801, 0.3341434318919877, 1.8336516507046157, 1.105410842250908, -0.7711783703442725, -0.20834347267477862, -0.06315849766945486, 0.003016997583954831, -1.0500016329150343, -0.9168020284223636, 0.306128397266698, 1.0980602112281863, -0.10465519493772572, 0.4557797534454941, -0.2524452955086468,
-1.6176089110359837, 0.46251282530754667, 0.45751208998354903, 0.4222844954971609, 0.9651098606162691, -0.1364401431697167, -0.4988616288584964, -0.29549238375582904, 0.6950204582392359, 0.2975369992016046, -1.0159498719807218, 1.3704532401348395, 1.1210419577766673, 1.2051869452003332, 0.10749349867353084, -3.1876892257116562, 1.316240976262548, -1.3777452919511493, -1.0666211985935259, 1.605446695828751,
-0.39682821266996865, -0.2828059717857655, 1.30488698803017, -2.116606225467923, -0.2026680301462151, -0.05504008273574069, -0.028520163428411835, 0.4424105678123449, -0.3427628263418371, 0.23805293411919937, -0.7515414823259695, -0.1272505897548366, 1.803348436304099, -2.0178252709022124, 0.4860300090112474, 1.2304054166426217, 0.7228668982068365, 1.7400607500575112, 0.3480274098246697, -0.3887978895385282,
-1.6511926233909175, 0.14517929503564567, -1.1599010576123796, -0.016133552438119002, 0.47157644883706273, 0.27657785075518254, 1.4464286976282463, -1.2605489185634533, -1.2548765025615338, 0.0755319579826929, 1.0476733637516833, -0.7038690219524807, -0.9580696842862921, -0.18135657098008018, -0.18163993379314564, 0.4092798531146971, -2.049808182546896, -1.2447062617916826, -1.6681140306283337, 1.0709944517933483,
-0.7059385234342846, -0.8033587669003331, -1.8152275905903312, 0.11729996097670137, 2.2994900038012376, -0.1291192451734159, -0.6731565869164164, -0.06690994571366346, -0.40330072968473235, -0.23927186025094221, 2.7756216937096676, 0.06441299443146056, -0.5095247173507204, -0.5228853558871007, 0.806629654091097, -2.110096084114651, -0.1233374136509439, -1.021178519845751, 0.058906278340351045, -0.26316852406211017,
-1.2990807244026237, -0.1937986598084067, 0.3909222793445317, 0.578027315076297, -0.11837271520846208, -1.134297652720464, 0.496915417153268, -0.5315184110418045, 0.5284176849952198, -1.6810338988102331, 0.41220454054009154, 1.0554031136792, -1.4222775023918832, -1.1664353586956209, 0.018952180522661358, -0.04620616876577671, -0.8446292647938418, -0.6889432180332509, -0.16012081070647954, 0.5680940644754282,
-1.9792941921407943, 0.35441842206114726, 0.12433268557499534, 0.25366905921805377, 0.6262297786892028, 1.327981424671081, 1.774834324890265, -0.9725604763128438, 0.42824027889428, 0.19725541390327114, 1.4640606982992412, 1.6484993842838995, 0.009848260786412894, -2.318740403198263, -0.4125245127403577, -0.15500831770388285, 1.010740123094443, 0.7509498708766653, -0.021415407776108144, 0.6466776546788641,
-1.421096837521404, 0.5632248951325018, -1.230539161899903, -0.26766333435961503, -1.7208241092827994, -1.068122926814994, -1.6339248620455546, 0.07225436117508208, -1.2018233250224348, -0.07213000691963527, -1.0080992229563746, -1.151378048476321, -0.2660104149809121, 1.6307779136408695, 0.8394822016824073, -0.23362802143120032, -0.36799502320054384, 0.35359852278856263, 0.5830948999779656, -0.730683771776052,
1.4715728371820667, -1.0668090648998136, -1.025762014881618, 0.21056106958224155, -0.5141254207774576, -0.1592942838690149, 0.7688711617969363, -2.464535892598544, -0.33306989349452987, 0.9457207224940593, 0.36108072442574435, -0.6490066877470516, -0.8714147266896871, 0.6567118414749348, -0.18543305444915045, 0.11156511615955596, 0.7299392157186994, -0.9902398239693843, -1.3231344439063761, -1.1402773433114928,
0.3696183719476138, -1.0512718152423168, -0.6093518314203102, 0.0010622538704462257, -0.17676306948277776, -0.6291120128576891, 1.6390197341434742, -0.8105788162716191, -2.0105672384392204, -0.7909143328024505, -0.10510684692203587, -0.013384480496840259, 0.37683659744804815, -0.15123337965442354, 1.8427651248902048, 1.0371006855495906, 0.29198928612503655, -1.7455852392709181, 1.0854545339796853, 1.8156620972829793,
1.2399563224061596, 1.1196530775769857, 0.4349954478175989, 0.11093680938321168, 0.9945934589378227, -0.5779739742428905, 1.0398502505219054, -0.09401160691650227, 0.22793239636661505, -1.8664992140331715, -0.16104499274010126, -0.8497511318264537, -0.005035074822415585, -1.7956896952184151, 1.8304783101189757, 0.19094408763231646, 1.3353023874309002, 0.5889134606052353, -0.48487660139277866, 0.4817014755127622,
1.5981632863770983, 2.1416849775567943, -0.5524061711669017, 0.3364804821524787, -0.8609687548167294, 0.24548635047971906, -0.1281468603588133, -0.03871410517044196, -0.2678174852638268, 0.41800607312114096, -0.2503930647517959, 0.8432391494945226, -0.5684563173706987, -0.6737077809046504, 2.0559579098493606, -0.29098826888414253, -0.08572747304559661, -0.301857666880195, -0.3446199959065524, 0.7391340848217359,
-0.3087136212446006, 0.5245553707204758, -3.063281336805349, 0.47471623010413705, 0.3733427291759615, -0.26216851429591426, -0.5433523111756248, 0.3305385199964823, -1.4866150542941634, -0.4699911958560942, 0.7312367186673805, -0.22346998944216903, -0.4102860865811592, -0.3003478250288424, -0.3436168605845268, 0.9456524589400904, -0.03710285453384255, 0.10330609878001526, 0.6919858329179392, 0.8673477607085118,
0.380742577915601, 0.5785785515837437, -0.011421905830097267, 0.587187810965595, -1.172536467775141, -0.532086162097372, -0.34440413367820183, -1.404900386188497, -0.1916375229779241, 1.6910999461291834, -0.6070351182769795, -0.8371447893868493, 0.8853944070432224, 1.4062946075925473, -0.4575973141608374, 1.1458755768004445, 0.2619874618238163, 1.7105876844856704, -1.3938976454537522, -0.11403217166441704,
-1.0354305240085717, -0.4285770475062154, 0.10326635421187867, 0.6911853442971228, 0.6293835213179542, -0.819693698713199, -0.7378190403744175, -1.495947672573938, -1.2406693914431872, -1.0486341638186725, -1.3715759883075953, 3.585407817418151, -0.8007079372574223, -1.527336776754733, -0.4716571043072485, -0.6967311271405545, 1.0003347462169225, -0.30569565002022697, 0.3646134876772732, 0.49083033603832493,
0.07754580794955847, -0.13467337850920083, 0.02134473458605164, 0.5025183900540823, -0.940929087894874, 1.441600637127558, -0.0857298131221344, -0.575175243519591, 0.42622029657630595, -0.3239674701415489, 0.22648849821602596, -0.6636465305318631, 0.30415000329164754, -0.6170241274574016, 0.07578674772163065, 0.2952841441615124, 0.8120317689468056, -0.46861353019671337, 0.04718559572470416, -0.3105660017232523,
-0.28898463203535724, 0.9575298065734561, -0.1977556031830993, 0.009658232624257272, 1.1432743259603295, -1.8989396918936858, 0.20787070770386357, 1.4256750543782999, -0.03838329973778874, -0.9051229357470373, -1.2002277085489457, 2.405569956130733, 1.895817948326675, -0.8260858325924574, 0.5759061866255807, 2.7022875569683342, 1.0591327405967745, 0.21449833798124354, 0.19970388388081273, 0.018242139911433558,
-0.630960146999549, -2.389646042147776, 0.5424304992480339, -1.2159551561948718, -1.6851632640204128, -0.4812221268109694, 0.6217652794219579, -0.380139431677482, -0.2643524783321051, 0.5106648694993016, -0.895602157034141, -0.20559568725141816, 1.5449271875734911, 1.544075783565114, 0.17877619857826843, 1.9729717339967108, 0.8302033109816261, -0.39118561199170965, -0.4428357598297098, -0.02550407946753186,
-1.0202977138210447, 2.6604654314300835, 1.9163029269361842, 0.34697436596877657, -0.8078124769022497, -1.3876596649099957, 0.44707250163663864, -0.6752837232272447, -0.851291770954755, 0.7599767868730256, 0.8134109401706875, -1.6766750539980289, -0.06051832829232975, -0.4652931327216134, -0.9249124398287735, 1.9022739762222731, 1.7632300613807597, 1.675335012283785, 0.47529854476887495, -0.7892463423254658,
0.3910120652706098, 0.5812432547936405, 0.2693084649672777, -0.08138564925779349, 0.9150619269526952, -0.8637356349272142, -0.14137853834901817, -0.20192754829896423, 0.04718228147088756, -0.9743600144318, -0.9936290943927825, 0.3544612180477054, 0.6839546770735121, 1.5089070357620178, 1.301167565172228, -1.5396145667672985, 0.42854366341485456, -1.5876582617301032, -0.0316985879141714, 0.3144220016570915,
-0.05054766725644431, 0.2934139006870167, 0.11396170275994542, -0.6472140129693643, 1.6556030742445431, 1.0319410208453506, 0.3292217603989991, -0.058758121958605435, -0.19917171648476298, -0.5192866115874029, 0.1997510689920335, -1.3675686656161756, -1.7761517497832053, -0.11260276070167097, 0.9717892642758689, 0.0840815981843948, -0.40211265381258554, 0.27384496844034517, -1.0403875081272367, 1.2884781173493884,
-1.8066239592554476, 1.1136979156298865, -0.06223155785690416, 1.3930381289015936, 0.4586305673655182, 1.3159249757827194, -0.5369892835955705, 0.17827408233621184, 0.22693934439969682, 0.8216240002114816, -1.0422409752281838, 0.3329686606709231, -1.5128804353968217, 1.0323052869815534, 1.1640486934424354, 1.6450118078345612, -0.6717687395070293, -0.08135119186406627, 1.2746921873544188, -0.8255794145095643,
0.7123504776564864, 0.6953336934741682, 2.191382322698439, 1.4155790749261592, 2.4681081786912866, -2.2904357033803815, -0.8375155191566624, 1.1040106662196736, 0.7084133268872015, -3.401968681942055, 0.23237090512844757, 1.1199436238058174, 0.6333916486592628, -0.6012340913121055, -0.3693951838866523, -1.7742670566875682, -0.36431378282545124, -0.4042586409194551, -0.04648644034604476, 1.5138191613743486,
-0.2053670782251071, 1.8679122383251414, 0.8355881018692999, -0.5369705129279005, -0.7909355080370954, 2.1080036780007987, 0.019537331188020687, -1.4672982688640615, -1.486842866467901, -1.1036839537574874, 1.0800858540685894, -0.2313974176207594, 0.47763272078271807, -1.9196070490691473, -0.8193535127855751, -0.6853651905832031, -0.18272370464882973, -0.33413577684633056, 2.2261342671906106, 1.6853726343573683,
0.8563421109235769, 1.0468799885096596, 0.12189082561416206, -1.3596466927672854, -0.7607432068282968, 0.7061728288620306, -0.4384478018639071, 0.8620104661898899, 1.04258758121448, -1.1464159128515612, 0.9617945424413628, 0.04987102831355013, -0.8472878887606543, 0.32986774370339184, 1.278319839581162, -0.4040926804592034, -0.6691567800662129, 0.9415431940597389, 0.3974846022291844, -0.8425204662387112,
-1.506166868030291, -0.04248497940038203, 0.26434168799067986, -1.5698380163561454, -0.6651727917714935, 1.2400220571204048, -0.1251830593977037, 0.6156254221302833, 0.43585628657139575, -1.6014619037611209, 1.9152323656075512, -0.8847911114213622, 1.359854519784993, -0.5554989575409871, 0.25064804193232354, 0.7976616257678464, 0.37834567410982123, -0.6300374359617635, -1.0613465068052854, -0.866474302027355,
1.2458556977164312, 0.577814049080149, 2.069400463823993, 0.9068690176961165, -0.5031387968484738, -0.3640749863516844, -1.041502465417534, 0.6732994659644133, -0.006355018868252906, -0.3650517541386253, 1.0975063446734974, -2.203726812834859, 1.060685913143899, -0.4618706570892267, 0.06475263817517128, -0.19326357638969882, -0.01812119454736379, 0.1337618009668529, 1.1838276997792907, 0.4273677345455913,
-0.4912341608307858, 0.2349993979417651, 0.9566260826411601, -0.7948243131958422, -0.6168334352331588, 0.3369425926447926, 0.8547756445246633, 0.2666330662219728, 2.431868771129661, 1.0089732701876513, -0.1162341515974066, -1.1746306816795218, -0.08227639025627424, 0.794676385688044, 0.15005011094018297, -0.8763821573601055, -1.0811684990769739, 0.6311588092267179, 0.026124278982220386, 0.8306502001533514,
1.0856487813261877, -0.018702855899823106, -0.07338137135247896, -0.8435746484744243, -0.18091216366556986, 0.2295807891528797, -1.0689295774443397, -1.5621175533013612, 1.3314045672598216, 0.6211561903553582, 1.0479302317100871, -1.1509436982013124, 0.447985084931758, 0.19917261474342404, 0.3582887259341301, 0.9953552868908098, 0.8948165434511316, 0.4949033431999123, -0.23004847985703908, 0.6411581535557106,
-1.1589671573242186, -0.13691519182560624, -0.8849560872785238, 0.6629182075027006, 2.2608150731789696, 2.2823614453180294, -1.2291376923498247, -0.9267975556981378, 0.2597417839242135, -0.7667310491821938, 0.10503294084132372, 2.960320355577672, -1.0645098483081497, -1.2888339889815872, -0.6564570556444346, 0.4742489396354781, 0.8879606773334898, -0.6477585196839569, -0.7309497810668936, 1.7025953934976548,
0.1789174966941155, -0.4839093362740933, -0.8917713440107442, 1.4521776747175792, -0.1676974219641624, -0.500672037099228, -0.2947747621553442, 0.929636971325952, -0.7614935150071248, 1.6886298813725842, -0.8136217834373227, 1.2030997228178093, 1.382267485738376, 2.594387458306705, -0.7703668776292266, -0.7642584795112598, 1.3356598324609947, -0.5745269784148925, -2.212092904499444, -1.727975556661197,
-0.18543087256023608, -0.10167435635752538, 1.3480966068787303, 0.0142803272337873, -0.480077631815393, -0.32270216749876185, -1.7884435311074431, -0.5695640948971382, -0.22859087912027687, -0.08783386938029487, -0.18151955278624396, 0.2031493507095467, 0.06444304447669409, -0.4339138073294572, 0.236563959074551, -0.2937958719187449, 0.1611232843821199, -0.6574871644742827, 1.3141902865107886, 0.6093649138398077,
0.056674985715912514, -1.828714441504608, -0.46768482587669535, 0.6489735384886999, 0.5035677725398181, -0.887590772676158, -0.3222316759913631, -0.35172770495027483, -0.4329205472963193, -0.8449916868048998, 0.38282765028957993, 1.3171924061732359, 0.2956667124648384, 0.5390909497681301, -0.7591989862253667, -1.1520792974885883, -0.39344757869384944, 0.6192677330177175, -0.05578834574542242, 0.593015990282657,
0.9374465229256678, 0.647772562443425, 1.1071167572595217, -1.3015016617832518, 1.267300472456379, -0.5807673178649629, 0.9343468385348384, -0.28554893036513673, 0.4487573993840033, 0.6749018890520516, -1.20482985206765, 0.17291806504654686, -0.4124576407610529, -0.9203236505429044, -0.7461342369802754, -0.19694162321688435, 0.46556512963300906, 0.5198366004764268, -1.7222561645076129, -0.7078891617994071,
-1.1653209054214695, 1.5560964971092122, 0.3335520152642012, 0.008390825910327906, 0.11336719644324977, 0.3158913817073965, 0.4704483453862008, -0.5700583482495889, -1.276634964816531, -1.7880560933777756, -0.26514994709973827, 0.6194447367446946, -0.654762456435761, 1.0621929196158544, 0.4454719444987052, -0.9323145612076791, 1.3197357985874438, -0.8792938558447049, -0.2470423905508279, 0.5128954444799875,
-0.09202044992462606, -1.3082892596744382, -0.34428948138804927, 0.012422196356164879, 1.4626152292162142, 0.34678216997159833, 0.409462409138861, 0.32838364873801185, 1.8776849459782967, 1.6816627852133539, -0.24894138693568296, 0.7150105850753732, 0.22929306929129853, -0.21434910504054566, 1.3339497173912471, -1.2497042452057836, -0.04487255356399775, -0.6486304639082145, -0.8048044333264733, -1.8090170501469942,
1.481689285694336, -1.4772553200884717, -0.36792462539303805, -1.103508260812736, -0.2135236993720317, 0.40889179796540165, 1.993585196733386, 0.43879096427562897, -0.44512875171982147, -1.1780830020629518, -1.666001035275436, -0.2977294957665528, 1.7299614542270356, 0.9882265798853356, 2.2412430815464597, 0.5801434875813244, -0.739190619909163, -1.2663490594895201, 0.5735521649879137, 1.2105709455012765,
1.9112159951415644, -2.259218931706201, -0.563310876529377, -2.4119185903750493, 0.9662624485722368, -0.22788851242764951, 0.9198283887420099, 0.7855927065251492, -0.7459868094792474, 0.10543289218409971, 0.6401750224618271, -0.0077375118689326705, -0.11647036625911977, -0.4722391874001602, -0.2718425102733572, -0.8796746964457087, 0.6112903638894259, 0.5347851929096421, -0.4749419210717794, 1.0633720764557604,
-0.2590556665572949, 2.590182301241823, 1.4524061372706638, -0.8503733047335056, 0.5609357391481067, -1.5661825434426477, 0.8019667474525984, 1.2716795425969496, 0.20011166646917924, -0.7105405282282679, -0.5593129072748189, -1.2401371010520867, -0.7002520937780202, -2.236596391787529, -1.8130090502823886, -0.23990633860801777, 1.7428780878151378, 1.4661206538178901, -0.8678567353744017, 0.2957423562639015,
0.13935419069962593, 1.399598845123674, 0.059729544605779575, -0.9607778026198247, 0.18474907798482051, 1.0117193651915666, -0.9173540069396245, 0.8934765521365161, -0.665655291396948, -0.32955768273493324, 0.3062873812209283, 0.177342106982554, 0.3595522704599547, -1.5964209653110262, 0.6705899137346863, -1.1034642863469553, -1.0029562484065524, 0.10622956543479244, 0.4261871936541378, 0.7777501694354336,
-0.806235923997437, -0.8272801398172428, -1.2783440745845536, 0.5982979227669168, -0.28214494859284556, 1.101560367699546, -0.14008021262664466, -0.38717961692054237, 0.9962925044431369, -0.7391490127960976, -0.06294945881724459, 0.7283671247384875, -0.8458895297768138, 0.22808829204347086, 0.43685668023014523, 0.9204095286935638, -0.028241645704951284, 0.15951784765135396, 0.8068984900818966, -0.34387965576978663,
0.573828962760762, -0.13374515460012618, -0.5552788325377814, 0.5644705833909952, -0.7500532220469983, 0.33436674493862256, -0.8595435026628129, -0.38943898244735853, 0.6401502590131951, -1.2968645995363652, 0.5861622311675501, 0.2311759458689689, 0.10962292708600496, -0.26025023584932205, -0.5398478003611565, -1.0514168636922954, 1.2689172189127857, 1.7029909647408918, -0.02325431623491577, -0.3064675950620902,
-1.5816446841009473, 0.6874254059433739, 0.7755967316475798, 1.4119333324396597, 0.14198739135512406, 0.2927714469848192, -0.7239793888399496, 0.3506448783535265, -0.7568480706640158, -1.2158508387501554, 0.22197589131086445, -0.5621415304506887, -1.2381112050191665, -1.917208333033256, -0.3321665793941188, -0.5916951886991071, -1.244826507645294, -0.29767661008214463, 0.8590635852032509, -1.8579290298421591,
-1.0470546224962876, -2.540080936704841, 0.5458326769958273, 0.042222128206941614, 0.6080450228346708, 0.6542717901662132, -1.7292955132690793, -0.4793123354077725, 0.7341767020417185, -1.3322222208234826, -0.5076389542432337, 0.684399163420284, 0.3948487980667425, -1.7919279627150193, 1.582925890933478, 0.8341846456063038, 0.11776890377042544, 1.7471239793853526, 1.2269451783893597, 0.4235463733287474,
1.5908284320029056, -1.635191535538596, 0.04419903330064594, -1.264385360373252, 0.5370192519783876, 1.2368603501240771, -0.9241079150337286, -0.3428051342915208, 0.0882286441353256, -2.210824604513402, -1.9000343283757128, 0.4633735273417207, -0.32534396967175094, 0.026187836765356437, 0.18253601230609245, 0.8519745761039671, -0.028225375482784816, -0.5114197447067229, -1.2428743809444227, 0.2879711400745508,
1.2857130031108321, 0.5296743558975853, -0.8440551904275335, -1.3776032491368861, 1.8164028526343798, -1.1422045767986222, -1.8675179752970443, 0.6969635320800454, 0.9444010906414336, -1.28197913481747, -0.06259132322304235, -0.4518754825442558, 0.9183188639099813, -0.2916931407869574, -1.1464007469977915, -0.4475136941593681, 0.44385573868752803, 2.1606711638680762, -1.4813603018181851, -0.5647618024870872,
-1.474746204557383, -2.9067748098220485, 0.06132111635940877, -0.09663310829361334, -1.087053744976143, -1.774855117659402, 0.8130120568830074, -0.5179279676199186, -0.32549430825787784, -1.1995838271705979, 0.8587480835176114, -0.02095126282663596, 0.6677898019388228, -1.1891003375304232, -2.1125937754631305, -0.047765192715672734, 0.09812525010300294, -1.034992359189106, 1.0213451864081846, 1.0788796513160641,
-1.444469239557739, 0.28341828947950637, -2.4556013891966737, 1.7126080715698266, -0.5943068899412715, 1.0897594994215383, -0.16345461884651272, 0.7027032523865234, 2.2851158088542562, 0.5038100496225458, -0.16724173993999966, -0.6747457076421414, 0.42254684460738184, 1.277203836895222, -0.34438446183574595, 0.38956738377878264, -0.26884968654334923, -0.02148772950361766, 0.02044885235644607, -1.3873669828232345,
0.19995968746809226, -1.5826859815811556, -0.20385119370067947, 0.5724329589281247, -1.330307658319185, 0.7756101314358208, -0.4989071461473931, 0.5388161769427321, -0.9811085284266614, 2.335331094403556, -0.5588657325211347, -1.2850853695283377, 0.40092993245913744, -1.9675685522110529, 0.9378938542456674, -0.18645815013912917, -0.6828273180353106, -1.840122530632185, -1.2581798109361761, 0.2867275394896832,
],
}
# fmt: on
return data
@pytest.fixture
def numeric_high_card_dataset(test_backend, numeric_high_card_dict):
schemas = {
"pandas": {
"norm_0_1": "float64",
},
"postgresql": {
# "norm_0_1": "DOUBLE_PRECISION",
"norm_0_1": "NUMERIC",
},
"sqlite": {
"norm_0_1": "FLOAT",
},
"mysql": {
"norm_0_1": "DOUBLE",
},
"mssql": {
"norm_0_1": "FLOAT",
},
"spark": {
"norm_0_1": "FloatType",
},
}
return get_dataset(test_backend, numeric_high_card_dict, schemas=schemas)
@pytest.fixture
def non_numeric_high_card_dataset(test_backend):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
# fmt: off
data = {
"highcardnonnum": [
"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", "F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", "3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv",
"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX",
"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77",
"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta",
"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs",
"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR",
"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os",
"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", "mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", "nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K",
"CZVYSnQhHhoti8mQ66XbDuIjE5FMeIHb", "cPWAg2MJjh8fkRRU1B9aD8vWq3P8KTxJ", "4tehKwWiCDpuOmTPRYYqTqM7TvEa8Zi7", "ZvlAnCGiGfkKgQoNrhnnyrjmU7sLsUZz", "AaqMhdYukVdexTk6LlWvzXYXTp5upPuf", "ZSKmXUB35K14khHGyjYtuCHuI8yeM7yR", "F1cwKp4HsCN2s2kXQGR5RUa3WAcibCq2", "coaX8bSHoVZ8FP8SuQ57SFbrvpRHcibq", "3IzmbSJF525qtn7O4AvfKONnz7eFgnyU", "gLCtw7435gaR532PNFVCtvk14lNJpZXv",
"hNyjMYZkVlOKRjhg8cKymU5Bvnh0MK5R", "IqKC2auGTNehP8y24HzDQOdt9oysgFyx", "TePy034aBKlNeAmcJmKJ4p1yF7EUYEOg", "cIfDv6ieTAobe84P84InzDKrJrccmqbq", "m1979gfI6lVF9ijJA245bchYFd1EaMap", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "7wcR161jyKYhFLEZkhFqSXLwXW46I5x8", "IpmNsUFgbbVnL0ljJZOBHnTV0FKARwSn", "hsA4btHJg6Gq1jwOuOc3pl2UPB5QUwZg", "vwZyG0jGUys3HQdUiOocIbzhUdUugwKX",
"rTc9h94WjOXN5Wg40DyatFEFfp9mgWj6", "p1f20s14ZJGUTIBUNeBmJEkWKlwoyqjA", "VzgAIYNKHA0APN0oZtzMAfmbCzJenswy", "IO7BqR3iS136YMlLCEo6W3jKNOVJIlLG", "eTEyhiRuyEcTnHThi1W6yi1mxUjq8TEp", "4OHPKQgk3sPPYpKWcEWUtNZ0jv00UuPU", "ZJCstyyUvTR2gwSM6FLgkXYDwG54qo8u", "nGQsvDAzuL5Yc2XpqoG5P7RhpiTpJp8H", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "CP22IFHDX1maoSjTEdtBfrMHWQKACGDB",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hGwZQW7ao9HqNV2xAovuMBdyafNDE8q6", "OJmDHbqP1wzarsaSwCphsqvdy5SnTQMT", "JQbXIcgwUhttfPIGB7VGGfL2KiElabrO", "eTTNDggfPpRC22SEVNo9W0BPEWO4Cr57", "GW2JuUJmuCebia7RUiCNl2BTjukIzZWj", "oVFAvQEKmRTLBqdCuPoJNvzPvQ7UArWC", "zeMHFFKLr5j4DIFxRQ7jHWCMClrP3LmJ", "eECcArV5TZRftL6ZWaUDO6D2l3HiZj1Y", "xLNJXaCkOLrD6E0kgGaFOFwctNXjrd77",
"1f8KOCkOvehXYvN8PKv1Ch6dzOjRAr01", "uVF6HJgjVmoipK1sEpVOFJYuv2TXXsOG", "agIk8H2nFa0K27IFr0VM2RNp6saihYI3", "cAUnysbb8SBLSTr0H7cA1fmnpaL80e0N", "fM1IzD5USx4lMYi6bqPCEZjd2aP7G9vv", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "i65d8jqET5FsVw9t5BwAvBjkEJI6eUMj", "HbT1b7DQL7n7ZEt2FsKHIggycT1XIYd8", "938eC0iGMSqZNlqgDNG9YVE7t4izO2Ev", "PyZetp4izgE4ymPcUXyImF5mm7I6zbta",
"FaXA6YSUrvSnW7quAimLqQMNrU1Dxyjs", "PisVMvI9RsqQw21B7qYcKkRo5c8C2AKd", "eSQIxFqyYVf55UMzMEZrotPO74i3Sh03", "2b74DhJ6YFHrAkrjK4tvvKkYUKll44bR", "3svDRnrELyAsC69Phpnl2Os89856tFBJ", "ZcSGN9YYNHnHjUp0SktWoZI7JDmvRTTN", "m9eDkZ5oZEOFP3HUfaZEirecv2UhQ1B1", "wZTwJmMX5Q58DhDdmScdigTSyUUC04sO", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "KAuFgcmRKQPIIqGMAQQPfjyC1VXt40vs",
"0S4iueoqKNjvS55O57BdY3DbfwhIDwKc", "ywbQfOLkvXEUzZISZp1cpwCenrrNPjfF", "Mayxk8JkV3Z6aROtnsKyqwVK5exiJa8i", "pXqIRP5fQzbDtj1xFqgJey6dyFOJ1YiU", "6Ba6RSM56x4MIaJ2wChQ3trBVOw1SWGM", "puqzOpRJyNVAwH2vLjVCL3uuggxO5aoB", "jOI4E43wA3lYBWbV0nMxqix885Tye1Pf", "YgTTYpRDrxU1dMKZeVHYzYNovH2mWGB7", "24yYfUg1ATvfI1PW79ytsEqHWJHI69wQ", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ItvI4l02oAIZEd5cPtDf4OnyBazji0PL", "DW4oLNP49MNNENFoFf7jDTI04xdvCiWg", "vrOZrkAS9MCGOqzhCv4cmr5AGddVBShU", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "R74JT4EEhh3Xeu5tbx8bZFkXZRhx6HUn", "bd9yxS6b1QrKXuT4irY4kpjSyLmKZmx6", "UMdFQNSiJZtLK3jxBETZrINDKcRqRd0c", "He7xIY2BMNZ7vSO47KfKoYskVJeeedI7", "G8PqO0ADoKfDPsMT1K0uOrYf1AtwlTSR",
"hqfmEBNCA7qgntcQVqB7beBt0hB7eaxF", "mlYdlfei13P6JrT7ZbSZdsudhE24aPYr", "gUTUoH9LycaItbwLZkK9qf0xbRDgOMN4", "xw3AuIPyHYq59Qbo5QkQnECSqd2UCvLo", "kbfzRyRqGZ9WvmTdYKDjyds6EK4fYCyx", "7AOZ3o2egl6aU1zOrS8CVwXYZMI8NTPg", "Wkh43H7t95kRb9oOMjTSqC7163SrI4rU", "x586wCHsLsOaXl3F9cYeaROwdFc2pbU1", "oOd7GdoPn4qqfAeFj2Z3ddyFdmkuPznh", "suns0vGgaMzasYpwDEEof2Ktovy0o4os",
"of6W1csCTCBMBXli4a6cEmGZ9EFIOFRC", "mmTiWVje9SotwPgmRxrGrNeI9DssAaCj", "pIX0vhOzql5c6Z6NpLbzc8MvYiONyT54", "nvyCo3MkIK4tS6rkuL4Yw1RgGKwhm4c2", "prQGAOvQbB8fQIrp8xaLXmGwcxDcCnqt", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "mty9rQJBeTsBQ7ra8vWRbBaWulzhWRSG", "JL38Vw7yERPC4gBplBaixlbpDg8V7gC6", "MylTvGl5L1tzosEcgGCQPjIRN6bCUwtI", "hmr0LNyYObqe5sURs408IhRb50Lnek5K",
],
        # Built from highcardnonnum using the following (a runnable version of this
        # recipe appears as a sketch after this fixture):
# vals = pd.Series(data["highcardnonnum"])
# sample_vals = vals.sample(n=10, random_state=42)
# weights = np.random.RandomState(42).rand(10)
# weights = weights / np.sum(weights)
# new_vals = sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)
"medcardnonnum": [
"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP",
"NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ",
"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4",
"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4",
"oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "mS2AVcLFp6i36sX7yAUrdfM0g0RB2X4D",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk",
"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer",
"NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "NfX4KfEompMbbKloFq8NQpdXtk5PjaPe", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk",
"ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J",
"2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "NhTsracusfp5V6zVeWqLZnychDl7jjO4",
"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "NhTsracusfp5V6zVeWqLZnychDl7jjO4",
"T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "hW0kFZ6ijfciJWN4vvgcFa6MWv8cTeVk", "T7EUE54HUhyJ9Hnxv1pKY0Bmg42qiggP", "NhTsracusfp5V6zVeWqLZnychDl7jjO4", "k8B9KCXhaQb6Q82zFbAzOESAtDxK174J", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "2K8njWnvuq1u6tkzreNhxTEyO8PTeWer", "ajcLVizD2vwZlmmGKyXYki03SWn7fnt3", "oRnY5jDWFw2KZRYLh6ihFd021ggy4UxJ",
],
}
# fmt: on
schemas = {
"pandas": {
"highcardnonnum": "str",
"medcardnonnum": "str",
},
"postgresql": {
"highcardnonnum": "TEXT",
"medcardnonnum": "TEXT",
},
"sqlite": {
"highcardnonnum": "VARCHAR",
"medcardnonnum": "VARCHAR",
},
"mysql": {
"highcardnonnum": "TEXT",
"medcardnonnum": "TEXT",
},
"mssql": {
"highcardnonnum": "VARCHAR",
"medcardnonnum": "VARCHAR",
},
"spark": {
"highcardnonnum": "StringType",
"medcardnonnum": "StringType",
},
}
return get_dataset(test_backend, data, schemas=schemas)
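# Editor's sketch: a runnable version of the sampling recipe quoted in the comment inside
# `non_numeric_high_card_dataset` above, which derived "medcardnonnum" from
# "highcardnonnum" (10 values sampled with fixed seeds, then resampled to 200 rows with
# weights). The helper name is hypothetical and is not used by any test here.
def _rebuild_medcardnonnum(highcardnonnum_values):
    # local imports so the sketch is self-contained
    import numpy as np
    import pandas as pd

    vals = pd.Series(highcardnonnum_values)
    sample_vals = vals.sample(n=10, random_state=42)
    weights = np.random.RandomState(42).rand(10)
    weights = weights / np.sum(weights)
    # resample to 200 values with replacement, weighted by the fixed random weights
    return sample_vals.sample(n=200, weights=weights, replace=True, random_state=11)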
def dataset_sample_data(test_backend):
    # No infinities for mysql (MySQL FLOAT/DOUBLE columns cannot store +/-inf)
if test_backend == "mysql":
data = {
# "infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10/2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
else:
data = {
"infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
schemas = {
"pandas": {"infinities": "float64", "nulls": "float64", "naturals": "float64"},
"postgresql": {
"infinities": "DOUBLE_PRECISION",
"nulls": "DOUBLE_PRECISION",
"naturals": "NUMERIC",
},
"sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
"mysql": {"nulls": "DOUBLE", "naturals": "DOUBLE"},
"mssql": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
"spark": {
"infinities": "FloatType",
"nulls": "FloatType",
"naturals": "FloatType",
},
}
return data, schemas
@pytest.fixture
def dataset(test_backend):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
data, schemas = dataset_sample_data(test_backend)
return get_dataset(test_backend, data, schemas=schemas)
@pytest.fixture
def pandas_dataset():
test_backend = "PandasDataset"
data, schemas = dataset_sample_data(test_backend)
return get_dataset(test_backend, data, schemas=schemas)
@pytest.fixture
def sqlalchemy_dataset(test_backends):
"""Provide dataset fixtures that have special values and/or are otherwise useful outside
the standard json testing framework"""
if "postgresql" in test_backends:
backend = "postgresql"
elif "sqlite" in test_backends:
backend = "sqlite"
else:
return
data = {
"infinities": [-np.inf, -10, -np.pi, 0, np.pi, 10 / 2.2, np.inf],
"nulls": [np.nan, None, 0, 1.1, 2.2, 3.3, None],
"naturals": [1, 2, 3, 4, 5, 6, 7],
}
schemas = {
"postgresql": {
"infinities": "DOUBLE_PRECISION",
"nulls": "DOUBLE_PRECISION",
"naturals": "DOUBLE_PRECISION",
},
"sqlite": {"infinities": "FLOAT", "nulls": "FLOAT", "naturals": "FLOAT"},
}
return get_dataset(backend, data, schemas=schemas, profiler=None)
@pytest.fixture
def sqlitedb_engine(test_backend):
if test_backend == "sqlite":
try:
import sqlalchemy as sa
return sa.create_engine("sqlite://")
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
else:
pytest.skip("Skipping test designed for sqlite on non-sqlite backend.")
@pytest.fixture
def postgresql_engine(test_backend):
if test_backend == "postgresql":
try:
import sqlalchemy as sa
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
engine = sa.create_engine(
f"postgresql://postgres@{db_hostname}/test_ci"
).connect()
yield engine
engine.close()
except ImportError:
raise ValueError("SQL Database tests require sqlalchemy to be installed.")
else:
pytest.skip("Skipping test designed for postgresql on non-postgresql backend.")
@pytest.fixture(scope="function")
def empty_data_context(
tmp_path,
) -> DataContext:
project_path = tmp_path / "empty_data_context"
project_path.mkdir()
project_path = str(project_path)
context = ge.data_context.DataContext.create(project_path)
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
os.makedirs(asset_config_path, exist_ok=True)
assert context.list_datasources() == []
return context
@pytest.fixture
def titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled(
tmp_path_factory,
monkeypatch,
):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path: str = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data", "titanic")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"great_expectations_v013_no_datasource_stats_enabled.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(
os.path.join(
context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(
os.path.join(context_path, "..", "data", "titanic", "Titanic_19120414_1313")
),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
)
shutil.copy(
file_relative_path(__file__, os.path.join("test_sets", "Titanic.csv")),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_basic_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {data_path}
default_regex:
pattern: (.*)\\.csv
group_names:
- data_asset_name
my_special_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users:
base_directory: {data_path}
pattern: (.+)_(\\d+)_(\\d+)\\.csv
group_names:
- name
- timestamp
- size
my_other_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users: {{}}
my_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- pipeline_stage_name
- airflow_run_id
"""
# noinspection PyUnusedLocal
datasource: Datasource = context.test_yaml_config(
name="my_datasource", yaml_config=datasource_config, pretty_print=False
)
# noinspection PyProtectedMember
context._save_project_config()
return context
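# Editor's sketch (hypothetical usage, not part of the fixture): how a test might request a
# batch from the "my_datasource" datasource configured in the YAML above. BatchRequest and
# DataContext.get_batch_list are the standard GE v3 (Batch Request) APIs; the connector and
# asset names mirror the YAML config.
def _example_titanic_batch_request(context):
    from great_expectations.core.batch import BatchRequest

    batch_request = BatchRequest(
        datasource_name="my_datasource",
        data_connector_name="my_special_data_connector",
        data_asset_name="users",
    )
    # Files matching the "users" asset pattern (e.g. Titanic_19120414_1313.csv)
    # become batches of that asset.
    return context.get_batch_list(batch_request=batch_request)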
@pytest.fixture
def titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
tmp_path_factory,
monkeypatch,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
project_dir: str = context.root_directory
data_path: str = os.path.join(project_dir, "..", "data", "titanic")
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_additional_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {data_path}
default_regex:
pattern: (.*)\\.csv
group_names:
- data_asset_name
"""
# noinspection PyUnusedLocal
datasource: BaseDatasource = context.add_datasource(
"my_additional_datasource", **yaml.load(datasource_config)
)
return context
@pytest.fixture
def titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled(
sa,
spark_session,
titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
tmp_path_factory,
test_backends,
monkeypatch,
):
context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
project_dir: str = context.root_directory
data_path: str = os.path.join(project_dir, "..", "data", "titanic")
if (
any(
[
dbms in test_backends
for dbms in ["postgresql", "sqlite", "mysql", "mssql"]
]
)
and (sa is not None)
and is_library_loadable(library_name="sqlalchemy")
):
db_fixture_file_path: str = file_relative_path(
__file__,
os.path.join("test_sets", "titanic_sql_test_cases.db"),
)
db_file_path: str = os.path.join(
data_path,
"titanic_sql_test_cases.db",
)
shutil.copy(
db_fixture_file_path,
db_file_path,
)
datasource_config: str = f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: sqlite:///{db_file_path}
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
name: whole_table
"""
# noinspection PyUnusedLocal
datasource: BaseDatasource = context.add_datasource(
"my_sqlite_db_datasource", **yaml.load(datasource_config)
)
return context
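# Editor's note (sketch): when a SQL backend is requested, the fixture above registers a
# SQLite-backed datasource ("my_sqlite_db_datasource") whose inferred connector
# ("default_inferred_data_connector_name") exposes each table in titanic_sql_test_cases.db
# as a data asset, while the RuntimeDataConnector accepts ad-hoc batches keyed by
# "default_identifier_name". The asset/table naming described here follows the usual
# InferredAssetSqlDataConnector behavior and is an assumption, not asserted by these tests.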
@pytest.fixture
def deterministic_asset_dataconnector_context(
tmp_path_factory,
monkeypatch,
):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data", "titanic")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
"./test_fixtures/great_expectations_v013_no_datasource_stats_enabled.yml",
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(
os.path.join(
context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")),
)
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")),
)
context = ge.data_context.DataContext(context_path)
assert context.root_directory == context_path
datasource_config = f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_other_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {data_path}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
users: {{}}
"""
context.test_yaml_config(
name="my_datasource", yaml_config=datasource_config, pretty_print=False
)
# noinspection PyProtectedMember
context._save_project_config()
return context
@pytest.fixture
def titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
# add simple template config
simple_checkpoint_template_config: CheckpointConfig = CheckpointConfig(
name="my_simple_template_checkpoint",
config_version=1,
run_name_template="%Y-%M-foo-bar-template-$VAR",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
},
runtime_configuration={
"result_format": {
"result_format": "BASIC",
"partial_unexpected_count": 20,
}
},
)
simple_checkpoint_template_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_template_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_template_config_key,
value=simple_checkpoint_template_config,
)
# add nested template configs
nested_checkpoint_template_config_1: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_1",
config_version=1,
run_name_template="%Y-%M-foo-bar-template-$VAR",
expectation_suite_name="suite_from_template_1",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
],
evaluation_parameters={
"environment": "FOO",
"tolerance": "FOOBOO",
"aux_param_0": "FOOBARBOO",
"aux_param_1": "FOOBARBOO",
"template_1_key": 456,
},
runtime_configuration={
"result_format": "FOOBARBOO",
"partial_unexpected_count": "FOOBARBOO",
"template_1_key": 123,
},
validations=[
{
"batch_request": {
"datasource_name": "my_datasource_template_1",
"data_connector_name": "my_special_data_connector_template_1",
"data_asset_name": "users_from_template_1",
"data_connector_query": {"partition_index": -999},
}
}
],
)
nested_checkpoint_template_config_1_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_1.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_1_key,
value=nested_checkpoint_template_config_1,
)
nested_checkpoint_template_config_2: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_2",
config_version=1,
template_name="my_nested_checkpoint_template_1",
run_name_template="%Y-%M-foo-bar-template-$VAR-template-2",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "MyCustomStoreEvaluationParametersActionTemplate2",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
{
"name": "new_action_from_template_2",
"action": {"class_name": "Template2SpecialAction"},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
},
runtime_configuration={
"result_format": "BASIC",
"partial_unexpected_count": 20,
},
)
nested_checkpoint_template_config_2_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_2.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_2_key,
value=nested_checkpoint_template_config_2,
)
nested_checkpoint_template_config_3: CheckpointConfig = CheckpointConfig(
name="my_nested_checkpoint_template_3",
config_version=1,
template_name="my_nested_checkpoint_template_2",
run_name_template="%Y-%M-foo-bar-template-$VAR-template-3",
action_list=[
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "MyCustomStoreEvaluationParametersActionTemplate3",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
{
"name": "new_action_from_template_3",
"action": {"class_name": "Template3SpecialAction"},
},
],
evaluation_parameters={
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
"template_3_key": 123,
},
runtime_configuration={
"result_format": "BASIC",
"partial_unexpected_count": 20,
"template_3_key": "bloopy!",
},
)
nested_checkpoint_template_config_3_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=nested_checkpoint_template_config_3.name
)
)
context.checkpoint_store.set(
key=nested_checkpoint_template_config_3_key,
value=nested_checkpoint_template_config_3,
)
# add minimal SimpleCheckpoint
simple_checkpoint_config: CheckpointConfig = CheckpointConfig(
name="my_minimal_simple_checkpoint",
class_name="SimpleCheckpoint",
config_version=1,
)
simple_checkpoint_config_key: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=simple_checkpoint_config.name
)
context.checkpoint_store.set(
key=simple_checkpoint_config_key,
value=simple_checkpoint_config,
)
# add SimpleCheckpoint with slack webhook
simple_checkpoint_with_slack_webhook_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_slack",
class_name="SimpleCheckpoint",
config_version=1,
slack_webhook="https://hooks.slack.com/foo/bar",
)
simple_checkpoint_with_slack_webhook_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_slack_webhook_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_with_slack_webhook_config_key,
value=simple_checkpoint_with_slack_webhook_config,
)
# add SimpleCheckpoint with slack webhook and notify_with
simple_checkpoint_with_slack_webhook_and_notify_with_all_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_slack_and_notify_with_all",
class_name="SimpleCheckpoint",
config_version=1,
slack_webhook="https://hooks.slack.com/foo/bar",
notify_with="all",
)
simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config.name
)
context.checkpoint_store.set(
key=simple_checkpoint_with_slack_webhook_and_notify_with_all_config_key,
value=simple_checkpoint_with_slack_webhook_and_notify_with_all_config,
)
# add SimpleCheckpoint with site_names
simple_checkpoint_with_site_names_config: CheckpointConfig = CheckpointConfig(
name="my_simple_checkpoint_with_site_names",
class_name="SimpleCheckpoint",
config_version=1,
site_names=["local_site"],
)
simple_checkpoint_with_site_names_config_key: ConfigurationIdentifier = (
ConfigurationIdentifier(
configuration_key=simple_checkpoint_with_site_names_config.name
)
)
context.checkpoint_store.set(
key=simple_checkpoint_with_site_names_config_key,
value=simple_checkpoint_with_site_names_config,
)
# noinspection PyProtectedMember
context._save_project_config()
return context
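# Editor's note: the checkpoint configs registered above compose through `template_name`
# (my_nested_checkpoint_template_3 -> _2 -> _1), so a checkpoint instantiated from
# template_3 inherits the action_list, evaluation_parameters, and runtime_configuration of
# its ancestors and overrides them with its own values. A test would typically exercise a
# stored config via the context, e.g. (hypothetical call, not made by this fixture):
#     context.run_checkpoint(checkpoint_name="my_minimal_simple_checkpoint", ...)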
@pytest.fixture
def empty_context_with_checkpoint(empty_data_context):
context = empty_data_context
root_dir = empty_data_context.root_directory
fixture_name = "my_checkpoint.yml"
fixture_path = file_relative_path(
__file__, f"./data_context/fixtures/contexts/{fixture_name}"
)
checkpoints_file = os.path.join(root_dir, "checkpoints", fixture_name)
shutil.copy(fixture_path, checkpoints_file)
assert os.path.isfile(checkpoints_file)
return context
@pytest.fixture
def empty_data_context_stats_enabled(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS", raising=False)
project_path = str(tmp_path_factory.mktemp("empty_data_context"))
context = ge.data_context.DataContext.create(project_path)
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
os.makedirs(asset_config_path, exist_ok=True)
return context
@pytest.fixture
def titanic_data_context(
tmp_path_factory,
) -> DataContext:
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_no_data_docs_no_checkpoint_store(tmp_path_factory):
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic_pre_v013_no_data_docs.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_no_data_docs(tmp_path_factory):
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic_no_data_docs.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled_config_version_2(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_data_context_stats_enabled_config_version_3(tmp_path_factory, monkeypatch):
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
project_path = str(tmp_path_factory.mktemp("titanic_data_context"))
context_path = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
os.makedirs(os.path.join(context_path, "checkpoints"), exist_ok=True)
data_path = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
titanic_yml_path = file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_upgraded_titanic.yml"
)
shutil.copy(
titanic_yml_path, str(os.path.join(context_path, "great_expectations.yml"))
)
titanic_csv_path = file_relative_path(__file__, "./test_sets/Titanic.csv")
shutil.copy(
titanic_csv_path, str(os.path.join(context_path, "..", "data", "Titanic.csv"))
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def titanic_sqlite_db(sa):
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
titanic_db_path = file_relative_path(__file__, "./test_sets/titanic.db")
engine = create_engine(f"sqlite:///{titanic_db_path}")
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return engine
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
def titanic_sqlite_db_connection_string(sa):
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
titanic_db_path = file_relative_path(__file__, "./test_sets/titanic.db")
engine = create_engine(f"sqlite:////{titanic_db_path}")
assert engine.execute("select count(*) from titanic").fetchall()[0] == (1313,)
return f"sqlite:///{titanic_db_path}"
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
def titanic_expectation_suite(empty_data_context_stats_enabled):
data_context: DataContext = empty_data_context_stats_enabled
return ExpectationSuite(
expectation_suite_name="Titanic.warning",
meta={},
data_asset_type="Dataset",
expectations=[
ExpectationConfiguration(
expectation_type="expect_column_to_exist", kwargs={"column": "PClass"}
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "Name"},
),
ExpectationConfiguration(
expectation_type="expect_table_row_count_to_equal",
kwargs={"value": 1313},
),
],
data_context=data_context,
)
@pytest.fixture
def empty_sqlite_db(sa):
"""An empty in-memory sqlite db that always gets run."""
try:
import sqlalchemy as sa
from sqlalchemy import create_engine
engine = create_engine("sqlite://")
assert engine.execute("select 1").fetchall()[0] == (1,)
return engine
except ImportError:
raise ValueError("sqlite tests require sqlalchemy to be installed")
@pytest.fixture
@freeze_time("09/26/2019 13:42:41")
def site_builder_data_context_with_html_store_titanic_random(
tmp_path_factory, filesystem_csv_3
):
base_dir = str(tmp_path_factory.mktemp("project_dir"))
project_dir = os.path.join(base_dir, "project_path")
os.mkdir(project_dir)
os.makedirs(os.path.join(project_dir, "data"))
os.makedirs(os.path.join(project_dir, "data/titanic"))
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")),
)
os.makedirs(os.path.join(project_dir, "data", "random"))
shutil.copy(
os.path.join(filesystem_csv_3, "f1.csv"),
str(os.path.join(project_dir, "data", "random", "f1.csv")),
)
shutil.copy(
os.path.join(filesystem_csv_3, "f2.csv"),
str(os.path.join(project_dir, "data", "random", "f2.csv")),
)
ge.data_context.DataContext.create(project_dir)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/great_expectations_site_builder.yml"
),
str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")),
)
context = ge.data_context.DataContext(
context_root_dir=os.path.join(project_dir, "great_expectations")
)
context.add_datasource(
"titanic",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "titanic"),
}
},
)
context.add_datasource(
"random",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "random"),
}
},
)
context.profile_datasource("titanic")
context.profile_datasource("random")
context.profile_datasource(context.list_datasources()[0]["name"])
context._project_config.anonymous_usage_statistics = {
"enabled": True,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
return context
@pytest.fixture(scope="function")
@freeze_time("09/26/2019 13:42:41")
def site_builder_data_context_v013_with_html_store_titanic_random(
tmp_path, filesystem_csv_3
):
base_dir = tmp_path / "project_dir"
base_dir.mkdir()
base_dir = str(base_dir)
project_dir = os.path.join(base_dir, "project_path")
os.mkdir(project_dir)
os.makedirs(os.path.join(project_dir, "data"))
os.makedirs(os.path.join(project_dir, "data", "titanic"))
shutil.copy(
file_relative_path(__file__, "./test_sets/Titanic.csv"),
str(os.path.join(project_dir, "data", "titanic", "Titanic.csv")),
)
os.makedirs(os.path.join(project_dir, "data", "random"))
shutil.copy(
os.path.join(filesystem_csv_3, "f1.csv"),
str(os.path.join(project_dir, "data", "random", "f1.csv")),
)
shutil.copy(
os.path.join(filesystem_csv_3, "f2.csv"),
str(os.path.join(project_dir, "data", "random", "f2.csv")),
)
ge.data_context.DataContext.create(project_dir)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/great_expectations_v013_site_builder.yml"
),
str(os.path.join(project_dir, "great_expectations", "great_expectations.yml")),
)
context = ge.data_context.DataContext(
context_root_dir=os.path.join(project_dir, "great_expectations")
)
context.add_datasource(
"titanic",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "titanic"),
}
},
)
context.add_datasource(
"random",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join(project_dir, "data", "random"),
}
},
)
context.profile_datasource("titanic")
context.profile_datasource("random")
context.profile_datasource(context.list_datasources()[0]["name"])
context._project_config.anonymous_usage_statistics = {
"enabled": True,
"data_context_id": "f43d4897-385f-4366-82b0-1a8eda2bf79c",
}
return context
@pytest.fixture
def v20_project_directory(tmp_path_factory):
"""
GE config_version: 2 project for testing upgrade helper
"""
project_path = str(tmp_path_factory.mktemp("v20_project"))
context_root_dir = os.path.join(project_path, "great_expectations")
shutil.copytree(
file_relative_path(
__file__, "./test_fixtures/upgrade_helper/great_expectations_v20_project/"
),
context_root_dir,
)
shutil.copy(
file_relative_path(
__file__, "./test_fixtures/upgrade_helper/great_expectations_v2.yml"
),
os.path.join(context_root_dir, "great_expectations.yml"),
)
return context_root_dir
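# Note: unlike most context fixtures in this module, the fixture above returns
# the context root directory (a path string) rather than a DataContext, so
# upgrade-helper tests can point the upgrade machinery at a config_version: 2
# project that already exists on disk.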
@pytest.fixture
def data_context_parameterized_expectation_suite_no_checkpoint_store(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"expectation_suites/parameterized_expectation_suite_fixture.json",
),
os.path.join(asset_config_path, "my_dag_node", "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def data_context_parameterized_expectation_suite(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_v013_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"expectation_suites/parameterized_expectation_suite_fixture.json",
),
os.path.join(asset_config_path, "my_dag_node", "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture
def data_context_simple_expectation_suite(tmp_path_factory):
"""
This data_context is *manually* created to have the config we want, vs
created with DataContext.create()
"""
project_path = str(tmp_path_factory.mktemp("data_context"))
context_path = os.path.join(project_path, "great_expectations")
asset_config_path = os.path.join(context_path, "expectations")
fixture_dir = file_relative_path(__file__, "./test_fixtures")
os.makedirs(
os.path.join(asset_config_path, "my_dag_node"),
exist_ok=True,
)
shutil.copy(
os.path.join(fixture_dir, "great_expectations_basic.yml"),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
os.path.join(
fixture_dir,
"rendering_fixtures/expectations_suite_1.json",
),
os.path.join(asset_config_path, "default.json"),
)
os.makedirs(os.path.join(context_path, "plugins"), exist_ok=True)
shutil.copy(
os.path.join(fixture_dir, "custom_pandas_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_pandas_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sqlalchemy_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sqlalchemy_dataset.py")),
)
shutil.copy(
os.path.join(fixture_dir, "custom_sparkdf_dataset.py"),
str(os.path.join(context_path, "plugins", "custom_sparkdf_dataset.py")),
)
return ge.data_context.DataContext(context_path)
@pytest.fixture()
def filesystem_csv_data_context_with_validation_operators(
titanic_data_context_stats_enabled, filesystem_csv_2
):
titanic_data_context_stats_enabled.add_datasource(
"rad_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(filesystem_csv_2),
}
},
)
return titanic_data_context_stats_enabled
@pytest.fixture()
def filesystem_csv_data_context(
empty_data_context,
filesystem_csv_2,
) -> DataContext:
empty_data_context.add_datasource(
"rad_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": str(filesystem_csv_2),
}
},
)
return empty_data_context
@pytest.fixture
def filesystem_csv(tmp_path_factory):
base_dir = tmp_path_factory.mktemp("filesystem_csv")
base_dir = str(base_dir)
# Put a few files in the directory
with open(os.path.join(base_dir, "f1.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
with open(os.path.join(base_dir, "f2.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
os.makedirs(os.path.join(base_dir, "f3"), exist_ok=True)
with open(os.path.join(base_dir, "f3", "f3_20190101.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
with open(os.path.join(base_dir, "f3", "f3_20190102.csv"), "w") as outfile:
outfile.writelines(["a,b,c\n"])
return base_dir
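# Resulting layout (derived from the writes above):
#
#   <tmp>/filesystem_csv/
#       f1.csv
#       f2.csv
#       f3/
#           f3_20190101.csv
#           f3_20190102.csv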
@pytest.fixture(scope="function")
def filesystem_csv_2(tmp_path):
base_dir = tmp_path / "filesystem_csv_2"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset({"x": [1, 2, 3]})
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False)
assert os.path.isabs(base_dir)
assert os.path.isfile(os.path.join(base_dir, "f1.csv"))
return base_dir
@pytest.fixture(scope="function")
def filesystem_csv_3(tmp_path):
base_dir = tmp_path / "filesystem_csv_3"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset({"x": [1, 2, 3]})
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=False)
toy_dataset_2 = PandasDataset({"y": [1, 2, 3]})
toy_dataset_2.to_csv(os.path.join(base_dir, "f2.csv"), index=False)
return base_dir
@pytest.fixture(scope="function")
def filesystem_csv_4(tmp_path):
base_dir = tmp_path / "filesystem_csv_4"
base_dir.mkdir()
base_dir = str(base_dir)
# Put a file in the directory
toy_dataset = PandasDataset(
{
"x": [1, 2, 3],
"y": [1, 2, 3],
}
)
toy_dataset.to_csv(os.path.join(base_dir, "f1.csv"), index=None)
return base_dir
@pytest.fixture
def titanic_profiled_evrs_1():
with open(
file_relative_path(
__file__, "./render/fixtures/BasicDatasetProfiler_evrs.json"
),
) as infile:
return expectationSuiteValidationResultSchema.loads(infile.read())
# Various types of EVR (ExpectationValidationResult) fixtures
@pytest.fixture
def evr_failed():
return ExpectationValidationResult(
success=False,
result={
"element_count": 1313,
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_count": 3,
"unexpected_percent": 0.2284843869002285,
"unexpected_percent_nonmissing": 0.2284843869002285,
"partial_unexpected_list": [
"Daly, Mr Peter Denis ",
"Barber, Ms ",
"Geiger, Miss Emily ",
],
"partial_unexpected_index_list": [77, 289, 303],
"partial_unexpected_counts": [
{"value": "Barber, Ms ", "count": 1},
{"value": "Daly, Mr Peter Denis ", "count": 1},
{"value": "Geiger, Miss Emily ", "count": 1},
],
},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_column_values_to_not_match_regex",
kwargs={
"column": "Name",
"regex": "^\\s+|\\s+$",
"result_format": "SUMMARY",
},
),
)
@pytest.fixture
def evr_success():
return ExpectationValidationResult(
success=True,
result={"observed_value": 1313},
exception_info={
"raised_exception": False,
"exception_message": None,
"exception_traceback": None,
},
expectation_config=ExpectationConfiguration(
expectation_type="expect_table_row_count_to_be_between",
kwargs={"min_value": 0, "max_value": None, "result_format": "SUMMARY"},
),
)
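# The two EVR fixtures above cover both result shapes: evr_failed carries a
# fully populated SUMMARY-format result (element/missing/unexpected counts and
# partial unexpected lists) for a failed regex expectation on the Titanic
# "Name" column, while evr_success carries the minimal observed_value payload
# of a passing row-count expectation.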
@pytest.fixture
def sqlite_view_engine(test_backends):
# Create a small in-memory engine with two views, one of which is temporary
if "sqlite" in test_backends:
try:
import sqlalchemy as sa
sqlite_engine = sa.create_engine("sqlite://")
df = pd.DataFrame({"a": [1, 2, 3, 4, 5]})
df.to_sql(name="test_table", con=sqlite_engine, index=True)
sqlite_engine.execute(
"CREATE TEMP VIEW test_temp_view AS SELECT * FROM test_table where a < 4;"
)
sqlite_engine.execute(
"CREATE VIEW test_view AS SELECT * FROM test_table where a > 4;"
)
return sqlite_engine
except ImportError:
sa = None
else:
pytest.skip("SqlAlchemy tests disabled; not testing views")
@pytest.fixture
def expectation_suite_identifier():
return ExpectationSuiteIdentifier("my.expectation.suite.name")
@pytest.fixture
def basic_sqlalchemy_datasource(sqlitedb_engine):
return SqlAlchemyDatasource("basic_sqlalchemy_datasource", engine=sqlitedb_engine)
@pytest.fixture
def test_folder_connection_path_csv(tmp_path_factory):
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
path = str(tmp_path_factory.mktemp("test_folder_connection_path_csv"))
df1.to_csv(path_or_buf=os.path.join(path, "test.csv"), index=False)
return str(path)
@pytest.fixture
def test_db_connection_string(tmp_path_factory, test_backends):
if "sqlite" not in test_backends:
pytest.skip("skipping fixture because sqlite not selected")
df1 = pd.DataFrame({"col_1": [1, 2, 3, 4, 5], "col_2": ["a", "b", "c", "d", "e"]})
df2 = pd.DataFrame({"col_1": [0, 1, 2, 3, 4], "col_2": ["b", "c", "d", "e", "f"]})
try:
import sqlalchemy as sa
basepath = str(tmp_path_factory.mktemp("db_context"))
path = os.path.join(basepath, "test.db")
engine = sa.create_engine("sqlite:///" + str(path))
df1.to_sql(name="table_1", con=engine, index=True)
df2.to_sql(name="table_2", con=engine, index=True, schema="main")
# Return a connection string to this newly-created db
return "sqlite:///" + str(path)
except ImportError:
raise ValueError("SQL Database tests require sqlalchemy to be installed.")
@pytest.fixture
def test_df(tmp_path_factory):
def generate_ascending_list_of_datetimes(
k, start_date=datetime.date(2020, 1, 1), end_date=datetime.date(2020, 12, 31)
):
start_time = datetime.datetime(
start_date.year, start_date.month, start_date.day
)
        seconds_between_dates = int((end_date - start_date).total_seconds())
datetime_list = [
start_time
            + datetime.timedelta(seconds=random.randrange(seconds_between_dates))
for i in range(k)
]
datetime_list.sort()
return datetime_list
k = 120
random.seed(1)
timestamp_list = generate_ascending_list_of_datetimes(
k, end_date=datetime.date(2020, 1, 31)
)
date_list = [datetime.date(ts.year, ts.month, ts.day) for ts in timestamp_list]
batch_ids = [random.randint(0, 10) for i in range(k)]
batch_ids.sort()
session_ids = [random.randint(2, 60) for i in range(k)]
session_ids.sort()
session_ids = [i - random.randint(0, 2) for i in session_ids]
events_df = pd.DataFrame(
{
"id": range(k),
"batch_id": batch_ids,
"date": date_list,
"y": [d.year for d in date_list],
"m": [d.month for d in date_list],
"d": [d.day for d in date_list],
"timestamp": timestamp_list,
"session_ids": session_ids,
"event_type": [
random.choice(["start", "stop", "continue"]) for i in range(k)
],
"favorite_color": [
"#"
+ "".join([random.choice(list("0123456789ABCDEF")) for j in range(6)])
for i in range(k)
],
}
)
return events_df
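# The events_df returned above has k=120 rows and the following columns:
# id (0..119), batch_id (sorted random ints), date, y/m/d (date parts),
# timestamp (ascending datetimes within January 2020), session_ids (roughly
# ascending ints), event_type (start/stop/continue), and favorite_color
# (random "#RRGGBB" hex strings). random.seed(1) keeps the frame deterministic
# across runs.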
@pytest.fixture
def data_context_with_simple_sql_datasource_for_testing_get_batch(
sa, empty_data_context
):
context: DataContext = empty_data_context
db_file_path: str = file_relative_path(
__file__,
os.path.join("test_sets", "test_cases_for_sql_data_connector.db"),
)
datasource_config: str = f"""
class_name: SimpleSqlalchemyDatasource
connection_string: sqlite:///{db_file_path}
introspection:
whole_table: {{}}
daily:
splitter_method: _split_on_converted_datetime
splitter_kwargs:
column_name: date
date_format_string: "%Y-%m-%d"
weekly:
splitter_method: _split_on_converted_datetime
splitter_kwargs:
column_name: date
date_format_string: "%Y-%W"
by_id_dozens:
splitter_method: _split_on_divided_integer
splitter_kwargs:
column_name: id
divisor: 12
"""
try:
context.add_datasource("my_sqlite_db", **yaml.load(datasource_config))
except AttributeError:
pytest.skip("SQL Database tests require sqlalchemy to be installed.")
return context
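# Illustrative only: with the SimpleSqlalchemyDatasource configured above, the
# introspection keys ("whole_table", "daily", "weekly", "by_id_dozens") act as
# data connector names, so a test might request a batch roughly as sketched
# below; the table/asset name is hypothetical.
#
#   from great_expectations.core.batch import BatchRequest
#
#   batch_request = BatchRequest(
#       datasource_name="my_sqlite_db",
#       data_connector_name="whole_table",
#       data_asset_name="some_table_in_the_db",  # hypothetical table name
#   )
#   batch_list = context.get_batch_list(batch_request=batch_request)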
@pytest.fixture
def basic_datasource(tmp_path_factory):
base_directory: str = str(
tmp_path_factory.mktemp("basic_datasource_runtime_data_connector")
)
basic_datasource: Datasource = instantiate_class_from_config(
config=yaml.load(
f"""
class_name: Datasource
data_connectors:
test_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- pipeline_stage_name
- airflow_run_id
- custom_key_0
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
return basic_datasource
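# Illustrative only: the RuntimeDataConnector declared above accepts in-memory
# data keyed by its three batch_identifiers, roughly as sketched below; the
# DataFrame, asset name, and identifier values are hypothetical.
#
#   from great_expectations.core.batch import RuntimeBatchRequest
#
#   batch_request = RuntimeBatchRequest(
#       datasource_name="my_datasource",
#       data_connector_name="test_runtime_data_connector",
#       data_asset_name="my_data_asset",  # hypothetical asset name
#       runtime_parameters={"batch_data": df},
#       batch_identifiers={
#           "pipeline_stage_name": "validation",
#           "airflow_run_id": "run_1",
#           "custom_key_0": "custom_0",
#       },
#   )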
@pytest.fixture
def db_file():
return file_relative_path(
__file__,
os.path.join("test_sets", "test_cases_for_sql_data_connector.db"),
)
@pytest.fixture
def data_context_with_datasource_pandas_engine(empty_data_context):
context = empty_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
""",
)
context.add_datasource(
"my_datasource",
**config,
)
return context
@pytest.fixture
def data_context_with_datasource_spark_engine(empty_data_context, spark_session):
context = empty_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: SparkDFExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
""",
)
context.add_datasource(
"my_datasource",
**config,
)
return context
@pytest.fixture
def data_context_with_datasource_sqlalchemy_engine(empty_data_context, db_file):
context = empty_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: sqlite:///{db_file}
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
""",
)
context.add_datasource(
"my_datasource",
**config,
)
return context
@pytest.fixture
def data_context_with_query_store(
empty_data_context, titanic_sqlite_db_connection_string
):
context = empty_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: {titanic_sqlite_db_connection_string}
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
"""
)
context.add_datasource(
"my_datasource",
**config,
)
store_config = yaml.load(
f"""
class_name: SqlAlchemyQueryStore
credentials:
connection_string: {titanic_sqlite_db_connection_string}
queries:
col_count:
query: "SELECT COUNT(*) FROM titanic;"
return_type: "scalar"
dist_col_count:
query: "SELECT COUNT(DISTINCT PClass) FROM titanic;"
return_type: "scalar"
"""
)
context.add_store("my_query_store", store_config)
return context
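# Note: the SqlAlchemyQueryStore registered above exposes the named queries
# ("col_count", "dist_col_count") under the store name "my_query_store"
# (reachable via context.stores); both queries are declared with a scalar
# return_type.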
@pytest.fixture
def ge_cloud_base_url():
return "https://app.test.greatexpectations.io"
@pytest.fixture
def ge_cloud_organization_id():
return "bd20fead-2c31-4392-bcd1-f1e87ad5a79c"
@pytest.fixture
def ge_cloud_access_token():
return "6bb5b6f5c7794892a4ca168c65c2603e"
@pytest.fixture
def ge_cloud_config(ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token):
return GeCloudConfig(
base_url=ge_cloud_base_url,
organization_id=ge_cloud_organization_id,
access_token=ge_cloud_access_token,
)
@pytest.fixture(scope="function")
def empty_ge_cloud_data_context_config(
ge_cloud_base_url, ge_cloud_organization_id, ge_cloud_access_token
):
config_yaml_str = f"""
stores:
default_evaluation_parameter_store:
class_name: EvaluationParameterStore
default_expectations_store:
class_name: ExpectationsStore
store_backend:
class_name: GeCloudStoreBackend
ge_cloud_base_url: {ge_cloud_base_url}
ge_cloud_resource_type: expectation_suite
ge_cloud_credentials:
access_token: {ge_cloud_access_token}
organization_id: {ge_cloud_organization_id}
suppress_store_backend_id: True
default_validations_store:
class_name: ValidationsStore
store_backend:
class_name: GeCloudStoreBackend
ge_cloud_base_url: {ge_cloud_base_url}
ge_cloud_resource_type: suite_validation_result
ge_cloud_credentials:
access_token: {ge_cloud_access_token}
organization_id: {ge_cloud_organization_id}
suppress_store_backend_id: True
default_checkpoint_store:
class_name: CheckpointStore
store_backend:
class_name: GeCloudStoreBackend
ge_cloud_base_url: {ge_cloud_base_url}
ge_cloud_resource_type: contract
ge_cloud_credentials:
access_token: {ge_cloud_access_token}
organization_id: {ge_cloud_organization_id}
suppress_store_backend_id: True
evaluation_parameter_store_name: default_evaluation_parameter_store
expectations_store_name: default_expectations_store
validations_store_name: default_validations_store
checkpoint_store_name: default_checkpoint_store
"""
data_context_config_dict = yaml.load(config_yaml_str)
return DataContextConfig(**data_context_config_dict)
@pytest.fixture(scope="function")
def empty_cloud_data_context(
tmp_path, empty_ge_cloud_data_context_config, ge_cloud_config
) -> DataContext:
project_path = tmp_path / "empty_data_context"
project_path.mkdir()
project_path = str(project_path)
context = ge.data_context.BaseDataContext(
project_config=empty_ge_cloud_data_context_config,
context_root_dir=project_path,
ge_cloud_mode=True,
ge_cloud_config=ge_cloud_config,
)
assert context.list_datasources() == []
return context
@pytest.fixture
def cloud_data_context_with_datasource_pandas_engine(empty_cloud_data_context):
context = empty_cloud_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
""",
)
context.add_datasource(
"my_datasource",
**config,
)
return context
@pytest.fixture
def cloud_data_context_with_datasource_sqlalchemy_engine(
empty_cloud_data_context, db_file
):
context = empty_cloud_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine
connection_string: sqlite:///{db_file}
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
""",
)
context.add_datasource(
"my_datasource",
**config,
)
return context
@pytest.fixture(scope="function")
def profiler_name() -> str:
skip_if_python_below_minimum_version()
return "my_first_profiler"
@pytest.fixture(scope="function")
def profiler_store_name() -> str:
skip_if_python_below_minimum_version()
return "profiler_store"
@pytest.fixture(scope="function")
def profiler_config_with_placeholder_args(
profiler_name: str,
) -> RuleBasedProfilerConfig:
"""
    This fixture does not correspond to a practical profiler with rules whose constituent components perform meaningful
    computations; rather, it uses "placeholder"-style attribute values, which are adequate for configuration-level tests.
"""
skip_if_python_below_minimum_version()
return RuleBasedProfilerConfig(
name=profiler_name,
class_name="RuleBasedProfiler",
config_version=1.0,
variables={
"false_positive_threshold": 1.0e-2,
},
rules={
"rule_1": {
"domain_builder": {
"class_name": "TableDomainBuilder",
},
"parameter_builders": [
{
"class_name": "MetricMultiBatchParameterBuilder",
"name": "my_parameter",
"metric_name": "my_metric",
},
],
"expectation_configuration_builders": [
{
"class_name": "DefaultExpectationConfigurationBuilder",
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
"column_A": "$domain.domain_kwargs.column_A",
"column_B": "$domain.domain_kwargs.column_B",
"my_arg": "$parameter.my_parameter.value[0]",
"my_other_arg": "$parameter.my_parameter.value[1]",
"meta": {
"details": {
"my_parameter_estimator": "$parameter.my_parameter.details",
"note": "Important remarks about estimation algorithm.",
},
},
},
],
},
},
)
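# Note on the placeholder configuration above: values prefixed with "$"
# (e.g. "$parameter.my_parameter.value[0]" and "$domain.domain_kwargs.column_A")
# are runtime references that the Rule-Based Profiler resolves against its
# parameter builders and domain builder when the rule runs; they deliberately
# point at non-meaningful metrics here, which is why this fixture is suitable
# for configuration-level tests only.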
@pytest.fixture
def empty_profiler_store(profiler_store_name: str) -> ProfilerStore:
skip_if_python_below_minimum_version()
return ProfilerStore(profiler_store_name)
@pytest.fixture
def profiler_key(profiler_name: str) -> ConfigurationIdentifier:
skip_if_python_below_minimum_version()
return ConfigurationIdentifier(configuration_key=profiler_name)
@pytest.fixture
def ge_cloud_profiler_id() -> str:
skip_if_python_below_minimum_version()
return "my_ge_cloud_profiler_id"
@pytest.fixture
def ge_cloud_profiler_key(ge_cloud_profiler_id: str) -> GeCloudIdentifier:
skip_if_python_below_minimum_version()
return GeCloudIdentifier(resource_type="contract", ge_cloud_id=ge_cloud_profiler_id)
@pytest.fixture
def populated_profiler_store(
empty_profiler_store: ProfilerStore,
profiler_config_with_placeholder_args: RuleBasedProfilerConfig,
profiler_key: ConfigurationIdentifier,
) -> ProfilerStore:
skip_if_python_below_minimum_version()
    # Roundtrip through schema validation to remove any illegal fields and/or restore any missing fields.
serialized_config: dict = ruleBasedProfilerConfigSchema.dump(
profiler_config_with_placeholder_args
)
deserialized_config: dict = ruleBasedProfilerConfigSchema.load(serialized_config)
profiler_config: RuleBasedProfilerConfig = RuleBasedProfilerConfig(
**deserialized_config
)
profiler_store = empty_profiler_store
profiler_store.set(key=profiler_key, value=profiler_config)
return profiler_store
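# Illustrative only: tests consuming the populated store above can retrieve the
# round-tripped configuration by key, roughly as sketched below.
#
#   retrieved_config = populated_profiler_store.get(key=profiler_key)
#   assert retrieved_config.name == profiler_name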
@pytest.fixture
@freeze_time("09/26/2019 13:42:41")
def alice_columnar_table_single_batch(empty_data_context):
"""
About the "Alice" User Workflow Fixture
Alice has a single table of columnar data called user_events (DataAsset) that she wants to check periodically as new
data is added.
- She knows what some of the columns mean, but not all - and there are MANY of them (only a subset currently shown
in examples and fixtures).
- She has organized other tables similarly so that for example column name suffixes indicate which are for user
ids (_id) and which timestamps are for versioning (_ts).
    She wants to use a configurable profiler to generate a description (ExpectationSuite) of the table so that she can:
1. use it to validate the user_events table periodically and set up alerts for when things change
2. have a place to add her domain knowledge of the data (that can also be validated against new data)
3. if all goes well, generalize some of the Profiler to use on her other tables
Alice configures her Profiler using the YAML configurations and data file locations captured in this fixture.
"""
skip_if_python_below_minimum_version()
verbose_profiler_config_file_path: str = file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"rule_based_profiler",
"alice_user_workflow_verbose_profiler_config.yml",
),
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
my_rule_for_user_ids_expectation_configurations: List[ExpectationConfiguration] = [
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs={
"column": "user_id",
"type_": "INTEGER",
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_between",
kwargs={
"min_value": 1000,
"max_value": 999999999999,
"column": "user_id",
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={
"column": "user_id",
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_less_than",
meta={},
kwargs={"value": 9488404, "column": "user_id"},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_greater_than",
meta={},
kwargs={"value": 397433, "column": "user_id"},
),
]
event_ts_column_data: Dict[str, str] = {
"column_name": "event_ts",
"observed_max_time_str": "2004-10-19 11:05:20",
"observed_strftime_format": "%Y-%m-%d %H:%M:%S",
}
my_rule_for_timestamps_column_data: List[Dict[str, str]] = [
event_ts_column_data,
{
"column_name": "server_ts",
"observed_max_time_str": "2004-10-19 11:05:20",
},
{
"column_name": "device_ts",
"observed_max_time_str": "2004-10-19 11:05:22",
},
]
my_rule_for_timestamps_expectation_configurations: List[
ExpectationConfiguration
] = []
column_data: Dict[str, str]
for column_data in my_rule_for_timestamps_column_data:
my_rule_for_timestamps_expectation_configurations.extend(
[
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs={
"column": column_data["column_name"],
"type_": "TIMESTAMP",
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_increasing",
kwargs={
"column": column_data["column_name"],
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_dateutil_parseable",
kwargs={
"column": column_data["column_name"],
},
meta={},
),
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={
"column": column_data["column_name"],
"min_value": "2004-10-19T10:23:54", # From variables
"max_value": "2004-10-19T10:23:54", # From variables
},
meta={
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms no events occur before tracking started **2004-10-19 10:23:54**"
],
}
},
),
ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={
"column": column_data["column_name"],
"min_value": "2004-10-19T10:23:54", # From variables
"max_value": event_ts_column_data[
"observed_max_time_str"
], # Pin to event_ts column
},
meta={
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that the event_ts contains the latest timestamp of all domains"
],
}
},
),
ExpectationConfiguration(
expectation_type="expect_column_values_to_match_strftime_format",
kwargs={
"column": column_data["column_name"],
"strftime_format": {
"value": event_ts_column_data[
"observed_strftime_format"
], # Pin to event_ts column
"details": {
"success_ratio": 1.0,
"candidate_strings": sorted(DEFAULT_CANDIDATE_STRINGS),
},
},
},
meta={
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in _ts are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder"
],
}
},
),
]
)
my_rule_for_one_cardinality_expectation_configurations: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={
"column": "user_agent",
"value_set": [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
],
},
meta={},
),
]
expectation_configurations: List[ExpectationConfiguration] = []
expectation_configurations.extend(my_rule_for_user_ids_expectation_configurations)
expectation_configurations.extend(my_rule_for_timestamps_expectation_configurations)
expectation_configurations.extend(
my_rule_for_one_cardinality_expectation_configurations
)
expectation_suite_name: str = "alice_columnar_table_single_batch"
expected_expectation_suite: ExpectationSuite = ExpectationSuite(
expectation_suite_name=expectation_suite_name, data_context=empty_data_context
)
expectation_configuration: ExpectationConfiguration
for expectation_configuration in expectation_configurations:
        # NOTE (Will, 2021-12-08): although add_expectation() would normally be
        # called on the ExpectationSuite instance, this runs inside a fixture, so
        # we call the private _add_expectation() to keep it from emitting a
        # usage_event.
expected_expectation_suite._add_expectation(
expectation_configuration=expectation_configuration, send_usage_event=False
)
# NOTE that this expectation suite should fail when validated on the data in "sample_data_relative_path"
# because the device_ts is ahead of the event_ts for the latest event
sample_data_relative_path: str = "alice_columnar_table_single_batch_data.csv"
profiler_config: dict = yaml.load(verbose_profiler_config)
    # Roundtrip through schema validation to remove any illegal fields and/or restore any missing fields.
deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)
serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)
# `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`
# so we need to manually remove those values if we wish to use the **kwargs instantiation pattern
serialized_config.pop("class_name")
serialized_config.pop("module_name")
expected_expectation_suite.add_citation(
comment="Suite created by Rule-Based Profiler with the configuration included.",
profiler_config=serialized_config,
)
return {
"profiler_config": verbose_profiler_config,
"expected_expectation_suite_name": expectation_suite_name,
"expected_expectation_suite": expected_expectation_suite,
"sample_data_relative_path": sample_data_relative_path,
}
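# The dictionary returned above bundles everything an "Alice" workflow test
# needs: the raw YAML profiler configuration, the expected ExpectationSuite
# (and its name) that running the profiler should reproduce, and the relative
# path of the sample CSV that the companion context fixture copies into place.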
@pytest.fixture
def alice_columnar_table_single_batch_context(
monkeypatch,
empty_data_context_stats_enabled,
alice_columnar_table_single_batch,
):
skip_if_python_below_minimum_version()
context: DataContext = empty_data_context_stats_enabled
# We need our salt to be consistent between runs to ensure idempotent anonymized values
context._usage_statistics_handler = UsageStatisticsHandler(
context, "00000000-0000-0000-0000-00000000a004", "N/A"
)
monkeypatch.chdir(context.root_directory)
data_relative_path: str = "../data"
data_path: str = os.path.join(context.root_directory, data_relative_path)
os.makedirs(data_path, exist_ok=True)
# Copy data
filename: str = alice_columnar_table_single_batch["sample_data_relative_path"]
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_sets",
f"{filename}",
),
),
str(os.path.join(data_path, filename)),
)
data_connector_base_directory: str = "./"
monkeypatch.setenv("base_directory", data_connector_base_directory)
monkeypatch.setenv("data_fixtures_root", data_relative_path)
datasource_name: str = "alice_columnar_table_single_batch_datasource"
data_connector_name: str = "alice_columnar_table_single_batch_data_connector"
data_asset_name: str = "alice_columnar_table_single_batch_data_asset"
datasource_config: str = rf"""
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
{data_connector_name}:
class_name: ConfiguredAssetFilesystemDataConnector
assets:
{data_asset_name}:
module_name: great_expectations.datasource.data_connector.asset
group_names:
- filename
pattern: (.*)\.csv
reader_options:
delimiter: ","
class_name: Asset
base_directory: ${{data_fixtures_root}}
glob_directive: "*.csv"
base_directory: ${{base_directory}}
module_name: great_expectations.datasource.data_connector
"""
context.add_datasource(name=datasource_name, **yaml.load(datasource_config))
assert context.list_datasources() == [
{
"class_name": "Datasource",
"data_connectors": {
data_connector_name: {
"assets": {
data_asset_name: {
"base_directory": data_relative_path,
"class_name": "Asset",
"glob_directive": "*.csv",
"group_names": ["filename"],
"module_name": "great_expectations.datasource.data_connector.asset",
"pattern": "(.*)\\.csv",
}
},
"base_directory": data_connector_base_directory,
"class_name": "ConfiguredAssetFilesystemDataConnector",
"module_name": "great_expectations.datasource.data_connector",
},
},
"execution_engine": {
"class_name": "PandasExecutionEngine",
"module_name": "great_expectations.execution_engine",
},
"module_name": "great_expectations.datasource",
"name": datasource_name,
}
]
return context
@pytest.fixture
@freeze_time("09/26/2019 13:42:41")
def bobby_columnar_table_multi_batch(empty_data_context):
"""
About the "Bobby" User Workflow Fixture
Bobby has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as new
data is added.
    - He knows that some of the columns are of the accounting/financial/account type.
    He wants to use a configurable profiler to generate a description (ExpectationSuite) of the tables so that he can:
1. monitor the average number of rows in the tables
    2. use it to validate the min/max boundaries of all columns of the accounting/financial/account type and set up
alerts for when things change
3. have a place to add his domain knowledge of the data (that can also be validated against new data)
4. if all goes well, generalize some of the Profiler to use on his other tables
Bobby uses a crude, highly inaccurate deterministic parametric estimator -- for illustrative purposes.
Bobby configures his Profiler using the YAML configurations and data file locations captured in this fixture.
"""
skip_if_python_below_minimum_version()
verbose_profiler_config_file_path: str = file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"rule_based_profiler",
"bobby_user_workflow_verbose_profiler_config.yml",
),
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
my_row_count_range_rule_expectation_configurations_oneshot_estimator: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
**{
"kwargs": {"min_value": 7505, "max_value": 8495},
"expectation_type": "expect_table_row_count_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "table.row_count",
"domain_kwargs": {},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
},
},
},
),
]
my_column_ranges_rule_expectation_configurations_oneshot_estimator: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "VendorID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "VendorID",
"min_value": 1,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "VendorID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "VendorID",
"min_value": 4,
"max_value": 4,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "passenger_count"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "passenger_count",
"min_value": 0,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "passenger_count"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "passenger_count",
"min_value": 6,
"max_value": 6,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "trip_distance"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "trip_distance",
"min_value": 0.0,
"max_value": 0.0,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "trip_distance"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "trip_distance",
"min_value": 37.62,
"max_value": 57.85,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "RatecodeID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "RatecodeID",
"min_value": 1,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "RatecodeID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "RatecodeID",
"min_value": 5,
"max_value": 6,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "PULocationID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "PULocationID",
"min_value": 1,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "PULocationID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "PULocationID",
"min_value": 265,
"max_value": 265,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "DOLocationID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "DOLocationID",
"min_value": 1,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "DOLocationID"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "DOLocationID",
"min_value": 265,
"max_value": 265,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "payment_type"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "payment_type",
"min_value": 1,
"max_value": 1,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "payment_type"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "payment_type",
"min_value": 4,
"max_value": 4,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "fare_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "fare_amount",
"min_value": -51.84,
"max_value": -21.16,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "fare_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "fare_amount",
"min_value": 228.94,
"max_value": 2990.05,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "extra"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "extra",
"min_value": -36.53,
"max_value": -1.18,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "extra"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "extra",
"min_value": 4.51,
"max_value": 6.99,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "mta_tax"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "mta_tax",
"min_value": -0.5,
"max_value": -0.5,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "mta_tax"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "mta_tax",
"min_value": 0.69,
"max_value": 37.32,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "tip_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "tip_amount",
"min_value": 0.0,
"max_value": 0.0,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "tip_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "tip_amount",
"min_value": 46.84,
"max_value": 74.86,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "tolls_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "tolls_amount",
"min_value": 0.0,
"max_value": 0.0,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "tolls_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "tolls_amount",
"min_value": 26.4,
"max_value": 497.67,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "improvement_surcharge"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "improvement_surcharge",
"min_value": -0.3,
"max_value": -0.3,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "improvement_surcharge"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "improvement_surcharge",
"min_value": 0.3,
"max_value": 0.3,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "total_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "total_amount",
"min_value": -52.66,
"max_value": -24.44,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "total_amount"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "total_amount",
"min_value": 550.18,
"max_value": 2992.47,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.min",
"domain_kwargs": {"column": "congestion_surcharge"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "congestion_surcharge",
"min_value": -2.49,
"max_value": -0.01,
"mostly": 1.0,
},
},
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"meta": {
"profiler_details": {
"metric_configuration": {
"metric_name": "column.max",
"domain_kwargs": {"column": "congestion_surcharge"},
"metric_value_kwargs": None,
"metric_dependencies": None,
},
"num_batches": 2,
}
},
"kwargs": {
"column": "congestion_surcharge",
"min_value": 0.01,
"max_value": 2.49,
"mostly": 1.0,
},
},
),
]
my_column_timestamps_rule_expectation_configurations_oneshot_estimator: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_strftime_format",
"kwargs": {
"column": "pickup_datetime",
"strftime_format": "%Y-%m-%d %H:%M:%S",
},
"meta": {
"details": {
"success_ratio": 1.0,
"candidate_strings": [
"%Y-%m-%d %H:%M:%S",
"%y-%m-%d",
],
},
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder"
],
},
},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_strftime_format",
"kwargs": {
"column": "dropoff_datetime",
"strftime_format": "%Y-%m-%d %H:%M:%S",
},
"meta": {
"details": {
"success_ratio": 1.0,
"candidate_strings": [
"%Y-%m-%d %H:%M:%S",
"%y-%m-%d",
],
},
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in _datetime are of the format detected by parameter builder SimpleDateFormatStringParameterBuilder"
],
},
},
}
),
]
my_column_regex_rule_expectation_configurations_oneshot_estimator: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_regex",
"kwargs": {
"column": "VendorID",
"regex": {
"value": [r"^\d{1}$"],
"details": {
"evaluated_regexes": {r"^\d{1}$": 1.0, r"^\d{2}$": 0.0},
"threshold": 0.9,
},
},
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder"
],
},
},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_regex",
"meta": {"notes": {"format": "markdown", "content": None}},
"kwargs": {
"column": "RatecodeID",
"regex": {
"value": [r"^\d{1}$"],
"details": {
"evaluated_regexes": {r"^\d{1}$": 1.0, r"^\d{2}$": 0.0},
"threshold": 0.9,
},
},
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder"
],
}
},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_regex",
"meta": {"notes": {"format": "markdown", "content": None}},
"kwargs": {
"column": "PULocationID",
"regex": {
"value": [r"^\d{1}$"],
"details": {
"evaluated_regexes": {r"^\d{1}$": 1.0, r"^\d{2}$": 0.0},
"threshold": 0.9,
},
},
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder"
],
}
},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_match_regex",
"meta": {"notes": {"format": "markdown", "content": None}},
"kwargs": {
"column": "DOLocationID",
"regex": {
"value": [r"^\d{1}$"],
"details": {
"evaluated_regexes": {r"^\d{1}$": 1.0, r"^\d{2}$": 0.0},
"threshold": 0.9,
},
},
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that fields ending in ID are of the format detected by parameter builder RegexPatternStringParameterBuilder"
],
}
},
}
),
]
my_rule_for_very_few_cardinality_expectation_configurations: List[
ExpectationConfiguration
] = [
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {
"column": "VendorID",
"value_set": [1, 2, 4],
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_in_set",
"kwargs": {
"column": "passenger_count",
"value_set": [0, 1, 2, 3, 4, 5, 6],
},
"meta": {},
}
),
]
expectation_configurations: List[ExpectationConfiguration] = []
expectation_configurations.extend(
my_row_count_range_rule_expectation_configurations_oneshot_estimator
)
expectation_configurations.extend(
my_column_ranges_rule_expectation_configurations_oneshot_estimator
)
expectation_configurations.extend(
my_column_timestamps_rule_expectation_configurations_oneshot_estimator
)
expectation_configurations.extend(
my_column_regex_rule_expectation_configurations_oneshot_estimator
)
expectation_configurations.extend(
my_rule_for_very_few_cardinality_expectation_configurations
)
expectation_suite_name_oneshot_estimator: str = (
"bobby_columnar_table_multi_batch_oneshot_estimator"
)
expected_expectation_suite_oneshot_estimator: ExpectationSuite = ExpectationSuite(
expectation_suite_name=expectation_suite_name_oneshot_estimator,
data_context=empty_data_context,
)
expectation_configuration: ExpectationConfiguration
for expectation_configuration in expectation_configurations:
# NOTE Will 20211208: although add_expectation() is called on an ExpectationSuite instance, it is being
# called within a fixture, so we prevent it from sending a usage_event by calling the private method.
expected_expectation_suite_oneshot_estimator._add_expectation(
expectation_configuration=expectation_configuration, send_usage_event=False
)
profiler_config: dict = yaml.load(verbose_profiler_config)
# Roundtrip through schema validation to remove any illegal fields and/or restore any missing fields.
deserialized_config: dict = ruleBasedProfilerConfigSchema.load(profiler_config)
serialized_config: dict = ruleBasedProfilerConfigSchema.dump(deserialized_config)
# `class_name`/`module_name` are generally consumed through `instantiate_class_from_config`
# so we need to manually remove those values if we wish to use the **kwargs instantiation pattern
serialized_config.pop("class_name")
serialized_config.pop("module_name")
expected_expectation_suite_oneshot_estimator.add_citation(
comment="Suite created by Rule-Based Profiler with the configuration included.",
profiler_config=serialized_config,
)
return {
"profiler_config": verbose_profiler_config,
"test_configuration_oneshot_estimator": {
"expectation_suite_name": expectation_suite_name_oneshot_estimator,
"expected_expectation_suite": expected_expectation_suite_oneshot_estimator,
},
}
@pytest.fixture
def bobby_columnar_table_multi_batch_deterministic_data_context(
tmp_path_factory,
monkeypatch,
) -> DataContext:
skip_if_python_below_minimum_version()
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
monkeypatch.setattr(AnonymizedUsageStatisticsConfig, "enabled", True)
project_path: str = str(tmp_path_factory.mktemp("taxi_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"integration",
"fixtures",
"yellow_tripdata_pandas_fixture",
"great_expectations",
"great_expectations.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_sets",
"taxi_yellow_tripdata_samples",
"random_subsamples",
"yellow_tripdata_7500_lines_sample_2019-01.csv",
),
),
str(
os.path.join(
context_path, "..", "data", "yellow_tripdata_sample_2019-01.csv"
)
),
)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_sets",
"taxi_yellow_tripdata_samples",
"random_subsamples",
"yellow_tripdata_8500_lines_sample_2019-02.csv",
),
),
str(
os.path.join(
context_path, "..", "data", "yellow_tripdata_sample_2019-02.csv"
)
),
)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"test_sets",
"taxi_yellow_tripdata_samples",
"random_subsamples",
"yellow_tripdata_9000_lines_sample_2019-03.csv",
),
),
str(
os.path.join(
context_path, "..", "data", "yellow_tripdata_sample_2019-03.csv"
)
),
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
return context
@pytest.fixture
def bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000():
"""
About the "Bobster" User Workflow Fixture
Bobster has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as
new data is added.
- He knows what some of the columns are of the accounting/financial/account type, but he is currently interested in
the average table size (in terms of the number of rows in a table).
He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:
1. monitor the average number of rows in the tables
2. have a place to add his domain knowledge of the data (that can also be validated against new data)
3. if all goes well, generalize some of the Profiler to use on his other tables
Bobster uses a custom implementation of the "bootstrap" non-parametric (i.e., data-driven) statistical estimator.
Bobster configures his Profiler using the YAML configurations and data file locations captured in this fixture.
"""
skip_if_python_below_minimum_version()
verbose_profiler_config_file_path: str = file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"rule_based_profiler",
"bobster_user_workflow_verbose_profiler_config.yml",
),
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
expectation_suite_name_bootstrap_estimator: str = (
"bobby_columnar_table_multi_batch_bootstrap_estimator"
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value: int = (
5000
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value: float = (
1.0e3
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds: float = (
3.00
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value: int = round(
float(
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value
)
- (
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds
* my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value
)
)
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value: int = round(
float(
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value
)
+ (
my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_num_stds
* my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_std_value
)
)
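# With the values above (mean=5000, std=1000.0, num_stds=3.0), min_value evaluates to 2000 and max_value to 8000.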
return {
"profiler_config": verbose_profiler_config,
"test_configuration_bootstrap_estimator": {
"expectation_suite_name": expectation_suite_name_bootstrap_estimator,
"expect_table_row_count_to_be_between_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_mean_value,
"expect_table_row_count_to_be_between_min_value_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_min_value_mean_value,
"expect_table_row_count_to_be_between_max_value_mean_value": my_row_count_range_rule_expect_table_row_count_to_be_between_expectation_max_value_mean_value,
},
}
@pytest.fixture
def bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context(
tmp_path_factory,
monkeypatch,
) -> DataContext:
"""
This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows
of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows.
"""
skip_if_python_below_minimum_version()
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
monkeypatch.setattr(AnonymizedUsageStatisticsConfig, "enabled", True)
project_path: str = str(tmp_path_factory.mktemp("taxi_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"integration",
"fixtures",
"yellow_tripdata_pandas_fixture",
"great_expectations",
"great_expectations.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
base_directory: str = file_relative_path(
__file__,
os.path.join(
"test_sets",
"taxi_yellow_tripdata_samples",
),
)
file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(
base_directory_path=base_directory, glob_directive="*.csv"
)
file_name_list = sorted(file_name_list)
num_files: int = len(file_name_list)
rnd_num_sample: np.float64
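# Draw one target row count per file from a normal distribution (mean 5000 rows, std 1000 rows), matching the docstring above.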
output_file_lenths: List[int] = [
round(rnd_num_sample)
for rnd_num_sample in np.random.normal(loc=5.0e3, scale=1.0e3, size=num_files)
]
idx: int
file_name: str
output_file_name_length_map: Dict[str, int] = {
file_name_list[idx]: output_file_lenths[idx]
for idx, file_name in enumerate(file_name_list)
}
csv_source_path: str
df: pd.DataFrame
for file_name in file_name_list:
csv_source_path = os.path.join(base_directory, file_name)
df = pd.read_csv(filepath_or_buffer=csv_source_path)
df = df.sample(
n=output_file_name_length_map[file_name], replace=False, random_state=1
)
# noinspection PyTypeChecker
df.to_csv(
path_or_buf=os.path.join(context_path, "..", "data", file_name), index=False
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
return context
@pytest.fixture
def quentin_columnar_table_multi_batch():
"""
About the "Quentin" User Workflow Fixture
Quentin has multiple tables of columnar data called user_events (DataAsset) that he wants to check periodically as
new data is added.
- He knows what some of the columns are of the accounting/financial/account type, but he is currently interested
in the range of quantiles of columns capturing financial quantities (column names ending on "_amount" suffix).
He wants to use a configurable profiler to generate a description (ExpectationSuite) about tables so that he can:
1. monitor the range of quantiles of columns capturing financial quantities in the tables
2. have a place to add his domain knowledge of the data (that can also be validated against new data)
3. if all goes well, generalize some of the Profiler to use on his other tables
Quentin uses a custom implementation of the "bootstrap" non-parametric (i.e., data-driven) statistical estimator.
Quentin configures his Profiler using the YAML configurations and data file locations captured in this fixture.
"""
skip_if_python_below_minimum_version()
verbose_profiler_config_file_path: str = file_relative_path(
__file__,
os.path.join(
"test_fixtures",
"rule_based_profiler",
"quentin_user_workflow_verbose_profiler_config.yml",
),
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
expectation_suite_name_bootstrap_estimator: str = (
"quentin_columnar_table_multi_batch"
)
return {
"profiler_config": verbose_profiler_config,
"test_configuration": {
"expectation_suite_name": expectation_suite_name_bootstrap_estimator,
"expect_column_quantile_values_to_be_between_quantile_ranges_by_column": {
"tolls_amount": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"fare_amount": [
[5.842754275, 6.5],
[8.675167517, 9.5750000000],
[13.344354435, 15.650000000],
],
"tip_amount": [
[0.0, 0.0],
[0.81269502, 1.97259736],
[2.346049055, 2.993680968],
],
"total_amount": [
[8.2740033, 11.422183043],
[11.2955000, 14.875000000],
[16.746263451, 21.327684643],
],
},
},
}
@pytest.fixture
def quentin_columnar_table_multi_batch_data_context(
tmp_path_factory,
monkeypatch,
) -> DataContext:
"""
This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows
of each batch being equal to the original number per log file (10,000 rows).
"""
skip_if_python_below_minimum_version()
# Re-enable GE_USAGE_STATS
monkeypatch.delenv("GE_USAGE_STATS")
monkeypatch.setattr(AnonymizedUsageStatisticsConfig, "enabled", True)
project_path: str = str(tmp_path_factory.mktemp("taxi_data_context"))
context_path: str = os.path.join(project_path, "great_expectations")
os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True)
data_path: str = os.path.join(context_path, "..", "data")
os.makedirs(os.path.join(data_path), exist_ok=True)
shutil.copy(
file_relative_path(
__file__,
os.path.join(
"integration",
"fixtures",
"yellow_tripdata_pandas_fixture",
"great_expectations",
"great_expectations.yml",
),
),
str(os.path.join(context_path, "great_expectations.yml")),
)
base_directory: str = file_relative_path(
__file__,
os.path.join(
"test_sets",
"taxi_yellow_tripdata_samples",
),
)
file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list(
base_directory_path=base_directory, glob_directive="*.csv"
)
file_name_list = sorted(file_name_list)
file_name: str
csv_source_path: str
for file_name in file_name_list:
csv_source_path = os.path.join(base_directory, file_name)
shutil.copy(
csv_source_path,
os.path.join(context_path, "..", "data", file_name),
)
context: DataContext = DataContext(context_root_dir=context_path)
assert context.root_directory == context_path
return context
# TODO: AJB 20210525 This fixture is not yet used but may be helpful to generate batches for unit tests of multibatch
# workflows. It should probably be extended to add different column types / data.
@pytest.fixture
def multibatch_generic_csv_generator():
"""
Construct a series of csv files with many data types for use in multibatch testing
"""
skip_if_python_below_minimum_version()
def _multibatch_generic_csv_generator(
data_path: str,
start_date: Optional[datetime.datetime] = None,
num_event_batches: Optional[int] = 20,
num_events_per_batch: Optional[int] = 5,
) -> List[str]:
if start_date is None:
start_date = datetime.datetime(2000, 1, 1)
file_list = []
category_strings = {
0: "category0",
1: "category1",
2: "category2",
3: "category3",
4: "category4",
5: "category5",
6: "category6",
}
for batch_num in range(num_event_batches):
# generate a dataframe with multiple column types
batch_start_date = start_date + datetime.timedelta(
days=(batch_num * num_events_per_batch)
)
# TODO: AJB 20210416 Add more column types
df = pd.DataFrame(
{
"event_date": [
(batch_start_date + datetime.timedelta(days=i)).strftime(
"%Y-%m-%d"
)
for i in range(num_events_per_batch)
],
"batch_num": [batch_num + 1 for _ in range(num_events_per_batch)],
"string_cardinality_3": [
category_strings[i % 3] for i in range(num_events_per_batch)
],
}
)
filename = f"csv_batch_{batch_num + 1:03}_of_{num_event_batches:03}.csv"
file_list.append(filename)
# noinspection PyTypeChecker
df.to_csv(
os.path.join(data_path, filename),
index_label="intra_batch_index",
)
return file_list
return _multibatch_generic_csv_generator
@pytest.fixture
def multibatch_generic_csv_generator_context(monkeypatch, empty_data_context):
skip_if_python_below_minimum_version()
context: DataContext = empty_data_context
monkeypatch.chdir(context.root_directory)
data_relative_path = "../data"
data_path = os.path.join(context.root_directory, data_relative_path)
os.makedirs(data_path, exist_ok=True)
data_connector_base_directory = "./"
monkeypatch.setenv("base_directory", data_connector_base_directory)
monkeypatch.setenv("data_fixtures_root", data_relative_path)
datasource_name = "generic_csv_generator"
data_connector_name = "daily_data_connector"
asset_name = "daily_data_asset"
datasource_config = rf"""
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
{data_connector_name}:
class_name: ConfiguredAssetFilesystemDataConnector
assets:
{asset_name}:
module_name: great_expectations.datasource.data_connector.asset
group_names:
- batch_num
- total_batches
pattern: csv_batch_(\d.+)_of_(\d.+)\.csv
reader_options:
delimiter: ","
class_name: Asset
base_directory: $data_fixtures_root
glob_directive: "*.csv"
base_directory: $base_directory
module_name: great_expectations.datasource.data_connector
"""
context.add_datasource(name=datasource_name, **yaml.load(datasource_config))
assert context.list_datasources() == [
{
"class_name": "Datasource",
"data_connectors": {
data_connector_name: {
"assets": {
asset_name: {
"base_directory": data_relative_path,
"class_name": "Asset",
"glob_directive": "*.csv",
"group_names": ["batch_num", "total_batches"],
"module_name": "great_expectations.datasource.data_connector.asset",
"pattern": "csv_batch_(\\d.+)_of_(\\d.+)\\.csv",
}
},
"base_directory": data_connector_base_directory,
"class_name": "ConfiguredAssetFilesystemDataConnector",
"module_name": "great_expectations.datasource.data_connector",
}
},
"execution_engine": {
"class_name": "PandasExecutionEngine",
"module_name": "great_expectations.execution_engine",
},
"module_name": "great_expectations.datasource",
"name": "generic_csv_generator",
}
]
return context
|
[] |
[] |
[
"GE_TEST_LOCAL_DB_HOSTNAME"
] |
[]
|
["GE_TEST_LOCAL_DB_HOSTNAME"]
|
python
| 1 | 0 | |
yt_dlp/compat.py
|
# coding: utf-8
import asyncio
import base64
import ctypes
import getpass
import html
import html.parser
import http
import http.client
import http.cookiejar
import http.cookies
import http.server
import itertools
import optparse
import os
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import tokenize
import urllib
import xml.etree.ElementTree as etree
from subprocess import DEVNULL
# HTMLParseError has been deprecated in Python 3.3 and removed in
# Python 3.5. Introducing a dummy exception for Python >=3.5 for compatible
# and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
pass
# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
# will not work since ctypes.WINFUNCTYPE does not exist on UNIX machines
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
return ctypes.WINFUNCTYPE(*args, **kwargs)
class _TreeBuilder(etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
compat_os_name = os._name if os.name == 'java' else os.name
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
from shlex import quote as compat_shlex_quote
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
def compat_setenv(key, value, env=os.environ):
env[key] = value
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
def compat_realpath(path):
while os.path.islink(path):
path = os.path.abspath(os.readlink(path))
return path
else:
compat_realpath = os.path.realpath
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
try:
compat_Pattern = re.Pattern
except AttributeError:
compat_Pattern = type(re.compile(''))
try:
compat_Match = re.Match
except AttributeError:
compat_Match = type(re.compile('').match(''))
try:
compat_asyncio_run = asyncio.run # >= 3.7
except AttributeError:
def compat_asyncio_run(coro):
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(coro)
asyncio.run = compat_asyncio_run
# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
# See https://github.com/yt-dlp/yt-dlp/issues/792
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ:
_userhome = os.environ['HOME']
def compat_expanduser(path):
if not path.startswith('~'):
return path
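# Locate the end of the '~' or '~user' prefix; the first backslash is treated as a path separator too.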
i = path.replace('\\', '/', 1).find('/') # ~user
if i < 0:
i = len(path)
userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
try:
from Cryptodome.Cipher import AES as compat_pycrypto_AES
except ImportError:
try:
from Crypto.Cipher import AES as compat_pycrypto_AES
except ImportError:
compat_pycrypto_AES = None
WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
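# False means VT mode has not been enabled yet on Windows; None means not applicable (non-Windows).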
def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
if compat_os_name != 'nt':
return
global WINDOWS_VT_MODE
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
try:
subprocess.Popen('', shell=True, startupinfo=startupinfo)
WINDOWS_VT_MODE = True
except Exception:
pass
# Deprecated
compat_basestring = str
compat_chr = chr
compat_input = input
compat_integer_types = (int, )
compat_kwargs = lambda kwargs: kwargs
compat_numeric_types = (int, float, complex)
compat_str = str
compat_xpath = lambda xpath: xpath
compat_zip = zip
compat_HTMLParser = html.parser.HTMLParser
compat_HTTPError = urllib.error.HTTPError
compat_Struct = struct.Struct
compat_b64decode = base64.b64decode
compat_cookiejar = http.cookiejar
compat_cookiejar_Cookie = compat_cookiejar.Cookie
compat_cookies = http.cookies
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_etree_Element = etree.Element
compat_etree_register_namespace = etree.register_namespace
compat_get_terminal_size = shutil.get_terminal_size
compat_getenv = os.getenv
compat_getpass = getpass.getpass
compat_html_entities = html.entities
compat_html_entities_html5 = compat_html_entities.html5
compat_http_client = http.client
compat_http_server = http.server
compat_itertools_count = itertools.count
compat_parse_qs = urllib.parse.parse_qs
compat_shlex_split = shlex.split
compat_socket_create_connection = socket.create_connection
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
compat_subprocess_get_DEVNULL = lambda: DEVNULL
compat_tokenize_tokenize = tokenize.tokenize
compat_urllib_error = urllib.error
compat_urllib_parse = urllib.parse
compat_urllib_parse_quote = urllib.parse.quote
compat_urllib_parse_quote_plus = urllib.parse.quote_plus
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse
compat_urllib_parse_urlunparse = urllib.parse.urlunparse
compat_urllib_request = urllib.request
compat_urllib_request_DataHandler = urllib.request.DataHandler
compat_urllib_response = urllib.response
compat_urlparse = urllib.parse
compat_urlretrieve = urllib.request.urlretrieve
compat_xml_parse_error = etree.ParseError
# Set public objects
__all__ = [
'WINDOWS_VT_MODE',
'compat_HTMLParseError',
'compat_HTMLParser',
'compat_HTTPError',
'compat_Match',
'compat_Pattern',
'compat_Struct',
'compat_asyncio_run',
'compat_b64decode',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
'compat_cookiejar_Cookie',
'compat_cookies',
'compat_cookies_SimpleCookie',
'compat_ctypes_WINFUNCTYPE',
'compat_etree_Element',
'compat_etree_fromstring',
'compat_etree_register_namespace',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_http_client',
'compat_http_server',
'compat_input',
'compat_integer_types',
'compat_itertools_count',
'compat_kwargs',
'compat_numeric_types',
'compat_ord',
'compat_os_name',
'compat_parse_qs',
'compat_print',
'compat_pycrypto_AES',
'compat_realpath',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_quote',
'compat_urllib_parse_quote_plus',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_parse_urlunparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'compat_xpath',
'compat_zip',
'windows_enable_vt_mode',
'workaround_optparse_bug9161',
]
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
e2etest/pkg/metallb/config.go
|
// SPDX-License-Identifier:Apache-2.0
package metallb
import (
"fmt"
"os"
"go.universe.tf/metallb/e2etest/pkg/config"
frrcontainer "go.universe.tf/metallb/e2etest/pkg/frr/container"
"go.universe.tf/metallb/pkg/ipfamily"
)
const (
defaultNameSpace = "metallb-system"
)
var Namespace = defaultNameSpace
func init() {
if ns := os.Getenv("OO_INSTALL_NAMESPACE"); len(ns) != 0 {
Namespace = ns
}
}
// PeersForContainers returns the metallb config peers related to the given containers.
func PeersForContainers(containers []*frrcontainer.FRR, ipFamily ipfamily.Family) []config.Peer {
var peers []config.Peer
for i, c := range containers {
addresses := c.AddressesForFamily(ipFamily)
holdTime := ""
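// Give each container after the first a distinct, increasing hold time (180s, 360s, ...).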
if i > 0 {
holdTime = fmt.Sprintf("%ds", i*180)
}
ebgpMultihop := false
if c.NeighborConfig.MultiHop && c.NeighborConfig.ASN != c.RouterConfig.ASN {
ebgpMultihop = true
}
for _, address := range addresses {
peers = append(peers, config.Peer{
Addr: address,
ASN: c.RouterConfig.ASN,
MyASN: c.NeighborConfig.ASN,
Port: c.RouterConfig.BGPPort,
Password: c.RouterConfig.Password,
HoldTime: holdTime,
EBGPMultiHop: ebgpMultihop,
})
}
}
return peers
}
// WithBFD sets the given bfd profile to the peers.
func WithBFD(peers []config.Peer, bfdProfile string) []config.Peer {
for i := range peers {
peers[i].BFDProfile = bfdProfile
}
return peers
}
// WithRouterID sets the given routerID to the peers.
func WithRouterID(peers []config.Peer, routerID string) []config.Peer {
for i := range peers {
peers[i].RouterID = routerID
}
return peers
}
|
[
"\"OO_INSTALL_NAMESPACE\""
] |
[] |
[
"OO_INSTALL_NAMESPACE"
] |
[]
|
["OO_INSTALL_NAMESPACE"]
|
go
| 1 | 0 | |
integration/helpers/login.go
|
package helpers
import (
"fmt"
"os"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gexec"
)
// SetAPI sets the API endpoint to the value of the CF_INT_API environment variable,
// or "https://api.bosh-lite.com" if not set. If the SKIP_SSL_VALIDATION environment
// variable is set, it will use the '--skip-ssl-validation' flag. It returns the API
// URL and a boolean indicating if SSL validation was skipped.
func SetAPI() (string, bool) {
apiURL := GetAPI()
skipSSLValidation := skipSSLValidation()
if skipSSLValidation {
Eventually(CF("api", apiURL, "--skip-ssl-validation")).Should(Exit(0))
} else {
Eventually(CF("api", apiURL)).Should(Exit(0))
}
return apiURL, skipSSLValidation
}
// UnsetAPI unsets the currently set API endpoint for the CLI.
func UnsetAPI() {
Eventually(CF("api", "--unset")).Should(Exit(0))
}
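// skipSSLValidation returns false only when the SKIP_SSL_VALIDATION environment variable is explicitly set to a false value; otherwise it defaults to true.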
func skipSSLValidation() bool {
if skip, err := strconv.ParseBool(os.Getenv("SKIP_SSL_VALIDATION")); err == nil && !skip {
return false
}
return true
}
// GetAPI gets the value of the CF_INT_API environment variable, if set, and prefixes
// it with "https://" if the value doesn't already start with "http". If the variable
// is not set, returns "https://api.bosh-lite.com".
func GetAPI() string {
apiURL := os.Getenv("CF_INT_API")
if apiURL == "" {
return "https://api.bosh-lite.com"
}
if !strings.HasPrefix(apiURL, "http") {
apiURL = fmt.Sprintf("https://%s", apiURL)
}
return apiURL
}
// LoginAs logs in to the CLI with 'cf auth' and the given username and password,
// retrying up to 3 times on failures.
func LoginAs(username, password string) {
env := map[string]string{
"CF_USERNAME": username,
"CF_PASSWORD": password,
}
for i := 0; i < 3; i++ {
session := CFWithEnv(env, "auth")
Eventually(session).Should(Exit())
if session.ExitCode() == 0 {
break
}
time.Sleep(3 * time.Second)
}
}
// LoginCF logs in to the CLI using the username and password from the CF_INT_USERNAME
// and CF_INT_PASSWORD environment variables, respectively, defaulting to "admin" for
// each if either is not set.
func LoginCF() string {
if ClientCredentialsTestMode() {
return LoginCFWithClientCredentials()
}
username, password := GetCredentials()
LoginAs(username, password)
return username
}
// LoginCFWithClientCredentials logs in to the CLI using client credentials from the CF_INT_CLIENT_ID and
// CF_INT_CLIENT_SECRET environment variables and returns the client ID. If these environment variables
// are not set, it skips the current test.
func LoginCFWithClientCredentials() string {
username, password := SkipIfClientCredentialsNotSet()
env := map[string]string{
"CF_USERNAME": username,
"CF_PASSWORD": password,
}
Eventually(CFWithEnv(env, "auth", "--client-credentials")).Should(Exit(0))
return username
}
// GetCredentials returns the credentials for the user or client to authenticate with Cloud Foundry.
func GetCredentials() (string, string) {
if ClientCredentialsTestMode() {
return SkipIfClientCredentialsNotSet()
}
username := os.Getenv("CF_INT_USERNAME")
if username == "" {
username = "admin"
}
password := os.Getenv("CF_INT_PASSWORD")
if password == "" {
password = "admin"
}
return username, password
}
// SkipIfOIDCCredentialsNotSet returns the username and password for the
// OIDC origin, or skips the test if those values are not set.
func SkipIfOIDCCredentialsNotSet() (string, string) {
oidcUsername := os.Getenv("CF_INT_OIDC_USERNAME")
oidcPassword := os.Getenv("CF_INT_OIDC_PASSWORD")
if oidcUsername == "" || oidcPassword == "" {
Skip("CF_INT_OIDC_USERNAME or CF_INT_OIDC_PASSWORD is not set")
}
return oidcUsername, oidcPassword
}
// LogoutCF logs out of the CLI.
func LogoutCF() {
Eventually(CF("logout")).Should(Exit(0))
}
// TargetOrgAndSpace targets the given org and space with 'cf target'.
func TargetOrgAndSpace(org string, space string) {
Eventually(CF("target", "-o", org, "-s", space)).Should(Exit(0))
}
// TargetOrg targets the given org with 'cf target'.
func TargetOrg(org string) {
Eventually(CF("target", "-o", org)).Should(Exit(0))
}
// ClearTarget logs out and logs back in to the CLI using LogoutCF and LoginCF.
func ClearTarget() {
LogoutCF()
LoginCF()
}
// SetupCF logs in to the CLI with LoginCF, creates the given org and space, and targets that
// org and space.
func SetupCF(org string, space string) {
LoginCF()
CreateOrgAndSpace(org, space)
TargetOrgAndSpace(org, space)
}
// SetupCFWithOrgOnly logs in to the CLI with LoginCF, creates the given org, and targets it.
func SetupCFWithOrgOnly(org string) {
LoginCF()
CreateOrg(org)
TargetOrg(org)
}
// SetupCFWithGeneratedOrgAndSpaceNames logs in to the CLI with LoginCF, creates the org and
// space with generated names, and targets that org and space. Returns the generated org so
// that it can be deleted easily in the cleanup step of the test.
func SetupCFWithGeneratedOrgAndSpaceNames() string {
org := NewOrgName()
space := NewSpaceName()
SetupCF(org, space)
return org
}
// SwitchToNoRole logs out of the CLI and logs back in as a newly-created user without a role.
func SwitchToNoRole() string {
username, password := CreateUser()
LogoutCF()
LoginAs(username, password)
return username
}
// SwitchToOrgRole logs out of the CLI and logs back in as a newly-created user with the given
// org role in the given org.
func SwitchToOrgRole(org, role string) string {
username, password := CreateUserInOrgRole(org, role)
LogoutCF()
LoginAs(username, password)
return username
}
// SwitchToSpaceRole logs out of the CLI and logs back in as a newly-created user with the given
// space role in the given space and org.
func SwitchToSpaceRole(org, space, role string) string {
username, password := CreateUserInSpaceRole(org, space, role)
LogoutCF()
LoginAs(username, password)
return username
}
|
[
"\"SKIP_SSL_VALIDATION\"",
"\"CF_INT_API\"",
"\"CF_INT_USERNAME\"",
"\"CF_INT_PASSWORD\"",
"\"CF_INT_OIDC_USERNAME\"",
"\"CF_INT_OIDC_PASSWORD\""
] |
[] |
[
"CF_INT_USERNAME",
"CF_INT_OIDC_USERNAME",
"CF_INT_PASSWORD",
"CF_INT_OIDC_PASSWORD",
"CF_INT_API",
"SKIP_SSL_VALIDATION"
] |
[]
|
["CF_INT_USERNAME", "CF_INT_OIDC_USERNAME", "CF_INT_PASSWORD", "CF_INT_OIDC_PASSWORD", "CF_INT_API", "SKIP_SSL_VALIDATION"]
|
go
| 6 | 0 | |
tests/profiling/collector/test_stack.py
|
import os
import threading
import time
import timeit
import pytest
from ddtrace.vendor import six
from ddtrace.vendor.six.moves import _thread
from ddtrace.profiling import recorder
from ddtrace.profiling.collector import stack
from . import test_collector
TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False)
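# When DD_PROFILE_TEST_GEVENT is set, gevent monkey-patching is assumed to be active and the gevent-incompatible tests below are skipped.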
try:
from gevent import monkey
except ImportError:
sleep = time.sleep
else:
sleep = monkey.get_original("time", "sleep")
def func1():
return func2()
def func2():
return func3()
def func3():
return func4()
def func4():
return func5()
def func5():
return sleep(1)
def test_collect_truncate():
r = recorder.Recorder()
c = stack.StackCollector(r, nframes=5)
c.start()
func1()
while not r.events[stack.StackSampleEvent]:
pass
c.stop()
e = r.events[stack.StackSampleEvent][0]
assert e.nframes > c.nframes
assert len(e.frames) == c.nframes
def test_collect_once():
r = recorder.Recorder()
s = stack.StackCollector(r)
# Start the collector as we need to have a start time set
with s:
all_events = s.collect()
assert len(all_events) == 2
e = all_events[0][0]
assert e.thread_id > 0
# Thread name is None with gevent
assert isinstance(e.thread_name, (str, type(None)))
assert len(e.frames) >= 1
assert e.frames[0][0].endswith(".py")
assert e.frames[0][1] > 0
assert isinstance(e.frames[0][2], str)
def test_max_time_usage():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=0)
def test_max_time_usage_over():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=200)
def test_ignore_profiler():
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
events = r.events[stack.StackSampleEvent]
assert thread_id not in {e.thread_id for e in events}
def test_no_ignore_profiler():
r, c, thread_id = test_collector._test_collector_collect(
stack.StackCollector, stack.StackSampleEvent, ignore_profiler=False
)
events = r.events[stack.StackSampleEvent]
assert thread_id in {e.thread_id for e in events}
def test_collect():
test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
def test_restart():
test_collector._test_restart(stack.StackCollector)
def test_repr():
test_collector._test_repr(
stack.StackCollector,
"StackCollector(status=<ServiceStatus.STOPPED: 'stopped'>, "
"recorder=Recorder(max_size=49152), max_time_usage_pct=2.0, "
"nframes=64, ignore_profiler=True)",
)
def test_new_interval():
r = recorder.Recorder()
c = stack.StackCollector(r)
new_interval = c._compute_new_interval(1000000)
assert new_interval == 0.049
new_interval = c._compute_new_interval(2000000)
assert new_interval == 0.098
c = stack.StackCollector(r, max_time_usage_pct=10)
new_interval = c._compute_new_interval(200000)
assert new_interval == 0.01
new_interval = c._compute_new_interval(1)
assert new_interval == c.MIN_INTERVAL_TIME
# Function to use for stress-test of polling
MAX_FN_NUM = 30
FN_TEMPLATE = """def _f{num}():
return _f{nump1}()"""
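# Build a chain _f0 -> _f1 -> ... -> _f30; the last function raises and then sleeps inside the handler, so deep stacks and a live exception are present while the collector samples.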
for num in range(MAX_FN_NUM):
if six.PY3:
exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
else:
exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
exec(
"""def _f{MAX_FN_NUM}():
try:
raise ValueError('test')
except Exception:
sleep(2)""".format(
MAX_FN_NUM=MAX_FN_NUM
)
)
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_stress_threads():
NB_THREADS = 20
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
s = stack.StackCollector(recorder=recorder.Recorder())
number = 10000
with s:
exectime = timeit.timeit(s.collect, number=number)
print("%.3f ms per call" % (1000.0 * exectime / number))
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_exception_collection_threads():
NB_THREADS = 5
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackExceptionSampleEvent)
exception_events = r.events[stack.StackExceptionSampleEvent]
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
assert e.thread_id in {t.ident for t in threads}
assert isinstance(e.thread_name, str)
assert e.frames == [("<string>", 5, "_f30")]
assert e.nframes == 1
assert e.exc_type == ValueError
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
def test_exception_collection():
r = recorder.Recorder()
c = stack.StackCollector(r)
c.start()
try:
raise ValueError("hello")
except Exception:
sleep(1)
c.stop()
exception_events = r.events[stack.StackExceptionSampleEvent]
assert len(exception_events) >= 1
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
if not TESTING_GEVENT:
assert e.thread_id == _thread.get_ident()
assert e.thread_name == "MainThread"
assert e.frames == [(__file__, 207, "test_exception_collection")]
assert e.nframes == 1
assert e.exc_type == ValueError
|
[] |
[] |
[
"DD_PROFILE_TEST_GEVENT"
] |
[]
|
["DD_PROFILE_TEST_GEVENT"]
|
python
| 1 | 0 | |
sobject/metadata.go
|
package sobject
import (
"encoding/json"
"fmt"
"net/http"
"github.com/aheber/go-sfdc"
"github.com/aheber/go-sfdc/session"
)
// MetadataValue is the response from the SObject metadata API.
type MetadataValue struct {
ObjectDescribe ObjectDescribe `json:"objectDescribe"`
RecentItems []map[string]interface{} `json:"recentItems"`
}
// ObjectDescribe is the SObject metadata describe.
type ObjectDescribe struct {
Activatable bool `json:"activateable"`
Creatable bool `json:"createable"`
Custom bool `json:"custom"`
CustomSetting bool `json:"customSetting"`
Deletable bool `json:"deletable"`
DeprecatedAndHidden bool `json:"deprecatedAndHidden"`
FeedEnabled bool `json:"feedEnabled"`
HasSubtype bool `json:"hasSubtypes"`
IsSubtype bool `json:"isSubtype"`
KeyPrefix string `json:"keyPrefix"`
Label string `json:"label"`
LabelPlural string `json:"labelPlural"`
Layoutable bool `json:"layoutable"`
Mergeable bool `json:"mergeable"`
MruEnabled bool `json:"mruEnabled"`
Name string `json:"name"`
Queryable bool `json:"queryable"`
Replicateable bool `json:"replicateable"`
Retrieveable bool `json:"retrieveable"`
Searchable bool `json:"searchable"`
Triggerable bool `json:"triggerable"`
Undeletable bool `json:"undeletable"`
Updateable bool `json:"updateable"`
URLs ObjectURLs `json:"urls"`
}
type metadata struct {
session session.ServiceFormatter
}
func (md *metadata) callout(sobject string) (MetadataValue, error) {
request, err := md.request(sobject)
if err != nil {
return MetadataValue{}, err
}
value, err := md.response(request)
if err != nil {
return MetadataValue{}, err
}
return value, nil
}
func (md *metadata) request(sobject string) (*http.Request, error) {
url := md.session.ServiceURL() + objectEndpoint + sobject
request, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
request.Header.Add("Accept", "application/json")
md.session.AuthorizationHeader(request)
return request, nil
}
func (md *metadata) response(request *http.Request) (MetadataValue, error) {
response, err := md.session.Client().Do(request)
if err != nil {
return MetadataValue{}, err
}
decoder := json.NewDecoder(response.Body)
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
var respErrs []sfdc.Error
err = decoder.Decode(&respErrs)
var errMsg error
if err == nil {
for _, respErr := range respErrs {
errMsg = fmt.Errorf("metadata response err: %s: %s", respErr.ErrorCode, respErr.Message)
}
} else {
errMsg = fmt.Errorf("metadata response err: %d %s", response.StatusCode, response.Status)
}
return MetadataValue{}, errMsg
}
var value MetadataValue
err = decoder.Decode(&value)
if err != nil {
return MetadataValue{}, err
}
return value, nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
examples/pwr_run/checkpointing/socket_short/true_random/job20.py
|
"""
#Trains a MobileNetV2 on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
from random import randrange
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_true_random/' + job_name + '*'
total_epochs = 44
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_true_random/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
# randomly assign it a value
trainable_count = randrange(1000)
# send signal 'jobxx param xxxxx'
message = job_name + ' param ' + str(trainable_count)
send_signal.send(args.node, 10002, message)
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
scripts/test_image.py
|
import os
if 'PYTHONPATH' not in os.environ:
print("Error: Run `source env.sh` to be able to run `/scripts/*.py`")
exit(1)
from pathlib import Path
from time import time
from typing import Iterable
import ecgdigitize
from ecgdigitize.otsu import otsuThreshold
import ecgdigitize.visualization as viz
from ecgdigitize.image import openImage
myImage = openImage(Path('data/images/002.JPG'))
grayscale = myImage.toGrayscale()
print(grayscale)
print(grayscale.normalized())
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/services/ifttt/ifttt_test.go
|
package ifttt
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/containrrr/shoutrrr/pkg/types"
"github.com/containrrr/shoutrrr/pkg/util"
"github.com/jarcoal/httpmock"
)
func TestIFTTT(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Shoutrrr IFTTT Suite")
}
var (
service *Service
logger *log.Logger
envTestURL string
)
var _ = Describe("the ifttt package", func() {
BeforeSuite(func() {
envTestURL = os.Getenv("SHOUTRRR_IFTTT_URL")
logger = util.TestLogger()
})
BeforeEach(func() {
service = &Service{}
})
When("running integration tests", func() {
It("should work without errors", func() {
if envTestURL == "" {
return
}
serviceURL, err := url.Parse(envTestURL)
Expect(err).NotTo(HaveOccurred())
err = service.Initialize(serviceURL, logger)
Expect(err).NotTo(HaveOccurred())
err = service.Send(
"this is an integration test",
nil,
)
Expect(err).NotTo(HaveOccurred())
})
})
When("creating a config", func() {
When("given an url", func() {
It("should return an error if no arguments where supplied", func() {
serviceURL, _ := url.Parse("ifttt://")
err := service.Initialize(serviceURL, logger)
Expect(err).To(HaveOccurred())
})
It("should return an error if no webhook ID is given", func() {
serviceURL, _ := url.Parse("ifttt:///?events=event1")
err := service.Initialize(serviceURL, logger)
Expect(err).To(HaveOccurred())
})
It("should return an error no events are given", func() {
serviceURL, _ := url.Parse("ifttt://dummyID")
err := service.Initialize(serviceURL, logger)
Expect(err).To(HaveOccurred())
})
It("should return an error when an invalid query key is given", func() {
serviceURL, _ := url.Parse("ifttt://dummyID/?events=event1&badquery=foo")
err := service.Initialize(serviceURL, logger)
Expect(err).To(HaveOccurred())
})
It("should return an error if message value is above 3", func() {
serviceURL, _ := url.Parse("ifttt://dummyID/?events=event1&messagevalue=8")
config := Config{}
err := config.SetURL(serviceURL)
Expect(err).To(HaveOccurred())
})
It("should not return an error if webhook ID and at least one event is given", func() {
serviceURL, _ := url.Parse("ifttt://dummyID/?events=event1")
err := service.Initialize(serviceURL, logger)
Expect(err).NotTo(HaveOccurred())
})
It("should set value1, value2 and value3", func() {
serviceURL, _ := url.Parse("ifttt://dummyID/?events=dummyevent&value3=three&value2=two&value1=one")
config := Config{}
err := config.SetURL(serviceURL)
Expect(err).NotTo(HaveOccurred())
Expect(config.Value1).To(Equal("one"))
Expect(config.Value2).To(Equal("two"))
Expect(config.Value3).To(Equal("three"))
})
})
})
When("serializing a config to URL", func() {
When("given multiple events", func() {
It("should return an URL with all the events comma-separated", func() {
expectedURL := "ifttt://dummyID/?events=foo%2Cbar%2Cbaz&messagevalue=0"
config := Config{
Events: []string{"foo", "bar", "baz"},
WebHookID: "dummyID",
UseMessageAsValue: 0,
}
resultURL := config.GetURL().String()
Expect(resultURL).To(Equal(expectedURL))
})
})
When("given values", func() {
It("should return an URL with all the values", func() {
expectedURL := "ifttt://dummyID/?messagevalue=0&value1=v1&value2=v2&value3=v3"
config := Config{
WebHookID: "dummyID",
Value1: "v1",
Value2: "v2",
Value3: "v3",
}
resultURL := config.GetURL().String()
Expect(resultURL).To(Equal(expectedURL))
})
})
})
When("sending a message", func() {
It("should error if the response code is not 204 no content", func() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
setupResponder("foo", "dummy", 404, "")
URL, _ := url.Parse("ifttt://dummy/?events=foo")
if err := service.Initialize(URL, logger); err != nil {
Fail("errored during initialization")
}
err := service.Send("hello", nil)
Expect(err).To(HaveOccurred())
})
It("should not error if the response code is 204", func() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
setupResponder("foo", "dummy", 204, "")
URL, _ := url.Parse("ifttt://dummy/?events=foo")
if err := service.Initialize(URL, logger); err != nil {
Fail("errored during initialization")
}
err := service.Send("hello", nil)
Expect(err).NotTo(HaveOccurred())
})
})
When("creating a json payload", func() {
When("given config values \"a\", \"b\" and \"c\"", func() {
It("should return a valid jsonPayload string with values \"a\", \"b\" and \"c\"", func() {
bytes, err := createJSONToSend(&Config{
Value1: "a",
Value2: "b",
Value3: "c",
UseMessageAsValue: 0,
}, "d", nil)
Expect(err).ToNot(HaveOccurred())
payload := jsonPayload{}
err = json.Unmarshal(bytes, &payload)
Expect(err).ToNot(HaveOccurred())
Expect(payload.Value1).To(Equal("a"))
Expect(payload.Value2).To(Equal("b"))
Expect(payload.Value3).To(Equal("c"))
})
})
When("message value is set to 3", func() {
It("should return a jsonPayload string with value2 set to message", func() {
config := &Config{
Value1: "a",
Value2: "b",
Value3: "c",
}
for i := 1; i <= 3; i++ {
config.UseMessageAsValue = uint8(i)
bytes, err := createJSONToSend(config, "d", nil)
Expect(err).ToNot(HaveOccurred())
payload := jsonPayload{}
err = json.Unmarshal(bytes, &payload)
Expect(err).ToNot(HaveOccurred())
if i == 1 {
Expect(payload.Value1).To(Equal("d"))
} else if i == 2 {
Expect(payload.Value2).To(Equal("d"))
} else if i == 3 {
Expect(payload.Value3).To(Equal("d"))
}
}
})
})
When("given a param overrides for value1, value2 and value3", func() {
It("should return a jsonPayload string with value1, value2 and value3 overridden", func() {
bytes, err := createJSONToSend(&Config{
Value1: "a",
Value2: "b",
Value3: "c",
UseMessageAsValue: 0,
}, "d", (*types.Params)(&map[string]string{
"value1": "e",
"value2": "f",
"value3": "g",
}))
Expect(err).ToNot(HaveOccurred())
payload := &jsonPayload{}
err = json.Unmarshal(bytes, payload)
Expect(err).ToNot(HaveOccurred())
Expect(payload.Value1).To(Equal("e"))
Expect(payload.Value2).To(Equal("f"))
Expect(payload.Value3).To(Equal("g"))
})
})
})
})
func setupResponder(event string, key string, code int, body string) {
targetURL := fmt.Sprintf("https://maker.ifttt.com/trigger/%s/with/key/%s", event, key)
httpmock.RegisterResponder("POST", targetURL, httpmock.NewStringResponder(code, body))
}
|
[
"\"SHOUTRRR_IFTTT_URL\""
] |
[] |
[
"SHOUTRRR_IFTTT_URL"
] |
[]
|
["SHOUTRRR_IFTTT_URL"]
|
go
| 1 | 0 | |
pwm_db_api/cmd/main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"google.golang.org/grpc"
"github.com/t4ke0/pwm/keys_manager/common"
keys_manager_pb "github.com/t4ke0/pwm/keys_manager/proto"
db "github.com/t4ke0/pwm/pwm_db_api"
)
func main() {
var (
postgresPW = os.Getenv("POSTGRES_PASSWORD")
postgresDB = os.Getenv("POSTGRES_DATABASE")
postgresUser = os.Getenv("POSTGRES_USER")
postgresHost = os.Getenv("POSTGRES_HOST")
postgresLink = fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable",
postgresUser,
postgresPW,
postgresHost,
postgresDB)
//
keysManagerHost = os.Getenv("KEYS_MANAGER_HOST")
)
log.Printf("DEBUG postgres Link [%v]", postgresLink)
// If LOCAL_TEST
if os.Getenv("LOCAL_TEST") == "true" {
pqLink, err := db.CreateTestingDatabase(postgresLink)
if err != nil {
log.Fatal(err)
}
postgresLink = pqLink
}
log.Printf("debug postgresLink %v", postgresLink)
// When starting inital the database. support [prod & test] env
conn, err := db.New(postgresLink)
if err != nil {
log.Fatalf("Couldn't connect to the Database %v", err)
}
defer conn.Close()
db.SchemaFile = os.Getenv("SCHEMA_FILE_PATH")
if err := conn.InitDB(); err != nil {
log.Fatalf("Couldn't init the Database %v", err)
}
log.Printf("DEBUG initialized DB successfully!")
// If TEST in CI
if os.Getenv("TEST") == "true" {
wordsFilePath := "../keys_manager/common/words.txt"
key, err := common.GenerateEncryptionKey(wordsFilePath, 0)
if err != nil {
log.Fatalf("Error generating server key %v", err)
}
if err := conn.StoreServerKey(key.String()); err != nil {
log.Fatalf("Error storing server key into database %v", err)
}
}
serverKey, err := conn.GetServerEncryptionKey()
if err != nil {
log.Fatal(err)
}
log.Printf("DEBUG server key doesn't already exists %v", serverKey == "")
if serverKey == "" {
grpcConn, err := grpc.Dial(keysManagerHost, grpc.WithInsecure())
if err != nil {
log.Fatalf("GRPC ERROR: %v", err)
}
defer grpcConn.Close()
client := keys_manager_pb.NewKeyManagerClient(grpcConn)
key, err := client.GenKey(context.TODO(), &keys_manager_pb.KeyGenRequest{
Mode: keys_manager_pb.Mode_Server,
})
if err != nil {
log.Fatal(err)
}
log.Printf("DEBUG gen server key %v", key.Key)
return
}
log.Printf("DEBUG: server key %v", serverKey)
}
|
[
"\"POSTGRES_PASSWORD\"",
"\"POSTGRES_DATABASE\"",
"\"POSTGRES_USER\"",
"\"POSTGRES_HOST\"",
"\"KEYS_MANAGER_HOST\"",
"\"LOCAL_TEST\"",
"\"SCHEMA_FILE_PATH\"",
"\"TEST\""
] |
[] |
[
"TEST",
"POSTGRES_DATABASE",
"POSTGRES_USER",
"SCHEMA_FILE_PATH",
"KEYS_MANAGER_HOST",
"POSTGRES_HOST",
"LOCAL_TEST",
"POSTGRES_PASSWORD"
] |
[]
|
["TEST", "POSTGRES_DATABASE", "POSTGRES_USER", "SCHEMA_FILE_PATH", "KEYS_MANAGER_HOST", "POSTGRES_HOST", "LOCAL_TEST", "POSTGRES_PASSWORD"]
|
go
| 8 | 0 | |
ximpia/xpcore/views.py
|
# coding: utf-8
import httplib2
import urlparse
import oauth2
import json
import types
import traceback
import os
import datetime
import copy
from django.db import transaction
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
from django.http import Http404
from ximpia.xpcore.util import get_class, AttrDict, get_app_full_path, get_app_path
from models import context, context_view, ctx, JsResultDict
from service import XpMsgException, view_tmpl, SearchService, TemplateService, CommonService
from data import ViewDAO, ActionDAO, ApplicationDAO
from ximpia.xpsite import constants as KSite
from ximpia.xpsite.models import Setting
settings = get_class(os.getenv("DJANGO_SETTINGS_MODULE"))
# Logging
import logging
logger = logging.getLogger(__name__)
def __showView(view, viewAttrs, ctx):
"""Show view. Returns classPath for service class, method and service operation attributes
** Attributes **
* ``view``
* ``viewAttrs``
* ``ctx``
** Returns **
* ``(classPath, method, viewAttrTuple)``:Tuple
"""
ctx.viewNameSource = view.name
ctx.path = '/apps/' + view.application.slug + '/' + view.slug
impl = view.implementation
# Parse method and class path
implFields = impl.split('.')
method = implFields[len(implFields)-1]
classPath = ".".join(implFields[:-1])
if viewAttrs.find('/') != -1:
viewAttrTuple = viewAttrs.split('/')
else:
if len(viewAttrs) == 0:
viewAttrTuple = []
else:
viewAttrTuple = [viewAttrs]
return (classPath, method, viewAttrTuple)
def oauth20(request, service):
"""Doc."""
logger.debug( 'GET : %s' % (json.dumps(request.GET)) )
ContextDict = {
'service': service,
'status': '',
'token': '',
'tokenSecret': '',
'errorMessage': ''
}
oauthVersion = settings.XIMPIA_CONSUMER_DICT[service][2]
if oauthVersion == '2.0':
if request.GET.has_key('code'):
code = request.GET['code']
# Exchange code for access token
logger.debug( settings.XIMPIA_CONSUMER_DICT[service][0] + ' ' + settings.XIMPIA_CONSUMER_DICT[service][1] )
url = settings.XIMPIA_OAUTH_URL_DICT[service]['access'][0] + '?' + \
'client_id=' + settings.XIMPIA_CONSUMER_DICT[service][0] + \
'&redirect_uri=' + settings.XIMPIA_OAUTH2_REDIRECT + service + \
'&client_secret=' + settings.XIMPIA_CONSUMER_DICT[service][1] + \
'&code=' + code
http = httplib2.Http()
resp, content = http.request(url)
if resp['status'] == '200':
responseDict = dict(urlparse.parse_qsl(content))
accessToken = responseDict['access_token']
logger.debug( accessToken )
ContextDict['status'] = 'OK'
ContextDict['token'] = accessToken
ContextDict['tokenSecret'] = ''
else:
# Show error
ContextDict['status'] = 'ERROR'
else:
ContextDict['status'] = 'ERROR'
else:
ContextDict['status'] = 'ERROR'
template = 'social_network/tags/networks/iconResponse.html'
Result = render_to_response(template, ContextDict)
return Result
def oauth(request, service):
"""Oauth logic with all providers registered in settings"""
logger.debug( 'GET : %s' % (json.dumps(request.GET)) )
# Think about methods in login: LinkedIn, parameters, etc...
ContextDict = {
'service': service,
'status': '',
'token': '',
'tokenSecret': '',
'errorMessage': ''
}
logger.debug( settings.XIMPIA_CONSUMER_DICT )
oauthVersion = settings.XIMPIA_CONSUMER_DICT[service][2]
if oauthVersion == '1.0':
if len(request.GET.keys()) == 0:
consumerTuple = settings.XIMPIA_CONSUMER_DICT[service]
consumer = oauth2.Consumer(consumerTuple[0], consumerTuple[1])
client = oauth2.Client(consumer)
resp, content = client.request(settings.XIMPIA_OAUTH_URL_DICT[service]['request'][0], settings.XIMPIA_OAUTH_URL_DICT[service]['request'][1])
#logger.debug( json.dumps(resp) )
if resp['status'] == '200':
#logger.debug( json.dumps(content) )
request_token = dict(urlparse.parse_qsl(content))
logger.debug( request_token )
request.session['request_token'] = request_token
# Redirect to linkedin Url
url = settings.XIMPIA_OAUTH_URL_DICT[service]['authorized'][0] + '?oauth_token=' + request_token['oauth_token']
return HttpResponseRedirect(url)
else:
# should show message of error in connecting with network
ContextDict['status'] = 'ERROR'
else:
# Callback : oauth_token and oauth_verifier
logger.debug( 'callback...' )
if request.GET.has_key('oauth_token') and request.GET.has_key('oauth_verifier'):
#oauth_token = request.GET['oauth_token']
oauth_verifier = request.GET['oauth_verifier']
request_token = request.session['request_token']
token = oauth2.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(oauth_verifier)
consumerTuple = settings.XIMPIA_CONSUMER_DICT[service]
consumer = oauth2.Consumer(consumerTuple[0], consumerTuple[1])
client = oauth2.Client(consumer, token)
resp, content = client.request(settings.XIMPIA_OAUTH_URL_DICT[service]['access'][0], "POST")
access_token = dict(urlparse.parse_qsl(content))
logger.debug( 'access_token: %s' % (access_token) )
# Show web page... javascript logic and close window
ContextDict['status'] = 'OK'
ContextDict['token'] = access_token['oauth_token']
ContextDict['tokenSecret'] = access_token['oauth_token_secret']
else:
# Show error message
ContextDict['status'] = 'ERROR'
else:
# Show error
ContextDict['status'] = 'ERROR'
template = 'social_network/tags/networks/iconResponse.html'
Result = render_to_response(template, ContextDict)
return Result
@context
def jxJSON(request, **ArgsDict):
"""Sequence of actions are executed. Returns either OK or ERROR. jsonData has list of fields action,argsTuple, argsDict."""
# init
ctx = ArgsDict['ctx']
# Option 1 : Map method, argsTuple, argsDict
if request.POST.has_key('jsonData'):
try:
data = json.loads(request.POST['jsonData'])['jsonDataList']
for fields in data:
action, parameterList, parameterDict = fields
resultTmp = eval(action)(*parameterList, **parameterDict)
if type(resultTmp) == types.ListType:
listResult = []
for entity in resultTmp:
dd = entity.values()
listResult.append(dd)
ctx.rs['status'] = 'OK'
ctx.rs['response'] = listResult
else:
entity = resultTmp
ctx.rs['status'] = 'OK'
ctx.rs['response'] = entity.values()
except:
ctx.rs['status'] = 'ERROR'
else:
ctx.rs['status'] = 'ERROR'
response = json.dumps(ctx.rs)
return HttpResponse(response)
@context
def jxSuggestList(request, **args):
"""Suggest search list"""
# init
ctx = args['ctx']
# Do
resultList = []
if request.REQUEST.has_key('dbClass'):
dbClass = request.REQUEST['dbClass']
app = request.REQUEST['app']
logger.debug('jxSuggestList :: search: %s' % (request.REQUEST['search']) )
logger.debug('jxSuggestList :: path dbClass: %s' % (app + '.data.' + dbClass) )
cls = get_class( app + '.data.' + dbClass)
obj = cls(args['ctx']) #@UnusedVariable
obj.request = request
params = {}
if request.REQUEST.has_key('params'):
params = json.loads(request.REQUEST['params']);
searchField = request.REQUEST['searchField']
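# Filter with a case-insensitive prefix match on the selected search field.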
params[searchField + '__istartswith'] = request.REQUEST['search']
logger.debug('jxSuggestList :: params: %s' % (params) )
fields = eval('obj.search')(**params)
logger.debug('jxSuggestList :: fields: %s' % (fields) )
fieldValue = None
if request.REQUEST.has_key('fieldValue'):
fieldValue = request.REQUEST['fieldValue']
extraFields = None
if request.REQUEST.has_key('extraFields'):
extraFields = json.loads(request.REQUEST['extraFields'])
logger.debug('jxSuggestList :: extrafields: %s' % (extraFields) )
for entity in fields:
dd = {}
dd['id'] = entity.id
if fieldValue is None:
dd['text'] = str(entity)
else:
dd['text'] = eval('entity.' + fieldValue)
if extraFields is not None:
extraDict = {}
for extraField in extraFields:
extraDict[extraField] = eval('entity.' + extraField)
dd['extra'] = extraDict
resultList.append(dd)
logger.debug('jxSuggestList :: resultList: %s' % (resultList) )
return HttpResponse(json.dumps(resultList))
@context
def jxSearchHeader(request, **args):
"""Search ximpia for views and actions."""
results = []
try:
logger.debug( 'searchHeader...' )
logger.debug( 'search: %s' % (request.REQUEST['search']) )
# What are params in jxSuggestList?????
ctx = args['ctx']
searchObj = SearchService(ctx)
results = searchObj.search(request.REQUEST['search'])
logger.debug( 'results: %s' % (json.dumps(results)) )
except:
traceback.print_exc()
return HttpResponse(json.dumps(results))
def jxTemplate(request, app, mode, tmplName):
"""
Get ximpia template
**Attributes**
* ``app``:String : Application
* ``mode``:String : Mode: window, popup
* ``tmplName``:String : Template name
** Returns **
* ``template``:HttpResponse
"""
service = TemplateService(None)
tmpl = service.get(app, mode, tmplName)
return HttpResponse(tmpl)
def jxAppTemplate(request, app):
"""
Get ximpia application template
**Attributes**
* ``app``:String : Application
** Returns **
* ``template``:HttpResponse
"""
service = TemplateService(None)
tmpl = service.get_app(app)
return HttpResponse(tmpl)
@context
def jxDataQuery(request, **args):
"""
Execute data queries for lists with ordering, page and filters.
** Attributes **
* ``request``
* ``args``
** Html Attributes **
* ``dbClass``:str : Data class name (DAO)
* ``fields``:list<str> [optional]
* ``pageStart``:str [optional] [default:1] : Start page number
* ``pageEnd``:str [optional] : End page number
* ``orderBy``:tuple<str> [optional]
* ``method``:str [optional] [default:searchFields]
* ``args``:dict<str,str> [optional]
* ``hasOrdering``:bool [optional]
* ``orderField``:str [optional]
** Returns **
result
"""
logger.debug( 'jxDataQuery...' )
logger.debug('jxDataQuery :: args: %s' % (args) )
logger.debug('jxDataQuery :: REQUEST: %s' % (request.REQUEST) )
if not request.REQUEST.has_key('dbClass') or not request.REQUEST.has_key('app'):
raise XpMsgException(AttributeError, _('app and dbClass must be defined.'))
dbClass = request.REQUEST['dbClass']
dbApplication = ApplicationDAO(args['ctx'])
#logger.debug('jxDataQuery :: app: {}'.format(request.REQUEST['app'], get_app_full_path(request.REQUEST['app'])))
app = get_app_full_path(request.REQUEST['app'])
application = dbApplication.get(name=app)
# app: ximpia_site.web, MyDAO => ximpia_site.web.data.MyDAO
classPath = app + '.data.' + dbClass
cls = get_class( classPath )
obj = cls(args['ctx']) #@UnusedVariable
obj.request = request
logger.debug('jxDataQuery :: obj: %s' % (obj) )
# fields
fields = []
if request.REQUEST.has_key('fields'):
fields = json.loads(request.REQUEST['fields'])
dbArgs = {}
meta = AttrDict()
# disablePaging
dbArgs['disable_paging'] = False
if request.REQUEST.has_key('disablePaging'):
dbArgs['disable_paging'] = json.loads(request.REQUEST['disablePaging'])
if not dbArgs['disable_paging']:
if request.REQUEST.has_key('pageStart'):
dbArgs['page_start'] = int(request.REQUEST['pageStart'])
else:
dbArgs['page_start'] = 1
logger.debug('jxDataQuery :: pageStart: %s' % (dbArgs['page_start']) )
# pageEnd
if request.REQUEST.has_key('pageEnd') and not dbArgs['disable_paging']:
dbArgs['page_end'] = int(request.REQUEST['pageEnd'])
# orderBy
if request.REQUEST.has_key('orderBy'):
dbArgs['order_by'] = json.loads(request.REQUEST['orderBy'])
# args
if request.REQUEST.has_key('args'):
requestArgs = json.loads(request.REQUEST['args'])
for requestArg in requestArgs:
try:
dbArgs[requestArg] = json.loads(requestArgs[requestArg])
except ValueError:
dbArgs[requestArg] = requestArgs[requestArg]
# numberResults
if request.REQUEST.has_key('numberResults'):
dbArgs['number_results'] = int(request.REQUEST['numberResults'])
else:
# Get number results from settings
dbArgs['number_results'] = int(Setting.objects.get(name__name=KSite.SET_NUMBER_RESULTS_LIST).value)
logger.debug('jxDataQuery :: numberResults: %s' % (dbArgs['number_results']) )
# hasOrdering
if request.REQUEST.has_key('hasOrdering') and request.REQUEST['hasOrdering'] == 'true':
if request.REQUEST.has_key('orderField'):
fields.append(request.REQUEST['orderField'])
else:
fields.append('order')
# hasHeader
hasHeader = False
if request.REQUEST.has_key('hasHeader'):
hasHeader = json.loads(request.REQUEST['hasHeader'])
logger.debug('jxDataQuery :: hasHeader: %s' % (hasHeader) )
if 'id' not in fields and len(fields) != 0:
fields.insert(0, 'id')
logger.debug('jxDataQuery :: fields: %s' % (fields) )
logger.debug('jxDataQuery :: dbArgs: %s' % (dbArgs) )
"""dbArgs['disablePaging'] = False
dbArgs['pageStart'] = 1
dbArgs['pageEnd'] = 1
dbArgs['numberResults'] = 2"""
if request.REQUEST.has_key('method'):
dataListTmp = eval('obj.' + request.REQUEST['method'])(fields, **dbArgs)
else:
dataListTmp = obj.search_fields(fields, **dbArgs)
# numberPages
if dbArgs['disable_paging'] is not True:
dbArgsPages = copy.copy(dbArgs)
if dbArgsPages.has_key('page_start'):
del dbArgsPages['page_start']
if dbArgsPages.has_key('page_end'):
del dbArgsPages['page_end']
if request.REQUEST.has_key('method'):
"""dataListTmp = eval('obj.' + request.REQUEST['method'])(fields, **dbArgsPages)
logger.debug('jxDataQuery :: type dataListTmp: %s' % (type(dataListTmp)) )
meta.numberPages = dataListTmp.count()/numberResults"""
pass
else:
if dbArgsPages.has_key('disable_paging'):
del dbArgsPages['disable_paging']
if dbArgsPages.has_key('number_results'):
numberResults = dbArgsPages['number_results']
del dbArgsPages['number_results']
if dbArgsPages.has_key('page_start'): del dbArgsPages['page_start']
if dbArgsPages.has_key('page_end'): del dbArgsPages['page_end']
if dbArgsPages.has_key('order_by'): del dbArgsPages['order_by']
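# Approximate the page count: filtered row count divided by the number of results per page, rounded.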
meta.numberPages = int(round(float(obj.model.objects.filter(**dbArgsPages).count())/float(numberResults)))
else:
meta.numberPages = 1
meta.pageStart = 1
if dbArgs.has_key('page_start'):
meta.pageStart = dbArgs['page_start']
if dbArgs.has_key('page_end'):
meta.pageEnd = dbArgs['page_end']
else:
meta.pageEnd = meta.pageStart
#logger.debug('jxDataQuery :: dataListTmp: %s' % (dataListTmp) )
dataList = []
for dbFields in dataListTmp:
ll = []
for dbField in dbFields:
if type(dbField) == datetime.date:
ll.append(dbField.strftime('%m/%d/%Y'))
elif type(dbField) == datetime.datetime:
ll.append(dbField.strftime('%m/%d/%Y %H:%M'))
elif type(dbField) == datetime.time:
ll.append(dbField.strftime('%H:%M'))
else:
ll.append(dbField)
dataList.append(ll)
logger.debug('jxDataQuery :: dataList: %s' % (dataList) )
# headers
headers = []
if hasHeader:
modelFields = obj.model._meta.fields
logger.debug('jxDataQuery :: modelFields: %s' % (modelFields) )
if len(fields) == 0:
# get all model fields from table and add to headers
for modelField in modelFields:
headers.append(modelField.verbose_name)
else:
# Get model fields with max level 3: field__field__field
for field in fields:
if field.count('__') == 0:
headerField = obj.model._meta.get_field_by_name(field)[0].verbose_name
logger.debug('jxDataQuery :: headerField: %s' % (headerField) )
headers.append(headerField)
elif field.count('__') == 1:
fieldFrom, fieldTo = field.split('__')
logger.debug('jxDataQuery :: fieldFrom: %s fieldTo: %s' % (fieldFrom, fieldTo) )
"""relField = obj.model._meta.get_field_by_name(fieldFrom)[0]\
.rel.to._meta.get_field_by_name(fieldTo)[0]"""
# 03/07/2013 : We get header name from fk verbose name and not linked to verbose name
relField = obj.model._meta.get_field_by_name(fieldFrom)[0]
if type(relField.verbose_name) == types.UnicodeType:
headerField = relField.verbose_name
else:
headerField = relField.name
logger.debug('jxDataQuery :: headerField: %s' % (headerField) )
headers.append(headerField)
elif field.count('__') == 2:
fieldFrom, fieldTo1, fieldTo2 = field.split('__')
logger.debug('jxDataQuery :: fieldFrom: %s fieldTo1: %s fieldTo2: %s' % (fieldFrom, fieldTo1, fieldTo2) )
"""relField = obj.model._meta.get_field_by_name(fieldFrom)[0]\
.rel.to._meta.get_field_by_name(fieldTo1)[0]\
.rel.to._meta.get_field_by_name(fieldTo2)[0]"""
# 03/07/2013 : We get header name from fk verbose name and not linked to verbose name
relField = obj.model._meta.get_field_by_name(fieldFrom)[0]
if type(relField.verbose_name) == types.UnicodeType:
headerField = relField.verbose_name
else:
headerField = relField.name
logger.debug('jxDataQuery :: headerField: %s' % (headerField) )
headers.append(headerField)
logger.debug('jxDataQuery :: headers: %s' % (headers) )
results = {'headers': headers, 'data': dataList, 'meta': meta}
logger.debug('jxDataQuery :: results: %s' % (results) )
return HttpResponse(json.dumps(results))
@context
def jxDataSwitchOrder(request, **args):
"""
Change order in a data table
** Attributes **
* ``request``
* ``args``
** Html Attributes **
* ``dbClass``:str
* ``orderCurrent``:str ????
* ``orderNew``:str
* ``pk``
"""
logger.debug('jxDataSwitchOrder...')
# TODO: Hit master for this operation
# get current order
# get list fields from current order to new order
# new order higher or lower than current order?
orderCurrent = int(request.REQUEST['orderCurrent'])
orderNew = int(request.REQUEST['orderNew'])
pk = request.REQUEST['pk']
dbClass = request.REQUEST['dbClass']
orderField = 'order'
if request.REQUEST.has_key('orderField'):
orderField = request.REQUEST['orderField']
logger.debug('jxDataSwitchOrder :: pk: %s orderCurrent: %s orderNew: %s dbClass: %s' % (pk, orderCurrent, orderNew, dbClass) )
dbApplication = ApplicationDAO(args['ctx'])
app = request.REQUEST['app']
application = dbApplication.get(name=app)
# app: ximpia_site.web, MyDAO => ximpia_site.web.data.MyDAO
classPath = app + '.data.' + dbClass
cls = get_class( classPath )
obj = cls(args['ctx']) #@UnusedVariable
item = obj.get(pk=pk)
logger.debug('jxDataSwitchOrder :: change order : %s -> %s' % (orderCurrent, orderNew) )
# Read the stored order before applying the new one, so stale requests can be detected
orderDbCurrent = eval('item.' + orderField)
logger.debug('jxDataSwitchOrder :: orderDbCurrent: %s' % (orderDbCurrent) )
if orderCurrent != orderDbCurrent:
raise XpMsgException(None, _('Sorting error. Please retry later. Thanks'))
item.__setattr__(orderField, orderNew)
if orderNew > orderCurrent:
# Moving down the list
logger.debug('jxDataSwitchOrder :: Moving down the list...')
itemsToUpdate = obj.objects.filter(order__gt = orderCurrent, order__lte = orderNew).values()
logger.debug('jxDataSwitchOrder :: itemsToUpdate. %s' % (itemsToUpdate))
for itemToUpdate in itemsToUpdate:
logger.debug('jxDataSwitchOrder :: Move down: %s -> %s' % (itemToUpdate[orderField], itemToUpdate[orderField]-1) )
itemToUpdate[orderField] -= 1
itemToUpdate.save()
else:
# Moving up the list
logger.debug('jxDataSwitchOrder :: Moving up the list...')
itemsToUpdate = obj.objects.filter(order__gt = orderNew, order__lt = orderCurrent).values()
logger.debug('jxDataSwitchOrder :: itemsToUpdate. %s' % (itemsToUpdate))
for itemToUpdate in itemsToUpdate:
logger.debug('jxDataSwitchOrder :: Move up: %s -> %s' % (itemToUpdate[orderField], itemToUpdate[orderField]+1) )
itemToUpdate[orderField] += 1
itemToUpdate.save()
logger.debug('jxDataSwitchOrder :: finished!!!')
return HttpResponse(json.dumps('OK'))
@ctx()
@transaction.commit_on_success
def jxService(request, **args):
"""Excutes the business class: bsClass, method {bsClass: '', method: ''}
@param request: Request
@param result: Result"""
logger.debug( 'jxService...' )
#raw_input('Continue???')
#time.sleep(1.5)
logger.debug( json.dumps(request.REQUEST.items()) )
request.session.set_test_cookie()
request.session.delete_test_cookie()
#logger.debug( 'session: %s' % (json.dumps(request.session.items())) )
#logger.debug( 'session: %s' % json.dumps(request.session.items()) + ' ' + json.dumps(request.session.session_key) )
if (request.REQUEST.has_key('view') or request.REQUEST.has_key('action')) and request.is_ajax() is True:
viewAttrs = {}
dbApplication = ApplicationDAO(args['ctx'])
app = request.REQUEST['app']
application = dbApplication.get(name=app)
if request.REQUEST.has_key('view'):
view = request.REQUEST['view']
logger.debug( 'view: %s' % (view) )
dbView = ViewDAO(args['ctx'])
viewObj = dbView.get(application__name=app, name=view)
args['ctx'].viewAuth = viewObj.hasAuth
impl = viewObj.implementation
# view attributes
viewAttrs = json.loads(request.REQUEST['params']) if 'params' in request.REQUEST else {}
args['ctx'].viewNameSource = view
args['ctx'].path = '/apps/' + application.slug + '/' + viewObj.slug
elif request.REQUEST.has_key('action'):
action = request.REQUEST['action']
logger.debug( 'action: %s' % (action) )
dbAction = ActionDAO(args['ctx'])
dbView = ViewDAO(args['ctx'])
actionObj = dbAction.get(application__name=app, name=action)
#if args['ctx'].has_key('viewNameSource') and len(args['ctx']['viewNameSource']) != 0:
if len(args['ctx'].viewNameSource) != 0:
# Get view name and check its application code with application code of action
logger.debug( 'viewNameSource: %s' % (args['ctx'].viewNameSource) )
viewObj = dbView.get(application__name=app, name=args['ctx'].viewNameSource)
if actionObj.application.name != viewObj.application.name:
raise XpMsgException(None, _('Action is not in same application as view source'))
impl = actionObj.implementation
args['ctx'].path = '/apps/' + application.slug + '/do/' + actionObj.slug
implFields = impl.split('.')
method = implFields[len(implFields)-1]
classPath = ".".join(implFields[:-1])
logger.debug('classPath: %s' % (classPath))
if method.find('_') == -1 or method.find('__') == -1:
cls = get_class(classPath)
obj = cls(args['ctx'])
super(cls, obj).__init__(args['ctx'])
obj.request = request
if (len(viewAttrs) == 0):
result = getattr(obj, method)()
else:
result = getattr(obj, method)(**viewAttrs)
else:
logger.debug( 'private methods...' )
raise Http404
else:
logger.debug( 'Invalid business request' )
raise Http404
return result
@ctx()
@transaction.commit_on_success
def jxSave(request, **args):
"""
Save register. Operation executed when clicking the "Save" button on forms. Saves all instances related to forms, including
many-to-many relationships.
** Attributes **
* ``request``
* ``**args``
"""
logger.debug( 'jxSave...' )
logger.debug( json.dumps(request.REQUEST.items()) )
request.session.set_test_cookie()
request.session.delete_test_cookie()
if (request.REQUEST.has_key('action')) and request.is_ajax() == True:
action = request.REQUEST['action']
logger.debug( 'action: %s' % (action) )
if action == 'save':
# resolve form, set to args['ctx'].form
logger.debug('jxSave :: form: %s' % (request.REQUEST['form']) )
formId = request.REQUEST['form']
app = request.REQUEST['app']
app_path = get_app_full_path(app)
logger.debug('formId: {} app: {} app_path: {}'.format(formId, app, app_path))
formModule = getattr(getattr(__import__(app_path + '.forms'), app_path.split('.')[1]), 'forms')
logger.debug('formModule: {}'.format(formModule))
classes = dir(formModule)
resolvedForm = None
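# Scan the classes in the form module and pick the one whose _XP_FORM_ID matches the posted form id.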
for myClass in classes:
try:
formIdTarget = eval('formModule.' + myClass + '._XP_FORM_ID')
if formIdTarget == formId:
resolvedForm = eval('formModule.' + myClass)
except AttributeError:
pass
logger.debug('jxSave :: resolvedForm: %s' % (resolvedForm) )
# Instantiate form, validate form
logger.debug('jxSave :: post: %s' % (args['ctx'].post) )
# instantiate form for create and update with db instances dbObjects from form
# dbObjects : pk, model
instances = {}
dbObjects = json.loads(args['ctx'].post['dbObjects'].replace("'", '"'))
logger.debug('jxSave :: dbObjects: %s' % (dbObjects) )
# TODO: In case we support more masters than 'default', resolve appropriate master db name
for key in dbObjects:
# Get instance model by pk
impl = dbObjects[key]['impl']
cls = get_class( impl )
instances[key] = cls.objects.using('default').get(pk=dbObjects[key]['pk'])
logger.debug('jxSave :: instances. %s' % (instances) )
if len(instances) == 0:
args['ctx'].form = resolvedForm(args['ctx'].post, ctx=args['ctx'])
else:
args['ctx'].form = resolvedForm(args['ctx'].post, ctx=args['ctx'], instances=instances)
logger.debug('jxSave :: instantiated form')
args['ctx'].jsData = JsResultDict()
isFormValid = args['ctx'].form.is_valid()
#isFormValid = False
logger.debug('jxSave :: isFormValid: %s' % (isFormValid) )
obj = CommonService(args['ctx'])
obj.request = request
if isFormValid == True:
logger.debug('jxSave :: Form is valid!!!')
obj._set_main_form(args['ctx'].form)
result = obj.save()
else:
if settings.DEBUG == True:
logger.debug( 'Validation error!!!!!' )
logger.debug( args['ctx'].form.errors )
if args['ctx'].form.errors.has_key('invalid'):
logger.debug( args['ctx'].form.errors['invalid'] )
traceback.print_exc()
if args['ctx'].form.errors.has_key('invalid'):
errorDict = {'': args['ctx'].form.errors['invalid'][0]}
logger.debug( 'errorDict: %s' % (errorDict) )
result = obj._buildJSONResult(obj._getErrorResultDict(errorDict, pageError=True))
else:
# Build errordict
errorDict = {}
for field in args['ctx'].form.errors:
if field != '__all__':
errorDict[field] = args['ctx'].form.errors[field][0]
logger.debug( 'errorDict: %s' % (errorDict) )
result = obj._buildJSONResult(obj._getErrorResultDict(errorDict, pageError=False))
return result
else:
logger.debug( 'Invalid action name. Only save is allowed' )
raise Http404
else:
logger.debug( 'Invalid business request' )
raise Http404
return result
@ctx()
@transaction.commit_on_success
def jxDelete(request, **args):
"""
Deletes registers associated with a form. When more than one instance is associated with the form, the button must define the instances to delete.
This operation is executed when "Delete" button is clicked on forms.
Deletes the register by pk: dbInstance.objects.get(pk=myPk).delete()
** Attributes **
* ``request``
* ``**args``
"""
logger.debug('jxDelete ...')
logger.debug('jxDelete :: args: %s' % (args) )
request.session.set_test_cookie()
request.session.delete_test_cookie()
if (request.REQUEST.has_key('action')) and request.is_ajax() == True:
action = request.REQUEST['action']
logger.debug( 'action: %s' % (action) )
if action == 'delete':
# resolve form, set to args['ctx'].form
logger.debug('jxDelete :: form: %s' % (request.REQUEST['form']) )
formId = request.REQUEST['form']
app = request.REQUEST['app']
logger.debug('jxDelete :: app: {}'.format(app))
formModule = __import__(app, globals(), locals(), ['forms'], -1).forms
classes = dir(formModule)
resolvedForm = None
for myClass in classes:
try:
formIdTarget = eval('formModule.' + myClass + '._XP_FORM_ID')
if formIdTarget == formId:
resolvedForm = eval('formModule.' + myClass)
except AttributeError:
pass
logger.debug('jxDelete :: resolvedForm: %s' % (resolvedForm) )
args['ctx'].form = resolvedForm(args['ctx'].post, ctx=args['ctx'])
args['ctx'].jsData = JsResultDict()
# Instantiate form, validate form
logger.debug('jxDelete :: post: %s' % (args['ctx'].post) )
# instantiate form for create and update with db instances dbObjects from form
# dbObjects : pk, model
obj = CommonService(args['ctx'])
obj._set_main_form(args['ctx'].form)
obj.request = request
result = obj.delete()
else:
logger.debug( 'Invalid action name. Only delete is allowed' )
raise Http404
else:
logger.debug( 'Invalid business request' )
raise Http404
return result
@context_view()
@view_tmpl()
def showView(request, appSlug='front', viewSlug='home', viewAttrs='', **args):
"""
Show url view. Application code and view name are parsed from the url.
urls not following /appSlug/viewSlug mapped into urls???? appSlug would be default app from settings
**Required Attributes**
**Optional Attributes**
**Returns**
"""
#logger.debug( 'xpcore showView :: context: %s' % (json.dumps(args['ctx'])) )
dbApplication = ApplicationDAO(args['ctx'])
application = dbApplication.get(slug=appSlug)
db = ViewDAO(args['ctx'])
view = db.get(application=application, slug=viewSlug)
args['ctx'].viewAuth = view.hasAuth
classPath, method, viewAttrTuple = __showView(view, viewAttrs, args['ctx'])
if method.find('_') == -1 or method.find('__') == -1:
logger.debug('showView :: classPath: %s method: %s viewAttrTuple: %s' % (classPath, method, viewAttrTuple))
cls = get_class( classPath )
obj = cls(args['ctx'])
super(cls, obj).__init__(args['ctx'])
obj.request = request
if (len(viewAttrTuple) == 0):
result = eval('obj.' + method)()
else:
result = eval('obj.' + method)(*viewAttrTuple)
else:
logger.debug( 'xpcore :: showView :: private methods...' )
raise Http404
return result
@transaction.commit_on_success
@context_view(mode='action')
@view_tmpl()
def execActionMsg(request, appSlug, actionSlug, actionAttrs, **args):
"""
Executes an action and shows a message of result of action.
"""
logger.debug('execActionMsg :: appslug: %s actionslug: %s actionAttrs: %s' % (appSlug, actionSlug, actionAttrs) )
dbApplication = ApplicationDAO(args['ctx'])
application = dbApplication.get(slug=appSlug)
db = ActionDAO(args['ctx'])
action = db.get(application=application, slug=actionSlug)
impl = action.implementation
implFields = impl.split('.')
method = implFields[len(implFields)-1]
classPath = ".".join(implFields[:-1])
args['ctx'].path = '/apps/' + application.slug + '/' + action.slug
if actionAttrs.find('/') != -1:
actionAttrTuple = actionAttrs.split('/')
else:
if len(actionAttrs) == 0:
actionAttrTuple = []
else:
actionAttrTuple = [actionAttrs]
# Instance and call method for view, get result
if method.find('_') == -1 or method.find('__') == -1:
cls = get_class( classPath )
obj = cls(args['ctx'])
super(cls, obj).__init__(args['ctx'])
obj.request = request
if (len(actionAttrTuple) == 0):
result = eval('obj.' + method)()
else:
result = eval('obj.' + method)(*actionAttrTuple)
else:
logger.debug('xpcore :: execAction :: private methods...')
raise Http404
return result
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
webdriver_manager/manager.py
|
import os
from webdriver_manager.driver_cache import DriverCache
from webdriver_manager.logger import log
from webdriver_manager.utils import download_file, session, new_session
class DriverManager(object):
def __init__(self, root_dir=None, log_level=None, print_first_line=None, cache_valid_range=1):
self.driver_cache = DriverCache(root_dir, cache_valid_range)
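# Print the startup banner when WDM_PRINT_FIRST_LINE (falling back to the print_first_line argument) equals 'True'.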
if os.environ.get('WDM_PRINT_FIRST_LINE', str(print_first_line)) == 'True':
log("\n", formatter='%(message)s', level=log_level)
log("====== WebDriver manager ======", level=log_level)
new_session()
def dont_verify_ssl(self):
session().verify = False
return self
def install(self):
raise NotImplementedError("Please Implement this method")
def _get_driver_path(self, driver):
browser_version = driver.browser_version
driver_name = driver.get_name()
os_type = driver.get_os_type()
driver_version = driver.get_version()
binary_path = self.driver_cache.find_driver(browser_version, driver_name, os_type,
driver_version)
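# Reuse a cached driver binary when available; otherwise download it and store it in the cache.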
if binary_path:
return binary_path
file = download_file(driver.get_url())
binary_path = self.driver_cache.save_file_to_cache(file, browser_version,
driver_name, os_type, driver_version)
return binary_path
|
[] |
[] |
[
"WDM_PRINT_FIRST_LINE"
] |
[]
|
["WDM_PRINT_FIRST_LINE"]
|
python
| 1 | 0 | |
internal/common/golang.go
|
package common
import (
"go/build"
"os"
"path/filepath"
)
// Golang is what can be injected into a subcommand when you need Go specific items
type Golang struct{}
// GoPath returns a path inside $GOPATH
func (g Golang) GoPath(subpath ...string) string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
subpath = append([]string{gopath}, subpath...)
return filepath.Join(subpath...)
}
// GoBin returns a path inside $GOPATH/bin
func (g Golang) GoBin(subpath ...string) string {
subpath = append([]string{"bin"}, subpath...)
return g.GoPath(subpath...)
}
// IsDebug returns true if the GO_JAMES_DEBUG env var is set
func (g Golang) IsDebug() bool {
return os.Getenv("GO_JAMES_DEBUG") != ""
}
|
[
"\"GOPATH\"",
"\"GO_JAMES_DEBUG\""
] |
[] |
[
"GOPATH",
"GO_JAMES_DEBUG"
] |
[]
|
["GOPATH", "GO_JAMES_DEBUG"]
|
go
| 2 | 0 | |
ika_classifier/app/api/database/mongo.py
|
# -*- coding: utf-8 -*-
from pymongo import MongoClient
import os
MONGO_URI = os.environ.get("MONGO_URI", default=False)
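# Read the connection URI from the environment; falls back to False when MONGO_URI is unset.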
myclient = MongoClient(MONGO_URI)
mdb = myclient["ika"]
|
[] |
[] |
[
"MONGO_URI"
] |
[]
|
["MONGO_URI"]
|
python
| 1 | 0 | |
azure-iot-device/azure/iot/device/iothub/abstract_clients.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains abstract classes for the various clients of the Azure IoT Hub Device SDK
"""
import six
import abc
import logging
import threading
import os
import io
import time
from . import pipeline
from azure.iot.device.common.auth import connection_string as cs
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.iothub import client_event
from azure.iot.device import exceptions
from azure.iot.device.common import auth
from . import edge_hsm
logger = logging.getLogger(__name__)
def _validate_kwargs(exclude=[], **kwargs):
"""Helper function to validate user provided kwargs.
Raises TypeError if an invalid option has been provided"""
valid_kwargs = [
"product_info",
"websockets",
"cipher",
"server_verification_cert",
"proxy_options",
"sastoken_ttl",
"keep_alive",
"auto_connect",
"connection_retry",
"connection_retry_interval",
]
for kwarg in kwargs:
if (kwarg not in valid_kwargs) or (kwarg in exclude):
raise TypeError("Unsupported keyword argument: '{}'".format(kwarg))
def _get_config_kwargs(**kwargs):
"""Get the subset of kwargs which pertain the config object"""
valid_config_kwargs = [
"product_info",
"websockets",
"cipher",
"server_verification_cert",
"proxy_options",
"keep_alive",
"auto_connect",
"connection_retry",
"connection_retry_interval",
]
config_kwargs = {}
for kwarg in kwargs:
if kwarg in valid_config_kwargs:
config_kwargs[kwarg] = kwargs[kwarg]
return config_kwargs
def _form_sas_uri(hostname, device_id, module_id=None):
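# Builds "<hostname>/devices/<device_id>" or "<hostname>/devices/<device_id>/modules/<module_id>".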
if module_id:
return "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=hostname, device_id=device_id, module_id=module_id
)
else:
return "{hostname}/devices/{device_id}".format(hostname=hostname, device_id=device_id)
def _extract_sas_uri_values(uri):
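# Parses a SAS token resource URI back into hostname, device_id and optional module_id.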
d = {}
items = uri.split("/")
if len(items) != 3 and len(items) != 5:
raise ValueError("Invalid SAS URI")
if items[1] != "devices":
raise ValueError("Cannot extract device id from SAS URI")
if len(items) > 3 and items[3] != "modules":
raise ValueError("Cannot extract module id from SAS URI")
d["hostname"] = items[0]
d["device_id"] = items[2]
try:
d["module_id"] = items[4]
except IndexError:
d["module_id"] = None
return d
# Receive Type constant defs
RECEIVE_TYPE_NONE_SET = "none_set" # Type of receiving has not been set
RECEIVE_TYPE_HANDLER = "handler" # Only use handlers for receive
RECEIVE_TYPE_API = "api" # Only use APIs for receive
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubClient(object):
"""A superclass representing a generic IoTHub client.
This class needs to be extended for specific clients.
"""
def __init__(self, mqtt_pipeline, http_pipeline):
"""Initializer for a generic client.
:param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
:type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
"""
self._mqtt_pipeline = mqtt_pipeline
self._http_pipeline = http_pipeline
self._inbox_manager = None # this will be overridden in child class
self._handler_manager = None # this will be overridden in child class
self._receive_type = RECEIVE_TYPE_NONE_SET
self._client_lock = threading.Lock()
def _on_connected(self):
"""Helper handler that is called upon an iothub pipeline connect"""
logger.info("Connection State - Connected")
client_event_inbox = self._inbox_manager.get_client_event_inbox()
event = client_event.ClientEvent(client_event.CONNECTION_STATE_CHANGE)
client_event_inbox.put(event)
# Ensure that all handlers are running now that connection is re-established.
self._handler_manager.ensure_running()
def _on_disconnected(self):
"""Helper handler that is called upon an iothub pipeline disconnect"""
logger.info("Connection State - Disconnected")
client_event_inbox = self._inbox_manager.get_client_event_inbox()
event = client_event.ClientEvent(client_event.CONNECTION_STATE_CHANGE)
client_event_inbox.put(event)
# Locally stored method requests on client are cleared.
# They will be resent by IoTHub on reconnect.
self._inbox_manager.clear_all_method_requests()
logger.info("Cleared all pending method requests due to disconnect")
# def _on_new_sastoken_required(self):
# logger.info("New SasToken required from user")
# client_event_inbox = self._inbox_manager.get_client_event_inbox()
# event = client_event.ClientEvent(client_event.NEW_SASTOKEN_REQUIRED)
# client_event_inbox.put(event)
def _check_receive_mode_is_api(self):
"""Call this function first in EVERY receive API"""
with self._client_lock:
if self._receive_type is RECEIVE_TYPE_NONE_SET:
# Lock the client to ONLY use receive APIs (no handlers)
self._receive_type = RECEIVE_TYPE_API
elif self._receive_type is RECEIVE_TYPE_HANDLER:
raise exceptions.ClientError(
"Cannot use receive APIs - receive handler(s) have already been set"
)
else:
pass
def _check_receive_mode_is_handler(self):
"""Call this function first in EVERY handler setter"""
with self._client_lock:
if self._receive_type is RECEIVE_TYPE_NONE_SET:
# Lock the client to ONLY use receive handlers (no APIs)
self._receive_type = RECEIVE_TYPE_HANDLER
# Set the inbox manager to use unified msg receives
self._inbox_manager.use_unified_msg_mode = True
elif self._receive_type is RECEIVE_TYPE_API:
raise exceptions.ClientError(
"Cannot set receive handlers - receive APIs have already been used"
)
else:
pass
def _replace_user_supplied_sastoken(self, sastoken_str):
"""
Replaces the pipeline's NonRenewableSasToken with a new one based on a provided
sastoken string. Also does validation.
This helper only updates the PipelineConfig - it does not reauthorize the connection.
"""
if not isinstance(
self._mqtt_pipeline.pipeline_configuration.sastoken, st.NonRenewableSasToken
):
raise exceptions.ClientError(
"Cannot update sastoken when client was not created with one"
)
# Create new SasToken
try:
new_token_o = st.NonRenewableSasToken(sastoken_str)
except st.SasTokenError as e:
new_err = ValueError("Invalid SasToken provided")
new_err.__cause__ = e
raise new_err
# Extract values from SasToken
vals = _extract_sas_uri_values(new_token_o.resource_uri)
# Validate new token
if type(self).__name__ == "IoTHubDeviceClient" and vals["module_id"]:
raise ValueError("Provided SasToken is for a module")
if type(self).__name__ == "IoTHubModuleClient" and not vals["module_id"]:
raise ValueError("Provided SasToken is for a device")
if self._mqtt_pipeline.pipeline_configuration.device_id != vals["device_id"]:
raise ValueError("Provided SasToken does not match existing device id")
if self._mqtt_pipeline.pipeline_configuration.module_id != vals["module_id"]:
raise ValueError("Provided SasToken does not match existing module id")
if self._mqtt_pipeline.pipeline_configuration.hostname != vals["hostname"]:
raise ValueError("Provided SasToken does not match existing hostname")
if new_token_o.expiry_time < int(time.time()):
raise ValueError("Provided SasToken has already expired")
# Set token
# NOTE: We only need to set this on MQTT because this is a reference to the same object
# that is stored in HTTP. The HTTP pipeline is updated implicitly.
self._mqtt_pipeline.pipeline_configuration.sastoken = new_token_o
@classmethod
def create_from_connection_string(cls, connection_string, **kwargs):
"""
Instantiate the client from a IoTHub device or module connection string.
:param str connection_string: The connection string for the IoTHub you wish to connect to.
:param str server_verification_cert: Configuration Option. The trusted certificate chain.
Necessary when connecting to an endpoint which has a non-standard root of trust,
such as a protocol gateway.
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int sastoken_ttl: The time to live (in seconds) for the created SasToken used for
authentication. Default is 3600 seconds (1 hour)
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: ValueError if given an invalid connection_string.
:raises: TypeError if given an unsupported parameter.
:returns: An instance of an IoTHub client that uses a connection string for authentication.
"""
# TODO: Make this device/module specific and reject non-matching connection strings.
# Ensure no invalid kwargs were passed by the user
_validate_kwargs(**kwargs)
# Create SasToken
connection_string = cs.ConnectionString(connection_string)
uri = _form_sas_uri(
hostname=connection_string[cs.HOST_NAME],
device_id=connection_string[cs.DEVICE_ID],
module_id=connection_string.get(cs.MODULE_ID),
)
signing_mechanism = auth.SymmetricKeySigningMechanism(
key=connection_string[cs.SHARED_ACCESS_KEY]
)
token_ttl = kwargs.get("sastoken_ttl", 3600)
try:
sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
except st.SasTokenError as e:
new_err = ValueError("Could not create a SasToken using provided values")
new_err.__cause__ = e
raise new_err
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=connection_string[cs.DEVICE_ID],
module_id=connection_string.get(cs.MODULE_ID),
hostname=connection_string[cs.HOST_NAME],
gateway_hostname=connection_string.get(cs.GATEWAY_HOST_NAME),
sastoken=sastoken,
**config_kwargs
)
if cls.__name__ == "IoTHubDeviceClient":
pipeline_configuration.blob_upload = True
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
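# Minimal usage sketch (assumption: called on a concrete subclass such as IoTHubDeviceClient,
# with a real connection string; the value below is only a placeholder):
#   client = IoTHubDeviceClient.create_from_connection_string(
#       "HostName=myhub.azure-devices.net;DeviceId=mydevice;SharedAccessKey=<key>"
#   )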
@classmethod
def create_from_sastoken(cls, sastoken, **kwargs):
"""Instantiate the client from a pre-created SAS Token string
:param str sastoken: The SAS Token string
:param str server_verification_cert: Configuration Option. The trusted certificate chain.
Necessary when connecting to an endpoint which has a non-standard root of trust,
such as a protocol gateway.
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: TypeError if given an unsupported parameter.
:raises: ValueError if the sastoken parameter is invalid.
"""
# Ensure no invalid kwargs were passed by the user
excluded_kwargs = ["sastoken_ttl"]
_validate_kwargs(exclude=excluded_kwargs, **kwargs)
# Create SasToken object from string
try:
sastoken_o = st.NonRenewableSasToken(sastoken)
except st.SasTokenError as e:
new_err = ValueError("Invalid SasToken provided")
new_err.__cause__ = e
raise new_err
# Extract values from SasToken
vals = _extract_sas_uri_values(sastoken_o.resource_uri)
if cls.__name__ == "IoTHubDeviceClient" and vals["module_id"]:
raise ValueError("Provided SasToken is for a module")
if cls.__name__ == "IoTHubModuleClient" and not vals["module_id"]:
raise ValueError("Provided SasToken is for a device")
if sastoken_o.expiry_time < int(time.time()):
raise ValueError("Provided SasToken has already expired")
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=vals["device_id"],
module_id=vals["module_id"],
hostname=vals["hostname"],
sastoken=sastoken_o,
**config_kwargs
)
if cls.__name__ == "IoTHubDeviceClient":
pipeline_configuration.blob_upload = True # Blob Upload is a feature on Device Clients
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
@abc.abstractmethod
def shutdown(self):
pass
@abc.abstractmethod
def connect(self):
pass
@abc.abstractmethod
def disconnect(self):
pass
@abc.abstractmethod
def update_sastoken(self, sastoken):
pass
@abc.abstractmethod
def send_message(self, message):
pass
@abc.abstractmethod
def receive_method_request(self, method_name=None):
pass
@abc.abstractmethod
def send_method_response(self, method_request, payload, status):
pass
@abc.abstractmethod
def get_twin(self):
pass
@abc.abstractmethod
def patch_twin_reported_properties(self, reported_properties_patch):
pass
@abc.abstractmethod
def receive_twin_desired_properties_patch(self):
pass
@property
def connected(self):
"""
Read-only property to indicate if the transport is connected or not.
"""
return self._mqtt_pipeline.connected
@abc.abstractproperty
def on_message_received(self):
pass
@abc.abstractproperty
def on_method_request_received(self):
pass
@abc.abstractproperty
def on_twin_desired_properties_patch_received(self):
pass
@property
def on_connection_state_change(self):
return self._handler_manager.on_connection_state_change
@on_connection_state_change.setter
def on_connection_state_change(self, value):
self._handler_manager.on_connection_state_change = value
# @property
# def on_new_sastoken_required(self):
# return self._handler_manager.on_new_sastoken_required
# @on_new_sastoken_required.setter
# def on_new_sastoken_required(self, value):
# self._handler_manager.on_new_sastoken_required = value
# @property
# def on_background_exception(self):
# return self._handler_manager.on_background_exception
# @on_background_exception.setter
# def on_background_exception(self, value):
# self._handler_manager.on_background_exception = value
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubDeviceClient(AbstractIoTHubClient):
@classmethod
def create_from_x509_certificate(cls, x509, hostname, device_id, **kwargs):
"""
Instantiate a client using X509 certificate authentication.
:param str hostname: Host running the IotHub.
Can be found in the Azure portal in the Overview tab as the string hostname.
:param x509: The complete x509 certificate object.
To use the certificate the enrollment object needs to contain cert
(either the root certificate or one of the intermediate CA certificates).
If the cert comes from a CER file, it needs to be base64 encoded.
:type x509: :class:`azure.iot.device.X509`
:param str device_id: The ID used to uniquely identify a device in the IoTHub
:param str server_verification_cert: Configuration Option. The trusted certificate chain.
Necessary when connecting to an endpoint which has a non-standard root of trust,
such as a protocol gateway.
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: TypeError if given an unsupported parameter.
:returns: An instance of an IoTHub client that uses an X509 certificate for authentication.
"""
# Ensure no invalid kwargs were passed by the user
excluded_kwargs = ["sastoken_ttl"]
_validate_kwargs(exclude=excluded_kwargs, **kwargs)
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=device_id, hostname=hostname, x509=x509, **config_kwargs
)
pipeline_configuration.blob_upload = True # Blob Upload is a feature on Device Clients
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
@classmethod
def create_from_symmetric_key(cls, symmetric_key, hostname, device_id, **kwargs):
"""
Instantiate a client using symmetric key authentication.
:param symmetric_key: The symmetric key.
:param str hostname: Host running the IotHub.
Can be found in the Azure portal in the Overview tab as the string hostname.
:param device_id: The device ID
:param str server_verification_cert: Configuration Option. The trusted certificate chain.
Necessary when connecting to an endpoint which has a non-standard root of trust,
such as a protocol gateway.
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int sastoken_ttl: The time to live (in seconds) for the created SasToken used for
authentication. Default is 3600 seconds (1 hour)
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: TypeError if given an unsupported parameter.
:raises: ValueError if the provided parameters are invalid.
:return: An instance of an IoTHub client that uses a symmetric key for authentication.
"""
# Ensure no invalid kwargs were passed by the user
_validate_kwargs(**kwargs)
# Create SasToken
uri = _form_sas_uri(hostname=hostname, device_id=device_id)
signing_mechanism = auth.SymmetricKeySigningMechanism(key=symmetric_key)
token_ttl = kwargs.get("sastoken_ttl", 3600)
try:
sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
except st.SasTokenError as e:
new_err = ValueError("Could not create a SasToken using provided values")
new_err.__cause__ = e
raise new_err
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=device_id, hostname=hostname, sastoken=sastoken, **config_kwargs
)
pipeline_configuration.blob_upload = True # Blob Upload is a feature on Device Clients
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
@abc.abstractmethod
def receive_message(self):
pass
@abc.abstractmethod
def get_storage_info_for_blob(self, blob_name):
pass
@abc.abstractmethod
def notify_blob_upload_status(
self, correlation_id, is_success, status_code, status_description
):
pass
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubModuleClient(AbstractIoTHubClient):
@classmethod
def create_from_edge_environment(cls, **kwargs):
"""
Instantiate the client from the IoT Edge environment.
This method can only be run from inside an IoT Edge container, or in a debugging
environment configured for Edge development (e.g. Visual Studio, Visual Studio Code)
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int sastoken_ttl: The time to live (in seconds) for the created SasToken used for
authentication. Default is 3600 seconds (1 hour)
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: OSError if the IoT Edge container is not configured correctly.
:raises: ValueError if debug variables are invalid.
:raises: TypeError if given an unsupported parameter.
:returns: An instance of an IoTHub client that uses the IoT Edge environment for
authentication.
"""
# Ensure no invalid kwargs were passed by the user
excluded_kwargs = ["server_verification_cert"]
_validate_kwargs(exclude=excluded_kwargs, **kwargs)
# First try the regular Edge container variables
try:
hostname = os.environ["IOTEDGE_IOTHUBHOSTNAME"]
device_id = os.environ["IOTEDGE_DEVICEID"]
module_id = os.environ["IOTEDGE_MODULEID"]
gateway_hostname = os.environ["IOTEDGE_GATEWAYHOSTNAME"]
module_generation_id = os.environ["IOTEDGE_MODULEGENERATIONID"]
workload_uri = os.environ["IOTEDGE_WORKLOADURI"]
api_version = os.environ["IOTEDGE_APIVERSION"]
except KeyError:
# As a fallback, try the Edge local dev variables for debugging.
# These variables are set by VS/VS Code in order to allow debugging
# of Edge application code in a non-Edge dev environment.
try:
connection_string = os.environ["EdgeHubConnectionString"]
ca_cert_filepath = os.environ["EdgeModuleCACertificateFile"]
except KeyError as e:
new_err = OSError("IoT Edge environment not configured correctly")
new_err.__cause__ = e
raise new_err
# Read the certificate file to pass it on as a string
# TODO: variant server_verification_cert file vs data object that would remove the need for this fopen
try:
with io.open(ca_cert_filepath, mode="r") as ca_cert_file:
server_verification_cert = ca_cert_file.read()
except (OSError, IOError) as e:
# In Python 2, a non-existent file raises IOError, and an invalid file raises an IOError.
# In Python 3, a non-existent file raises FileNotFoundError, and an invalid file raises an OSError.
# However, FileNotFoundError inherits from OSError, and IOError has been turned into an alias for OSError,
# thus we can catch the errors for both versions in this block.
# Unfortunately, we can't distinguish the cause of the error from the error type, so the raised
# ValueError has a generic message. If, in the future, we want to add detail, this could be
# accomplished by inspecting the e.errno attribute.
new_err = ValueError("Invalid CA certificate file")
new_err.__cause__ = e
raise new_err
# Extract config values from connection string
connection_string = cs.ConnectionString(connection_string)
try:
device_id = connection_string[cs.DEVICE_ID]
module_id = connection_string[cs.MODULE_ID]
hostname = connection_string[cs.HOST_NAME]
gateway_hostname = connection_string[cs.GATEWAY_HOST_NAME]
except KeyError:
raise ValueError("Invalid Connection String")
# Use Symmetric Key authentication for local dev experience.
signing_mechanism = auth.SymmetricKeySigningMechanism(
key=connection_string[cs.SHARED_ACCESS_KEY]
)
else:
# Use an HSM for authentication in the general case
hsm = edge_hsm.IoTEdgeHsm(
module_id=module_id,
generation_id=module_generation_id,
workload_uri=workload_uri,
api_version=api_version,
)
try:
server_verification_cert = hsm.get_certificate()
except edge_hsm.IoTEdgeError as e:
new_err = OSError("Unexpected failure in IoTEdge")
new_err.__cause__ = e
raise new_err
signing_mechanism = hsm
# Create SasToken
uri = _form_sas_uri(hostname=hostname, device_id=device_id, module_id=module_id)
token_ttl = kwargs.get("sastoken_ttl", 3600)
try:
sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
except st.SasTokenError as e:
new_err = ValueError(
"Could not create a SasToken using the values provided, or in the Edge environment"
)
new_err.__cause__ = e
raise new_err
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=device_id,
module_id=module_id,
hostname=hostname,
gateway_hostname=gateway_hostname,
sastoken=sastoken,
server_verification_cert=server_verification_cert,
**config_kwargs
)
pipeline_configuration.method_invoke = (
True # Method Invoke is allowed on modules created from edge environment
)
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
@classmethod
def create_from_x509_certificate(cls, x509, hostname, device_id, module_id, **kwargs):
"""
Instantiate a client using X509 certificate authentication.
:param str hostname: Host running the IotHub.
Can be found in the Azure portal in the Overview tab as the string hostname.
:param x509: The complete x509 certificate object.
To use the certificate, the enrollment object needs to contain the cert
(either the root certificate or one of the intermediate CA certificates).
If the cert comes from a CER file, it needs to be base64 encoded.
:type x509: :class:`azure.iot.device.X509`
:param str device_id: The ID used to uniquely identify a device in the IoTHub
:param str module_id: The ID used to uniquely identify a module on a device on the IoTHub.
:param str server_verification_cert: Configuration Option. The trusted certificate chain.
Necessary when connecting to an endpoint which has a non-standard root of trust,
such as a protocol gateway.
:param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
over websockets.
:param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
"OpenSSL cipher list format" or as a list of cipher suite strings.
:type cipher: str or list(str)
:param str product_info: Configuration Option. Default is empty string. The string contains
arbitrary product info which is appended to the user agent string.
:param proxy_options: Options for sending traffic through proxy servers.
:type proxy_options: :class:`azure.iot.device.ProxyOptions`
:param int keep_alive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
If not provided default value of 60 secs will be used.
:param bool auto_connect: Automatically connect the client to IoTHub when a method is
invoked which requires a connection to be established. (Default: True)
:param bool connection_retry: Attempt to re-establish a dropped connection (Default: True)
:param int connection_retry_interval: Interval, in seconds, between attempts to
re-establish a dropped connection (Default: 10)
:raises: TypeError if given an unsupported parameter.
:returns: An instance of an IoTHub client that uses an X509 certificate for authentication.
"""
# Ensure no invalid kwargs were passed by the user
excluded_kwargs = ["sastoken_ttl"]
_validate_kwargs(exclude=excluded_kwargs, **kwargs)
# Pipeline Config setup
config_kwargs = _get_config_kwargs(**kwargs)
pipeline_configuration = pipeline.IoTHubPipelineConfig(
device_id=device_id, module_id=module_id, hostname=hostname, x509=x509, **config_kwargs
)
# Pipeline setup
http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
return cls(mqtt_pipeline, http_pipeline)
@abc.abstractmethod
def send_message_to_output(self, message, output_name):
pass
@abc.abstractmethod
def receive_message_on_input(self, input_name):
pass
@abc.abstractmethod
def invoke_method(self, method_params, device_id, module_id=None):
pass
|
[] |
[] |
[
"IOTEDGE_IOTHUBHOSTNAME",
"IOTEDGE_APIVERSION",
"IOTEDGE_WORKLOADURI",
"EdgeModuleCACertificateFile",
"IOTEDGE_MODULEID",
"IOTEDGE_DEVICEID",
"EdgeHubConnectionString",
"IOTEDGE_MODULEGENERATIONID",
"IOTEDGE_GATEWAYHOSTNAME"
] |
[]
|
["IOTEDGE_IOTHUBHOSTNAME", "IOTEDGE_APIVERSION", "IOTEDGE_WORKLOADURI", "EdgeModuleCACertificateFile", "IOTEDGE_MODULEID", "IOTEDGE_DEVICEID", "EdgeHubConnectionString", "IOTEDGE_MODULEGENERATIONID", "IOTEDGE_GATEWAYHOSTNAME"]
|
python
| 9 | 0 | |
vendor/github.com/heketi/heketi/executors/cmdexec/cmdexec.go
|
//
// Copyright (c) 2018 The heketi Authors
//
// This file is licensed to you under your choice of the GNU Lesser
// General Public License, version 3 or any later version (LGPLv3 or
// later), or the GNU General Public License, version 2 (GPLv2), in all
// cases as published by the Free Software Foundation.
//
package cmdexec
import (
"fmt"
"os"
"regexp"
"strconv"
"sync"
"github.com/heketi/heketi/pkg/logging"
rex "github.com/heketi/heketi/pkg/remoteexec"
)
var (
logger = logging.NewLogger("[cmdexec]", logging.LEVEL_DEBUG)
preallocRe = regexp.MustCompile("^[a-zA-Z0-9-_]+$")
)
type RemoteCommandTransport interface {
ExecCommands(host string, commands rex.Cmds, timeoutMinutes int) (rex.Results, error)
RebalanceOnExpansion() bool
SnapShotLimit() int
GlusterCliTimeout() uint32
PVDataAlignment() string
VGPhysicalExtentSize() string
LVChunkSize() string
XfsSw() int
XfsSu() int
}
type CmdExecutor struct {
config *CmdConfig
Throttlemap map[string]chan bool
Lock sync.Mutex
RemoteExecutor RemoteCommandTransport
Fstab string
BackupLVM bool
}
func (c *CmdExecutor) glusterCommand() string {
return fmt.Sprintf("gluster --mode=script --timeout=%v", c.GlusterCliTimeout())
}
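// setWithEnvVariables overrides selected CmdConfig fields from HEKETI_* environment
// variables, logging and skipping any value that fails to parse.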
func setWithEnvVariables(config *CmdConfig) {
var env string
env = os.Getenv("HEKETI_GLUSTER_CLI_TIMEOUT")
if env != "" {
value, err := strconv.ParseUint(env, 10, 32)
if err != nil {
logger.LogError("Error: While parsing HEKETI_GLUSTER_CLI_TIMEOUT: %v", err)
} else {
config.GlusterCliTimeout = uint32(value)
}
}
env = os.Getenv("HEKETI_DEBUG_UMOUNT_FAILURES")
if env != "" {
value, err := strconv.ParseBool(env)
if err != nil {
logger.LogError("Error: While parsing HEKETI_DEBUG_UMOUNT_FAILURES: %v", err)
} else {
config.DebugUmountFailures = value
}
}
env = os.Getenv("HEKETI_BLOCK_VOLUME_DEFAULT_PREALLOC")
if env != "" {
config.BlockVolumePrealloc = env
}
}
func (c *CmdExecutor) Init(config *CmdConfig) {
c.Throttlemap = make(map[string]chan bool)
c.config = config
setWithEnvVariables(config)
}
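// AccessConnection acquires the per-host throttle slot for the given host, blocking
// until any command already in flight on that host releases it via FreeConnection.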
func (s *CmdExecutor) AccessConnection(host string) {
var (
c chan bool
ok bool
)
s.Lock.Lock()
if c, ok = s.Throttlemap[host]; !ok {
c = make(chan bool, 1)
s.Throttlemap[host] = c
}
s.Lock.Unlock()
c <- true
}
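// FreeConnection releases the per-host throttle slot previously acquired by AccessConnection.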
func (s *CmdExecutor) FreeConnection(host string) {
s.Lock.Lock()
c := s.Throttlemap[host]
s.Lock.Unlock()
<-c
}
func (s *CmdExecutor) SetLogLevel(level string) {
switch level {
case "none":
logger.SetLevel(logging.LEVEL_NOLOG)
case "critical":
logger.SetLevel(logging.LEVEL_CRITICAL)
case "error":
logger.SetLevel(logging.LEVEL_ERROR)
case "warning":
logger.SetLevel(logging.LEVEL_WARNING)
case "info":
logger.SetLevel(logging.LEVEL_INFO)
case "debug":
logger.SetLevel(logging.LEVEL_DEBUG)
}
}
func (s *CmdExecutor) Logger() *logging.Logger {
return logger
}
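// GlusterCliTimeout returns the gluster cli timeout in seconds, defaulting to 600
// (10 minutes) when not configured.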
func (c *CmdExecutor) GlusterCliTimeout() uint32 {
if c.config.GlusterCliTimeout == 0 {
// Use a longer timeout (10 minutes) than gluster cli's default
// of 2 minutes, because some commands take longer in a system
// with many volumes.
return 600
}
return c.config.GlusterCliTimeout
}
// The timeout, in minutes, for the command execution.
// It used to be 10 minutes (or sometimes 5, for some simple commands),
// but now it needs to be longer than the gluster cli timeout, at
// least when calling the gluster cli.
func (c *CmdExecutor) GlusterCliExecTimeout() int {
timeout := 1 + (int(c.GlusterCliTimeout())+1)/60
if timeout < 10 {
timeout = 10
}
return timeout
}
func (c *CmdExecutor) PVDataAlignment() string {
if c.config.PVDataAlignment == "" {
return "256K"
}
return c.config.PVDataAlignment
}
func (c *CmdExecutor) VGPhysicalExtentSize() string {
if c.config.VGPhysicalExtentSize == "" {
return "4M"
}
return c.config.VGPhysicalExtentSize
}
func (c *CmdExecutor) LVChunkSize() string {
if c.config.LVChunkSize == "" {
return "256K"
}
return c.config.LVChunkSize
}
func (c *CmdExecutor) XfsSw() int {
return c.config.XfsSw
}
func (c *CmdExecutor) XfsSu() int {
return c.config.XfsSu
}
func (c *CmdExecutor) DebugUmountFailures() bool {
return c.config.DebugUmountFailures
}
func (c *CmdExecutor) BlockVolumeDefaultPrealloc() string {
defaultValue := "full"
if c.config.BlockVolumePrealloc == "" {
return defaultValue
}
if !preallocRe.MatchString(c.config.BlockVolumePrealloc) {
logger.Warning(
"invalid value for prealloc option [%v], using default",
c.config.BlockVolumePrealloc)
return defaultValue
}
return c.config.BlockVolumePrealloc
}
|
[
"\"HEKETI_GLUSTER_CLI_TIMEOUT\"",
"\"HEKETI_DEBUG_UMOUNT_FAILURES\"",
"\"HEKETI_BLOCK_VOLUME_DEFAULT_PREALLOC\""
] |
[] |
[
"HEKETI_GLUSTER_CLI_TIMEOUT",
"HEKETI_DEBUG_UMOUNT_FAILURES",
"HEKETI_BLOCK_VOLUME_DEFAULT_PREALLOC"
] |
[]
|
["HEKETI_GLUSTER_CLI_TIMEOUT", "HEKETI_DEBUG_UMOUNT_FAILURES", "HEKETI_BLOCK_VOLUME_DEFAULT_PREALLOC"]
|
go
| 3 | 0 | |
src/cmd/services/m3coordinator/downsample/downsampler_test.go
|
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package downsample
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/m3db/m3/src/aggregator/client"
clusterclient "github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/kv/mem"
dbclient "github.com/m3db/m3/src/dbnode/client"
"github.com/m3db/m3/src/metrics/aggregation"
"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
"github.com/m3db/m3/src/metrics/matcher"
"github.com/m3db/m3/src/metrics/metadata"
"github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
"github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/rules"
ruleskv "github.com/m3db/m3/src/metrics/rules/store/kv"
"github.com/m3db/m3/src/metrics/rules/view"
"github.com/m3db/m3/src/metrics/transformation"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3"
"github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/storage/mock"
"github.com/m3db/m3/src/query/ts"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xio "github.com/m3db/m3/src/x/io"
"github.com/m3db/m3/src/x/pool"
"github.com/m3db/m3/src/x/serialize"
xtest "github.com/m3db/m3/src/x/test"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
var (
testAggregationType = aggregation.Sum
testAggregationStoragePolicies = []policy.StoragePolicy{
policy.MustParseStoragePolicy("2s:1d"),
}
)
const (
nameTag = "__name__"
)
func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
require.Equal(t, 1, len(gaugeMetrics))
gaugeMetric := gaugeMetrics[0]
numSamples := len(gaugeMetric.samples)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
// NB(nate): Automapping rules generated from cluster namespaces currently
// hardcode 'Last' as the aggregation type. As such, expect the value to be the last
// value in the samples.
values: []expectedValue{{value: gaugeMetric.samples[numSamples-1]}},
},
},
},
})
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:1d"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesStore(t *testing.T) {
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{})
rulesStore := testDownsampler.rulesStore
// Create rules
nss, err := rulesStore.ReadNamespaces()
require.NoError(t, err)
_, err = nss.AddNamespace("default", testUpdateMetadata())
require.NoError(t, err)
rule := view.MappingRule{
ID: "mappingrule",
Name: "mappingrule",
Filter: "app:test*",
AggregationID: aggregation.MustCompressTypes(testAggregationType),
StoragePolicies: testAggregationStoragePolicies,
}
rs := rules.NewEmptyRuleSet("default", testUpdateMetadata())
_, err = rs.AddMappingRule(rule, testUpdateMetadata())
require.NoError(t, err)
err = rulesStore.WriteAll(nss, rs)
require.NoError(t, err)
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
// Wait for mapping rule to appear
logger.Info("waiting for mapping rules to propagate")
matcher := testDownsampler.matcher
testMatchID := newTestID(t, map[string]string{
"__name__": "foo",
"app": "test123",
})
for {
now := time.Now().UnixNano()
res := matcher.ForwardMatch(testMatchID, now, now+1)
results := res.ForExistingIDAt(now)
if !results.IsDefault() {
break
}
time.Sleep(100 * time.Millisecond)
}
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRules(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesPartialReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// Expect the max to be used and override the default auto
// mapping rule for the storage policy 2s:24h.
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
// Expect last to still be used for the storage
// policy 4s:48h.
{
tags: gaugeMetric.tags,
// NB(nate): Automapping rules generated from cluster namespaces currently
// hardcode 'Last' as the aggregation type. As such, expect the value to be the last
// value in the samples.
values: []expectedValue{{value: 0}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 4 * time.Second,
Retention: 48 * time.Hour,
},
},
},
},
})
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:24h"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
}, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("4s:48h"),
Resolution: 4 * time.Second,
Retention: 48 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesReplaceAutoMappingRuleFromNamespacesWatcher(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "foo_metric",
"app": "nginx_edge",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
// Expect the max to be used and override the default auto
// mapping rule for the storage policy 2s:24h.
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
})
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("2s:24h"),
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
Session: session,
})
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesNoNameTag(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: gaugeMetric.tags,
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilter(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
MetricType: ts.M3MetricTypeCounter,
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesTypeFilterNoMatch(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:counter",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
},
sampleAppenderOpts: &SampleAppenderOptions{
MetricType: ts.M3MetricTypeGauge,
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesAggregationType(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__option_id_scheme__": "graphite",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g2__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "Max",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesMultipleAggregationType(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{{Name: "__m3_graphite_aggregation__"}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g2__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Sum},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "Max",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
"__g2__": "Sum",
},
values: []expectedValue{{value: 60}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixAndAggregationTags(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_graphite_aggregation__"},
{Name: "__m3_graphite_prefix__", Value: "stats.counter"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g4__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "stats",
"__g1__": "counter",
"__g2__": "nginx_edge",
"__g3__": "health",
"__g4__": "Max",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesGraphitePrefixTag(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"__g0__": "nginx_edge",
"__g1__": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "__m3_graphite_prefix__", Value: "stats.counter"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "__g3__",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__m3_type__:gauge",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"__g0__": "stats",
"__g1__": "counter",
"__g2__": "nginx_edge",
"__g3__": "health",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigMappingRulesAugmentTag(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 15}, {value: 10}, {value: 30}, {value: 5}, {value: 0},
},
}
tags := []Tag{
{Name: "datacenter", Value: "abc"},
}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "app",
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "app:nginx*",
Aggregations: []aggregation.Type{aggregation.Max},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
Tags: tags,
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
"app": "nginx_edge",
"endpoint": "health",
"datacenter": "abc",
},
values: []expectedValue{{value: 30}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: 5 * time.Second,
Retention: 30 * 24 * time.Hour,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRulesNoNameTag(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 5 * time.Second},
},
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
identTag: "endpoint",
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRulesPerSecondSum(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 5 * time.Second},
},
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 4.4}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRulesIncreaseAdd(t *testing.T) {
gaugeMetrics := []testGaugeMetric{
testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_1",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42, offset: 5 * time.Second}, // +42 (should not be accounted for, since it is a reset)
// Explicit no value.
{value: 12, offset: 15 * time.Second}, // +12 - simulate a reset (should not be accounted)
{value: 33, offset: 20 * time.Second}, // +21
},
},
testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value_2",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 13, offset: 5 * time.Second}, // +13 (should not be accounted for, since it is a reset)
{value: 27, offset: 10 * time.Second}, // +14
// Explicit no value.
{value: 42, offset: 20 * time.Second}, // +15
},
},
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
RollupRules: []RollupRuleConfiguration{
{
Filter: fmt.Sprintf(
"%s:http_requests app:* status_code:* endpoint:*",
nameTag),
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.Increase,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
{
Transform: &TransformOperationConfiguration{
Type: transformation.Add,
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{
{value: 14},
{value: 50, offset: 10 * time.Second},
},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRulesConfigRollupRuleAndDropPolicy(t *testing.T) {
gaugeMetric := testGaugeMetric{
tags: map[string]string{
nameTag: "http_requests",
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
"not_rolled_up": "not_rolled_up_value",
},
timedSamples: []testGaugeMetricTimedSample{
{value: 42},
{value: 64, offset: 5 * time.Second},
},
expectDropPolicyApplied: true,
}
res := 5 * time.Second
ret := 30 * 24 * time.Hour
filter := fmt.Sprintf("%s:http_requests app:* status_code:* endpoint:*", nameTag)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: filter,
Drop: true,
},
},
RollupRules: []RollupRuleConfiguration{
{
Filter: filter,
Transforms: []TransformConfiguration{
{
Transform: &TransformOperationConfiguration{
Type: transformation.PerSecond,
},
},
{
Rollup: &RollupOperationConfiguration{
MetricName: "http_requests_by_status_code",
GroupBy: []string{"app", "status_code", "endpoint"},
Aggregations: []aggregation.Type{aggregation.Sum},
},
},
},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: res,
Retention: ret,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
gaugeMetrics: []testGaugeMetric{gaugeMetric},
},
expect: &testDownsamplerOptionsExpect{
writes: []testExpectedWrite{
{
tags: map[string]string{
nameTag: "http_requests_by_status_code",
string(rollupTagName): string(rollupTagValue),
"app": "nginx_edge",
"status_code": "500",
"endpoint": "/foo/bar",
},
values: []expectedValue{{value: 4.4}},
attributes: &storagemetadata.Attributes{
MetricsType: storagemetadata.AggregatedMetricsType,
Resolution: res,
Retention: ret,
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithTimedSamples(t *testing.T) {
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{
timedSamples: true,
})
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{
timedSamples: true,
})
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
ingest: &testDownsamplerOptionsIngest{
counterMetrics: counterMetrics,
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: append(counterMetricsExpect, gaugeMetricsExpect...),
},
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithOverrideRules(t *testing.T) {
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{})
counterMetricsExpect[0].values = []expectedValue{{value: 2}}
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{})
gaugeMetricsExpect[0].values = []expectedValue{{value: 5}}
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
sampleAppenderOpts: &SampleAppenderOptions{
Override: true,
OverrideRules: SamplesAppenderOverrideRules{
MappingRules: []AutoMappingRule{
{
Aggregations: []aggregation.Type{aggregation.Mean},
Policies: []policy.StoragePolicy{
policy.MustParseStoragePolicy("4s:1d"),
},
},
},
},
},
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
ingest: &testDownsamplerOptionsIngest{
counterMetrics: counterMetrics,
gaugeMetrics: gaugeMetrics,
},
expect: &testDownsamplerOptionsExpect{
writes: append(counterMetricsExpect, gaugeMetricsExpect...),
},
})
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
func TestDownsamplerAggregationWithRemoteAggregatorClient(t *testing.T) {
ctrl := xtest.NewController(t)
defer ctrl.Finish()
// Create mock client
remoteClientMock := client.NewMockClient(ctrl)
remoteClientMock.EXPECT().Init().Return(nil)
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
rulesConfig: &RulesConfiguration{
MappingRules: []MappingRuleConfiguration{
{
Filter: "__name__:*",
Aggregations: []aggregation.Type{testAggregationType},
StoragePolicies: []StoragePolicyConfiguration{
{
Resolution: 2 * time.Second,
Retention: 24 * time.Hour,
},
},
},
},
},
remoteClientMock: remoteClientMock,
})
// Test expected output
testDownsamplerRemoteAggregation(t, testDownsampler)
}
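// originalStagedMetadata captures the downsampler's current default staged metadatas
// protos so that later updates can be detected.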
func originalStagedMetadata(t *testing.T, testDownsampler testDownsampler) []metricpb.StagedMetadatas {
ds, ok := testDownsampler.downsampler.(*downsampler)
require.True(t, ok)
origStagedMetadata := ds.metricsAppenderOpts.defaultStagedMetadatasProtos
return origStagedMetadata
}
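// waitForStagedMetadataUpdate blocks until the downsampler's default staged metadatas
// protos differ from origStagedMetadata, failing the test if no update is seen within a second.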
func waitForStagedMetadataUpdate(t *testing.T, testDownsampler testDownsampler, origStagedMetadata []metricpb.StagedMetadatas) {
ds, ok := testDownsampler.downsampler.(*downsampler)
require.True(t, ok)
require.True(t, clock.WaitUntil(func() bool {
ds.RLock()
defer ds.RUnlock()
return !assert.ObjectsAreEqual(origStagedMetadata, ds.metricsAppenderOpts.defaultStagedMetadatasProtos)
}, time.Second))
}
type testExpectedWrite struct {
tags map[string]string
values []expectedValue // use values when multiple expected values are needed
valueAllowedError float64 // allows slightly inexact values due to timing, etc.
attributes *storagemetadata.Attributes
}
type expectedValue struct {
offset time.Duration
value float64
}
type testCounterMetric struct {
tags map[string]string
samples []int64
timedSamples []testCounterMetricTimedSample
expectDropPolicyApplied bool
}
type testCounterMetricTimedSample struct {
time time.Time
offset time.Duration
value int64
}
type testGaugeMetric struct {
tags map[string]string
samples []float64
timedSamples []testGaugeMetricTimedSample
expectDropPolicyApplied bool
}
type testGaugeMetricTimedSample struct {
time time.Time
offset time.Duration
value float64
}
type testCounterMetricsOptions struct {
timedSamples bool
}
func testCounterMetrics(opts testCounterMetricsOptions) (
[]testCounterMetric,
[]testExpectedWrite,
) {
metric := testCounterMetric{
tags: map[string]string{nameTag: "counter0", "app": "testapp", "foo": "bar"},
samples: []int64{1, 2, 3},
}
if opts.timedSamples {
metric.samples = nil
metric.timedSamples = []testCounterMetricTimedSample{
{value: 1}, {value: 2}, {value: 3},
}
}
write := testExpectedWrite{
tags: metric.tags,
values: []expectedValue{{value: 6}},
}
return []testCounterMetric{metric}, []testExpectedWrite{write}
}
type testGaugeMetricsOptions struct {
timedSamples bool
}
func testGaugeMetrics(opts testGaugeMetricsOptions) ([]testGaugeMetric, []testExpectedWrite) {
metric := testGaugeMetric{
tags: map[string]string{nameTag: "gauge0", "app": "testapp", "qux": "qaz"},
samples: []float64{4, 5, 6},
}
if opts.timedSamples {
metric.samples = nil
metric.timedSamples = []testGaugeMetricTimedSample{
{value: 4}, {value: 5}, {value: 6},
}
}
write := testExpectedWrite{
tags: metric.tags,
values: []expectedValue{{value: 15}},
}
return []testGaugeMetric{metric}, []testExpectedWrite{write}
}
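// testDownsamplerAggregation ingests the test counter and gauge metrics (or the overrides
// from testOpts), waits for the expected writes to accumulate in the mock storage, and then
// verifies their tags, values, timestamp offsets and storage attributes.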
func testDownsamplerAggregation(
t *testing.T,
testDownsampler testDownsampler,
) {
testOpts := testDownsampler.testOpts
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
counterMetrics, counterMetricsExpect := testCounterMetrics(testCounterMetricsOptions{})
gaugeMetrics, gaugeMetricsExpect := testGaugeMetrics(testGaugeMetricsOptions{})
expectedWrites := append(counterMetricsExpect, gaugeMetricsExpect...)
// Allow overrides
if ingest := testOpts.ingest; ingest != nil {
counterMetrics = ingest.counterMetrics
gaugeMetrics = ingest.gaugeMetrics
}
if expect := testOpts.expect; expect != nil {
expectedWrites = expect.writes
}
// Ingest points
testDownsamplerAggregationIngest(t, testDownsampler,
counterMetrics, gaugeMetrics)
// Wait for writes
logger.Info("wait for test metrics to appear")
logWritesAccumulated := os.Getenv("TEST_LOG_WRITES_ACCUMULATED") == "true"
logWritesAccumulatedTicker := time.NewTicker(time.Second)
logWritesMatch := os.Getenv("TEST_LOG_WRITES_MATCH") == "true"
logWritesMatchTicker := time.NewTicker(time.Second)
identTag := nameTag
if len(testDownsampler.testOpts.identTag) > 0 {
identTag = testDownsampler.testOpts.identTag
}
CheckAllWritesArrivedLoop:
for {
allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
select {
case <-logWritesAccumulatedTicker.C:
logger.Info("logging accmulated writes",
zap.Int("numAllWrites", len(allWrites)))
for _, write := range allWrites {
logger.Info("accumulated write",
zap.ByteString("tags", write.Tags().ID()),
zap.Any("datapoints", write.Datapoints),
zap.Any("attributes", write.Attributes))
}
default:
}
}
for _, expectedWrite := range expectedWrites {
name := expectedWrite.tags[identTag]
attrs := expectedWrite.attributes
writesForNameAndAttrs, _ := findWrites(allWrites, name, identTag, attrs)
if len(writesForNameAndAttrs) != len(expectedWrite.values) {
if logWritesMatch {
select {
case <-logWritesMatchTicker.C:
logger.Info("continuing wait for accumulated writes",
zap.String("name", name),
zap.Any("attributes", attrs),
zap.Int("numWritesForNameAndAttrs", len(writesForNameAndAttrs)),
zap.Int("numExpectedWriteValues", len(expectedWrite.values)),
)
default:
}
}
time.Sleep(100 * time.Millisecond)
continue CheckAllWritesArrivedLoop
}
}
break
}
// Verify writes
logger.Info("verify test metrics")
allWrites := testDownsampler.storage.Writes()
if logWritesAccumulated {
logger.Info("logging accmulated writes to verify",
zap.Int("numAllWrites", len(allWrites)))
for _, write := range allWrites {
logger.Info("accumulated write",
zap.ByteString("tags", write.Tags().ID()),
zap.Any("datapoints", write.Datapoints()))
}
}
for _, expectedWrite := range expectedWrites {
name := expectedWrite.tags[identTag]
expectedValues := expectedWrite.values
allowedError := expectedWrite.valueAllowedError
writesForNameAndAttrs, found := findWrites(allWrites, name, identTag, expectedWrite.attributes)
require.True(t, found)
require.Equal(t, len(expectedValues), len(writesForNameAndAttrs))
for i, expectedValue := range expectedValues {
write := writesForNameAndAttrs[i]
assert.Equal(t, expectedWrite.tags, tagsToStringMap(write.Tags()))
require.Equal(t, 1, len(write.Datapoints()))
actualValue := write.Datapoints()[0].Value
if allowedError == 0 {
// Exact match value.
assert.Equal(t, expectedValue.value, actualValue)
} else {
// Fuzzy match value.
lower := expectedValue.value - allowedError
upper := expectedValue.value + allowedError
withinBounds := (lower <= actualValue) && (actualValue <= upper)
msg := fmt.Sprintf("expected within: lower=%f, upper=%f, actual=%f",
lower, upper, actualValue)
assert.True(t, withinBounds, msg)
}
if expectedOffset := expectedValue.offset; expectedOffset > 0 {
// Check if the distance between datapoints is as expected (use the
// absolute offset from the first write).
firstTimestamp := writesForNameAndAttrs[0].Datapoints()[0].Timestamp
actualOffset := write.Datapoints()[0].Timestamp.Sub(firstTimestamp)
assert.Equal(t, expectedOffset, actualOffset)
}
if attrs := expectedWrite.attributes; attrs != nil {
assert.Equal(t, *attrs, write.Attributes())
}
}
}
}
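// testDownsamplerRemoteAggregation ingests the test metrics and verifies that every counter
// and gauge sample is forwarded to the mocked remote aggregator client.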
func testDownsamplerRemoteAggregation(
t *testing.T,
testDownsampler testDownsampler,
) {
testOpts := testDownsampler.testOpts
expectTestCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{})
testCounterMetrics, _ := testCounterMetrics(testCounterMetricsOptions{})
expectTestGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
testGaugeMetrics, _ := testGaugeMetrics(testGaugeMetricsOptions{})
remoteClientMock := testOpts.remoteClientMock
require.NotNil(t, remoteClientMock)
// Expect ingestion
checkedCounterSamples := 0
remoteClientMock.EXPECT().
WriteUntimedCounter(gomock.Any(), gomock.Any()).
AnyTimes().
Do(func(counter unaggregated.Counter,
metadatas metadata.StagedMetadatas,
) error {
for _, c := range expectTestCounterMetrics {
if !strings.Contains(counter.ID.String(), c.tags[nameTag]) {
continue
}
var remainingSamples []int64
found := false
for _, s := range c.samples {
if !found && s == counter.Value {
found = true
} else {
remainingSamples = append(remainingSamples, s)
}
}
c.samples = remainingSamples
if found {
checkedCounterSamples++
}
break
}
return nil
})
checkedGaugeSamples := 0
remoteClientMock.EXPECT().
WriteUntimedGauge(gomock.Any(), gomock.Any()).
AnyTimes().
Do(func(gauge unaggregated.Gauge,
metadatas metadata.StagedMetadatas,
) error {
for _, g := range expectTestGaugeMetrics {
if !strings.Contains(gauge.ID.String(), g.tags[nameTag]) {
continue
}
var remainingSamples []float64
found := false
for _, s := range g.samples {
if !found && s == gauge.Value {
found = true
} else {
remainingSamples = append(remainingSamples, s)
}
}
g.samples = remainingSamples
if found {
checkedGaugeSamples++
}
break
}
return nil
})
// Ingest points
testDownsamplerAggregationIngest(t, testDownsampler,
testCounterMetrics, testGaugeMetrics)
// Ensure we checked counters and gauges
samplesCounters := 0
for _, c := range testCounterMetrics {
samplesCounters += len(c.samples)
}
samplesGauges := 0
for _, c := range testGaugeMetrics {
samplesGauges += len(c.samples)
}
require.Equal(t, samplesCounters, checkedCounterSamples)
require.Equal(t, samplesGauges, checkedGaugeSamples)
}
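// testDownsamplerAggregationIngest appends the given counter and gauge metrics through a
// fresh metrics appender obtained from the downsampler, using timed samples where provided.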
func testDownsamplerAggregationIngest(
t *testing.T,
testDownsampler testDownsampler,
testCounterMetrics []testCounterMetric,
testGaugeMetrics []testGaugeMetric,
) {
downsampler := testDownsampler.downsampler
testOpts := testDownsampler.testOpts
logger := testDownsampler.instrumentOpts.Logger().
With(zap.String("test", t.Name()))
logger.Info("write test metrics")
appender, err := downsampler.NewMetricsAppender()
require.NoError(t, err)
defer appender.Finalize()
var opts SampleAppenderOptions
if testOpts.sampleAppenderOpts != nil {
opts = *testOpts.sampleAppenderOpts
}
for _, metric := range testCounterMetrics {
appender.NextMetric()
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
require.Equal(t, metric.expectDropPolicyApplied,
samplesAppenderResult.IsDropPolicyApplied)
samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendCounterSample(sample)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
if sample.time.IsZero() {
sample.time = time.Now() // Allow empty time to mean "now"
}
if sample.offset > 0 {
sample.time = sample.time.Add(sample.offset)
}
err = samplesAppender.AppendCounterTimedSample(sample.time, sample.value)
require.NoError(t, err)
}
}
for _, metric := range testGaugeMetrics {
appender.NextMetric()
for name, value := range metric.tags {
appender.AddTag([]byte(name), []byte(value))
}
samplesAppenderResult, err := appender.SamplesAppender(opts)
require.NoError(t, err)
require.Equal(t, metric.expectDropPolicyApplied,
samplesAppenderResult.IsDropPolicyApplied)
samplesAppender := samplesAppenderResult.SamplesAppender
for _, sample := range metric.samples {
err = samplesAppender.AppendGaugeSample(sample)
require.NoError(t, err)
}
for _, sample := range metric.timedSamples {
if sample.time.IsZero() {
sample.time = time.Now() // Allow empty time to mean "now"
}
if sample.offset > 0 {
sample.time = sample.time.Add(sample.offset)
}
err = samplesAppender.AppendGaugeTimedSample(sample.time, sample.value)
require.NoError(t, err)
}
}
}
func setAggregatedNamespaces(
t *testing.T,
testDownsampler testDownsampler,
session dbclient.Session,
namespaces ...m3.AggregatedClusterNamespaceDefinition,
) {
clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{
NamespaceID: ident.StringID("default"),
Retention: 48 * time.Hour,
Session: session,
}, namespaces...)
require.NoError(t, err)
require.NoError(t, testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces()))
}
func tagsToStringMap(tags models.Tags) map[string]string {
stringMap := make(map[string]string, tags.Len())
for _, t := range tags.Tags {
stringMap[string(t.Name)] = string(t.Value)
}
return stringMap
}
type testDownsampler struct {
opts DownsamplerOptions
testOpts testDownsamplerOptions
downsampler Downsampler
matcher matcher.Matcher
storage mock.Storage
rulesStore rules.Store
instrumentOpts instrument.Options
}
type testDownsamplerOptions struct {
clockOpts clock.Options
instrumentOpts instrument.Options
identTag string
// Options for the test
autoMappingRules []AutoMappingRule
sampleAppenderOpts *SampleAppenderOptions
remoteClientMock *client.MockClient
rulesConfig *RulesConfiguration
// Test ingest and expectations overrides
ingest *testDownsamplerOptionsIngest
expect *testDownsamplerOptionsExpect
}
type testDownsamplerOptionsIngest struct {
counterMetrics []testCounterMetric
gaugeMetrics []testGaugeMetric
}
type testDownsamplerOptionsExpect struct {
writes []testExpectedWrite
}
func newTestDownsampler(t *testing.T, opts testDownsamplerOptions) testDownsampler {
storage := mock.NewMockStorage()
rulesKVStore := mem.NewStore()
clockOpts := clock.NewOptions()
if opts.clockOpts != nil {
clockOpts = opts.clockOpts
}
// Use test instrument options by default so that debug logs are enabled.
instrumentOpts := instrument.NewTestOptions(t)
if opts.instrumentOpts != nil {
instrumentOpts = opts.instrumentOpts
}
matcherOpts := matcher.NewOptions()
// Initialize the namespaces
_, err := rulesKVStore.Set(matcherOpts.NamespacesKey(), &rulepb.Namespaces{})
require.NoError(t, err)
rulesetKeyFmt := matcherOpts.RuleSetKeyFn()([]byte("%s"))
rulesStoreOpts := ruleskv.NewStoreOptions(matcherOpts.NamespacesKey(),
rulesetKeyFmt, nil)
rulesStore := ruleskv.NewStore(rulesKVStore, rulesStoreOpts)
tagEncoderOptions := serialize.NewTagEncoderOptions()
tagDecoderOptions := serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{})
tagEncoderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-encoder-pool")))
tagDecoderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("tag-decoder-pool")))
metricsAppenderPoolOptions := pool.NewObjectPoolOptions().
SetSize(2).
SetInstrumentOptions(instrumentOpts.
SetMetricsScope(instrumentOpts.MetricsScope().
SubScope("metrics-appender-pool")))
var cfg Configuration
if opts.remoteClientMock != nil {
// Optionally set an override to use remote aggregation
// with a mock client
cfg.RemoteAggregator = &RemoteAggregatorConfiguration{
clientOverride: opts.remoteClientMock,
}
}
if opts.rulesConfig != nil {
cfg.Rules = opts.rulesConfig
}
instance, err := cfg.NewDownsampler(DownsamplerOptions{
Storage: storage,
ClusterClient: clusterclient.NewMockClient(gomock.NewController(t)),
RulesKVStore: rulesKVStore,
ClusterNamespacesWatcher: m3.NewClusterNamespacesWatcher(),
ClockOptions: clockOpts,
InstrumentOptions: instrumentOpts,
TagEncoderOptions: tagEncoderOptions,
TagDecoderOptions: tagDecoderOptions,
TagEncoderPoolOptions: tagEncoderPoolOptions,
TagDecoderPoolOptions: tagDecoderPoolOptions,
MetricsAppenderPoolOptions: metricsAppenderPoolOptions,
RWOptions: xio.NewOptions(),
TagOptions: models.NewTagOptions(),
})
require.NoError(t, err)
downcast, ok := instance.(*downsampler)
require.True(t, ok)
return testDownsampler{
opts: downcast.opts,
testOpts: opts,
downsampler: instance,
matcher: downcast.agg.matcher,
storage: storage,
rulesStore: rulesStore,
instrumentOpts: instrumentOpts,
}
}
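// newTestID encodes the given tags and returns them wrapped as a metric tags iterator (id.ID) for use in tests.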
func newTestID(t *testing.T, tags map[string]string) id.ID {
tagEncoderPool := serialize.NewTagEncoderPool(serialize.NewTagEncoderOptions(),
pool.NewObjectPoolOptions().SetSize(1))
tagEncoderPool.Init()
tagsIter := newTags()
for name, value := range tags {
tagsIter.append([]byte(name), []byte(value))
}
tagEncoder := tagEncoderPool.Get()
err := tagEncoder.Encode(tagsIter)
require.NoError(t, err)
data, ok := tagEncoder.Data()
require.True(t, ok)
size := 1
tagDecoderPool := serialize.NewTagDecoderPool(
serialize.NewTagDecoderOptions(serialize.TagDecoderOptionsConfig{
CheckBytesWrapperPoolSize: &size,
}),
pool.NewObjectPoolOptions().SetSize(size))
tagDecoderPool.Init()
tagDecoder := tagDecoderPool.Get()
iter := serialize.NewMetricTagsIterator(tagDecoder, nil)
iter.Reset(data.Bytes())
return iter
}
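// findWrites returns the writes whose ident tag value matches name and, if provided,
// whose storage attributes match optionalMatchAttrs, along with whether any matched.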
func findWrites(
writes []*storage.WriteQuery,
name, identTag string,
optionalMatchAttrs *storagemetadata.Attributes,
) ([]*storage.WriteQuery, bool) {
var results []*storage.WriteQuery
for _, w := range writes {
if t, ok := w.Tags().Get([]byte(identTag)); ok {
if !bytes.Equal(t, []byte(name)) {
// Does not match name.
continue
}
if optionalMatchAttrs != nil && w.Attributes() != *optionalMatchAttrs {
// Tried to match attributes and not matched.
continue
}
// Matches name and all optional lookups.
results = append(results, w)
}
}
return results, len(results) > 0
}
func testUpdateMetadata() rules.UpdateMetadata {
return rules.NewRuleSetUpdateHelper(0).NewUpdateMetadata(time.Now().UnixNano(), "test")
}
|
[
"\"TEST_LOG_WRITES_ACCUMULATED\"",
"\"TEST_LOG_WRITES_MATCH\""
] |
[] |
[
"TEST_LOG_WRITES_ACCUMULATED",
"TEST_LOG_WRITES_MATCH"
] |
[]
|
["TEST_LOG_WRITES_ACCUMULATED", "TEST_LOG_WRITES_MATCH"]
|
go
| 2 | 0 | |
go-apps/meep-rnis/server/rnis.go
|
/*
* Copyright (c) 2020 InterDigital Communications, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
sbi "github.com/InterDigitalInc/AdvantEDGE/go-apps/meep-rnis/sbi"
dkm "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-data-key-mgr"
httpLog "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-http-logger"
log "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-logger"
redis "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-redis"
sm "github.com/InterDigitalInc/AdvantEDGE/go-packages/meep-sessions"
"github.com/gorilla/mux"
)
const moduleName = "meep-rnis"
const rnisBasePath = "/rni/v2/"
const rnisKey string = "rnis:"
const logModuleRNIS string = "meep-rnis"
//const module string = "rnis"
var redisAddr string = "meep-redis-master.default.svc.cluster.local:6379"
var influxAddr string = "http://meep-influxdb.default.svc.cluster.local:8086"
const cellChangeSubscriptionType = "cell_change"
const rabEstSubscriptionType = "rab_est"
const rabRelSubscriptionType = "rab_rel"
var ccSubscriptionMap = map[int]*CellChangeSubscription{}
var reSubscriptionMap = map[int]*RabEstSubscription{}
var rrSubscriptionMap = map[int]*RabRelSubscription{}
var subscriptionExpiryMap = map[int][]int{}
var currentStoreName = ""
const CELL_CHANGE_SUBSCRIPTION = "CellChangeSubscription"
const RAB_EST_SUBSCRIPTION = "RabEstSubscription"
const RAB_REL_SUBSCRIPTION = "RabRelSubscription"
const CELL_CHANGE_NOTIFICATION = "CellChangeNotification"
const RAB_EST_NOTIFICATION = "RabEstNotification"
const RAB_REL_NOTIFICATION = "RabRelNotification"
var RNIS_DB = 5
var rc *redis.Connector
var sessionMgr *sm.SessionMgr
var hostUrl *url.URL
var sandboxName string
var basePath string
var baseKey string
var mutex sync.Mutex
var expiryTicker *time.Ticker
var nextSubscriptionIdAvailable int
var nextAvailableErabId int
const defaultSupportedQci = 80
type RabInfoData struct {
queryErabId int32
queryQci int32
queryCellIds []string
queryIpv4Addresses []string
rabInfo *RabInfo
}
type UeData struct {
Name string `json:"name"`
ErabId int32 `json:"erabId"`
Ecgi *Ecgi `json:"ecgi"`
Qci int32 `json:"qci"`
}
type DomainData struct {
Mcc string `json:"mcc"`
Mnc string `json:"mnc"`
CellId string `json:"cellId"`
}
type PlmnInfoResp struct {
PlmnInfoList []PlmnInfo
}
func notImplemented(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusNotImplemented)
}
// Init - RNI Service initialization
func Init() (err error) {
// Retrieve Sandbox name from environment variable
sandboxNameEnv := strings.TrimSpace(os.Getenv("MEEP_SANDBOX_NAME"))
if sandboxNameEnv != "" {
sandboxName = sandboxNameEnv
}
if sandboxName == "" {
err = errors.New("MEEP_SANDBOX_NAME env variable not set")
log.Error(err.Error())
return err
}
log.Info("MEEP_SANDBOX_NAME: ", sandboxName)
// hostUrl is the url of the node serving the resourceURL
// Retrieve public url address where service is reachable, if not present, use Host URL environment variable
hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_PUBLIC_URL")))
if err != nil || hostUrl == nil || hostUrl.String() == "" {
hostUrl, err = url.Parse(strings.TrimSpace(os.Getenv("MEEP_HOST_URL")))
if err != nil {
hostUrl = new(url.URL)
}
}
log.Info("resource URL: ", hostUrl)
// Set base path
basePath = "/" + sandboxName + rnisBasePath
// Get base store key
baseKey = dkm.GetKeyRoot(sandboxName) + rnisKey
// Connect to Redis DB
rc, err = redis.NewConnector(redisAddr, RNIS_DB)
if err != nil {
log.Error("Failed connection to Redis DB. Error: ", err)
return err
}
_ = rc.DBFlush(baseKey)
log.Info("Connected to Redis DB, RNI service table")
// Connect to Session Manager
sessionMgr, err = sm.NewSessionMgr(moduleName, sandboxName, redisAddr, redisAddr)
if err != nil {
log.Error("Failed connection to Session Manager: ", err.Error())
return err
}
log.Info("Connected to Session Manager")
reInit()
expiryTicker = time.NewTicker(time.Second)
go func() {
for range expiryTicker.C {
checkForExpiredSubscriptions()
}
}()
// Initialize SBI
sbiCfg := sbi.SbiCfg{
SandboxName: sandboxName,
RedisAddr: redisAddr,
UeDataCb: updateUeData,
AppEcgiInfoCb: updateAppEcgiInfo,
DomainDataCb: updateDomainData,
ScenarioNameCb: updateStoreName,
CleanUpCb: cleanUp,
}
err = sbi.Init(sbiCfg)
if err != nil {
log.Error("Failed initialize SBI. Error: ", err)
return err
}
log.Info("SBI Initialized")
return nil
}
// reInit - finds the value already in the DB to repopulate local stored info
func reInit() {
//next available subsId will be overridden if subscriptions already existed
nextSubscriptionIdAvailable = 1
nextAvailableErabId = 1
keyName := baseKey + "subscriptions:" + "*"
_ = rc.ForEachJSONEntry(keyName, repopulateCcSubscriptionMap, nil)
_ = rc.ForEachJSONEntry(keyName, repopulateReSubscriptionMap, nil)
_ = rc.ForEachJSONEntry(keyName, repopulateRrSubscriptionMap, nil)
}
// Run - Start RNIS
func Run() (err error) {
return sbi.Run()
}
// Stop - Stop RNIS
func Stop() (err error) {
return sbi.Stop()
}
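// updateUeData - updates the UE entry in the DB and triggers cell change / RAB establishment / RAB release notifications when the ECGI or eRAB state changes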
func updateUeData(name string, mnc string, mcc string, cellId string, erabIdValid bool) {
var plmn Plmn
var newEcgi Ecgi
plmn.Mnc = mnc
plmn.Mcc = mcc
newEcgi.CellId = cellId
newEcgi.Plmn = &plmn
var ueData UeData
ueData.Ecgi = &newEcgi
ueData.Name = name
ueData.Qci = defaultSupportedQci //only supporting one value
oldPlmn := new(Plmn)
oldPlmnMnc := ""
oldPlmnMcc := ""
oldCellId := ""
var oldErabId int32 = -1
//get from DB
jsonUeData, _ := rc.JSONGetEntry(baseKey+"UE:"+name, ".")
if jsonUeData != "" {
ueDataObj := convertJsonToUeData(jsonUeData)
if ueDataObj != nil {
if ueDataObj.Ecgi != nil {
oldPlmn = ueDataObj.Ecgi.Plmn
oldPlmnMnc = ueDataObj.Ecgi.Plmn.Mnc
oldPlmnMcc = ueDataObj.Ecgi.Plmn.Mcc
oldCellId = ueDataObj.Ecgi.CellId
oldErabId = ueDataObj.ErabId
}
}
}
//updateDB if changes occur
if newEcgi.Plmn.Mnc != oldPlmnMnc || newEcgi.Plmn.Mcc != oldPlmnMcc || newEcgi.CellId != oldCellId {
//allocating a new erabId if entering a 4G environment (using existence of an erabId)
if oldErabId == -1 { //if no erabId established (== -1), means not coming from a 4G environment
if erabIdValid { //if a new erabId should be allocated (meaning entering into a 4G environment)
//rab establishment case
ueData.ErabId = int32(nextAvailableErabId)
nextAvailableErabId++
} else { //was not connected to a 4G POA and still not connected to a 4G POA, so, no change
ueData.ErabId = oldErabId // = -1
}
} else {
if erabIdValid { //was connected to a 4G POA and still is, so, no change
ueData.ErabId = oldErabId // = sameAsBefore
} else { //was connected to a 4G POA, but now not connected to one, so need to release the 4G connection
//rab release case
ueData.ErabId = -1
}
}
_ = rc.JSONSetEntry(baseKey+"UE:"+name, ".", convertUeDataToJson(&ueData))
assocId := new(AssociateId)
assocId.Type_ = 1 //UE_IPV4_ADDRESS
assocId.Value = name
//log to model for all apps on that UE
checkCcNotificationRegisteredSubscriptions("", assocId, &plmn, oldPlmn, "", cellId, oldCellId)
//ueData contains newErabId
if oldErabId == -1 && ueData.ErabId != -1 {
checkReNotificationRegisteredSubscriptions("", assocId, &plmn, oldPlmn, -1, cellId, oldCellId, ueData.ErabId)
}
if oldErabId != -1 && ueData.ErabId == -1 { //sending oldErabId to release and no new 4G cellId
checkRrNotificationRegisteredSubscriptions("", assocId, &plmn, oldPlmn, -1, "", oldCellId, oldErabId)
}
}
}
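// updateAppEcgiInfo - updates the application ECGI entry in the DB when the PLMN or cell ID changes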
func updateAppEcgiInfo(name string, mnc string, mcc string, cellId string) {
var plmn Plmn
var newEcgi Ecgi
plmn.Mnc = mnc
plmn.Mcc = mcc
newEcgi.CellId = cellId
newEcgi.Plmn = &plmn
//get from DB
jsonAppEcgiInfo, _ := rc.JSONGetEntry(baseKey+"APP:"+name, ".")
oldPlmnMnc := ""
oldPlmnMcc := ""
oldCellId := ""
if jsonAppEcgiInfo != "" {
ecgiInfo := convertJsonToEcgi(jsonAppEcgiInfo)
oldPlmnMnc = ecgiInfo.Plmn.Mnc
oldPlmnMcc = ecgiInfo.Plmn.Mcc
oldCellId = ecgiInfo.CellId
}
//updateDB if changes occur
if newEcgi.Plmn.Mnc != oldPlmnMnc || newEcgi.Plmn.Mcc != oldPlmnMcc || newEcgi.CellId != oldCellId {
//updateDB
_ = rc.JSONSetEntry(baseKey+"APP:"+name, ".", convertEcgiToJson(&newEcgi))
}
}
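// updateDomainData - updates the domain entry in the DB when the MCC, MNC or cell ID changes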
func updateDomainData(name string, mnc string, mcc string, cellId string) {
oldMnc := ""
oldMcc := ""
oldCellId := ""
//get from DB
jsonDomainData, _ := rc.JSONGetEntry(baseKey+"DOM:"+name, ".")
if jsonDomainData != "" {
domainDataObj := convertJsonToDomainData(jsonDomainData)
if domainDataObj != nil {
oldMnc = domainDataObj.Mnc
oldMcc = domainDataObj.Mcc
oldCellId = domainDataObj.CellId
}
}
//updateDB if changes occur
if mnc != oldMnc || mcc != oldMcc || cellId != oldCellId {
//updateDB
var domainData DomainData
domainData.Mnc = mnc
domainData.Mcc = mcc
domainData.CellId = cellId
_ = rc.JSONSetEntry(baseKey+"DOM:"+name, ".", convertDomainDataToJson(&domainData))
}
}
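// checkForExpiredSubscriptions - sends expiry notifications and deletes subscriptions whose expiry deadline has passed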
func checkForExpiredSubscriptions() {
nowTime := int(time.Now().Unix())
mutex.Lock()
defer mutex.Unlock()
for expiryTime, subsIndexList := range subscriptionExpiryMap {
if expiryTime <= nowTime {
subscriptionExpiryMap[expiryTime] = nil
for _, subsId := range subsIndexList {
cbRef := ""
if ccSubscriptionMap[subsId] != nil {
cbRef = ccSubscriptionMap[subsId].CallbackReference
} else if reSubscriptionMap[subsId] != nil {
cbRef = reSubscriptionMap[subsId].CallbackReference
} else if rrSubscriptionMap[subsId] != nil {
cbRef = rrSubscriptionMap[subsId].CallbackReference
} else {
continue
}
subsIdStr := strconv.Itoa(subsId)
var notif ExpiryNotification
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
var expiryTimeStamp TimeStamp
expiryTimeStamp.Seconds = int32(expiryTime)
link := new(ExpiryNotificationLinks)
link.Self = cbRef
notif.Links = link
notif.TimeStamp = &timeStamp
notif.ExpiryDeadline = &expiryTimeStamp
sendExpiryNotification(link.Self, notif)
_ = delSubscription(baseKey, subsIdStr, true)
}
}
}
}
func repopulateCcSubscriptionMap(key string, jsonInfo string, userData interface{}) error {
var subscription CellChangeSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &subscription)
if err != nil {
return err
}
selfUrl := strings.Split(subscription.Links.Self.Href, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
ccSubscriptionMap[subsId] = &subscription
if subscription.ExpiryDeadline != nil {
intList := subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)] = intList
}
//reinitialisation of next available Id for future subscription request
if subsId >= nextSubscriptionIdAvailable {
nextSubscriptionIdAvailable = subsId + 1
}
return nil
}
func repopulateReSubscriptionMap(key string, jsonInfo string, userData interface{}) error {
var subscription RabEstSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &subscription)
if err != nil {
return err
}
selfUrl := strings.Split(subscription.Links.Self.Href, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
reSubscriptionMap[subsId] = &subscription
if subscription.ExpiryDeadline != nil {
intList := subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)] = intList
}
//reinitialisation of next available Id for future subscription request
if subsId >= nextSubscriptionIdAvailable {
nextSubscriptionIdAvailable = subsId + 1
}
return nil
}
func repopulateRrSubscriptionMap(key string, jsonInfo string, userData interface{}) error {
var subscription RabRelSubscription
// Format response
err := json.Unmarshal([]byte(jsonInfo), &subscription)
if err != nil {
return err
}
selfUrl := strings.Split(subscription.Links.Self.Href, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
rrSubscriptionMap[subsId] = &subscription
if subscription.ExpiryDeadline != nil {
intList := subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(subscription.ExpiryDeadline.Seconds)] = intList
}
//reinitialisation of next available Id for future subscription request
if subsId >= nextSubscriptionIdAvailable {
nextSubscriptionIdAvailable = subsId + 1
}
return nil
}
func isMatchCcFilterCriteriaAppInsId(filterCriteria interface{}, appId string) bool {
filter := filterCriteria.(*CellChangeSubscriptionFilterCriteriaAssocHo)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.AppInstanceId == "" {
return true
}
return (appId == filter.AppInstanceId)
}
func isMatchRabFilterCriteriaAppInsId(filterCriteria interface{}, appId string) bool {
filter := filterCriteria.(*RabEstSubscriptionFilterCriteriaQci)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.AppInstanceId == "" {
return true
}
return (appId == filter.AppInstanceId)
}
func isMatchRabRelFilterCriteriaAppInsId(filterCriteria interface{}, appId string) bool {
filter := filterCriteria.(*RabModSubscriptionFilterCriteriaQci)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.AppInstanceId == "" {
return true
}
return (appId == filter.AppInstanceId)
}
func isMatchRabRelFilterCriteriaErabId(filterCriteria interface{}, erabId int32) bool {
filter := filterCriteria.(*RabModSubscriptionFilterCriteriaQci)
return (erabId == filter.ErabId)
}
func isMatchCcFilterCriteriaAssociateId(filterCriteria interface{}, assocId *AssociateId) bool {
filter := filterCriteria.(*CellChangeSubscriptionFilterCriteriaAssocHo)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.AssociateId == nil {
return true
}
//if filter accepts something specific but no assocId, then we fail right away
if assocId == nil {
return false
}
for _, filterAssocId := range filter.AssociateId {
if assocId.Type_ == filterAssocId.Type_ && assocId.Value == filterAssocId.Value {
return true
}
}
return false
}
/* in v2, AssociateId is not part of the filterCriteria
func isMatchRabFilterCriteriaAssociateId(filterCriteria interface{}, assocId *AssociateId) bool {
filter := filterCriteria.(*RabEstSubscriptionFilterCriteriaQci)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.AssociateId == nil {
return true
}
//if filter accepts something specific but no assocId, then we fail right away
if assocId == nil {
return false
}
return (assocId.Value == filter.AssociateId.Value)
}
*/
func isMatchCcFilterCriteriaEcgi(filterCriteria interface{}, newPlmn *Plmn, oldPlmn *Plmn, newCellId string, oldCellId string) bool {
filter := filterCriteria.(*CellChangeSubscriptionFilterCriteriaAssocHo)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.Ecgi == nil {
return true
}
var matchingPlmn bool
for _, ecgi := range filter.Ecgi {
matchingPlmn = false
if ecgi.Plmn == nil {
matchingPlmn = true
} else {
if newPlmn != nil {
if newPlmn.Mnc == ecgi.Plmn.Mnc && newPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
if oldPlmn != nil {
if oldPlmn.Mnc == ecgi.Plmn.Mnc && oldPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
}
if matchingPlmn {
if ecgi.CellId == "" {
return true
}
if newCellId == ecgi.CellId {
return true
}
if oldCellId == ecgi.CellId {
return true
}
}
}
return false
}
func isMatchRabFilterCriteriaEcgi(filterCriteria interface{}, newPlmn *Plmn, oldPlmn *Plmn, newCellId string, oldCellId string) bool {
filter := filterCriteria.(*RabEstSubscriptionFilterCriteriaQci)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.Ecgi == nil {
return true
}
var matchingPlmn bool
for _, ecgi := range filter.Ecgi {
matchingPlmn = false
if ecgi.Plmn == nil {
matchingPlmn = true
} else {
if newPlmn != nil {
if newPlmn.Mnc == ecgi.Plmn.Mnc && newPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
if oldPlmn != nil {
if oldPlmn.Mnc == ecgi.Plmn.Mnc && oldPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
}
if matchingPlmn {
if ecgi.CellId == "" {
return true
}
if newCellId == ecgi.CellId {
return true
}
if oldCellId == ecgi.CellId {
return true
}
}
}
return false
}
func isMatchRabRelFilterCriteriaEcgi(filterCriteria interface{}, newPlmn *Plmn, oldPlmn *Plmn, newCellId string, oldCellId string) bool {
filter := filterCriteria.(*RabModSubscriptionFilterCriteriaQci)
//if filter criteria is not set, it acts as a wildcard and accepts all
if filter.Ecgi == nil {
return true
}
var matchingPlmn bool
for _, ecgi := range filter.Ecgi {
matchingPlmn = false
if ecgi.Plmn == nil {
matchingPlmn = true
} else {
if newPlmn != nil {
if newPlmn.Mnc == ecgi.Plmn.Mnc && newPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
if oldPlmn != nil {
if oldPlmn.Mnc == ecgi.Plmn.Mnc && oldPlmn.Mcc == ecgi.Plmn.Mcc {
matchingPlmn = true
}
}
}
if matchingPlmn {
if ecgi.CellId == "" {
return true
}
if newCellId == ecgi.CellId {
return true
}
if oldCellId == ecgi.CellId {
return true
}
}
}
return false
}
func isMatchFilterCriteriaAppInsId(subscriptionType string, filterCriteria interface{}, appId string) bool {
switch subscriptionType {
case cellChangeSubscriptionType:
return isMatchCcFilterCriteriaAppInsId(filterCriteria, appId)
case rabEstSubscriptionType:
return isMatchRabFilterCriteriaAppInsId(filterCriteria, appId)
case rabRelSubscriptionType:
return isMatchRabRelFilterCriteriaAppInsId(filterCriteria, appId)
}
return true
}
func isMatchFilterCriteriaAssociateId(subscriptionType string, filterCriteria interface{}, assocId *AssociateId) bool {
switch subscriptionType {
case cellChangeSubscriptionType:
return isMatchCcFilterCriteriaAssociateId(filterCriteria, assocId)
case rabEstSubscriptionType, rabRelSubscriptionType:
return true //not part of filter anymore in v2
}
return true
}
func isMatchFilterCriteriaEcgi(subscriptionType string, filterCriteria interface{}, newPlmn *Plmn, oldPlmn *Plmn, newCellId string, oldCellId string) bool {
switch subscriptionType {
case cellChangeSubscriptionType:
return isMatchCcFilterCriteriaEcgi(filterCriteria, newPlmn, oldPlmn, newCellId, oldCellId)
case rabEstSubscriptionType:
return isMatchRabFilterCriteriaEcgi(filterCriteria, newPlmn, oldPlmn, newCellId, oldCellId)
case rabRelSubscriptionType:
return isMatchRabRelFilterCriteriaEcgi(filterCriteria, newPlmn, oldPlmn, newCellId, oldCellId)
}
return true
}
func checkCcNotificationRegisteredSubscriptions(appId string, assocId *AssociateId, newPlmn *Plmn, oldPlmn *Plmn, hoStatus string, newCellId string, oldCellId string) {
//no cell change if no cellIds present (cell change within 3gpp elements only)
if newCellId == "" || oldCellId == "" {
return
}
mutex.Lock()
defer mutex.Unlock()
//check all that applies
for subsId, sub := range ccSubscriptionMap {
if sub != nil {
//verifying every criteria of the filter
match := isMatchFilterCriteriaAppInsId(cellChangeSubscriptionType, sub.FilterCriteriaAssocHo, appId)
if match {
match = isMatchFilterCriteriaAssociateId(cellChangeSubscriptionType, sub.FilterCriteriaAssocHo, assocId)
}
if match {
match = isMatchFilterCriteriaEcgi(cellChangeSubscriptionType, sub.FilterCriteriaAssocHo, newPlmn, oldPlmn, newCellId, oldCellId)
}
//we ignore hoStatus
if match {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+"subscriptions:"+subsIdStr, ".")
if jsonInfo == "" {
return
}
subscription := convertJsonToCellChangeSubscription(jsonInfo)
log.Info("Sending RNIS notification ", subscription.CallbackReference)
var notif CellChangeNotification
notif.NotificationType = CELL_CHANGE_NOTIFICATION
var newEcgi Ecgi
var notifNewPlmn Plmn
if newPlmn != nil {
notifNewPlmn.Mnc = newPlmn.Mnc
notifNewPlmn.Mcc = newPlmn.Mcc
} else {
notifNewPlmn.Mnc = ""
notifNewPlmn.Mcc = ""
}
newEcgi.Plmn = &notifNewPlmn
newEcgi.CellId = newCellId
var oldEcgi Ecgi
var notifOldPlmn Plmn
if oldPlmn != nil {
notifOldPlmn.Mnc = oldPlmn.Mnc
notifOldPlmn.Mcc = oldPlmn.Mcc
} else {
notifOldPlmn.Mnc = ""
notifOldPlmn.Mcc = ""
}
oldEcgi.Plmn = &notifOldPlmn
oldEcgi.CellId = oldCellId
var notifAssociateId AssociateId
notifAssociateId.Type_ = assocId.Type_
notifAssociateId.Value = assocId.Value
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
notif.TimeStamp = &timeStamp
notif.HoStatus = 3 //only supporting 3 = COMPLETED
notif.SrcEcgi = &oldEcgi
notif.TrgEcgi = []Ecgi{newEcgi}
notif.AssociateId = append(notif.AssociateId, notifAssociateId)
sendCcNotification(subscription.CallbackReference, notif)
log.Info("Cell_change Notification" + "(" + subsIdStr + ")")
}
}
}
}
func checkReNotificationRegisteredSubscriptions(appId string, assocId *AssociateId, newPlmn *Plmn, oldPlmn *Plmn, qci int32, newCellId string, oldCellId string, erabId int32) {
//checking filters only if we were not connected to a POA-4G and now connecting to one
//condition to be connecting to a POA-4G from non POA-4G: 1) had no plmn 2) had no cellId 3) has erabId being allocated to it
if oldPlmn != nil && oldCellId != "" && erabId == -1 {
return
}
mutex.Lock()
defer mutex.Unlock()
//check all that applies
for subsId, sub := range reSubscriptionMap {
if sub != nil {
//verifying every criteria of the filter
match := isMatchFilterCriteriaAppInsId(rabEstSubscriptionType, sub.FilterCriteriaQci, appId)
if match {
match = isMatchFilterCriteriaAssociateId(rabEstSubscriptionType, sub.FilterCriteriaQci, assocId)
}
if match {
match = isMatchFilterCriteriaEcgi(rabEstSubscriptionType, sub.FilterCriteriaQci, newPlmn, nil, newCellId, oldCellId)
}
//we ignore qci
if match {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+"subscriptions:"+subsIdStr, ".")
if jsonInfo == "" {
return
}
subscription := convertJsonToRabEstSubscription(jsonInfo)
log.Info("Sending RNIS notification ", subscription.CallbackReference)
var notif RabEstNotification
notif.NotificationType = RAB_EST_NOTIFICATION
var newEcgi Ecgi
var notifNewPlmn Plmn
notifNewPlmn.Mnc = newPlmn.Mnc
notifNewPlmn.Mcc = newPlmn.Mcc
newEcgi.Plmn = &notifNewPlmn
newEcgi.CellId = newCellId
var erabQos RabEstNotificationErabQosParameters
erabQos.Qci = defaultSupportedQci
var notifAssociateId AssociateId
notifAssociateId.Type_ = assocId.Type_
notifAssociateId.Value = assocId.Value
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
notif.TimeStamp = &timeStamp
notif.ErabId = erabId
notif.Ecgi = &newEcgi
notif.ErabQosParameters = &erabQos
notif.AssociateId = append(notif.AssociateId, notifAssociateId)
sendReNotification(subscription.CallbackReference, notif)
log.Info("Rab_establishment Notification" + "(" + subsIdStr + ")")
}
}
}
}
func checkRrNotificationRegisteredSubscriptions(appId string, assocId *AssociateId, newPlmn *Plmn, oldPlmn *Plmn, qci int32, newCellId string, oldCellId string, erabId int32) {
//checking filters only if we were connected to a POA-4G and now disconnecting from one
//condition to be disconnecting from a POA-4G: 1) has an empty new plmn 2) has empty cellId
if newPlmn != nil && newCellId != "" {
return
}
mutex.Lock()
defer mutex.Unlock()
//check all that applies
for subsId, sub := range rrSubscriptionMap {
if sub != nil {
//verifying every criteria of the filter
match := isMatchFilterCriteriaAppInsId(rabRelSubscriptionType, sub.FilterCriteriaQci, appId)
if match {
match = isMatchFilterCriteriaAssociateId(rabRelSubscriptionType, sub.FilterCriteriaQci, assocId)
}
if match {
match = isMatchFilterCriteriaEcgi(rabRelSubscriptionType, sub.FilterCriteriaQci, nil, oldPlmn, newCellId, oldCellId)
}
if match {
match = isMatchRabRelFilterCriteriaErabId(sub.FilterCriteriaQci, erabId)
}
//we ignore qci
if match {
subsIdStr := strconv.Itoa(subsId)
jsonInfo, _ := rc.JSONGetEntry(baseKey+"subscriptions:"+subsIdStr, ".")
if jsonInfo == "" {
return
}
subscription := convertJsonToRabRelSubscription(jsonInfo)
log.Info("Sending RNIS notification ", subscription.CallbackReference)
var notif RabRelNotification
notif.NotificationType = RAB_REL_NOTIFICATION
var oldEcgi Ecgi
var notifOldPlmn Plmn
notifOldPlmn.Mnc = oldPlmn.Mnc
notifOldPlmn.Mcc = oldPlmn.Mcc
oldEcgi.Plmn = &notifOldPlmn
oldEcgi.CellId = oldCellId
var notifAssociateId AssociateId
notifAssociateId.Type_ = assocId.Type_
notifAssociateId.Value = assocId.Value
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
var erabRelInfo RabRelNotificationErabReleaseInfo
erabRelInfo.ErabId = erabId
notif.TimeStamp = &timeStamp
notif.Ecgi = &oldEcgi
notif.ErabReleaseInfo = &erabRelInfo
notif.AssociateId = append(notif.AssociateId, notifAssociateId)
sendRrNotification(subscription.CallbackReference, notif)
log.Info("Rab_release Notification" + "(" + subsIdStr + ")")
}
}
}
}
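// sendCcNotification - posts a cell change notification to the subscriber callback reference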
func sendCcNotification(notifyUrl string, notification CellChangeNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err.Error())
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
}
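// sendReNotification - posts a RAB establishment notification to the subscriber callback reference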
func sendReNotification(notifyUrl string, notification RabEstNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err.Error())
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
}
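// sendRrNotification - posts a RAB release notification to the subscriber callback reference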
func sendRrNotification(notifyUrl string, notification RabRelNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err.Error())
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
}
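// sendExpiryNotification - posts a subscription expiry notification to the subscriber callback reference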
func sendExpiryNotification(notifyUrl string, notification ExpiryNotification) {
startTime := time.Now()
jsonNotif, err := json.Marshal(notification)
if err != nil {
log.Error(err.Error())
}
resp, err := http.Post(notifyUrl, "application/json", bytes.NewBuffer(jsonNotif))
_ = httpLog.LogTx(notifyUrl, "POST", string(jsonNotif), resp, startTime)
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
}
func subscriptionsGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
subIdParamStr := vars["subscriptionId"]
jsonRespDB, _ := rc.JSONGetEntry(baseKey+"subscriptions:"+subIdParamStr, ".")
if jsonRespDB == "" {
w.WriteHeader(http.StatusNotFound)
return
}
var subscriptionCommon SubscriptionCommon
err := json.Unmarshal([]byte(jsonRespDB), &subscriptionCommon)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var jsonResponse []byte
switch subscriptionCommon.SubscriptionType {
case CELL_CHANGE_SUBSCRIPTION:
var subscription CellChangeSubscription
err = json.Unmarshal([]byte(jsonRespDB), &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err = json.Marshal(subscription)
case RAB_EST_SUBSCRIPTION:
var subscription RabEstSubscription
err = json.Unmarshal([]byte(jsonRespDB), &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err = json.Marshal(subscription)
case RAB_REL_SUBSCRIPTION:
var subscription RabRelSubscription
err = json.Unmarshal([]byte(jsonRespDB), &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonResponse, err = json.Marshal(subscription)
default:
log.Error("Unknown subscription type")
w.WriteHeader(http.StatusBadRequest)
return
}
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonResponse))
}
func subscriptionsPost(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var subscriptionCommon SubscriptionCommon
bodyBytes, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(bodyBytes, &subscriptionCommon)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
//extract common body part
subscriptionType := subscriptionCommon.SubscriptionType
//mandatory parameter
if subscriptionCommon.CallbackReference == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
//new subscription id
newSubsId := nextSubscriptionIdAvailable
nextSubscriptionIdAvailable++
subsIdStr := strconv.Itoa(newSubsId)
link := new(CaReconfSubscriptionLinks)
self := new(LinkType)
self.Href = hostUrl.String() + basePath + "subscriptions/" + subsIdStr
link.Self = self
var jsonResponse []byte
switch subscriptionType {
case CELL_CHANGE_SUBSCRIPTION:
var subscription CellChangeSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
subscription.Links = link
if subscription.FilterCriteriaAssocHo == nil {
log.Error("FilterCriteriaAssocHo should not be null for this subscription type")
http.Error(w, "FilterCriteriaAssocHo should not be null for this subscription type", http.StatusBadRequest)
return
}
if subscription.FilterCriteriaAssocHo.HoStatus == nil {
subscription.FilterCriteriaAssocHo.HoStatus = append(subscription.FilterCriteriaAssocHo.HoStatus, 3 /*COMPLETED*/)
}
for _, ecgi := range subscription.FilterCriteriaAssocHo.Ecgi {
if ecgi.Plmn == nil || ecgi.CellId == "" {
log.Error("For non null ecgi, plmn and cellId are mandatory")
http.Error(w, "For non null ecgi, plmn and cellId are mandatory", http.StatusBadRequest)
return
}
}
//registration
registerCc(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertCellChangeSubscriptionToJson(&subscription))
jsonResponse, err = json.Marshal(subscription)
case RAB_EST_SUBSCRIPTION:
var subscription RabEstSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
subscription.Links = link
if subscription.FilterCriteriaQci == nil {
log.Error("FilterCriteriaQci should not be null for this subscription type")
http.Error(w, "FilterCriteriaQci should not be null for this subscription type", http.StatusBadRequest)
return
}
for _, ecgi := range subscription.FilterCriteriaQci.Ecgi {
if ecgi.Plmn == nil || ecgi.CellId == "" {
log.Error("For non null ecgi, plmn and cellId are mandatory")
http.Error(w, "For non null ecgi, plmn and cellId are mandatory", http.StatusBadRequest)
return
}
}
//registration
registerRe(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertRabEstSubscriptionToJson(&subscription))
jsonResponse, err = json.Marshal(subscription)
case RAB_REL_SUBSCRIPTION:
var subscription RabRelSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
subscription.Links = link
if subscription.FilterCriteriaQci == nil {
log.Error("FilterCriteriaQci should not be null for this subscription type")
http.Error(w, "FilterCriteriaQci should not be null for this subscription type", http.StatusBadRequest)
return
}
for _, ecgi := range subscription.FilterCriteriaQci.Ecgi {
if ecgi.Plmn == nil || ecgi.CellId == "" {
log.Error("For non null ecgi, plmn and cellId are mandatory")
http.Error(w, "For non null ecgi, plmn and cellId are mandatory", http.StatusBadRequest)
return
}
}
//registration
registerRr(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertRabRelSubscriptionToJson(&subscription))
jsonResponse, err = json.Marshal(subscription)
default:
nextSubscriptionIdAvailable--
w.WriteHeader(http.StatusBadRequest)
return
}
//processing the error of the jsonResponse
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
fmt.Fprint(w, string(jsonResponse))
}
func subscriptionsPut(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
subIdParamStr := vars["subscriptionId"]
var subscriptionCommon SubscriptionCommon
bodyBytes, _ := ioutil.ReadAll(r.Body)
err := json.Unmarshal(bodyBytes, &subscriptionCommon)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
//extract common body part
subscriptionType := subscriptionCommon.SubscriptionType
//mandatory parameter
if subscriptionCommon.CallbackReference == "" {
log.Error("Mandatory CallbackReference parameter not present")
http.Error(w, "Mandatory CallbackReference parameter not present", http.StatusBadRequest)
return
}
link := subscriptionCommon.Links
if link == nil || link.Self == nil {
log.Error("Mandatory Link parameter not present")
http.Error(w, "Mandatory Link parameter not present", http.StatusBadRequest)
return
}
selfUrl := strings.Split(link.Self.Href, "/")
subsIdStr := selfUrl[len(selfUrl)-1]
if subsIdStr != subIdParamStr {
log.Error("SubscriptionId in endpoint and in body not matching")
http.Error(w, "SubscriptionId in endpoint and in body not matching", http.StatusBadRequest)
return
}
alreadyRegistered := false
var jsonResponse []byte
switch subscriptionType {
case CELL_CHANGE_SUBSCRIPTION:
var subscription CellChangeSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if subscription.FilterCriteriaAssocHo == nil {
log.Error("FilterCriteriaAssocHo should not be null for this subscription type")
http.Error(w, "FilterCriteriaAssocHo should not be null for this subscription type", http.StatusBadRequest)
return
}
if subscription.FilterCriteriaAssocHo.HoStatus == nil {
subscription.FilterCriteriaAssocHo.HoStatus = append(subscription.FilterCriteriaAssocHo.HoStatus, 3 /*COMPLETED*/)
}
//registration
if isSubscriptionIdRegisteredCc(subsIdStr) {
registerCc(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertCellChangeSubscriptionToJson(&subscription))
alreadyRegistered = true
jsonResponse, err = json.Marshal(subscription)
}
case RAB_EST_SUBSCRIPTION:
var subscription RabEstSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if subscription.FilterCriteriaQci == nil {
log.Error("FilterCriteriaQci should not be null for this subscription type")
http.Error(w, "FilterCriteriaQci should not be null for this subscription type", http.StatusBadRequest)
return
}
//registration
if isSubscriptionIdRegisteredRe(subsIdStr) {
registerRe(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertRabEstSubscriptionToJson(&subscription))
alreadyRegistered = true
jsonResponse, err = json.Marshal(subscription)
}
case RAB_REL_SUBSCRIPTION:
var subscription RabRelSubscription
err = json.Unmarshal(bodyBytes, &subscription)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if subscription.FilterCriteriaQci == nil {
log.Error("FilterCriteriaQci should not be null for this subscription type")
http.Error(w, "FilterCriteriaQci should not be null for this subscription type", http.StatusBadRequest)
return
}
//registration
if isSubscriptionIdRegisteredRr(subsIdStr) {
registerRr(&subscription, subsIdStr)
_ = rc.JSONSetEntry(baseKey+"subscriptions:"+subsIdStr, ".", convertRabRelSubscriptionToJson(&subscription))
alreadyRegistered = true
jsonResponse, err = json.Marshal(subscription)
}
default:
w.WriteHeader(http.StatusBadRequest)
return
}
if alreadyRegistered {
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonResponse))
} else {
w.WriteHeader(http.StatusNotFound)
}
}
func subscriptionsDelete(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
vars := mux.Vars(r)
subIdParamStr := vars["subscriptionId"]
jsonRespDB, _ := rc.JSONGetEntry(baseKey+"subscriptions:"+subIdParamStr, ".")
if jsonRespDB == "" {
w.WriteHeader(http.StatusNotFound)
return
}
err := delSubscription(baseKey+"subscriptions", subIdParamStr, false)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusNoContent)
}
func isSubscriptionIdRegisteredCc(subsIdStr string) bool {
var returnVal bool
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
if ccSubscriptionMap[subsId] != nil {
returnVal = true
} else {
returnVal = false
}
return returnVal
}
func isSubscriptionIdRegisteredRe(subsIdStr string) bool {
subsId, _ := strconv.Atoi(subsIdStr)
var returnVal bool
mutex.Lock()
defer mutex.Unlock()
if reSubscriptionMap[subsId] != nil {
returnVal = true
} else {
returnVal = false
}
return returnVal
}
func isSubscriptionIdRegisteredRr(subsIdStr string) bool {
subsId, _ := strconv.Atoi(subsIdStr)
var returnVal bool
mutex.Lock()
defer mutex.Unlock()
if rrSubscriptionMap[subsId] != nil {
returnVal = true
} else {
returnVal = false
}
return returnVal
}
func registerCc(cellChangeSubscription *CellChangeSubscription, subsIdStr string) {
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
ccSubscriptionMap[subsId] = cellChangeSubscription
if cellChangeSubscription.ExpiryDeadline != nil {
//get current list of subscription meant to expire at this time
intList := subscriptionExpiryMap[int(cellChangeSubscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(cellChangeSubscription.ExpiryDeadline.Seconds)] = intList
}
log.Info("New registration: ", subsId, " type: ", cellChangeSubscriptionType)
}
func registerRe(rabEstSubscription *RabEstSubscription, subsIdStr string) {
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
reSubscriptionMap[subsId] = rabEstSubscription
if rabEstSubscription.ExpiryDeadline != nil {
//get current list of subscription meant to expire at this time
intList := subscriptionExpiryMap[int(rabEstSubscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(rabEstSubscription.ExpiryDeadline.Seconds)] = intList
}
log.Info("New registration: ", subsId, " type: ", rabEstSubscriptionType)
}
func registerRr(rabRelSubscription *RabRelSubscription, subsIdStr string) {
subsId, _ := strconv.Atoi(subsIdStr)
mutex.Lock()
defer mutex.Unlock()
rrSubscriptionMap[subsId] = rabRelSubscription
if rabRelSubscription.ExpiryDeadline != nil {
//get current list of subscription meant to expire at this time
intList := subscriptionExpiryMap[int(rabRelSubscription.ExpiryDeadline.Seconds)]
intList = append(intList, subsId)
subscriptionExpiryMap[int(rabRelSubscription.ExpiryDeadline.Seconds)] = intList
}
log.Info("New registration: ", subsId, " type: ", rabRelSubscriptionType)
}
func deregisterCc(subsIdStr string, mutexTaken bool) {
subsId, _ := strconv.Atoi(subsIdStr)
if !mutexTaken {
mutex.Lock()
defer mutex.Unlock()
}
ccSubscriptionMap[subsId] = nil
log.Info("Deregistration: ", subsId, " type: ", cellChangeSubscriptionType)
}
func deregisterRe(subsIdStr string, mutexTaken bool) {
subsId, _ := strconv.Atoi(subsIdStr)
if !mutexTaken {
mutex.Lock()
defer mutex.Unlock()
}
reSubscriptionMap[subsId] = nil
log.Info("Deregistration: ", subsId, " type: ", rabEstSubscriptionType)
}
func deregisterRr(subsIdStr string, mutexTaken bool) {
subsId, _ := strconv.Atoi(subsIdStr)
if !mutexTaken {
mutex.Lock()
defer mutex.Unlock()
}
rrSubscriptionMap[subsId] = nil
log.Info("Deregistration: ", subsId, " type: ", rabRelSubscriptionType)
}
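// delSubscription - removes a subscription from the DB and deregisters it from all subscription maps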
func delSubscription(keyPrefix string, subsId string, mutexTaken bool) error {
err := rc.JSONDelEntry(keyPrefix+":"+subsId, ".")
deregisterCc(subsId, mutexTaken)
deregisterRe(subsId, mutexTaken)
deregisterRr(subsId, mutexTaken)
return err
}
func plmnInfoGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
//u, _ := url.Parse(r.URL.String())
//log.Info("url: ", u.RequestURI())
//q := u.Query()
//appInsId := q.Get("app_ins_id")
//appInsIdArray := strings.Split(appInsId, ",")
var response PlmnInfoResp
atLeastOne := false
//same for all plmnInfo
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
//forcing to ignore the appInsId parameter
//commenting the check but keeping the code
//if AppId is set, we return info as per AppIds, otherwise, we return the domain info only
/*if appInsId != "" {
for _, meAppName := range appInsIdArray {
meAppName = strings.TrimSpace(meAppName)
//get from DB
jsonAppEcgiInfo, _ := rc.JSONGetEntry(baseKey+"APP:"+meAppName, ".")
if jsonAppEcgiInfo != "" {
ecgi := convertJsonToEcgi(jsonAppEcgiInfo)
if ecgi != nil {
if ecgi.Plmn.Mnc != "" && ecgi.Plmn.Mcc != "" {
var plmnInfo PlmnInfo
plmnInfo.Plmn = ecgi.Plmn
plmnInfo.AppInsId = meAppName
plmnInfo.TimeStamp = &timeStamp
response.PlmnInfo = append(response.PlmnInfo, plmnInfo)
atLeastOne = true
}
}
}
}
} else {
*/
keyName := baseKey + "DOM:*"
err := rc.ForEachJSONEntry(keyName, populatePlmnInfo, &response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
//check if more than one plmnInfo in the array
if len(response.PlmnInfoList) > 0 {
atLeastOne = true
}
//}
if atLeastOne {
jsonResponse, err := json.Marshal(response.PlmnInfoList)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonResponse))
} else {
w.WriteHeader(http.StatusNotFound)
}
}
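// populatePlmnInfo - callback invoked for each domain entry to append its PLMN to the response list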
func populatePlmnInfo(key string, jsonInfo string, response interface{}) error {
resp := response.(*PlmnInfoResp)
if resp == nil {
return errors.New("Response not defined")
}
// Retrieve user info from DB
var domainData DomainData
err := json.Unmarshal([]byte(jsonInfo), &domainData)
if err != nil {
return err
}
var plmnInfo PlmnInfo
var plmn Plmn
plmn.Mnc = domainData.Mnc
plmn.Mcc = domainData.Mcc
plmnInfo.Plmn = append(plmnInfo.Plmn, plmn)
resp.PlmnInfoList = append(resp.PlmnInfoList, plmnInfo)
return nil
}
func rabInfoGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
var rabInfoData RabInfoData
//default values
rabInfoData.queryErabId = -1
u, _ := url.Parse(r.URL.String())
log.Info("url: ", u.RequestURI())
q := u.Query()
meAppName := q.Get("app_ins_id")
erabIdStr := q.Get("erab_id")
if erabIdStr != "" {
tmpErabId, _ := strconv.Atoi(erabIdStr)
rabInfoData.queryErabId = int32(tmpErabId)
} else {
rabInfoData.queryErabId = -1
}
qciStr := q.Get("qci")
if qciStr != "" {
tmpQci, _ := strconv.Atoi(qciStr)
rabInfoData.queryQci = int32(tmpQci)
} else {
rabInfoData.queryQci = -1
}
/*comma separated list
cellIdStr := q.Get("cell_id")
cellIds := strings.Split(cellIdStr, ",")
rabInfoData.queryCellIds = cellIds
*/
rabInfoData.queryCellIds = q["cell_id"]
rabInfoData.queryIpv4Addresses = q["ue_ipv4_address"]
//same for all plmnInfo
seconds := time.Now().Unix()
var timeStamp TimeStamp
timeStamp.Seconds = int32(seconds)
//meAppName := strings.TrimSpace(appInsId)
//meApp is ignored, we use the whole network
var rabInfo RabInfo
rabInfoData.rabInfo = &rabInfo
//get from DB
//loop through each UE
keyName := baseKey + "UE:*"
err := rc.ForEachJSONEntry(keyName, populateRabInfo, &rabInfoData)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
rabInfo.RequestId = "1"
rabInfo.AppInstanceId = meAppName
rabInfo.TimeStamp = &timeStamp
// Send response
jsonResponse, err := json.Marshal(rabInfo)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonResponse))
}
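// populateRabInfo - callback invoked for each UE entry to append matching eRAB info to the RAB info response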
func populateRabInfo(key string, jsonInfo string, rabInfoData interface{}) error {
// Get query params & userlist from user data
data := rabInfoData.(*RabInfoData)
if data == nil || data.rabInfo == nil {
return errors.New("rabInfo not found in rabInfoData")
}
// Retrieve user info from DB
var ueData UeData
err := json.Unmarshal([]byte(jsonInfo), &ueData)
if err != nil {
return err
}
// Ignore entries with no rabId
if ueData.ErabId == -1 {
return nil
}
// Filter using query params
if data.queryErabId != -1 && ueData.ErabId != data.queryErabId {
return nil
}
// Filter using query params
if data.queryQci != -1 && ueData.Qci != data.queryQci {
return nil
}
partOfFilter := true
for _, cellId := range data.queryCellIds {
if cellId != "" {
partOfFilter = false
if cellId == ueData.Ecgi.CellId {
partOfFilter = true
break
}
}
}
if !partOfFilter {
return nil
}
//name of the element is used as the ipv4 address at the moment
partOfFilter = true
for _, address := range data.queryIpv4Addresses {
if address != "" {
partOfFilter = false
if address == ueData.Name {
partOfFilter = true
break
}
}
}
if !partOfFilter {
return nil
}
var ueInfo RabInfoUeInfo
assocId := new(AssociateId)
assocId.Type_ = 1 //UE_IPV4_ADDRESS
subKeys := strings.Split(key, ":")
assocId.Value = subKeys[len(subKeys)-1]
ueInfo.AssociateId = append(ueInfo.AssociateId, *assocId)
erabQos := new(RabEstNotificationErabQosParameters)
erabQos.Qci = defaultSupportedQci
erabInfo := new(RabInfoErabInfo)
erabInfo.ErabId = ueData.ErabId
erabInfo.ErabQosParameters = erabQos
ueInfo.ErabInfo = append(ueInfo.ErabInfo, *erabInfo)
found := false
//find if cellUserInfo already exists
var cellUserIndex int
for index, cellUserInfo := range data.rabInfo.CellUserInfo {
if cellUserInfo.Ecgi.Plmn.Mcc == ueData.Ecgi.Plmn.Mcc &&
cellUserInfo.Ecgi.Plmn.Mnc == ueData.Ecgi.Plmn.Mnc &&
cellUserInfo.Ecgi.CellId == ueData.Ecgi.CellId {
//add ue into the existing cellUserInfo
found = true
cellUserIndex = index
}
}
if !found {
newCellUserInfo := new(RabInfoCellUserInfo)
newEcgi := new(Ecgi)
newPlmn := new(Plmn)
newPlmn.Mcc = ueData.Ecgi.Plmn.Mcc
newPlmn.Mnc = ueData.Ecgi.Plmn.Mnc
newEcgi.Plmn = newPlmn
newEcgi.CellId = ueData.Ecgi.CellId
newCellUserInfo.Ecgi = newEcgi
newCellUserInfo.UeInfo = append(newCellUserInfo.UeInfo, ueInfo)
data.rabInfo.CellUserInfo = append(data.rabInfo.CellUserInfo, *newCellUserInfo)
} else {
data.rabInfo.CellUserInfo[cellUserIndex].UeInfo = append(data.rabInfo.CellUserInfo[cellUserIndex].UeInfo, ueInfo)
}
return nil
}
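// createSubscriptionLinkList - builds the list of subscription links, optionally filtered by subscription type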
func createSubscriptionLinkList(subType string) *SubscriptionLinkList {
subscriptionLinkList := new(SubscriptionLinkList)
link := new(SubscriptionLinkListLinks)
self := new(LinkType)
self.Href = hostUrl.String() + basePath + "subscriptions"
link.Self = self
subscriptionLinkList.Links = link
//loop through all different types of subscription
mutex.Lock()
defer mutex.Unlock()
//loop through cell_change map
if subType == "" || subType == "cell_change" {
for _, ccSubscription := range ccSubscriptionMap {
if ccSubscription != nil {
var subscription SubscriptionLinkListLinksSubscription
subscription.Href = ccSubscription.Links.Self.Href
subscription.SubscriptionType = CELL_CHANGE_SUBSCRIPTION
subscriptionLinkList.Links.Subscription = append(subscriptionLinkList.Links.Subscription, subscription)
}
}
}
//loop through rab_est map
if subType == "" || subType == "rab_est" {
for _, reSubscription := range reSubscriptionMap {
if reSubscription != nil {
var subscription SubscriptionLinkListLinksSubscription
subscription.Href = reSubscription.Links.Self.Href
subscription.SubscriptionType = RAB_EST_SUBSCRIPTION
subscriptionLinkList.Links.Subscription = append(subscriptionLinkList.Links.Subscription, subscription)
}
}
}
//loop through rab_rel map
if subType == "" || subType == "rab_rel" {
for _, rrSubscription := range rrSubscriptionMap {
if rrSubscription != nil {
var subscription SubscriptionLinkListLinksSubscription
subscription.Href = rrSubscription.Links.Self.Href
subscription.SubscriptionType = RAB_REL_SUBSCRIPTION
subscriptionLinkList.Links.Subscription = append(subscriptionLinkList.Links.Subscription, subscription)
}
}
}
//no other maps to go through
return subscriptionLinkList
}
func subscriptionLinkListSubscriptionsGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
u, _ := url.Parse(r.URL.String())
log.Info("url: ", u.RequestURI())
q := u.Query()
subType := q.Get("subscription_type")
response := createSubscriptionLinkList(subType)
jsonResponse, err := json.Marshal(response)
if err != nil {
log.Error(err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(jsonResponse))
}
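// cleanUp - flushes the DB and resets all local subscription state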
func cleanUp() {
log.Info("Terminate all")
rc.DBFlush(baseKey)
nextSubscriptionIdAvailable = 1
nextAvailableErabId = 1
mutex.Lock()
defer mutex.Unlock()
ccSubscriptionMap = map[int]*CellChangeSubscription{}
reSubscriptionMap = map[int]*RabEstSubscription{}
rrSubscriptionMap = map[int]*RabRelSubscription{}
subscriptionExpiryMap = map[int][]int{}
updateStoreName("")
}
func updateStoreName(storeName string) {
if currentStoreName != storeName {
currentStoreName = storeName
_ = httpLog.ReInit(logModuleRNIS, sandboxName, storeName, redisAddr, influxAddr)
}
}
|
[
"\"MEEP_SANDBOX_NAME\"",
"\"MEEP_PUBLIC_URL\"",
"\"MEEP_HOST_URL\""
] |
[] |
[
"MEEP_HOST_URL",
"MEEP_PUBLIC_URL",
"MEEP_SANDBOX_NAME"
] |
[]
|
["MEEP_HOST_URL", "MEEP_PUBLIC_URL", "MEEP_SANDBOX_NAME"]
|
go
| 3 | 0 | |
setup.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from pathlib import Path
from posix import chmod
import setuptools
import setuptools.command.build_py
def gen_protoc_compiler():
cur_dir = os.path.dirname(__file__)
template_name = "protoc_compiler_template.py"
with open(os.path.join(cur_dir, template_name)) as fd:
template_py = fd.read()
target_name = os.path.join(cur_dir, "protoc-gen-python_grpc")
with open(target_name, "w") as fd:
content = "#!%s\n" % sys.executable + template_py
fd.write(content)
chmod(target_name, 0o755)
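# Make the generated protoc plugin discoverable on PATH for the grpc build step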
os.environ["PATH"] = cur_dir + os.pathsep + os.environ["PATH"]
class BuildPyCommand(setuptools.command.build_py.build_py):
def run(self) -> None:
super().run()
# Generate pure python protoc compiler
gen_protoc_compiler()
# Paths
root = Path(os.path.realpath(__file__)).parent
proto_file = root / "proto" / "idb.proto"
output_dir = root / "build" / "lib" / "idb" / "grpc"
grpclib_output = output_dir / "idb_grpc.py"
# Generate the grpc files
output_dir.mkdir(parents=True, exist_ok=True)
command = [
"grpc_tools.protoc",
"--proto_path={}".format(proto_file.parent),
"--python_out={}".format(output_dir),
"--python_grpc_out={}".format(output_dir),
] + [str(proto_file)]
# Needs to be imported after setuptools has ensured grpcio-tools is
# installed
from grpc_tools import protoc # pyre-ignore
if protoc.main(command) != 0:
raise Exception("error: {} failed".format(command))
# Fix the import paths
with open(grpclib_output, "r") as file:
filedata = file.read()
filedata = filedata.replace(
"import idb_pb2", "import idb.grpc.idb_pb2 as idb_pb2"
)
with open(grpclib_output, "w") as file:
file.write(filedata)
version = os.environ.get("FB_IDB_VERSION")
if not version:
raise Exception(
"""Cannot build with without a version number. Set the environment variable FB_IDB_VERSION"""
)
setuptools.setup(
name="fb-idb",
version=version,
author="Facebook",
author_email="[email protected]",
description="iOS debug bridge",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/facebook/idb",
packages=setuptools.find_packages(),
data_files=[("proto", ["proto/idb.proto"]), ("", ["protoc_compiler_template.py"])],
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["aiofiles", "grpclib >= 0.4.0", "protobuf", "treelib"],
setup_requires=["grpcio-tools >= 1.29.0", "grpclib >= 0.3.2"],
entry_points={"console_scripts": ["idb = idb.cli.main:main"]},
python_requires=">=3.7",
cmdclass={"build_py": BuildPyCommand},
)
|
[] |
[] |
[
"FB_IDB_VERSION",
"PATH"
] |
[]
|
["FB_IDB_VERSION", "PATH"]
|
python
| 2 | 0 | |
instabot/unfollower.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import numpy as np
import time
import os
# Instagram user credentials:
instagram_username = os.environ['INSTA_USERNAME']
instagram_password = os.environ['INSTA_PASSWORD']
# All the X-Paths used: (Change if something stops working)
uname_input_path = '/html/body/div[1]/section/main/article/div[2]/div[1]/div/form/div/div[1]/div/label/input'
pass_input_path = '/html/body/div[1]/section/main/article/div[2]/div[1]/div/form/div/div[2]/div/label/input'
submit_btn_path = '/html/body/div[1]/section/main/article/div[2]/div[1]/div/form/div/div[3]/button'
followers_btn_path = '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a'
following_btn_path = '/html/body/div[1]/section/main/div/header/section/ul/li[3]/a'
popup_path = '/html/body/div[5]/div/div/div[2]'
followers_count_path = '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a/span'
following_count_path = '/html/body/div[1]/section/main/div/header/section/ul/li[3]/a/span'
account_name_link_path = '/div/div[1]/div[2]/div[1]/span/a'
unfollow_btn_path = '/html/body/div[1]/section/main/div/header/section/div[1]/div[1]/div/div[2]/div/span/span[1]/button'
confirm_unfollow_btn_path = '/html/body/div[5]/div/div/div/div[3]/button[1]'
# All the css selectors used: (Fallbacks for when xpath didn't work)
ul_for_list_profiles = '.jSC57'
# All links:
instagram = 'https://www.instagram.com'
profile_page = f'https://www.instagram.com/{instagram_username}/'
# Selenium setup:
driver_path = os.environ['WEBDRIVER_PATH']
browser = webdriver.Firefox(executable_path=driver_path)
browser.get(instagram)
time.sleep(5)
username_input = browser.find_element_by_xpath(uname_input_path)
pass_input = browser.find_element_by_xpath(pass_input_path)
submit_button = browser.find_element_by_xpath(submit_btn_path)
# Log the user in:
username_input.send_keys(instagram_username)
pass_input.send_keys(instagram_password)
submit_button.click()
time.sleep(5)
# Followers and following estimation:
list_of_followers = []
list_of_following = []
browser.get(profile_page)
time.sleep(5)
number_followers = int(browser.find_element_by_xpath(followers_count_path).text)
number_following = int(browser.find_element_by_xpath(following_count_path).text)
# Go to followers after login:
browser.get(profile_page)
time.sleep(5)
followers_btn = browser.find_element_by_xpath(followers_btn_path)
followers_btn.click()
time.sleep(5)
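# Scroll the followers dialog ten times (END key, 5 s pauses) so more accounts load;
# increase the range if the list is long.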
for _ in range(10):
popup = browser.find_element_by_xpath(popup_path)
popup.send_keys(Keys.END)
time.sleep(5)
ul_followers = browser.find_element_by_css_selector(ul_for_list_profiles)
list_followers = ul_followers.find_elements_by_tag_name('li')
for follower in list_followers:
account_name_link = follower.find_element_by_tag_name('a')
account_name = account_name_link.get_attribute('href')
list_of_followers.append(account_name)
# Go to following after login:
browser.get(profile_page)
time.sleep(5)
following_btn = browser.find_element_by_xpath(following_btn_path)
following_btn.click()
time.sleep(5)
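# Same scrolling trick for the accounts you follow.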
for _ in range(10):
popup = browser.find_element_by_xpath(popup_path)
popup.send_keys(Keys.END)
time.sleep(5)
ul_following = browser.find_element_by_css_selector(ul_for_list_profiles)
list_following = ul_following.find_elements_by_tag_name('li')
for following in list_following:
account_name_link = following.find_element_by_tag_name('a')
account_name = account_name_link.get_attribute('href')
list_of_following.append(account_name)
if len(list_of_following) != number_following:
    print('WARN: Looks like not all followed accounts were fetched; try increasing the number of scrolls.')
if len(list_of_followers) != number_followers:
    print('WARN: Looks like not all followers were fetched; try increasing the number of scrolls.')
# Determine which users don't follow you back:
list_not_follow_back = np.setdiff1d(list_of_following, list_of_followers)
# Unfollow each and every one that doesn't follow back:
for name in list_not_follow_back:
browser.get(name)
time.sleep(5)
unfollow_btn = browser.find_element_by_xpath(unfollow_btn_path)
unfollow_btn.click()
time.sleep(2)
confirm_btn = browser.find_element_by_xpath(confirm_unfollow_btn_path)
confirm_btn.click()
time.sleep(2)
print(f'Unfollowed: {name}')
if len(list_not_follow_back) == 0:
print('No accounts to unfollow!')
time.sleep(10)
browser.quit()
|
[] |
[] |
[
"INSTA_PASSWORD",
"WEBDRIVER_PATH",
"INSTA_USERNAME"
] |
[]
|
["INSTA_PASSWORD", "WEBDRIVER_PATH", "INSTA_USERNAME"]
|
python
| 3 | 0 | |
local_repository_test.go
|
package main
import (
	"io/ioutil"
	"net/url"
	"os"
	"path/filepath"
	"testing"
	. "github.com/onsi/gomega"
)
func TestNewLocalRepository(t *testing.T) {
RegisterTestingT(t)
_localRepositoryRoots = []string{"/repos"}
r, err := LocalRepositoryFromFullPath("/repos/github.com/motemen/ghq")
Expect(err).To(BeNil())
Expect(r.NonHostPath()).To(Equal("motemen/ghq"))
Expect(r.Subpaths()).To(Equal([]string{"ghq", "motemen/ghq", "github.com/motemen/ghq"}))
r, err = LocalRepositoryFromFullPath("/repos/stash.com/scm/motemen/ghq")
Expect(err).To(BeNil())
Expect(r.NonHostPath()).To(Equal("scm/motemen/ghq"))
Expect(r.Subpaths()).To(Equal([]string{"ghq", "motemen/ghq", "scm/motemen/ghq", "stash.com/scm/motemen/ghq"}))
githubURL, _ := url.Parse("ssh://[email protected]/motemen/ghq.git")
r = LocalRepositoryFromURL(githubURL)
Expect(r.FullPath).To(Equal("/repos/github.com/motemen/ghq"))
stashURL, _ := url.Parse("ssh://[email protected]/scm/motemen/ghq")
r = LocalRepositoryFromURL(stashURL)
Expect(r.FullPath).To(Equal("/repos/stash.com/scm/motemen/ghq"))
svnSourceforgeURL, _ := url.Parse("http://svn.code.sf.net/p/ghq/code/trunk")
r = LocalRepositoryFromURL(svnSourceforgeURL)
Expect(r.FullPath).To(Equal("/repos/svn.code.sf.net/p/ghq/code/trunk"))
gitSourceforgeURL, _ := url.Parse("http://git.code.sf.net/p/ghq/code")
r = LocalRepositoryFromURL(gitSourceforgeURL)
Expect(r.FullPath).To(Equal("/repos/git.code.sf.net/p/ghq/code"))
svnSourceforgeJpURL, _ := url.Parse("http://scm.sourceforge.jp/svnroot/ghq/")
r = LocalRepositoryFromURL(svnSourceforgeJpURL)
Expect(r.FullPath).To(Equal("/repos/scm.sourceforge.jp/svnroot/ghq"))
gitSourceforgeJpURL, _ := url.Parse("http://scm.sourceforge.jp/gitroot/ghq/ghq.git")
r = LocalRepositoryFromURL(gitSourceforgeJpURL)
Expect(r.FullPath).To(Equal("/repos/scm.sourceforge.jp/gitroot/ghq/ghq"))
svnAssemblaURL, _ := url.Parse("https://subversion.assembla.com/svn/ghq/")
r = LocalRepositoryFromURL(svnAssemblaURL)
Expect(r.FullPath).To(Equal("/repos/subversion.assembla.com/svn/ghq"))
gitAssemblaURL, _ := url.Parse("https://git.assembla.com/ghq.git")
r = LocalRepositoryFromURL(gitAssemblaURL)
Expect(r.FullPath).To(Equal("/repos/git.assembla.com/ghq"))
}
func TestLocalRepositoryRoots(t *testing.T) {
RegisterTestingT(t)
defer func(orig string) { os.Setenv("GHQ_ROOT", orig) }(os.Getenv("GHQ_ROOT"))
_localRepositoryRoots = nil
os.Setenv("GHQ_ROOT", "/path/to/ghqroot")
Expect(localRepositoryRoots()).To(Equal([]string{"/path/to/ghqroot"}))
_localRepositoryRoots = nil
os.Setenv("GHQ_ROOT", "/path/to/ghqroot1"+string(os.PathListSeparator)+"/path/to/ghqroot2")
Expect(localRepositoryRoots()).To(Equal([]string{"/path/to/ghqroot1", "/path/to/ghqroot2"}))
}
// https://gist.github.com/kyanny/c231f48e5d08b98ff2c3
func TestList_Symlink(t *testing.T) {
RegisterTestingT(t)
root, err := ioutil.TempDir("", "")
Expect(err).To(BeNil())
symDir, err := ioutil.TempDir("", "")
Expect(err).To(BeNil())
_localRepositoryRoots = []string{root}
err = os.MkdirAll(filepath.Join(root, "github.com", "atom", "atom", ".git"), 0777)
Expect(err).To(BeNil())
err = os.MkdirAll(filepath.Join(root, "github.com", "zabbix", "zabbix", ".git"), 0777)
Expect(err).To(BeNil())
err = os.Symlink(symDir, filepath.Join(root, "github.com", "ghq"))
Expect(err).To(BeNil())
paths := []string{}
walkLocalRepositories(func(repo *LocalRepository) {
paths = append(paths, repo.RelPath)
})
Expect(paths).To(HaveLen(2))
}
|
[
"\"GHQ_ROOT\""
] |
[] |
[
"GHQ_ROOT"
] |
[]
|
["GHQ_ROOT"]
|
go
| 1 | 0 | |
test/smoke/testApps/Jdbc/src/main/java/com/microsoft/applicationinsights/smoketestapp/JdbcTestServlet.java
|
package com.microsoft.applicationinsights.smoketestapp;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.common.base.Stopwatch;
import com.google.common.base.Strings;
import org.hsqldb.jdbc.JDBCDriver;
@WebServlet("/*")
public class JdbcTestServlet extends HttpServlet {
public void init() throws ServletException {
try {
setupHsqldb();
if (!Strings.isNullOrEmpty(System.getenv("MYSQL"))) setupMysql();
if (!Strings.isNullOrEmpty(System.getenv("POSTGRES"))) setupPostgres();
if (!Strings.isNullOrEmpty(System.getenv("SQLSERVER"))) setupSqlServer();
// setupOracle();
} catch (Exception e) {
// surprisingly not all application servers seem to log init exceptions to stdout
e.printStackTrace();
throw new ServletException(e);
}
}
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException {
try {
doGetInternal(req);
resp.getWriter().println("ok");
} catch (ServletException e) {
throw e;
} catch (Exception e) {
throw new ServletException(e);
}
}
private void doGetInternal(HttpServletRequest req) throws Exception {
String pathInfo = req.getPathInfo();
if (pathInfo.equals("/hsqldbPreparedStatement")) {
hsqldbPreparedStatement();
} else if (pathInfo.equals("/hsqldbStatement")) {
hsqldbStatement();
} else if (pathInfo.equals("/hsqldbBatchPreparedStatement")) {
hsqldbBatchPreparedStatement();
} else if (pathInfo.equals("/hsqldbBatchStatement")) {
hsqldbBatchStatement();
} else if (pathInfo.equals("/mysqlPreparedStatement")) {
mysqlPreparedStatement();
} else if (pathInfo.equals("/mysqlStatement")) {
mysqlStatement();
} else if (pathInfo.equals("/postgresPreparedStatement")) {
postgresPreparedStatement();
} else if (pathInfo.equals("/postgresStatement")) {
postgresStatement();
} else if (pathInfo.equals("/sqlServerPreparedStatement")) {
sqlServerPreparedStatement();
} else if (pathInfo.equals("/sqlServerStatement")) {
sqlServerStatement();
} else if (pathInfo.equals("/oraclePreparedStatement")) {
oraclePreparedStatement();
} else if (pathInfo.equals("/oracleStatement")) {
oracleStatement();
} else if (!pathInfo.equals("/")) {
throw new ServletException("Unexpected url: " + pathInfo);
}
}
private void hsqldbPreparedStatement() throws Exception {
Connection connection = getHsqldbConnection();
executePreparedStatement(connection);
connection.close();
}
private void hsqldbStatement() throws Exception {
Connection connection = getHsqldbConnection();
executeStatement(connection);
connection.close();
}
private void hsqldbBatchPreparedStatement() throws Exception {
Connection connection = getHsqldbConnection();
executeBatchPreparedStatement(connection);
connection.close();
}
private void hsqldbBatchStatement() throws Exception {
Connection connection = getHsqldbConnection();
executeBatchStatement(connection);
connection.close();
}
private void mysqlPreparedStatement() throws Exception {
Connection connection = getMysqlConnection();
executePreparedStatement(connection);
connection.close();
}
private void mysqlStatement() throws Exception {
Connection connection = getMysqlConnection();
executeStatement(connection);
connection.close();
}
private void postgresPreparedStatement() throws Exception {
Connection connection = getPostgresConnection();
executePreparedStatement(connection);
connection.close();
}
private void postgresStatement() throws Exception {
Connection connection = getPostgresConnection();
executeStatement(connection);
connection.close();
}
private void sqlServerPreparedStatement() throws Exception {
Connection connection = getSqlServerConnection();
executePreparedStatement(connection);
connection.close();
}
private void sqlServerStatement() throws Exception {
Connection connection = getSqlServerConnection();
executeStatement(connection);
connection.close();
}
private void oraclePreparedStatement() throws Exception {
Connection connection = getOracleConnection();
executePreparedStatement(connection);
connection.close();
}
private void oracleStatement() throws Exception {
Connection connection = getOracleConnection();
executeStatement(connection);
connection.close();
}
private static void executePreparedStatement(Connection connection) throws SQLException {
PreparedStatement ps = connection.prepareStatement("select * from abc where xyz = ?");
ps.setString(1, "y");
ResultSet rs = ps.executeQuery();
while (rs.next()) {
}
rs.close();
ps.close();
}
private void executeStatement(Connection connection) throws SQLException {
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from abc");
while (rs.next()) {
}
rs.close();
statement.close();
}
private static void executeBatchPreparedStatement(Connection connection) throws SQLException {
PreparedStatement ps = connection.prepareStatement("insert into abc (xyz) values (?)");
ps.setString(1, "q");
ps.addBatch();
ps.setString(1, "r");
ps.addBatch();
ps.setString(1, "s");
ps.addBatch();
ps.executeBatch();
ps.close();
}
private void executeBatchStatement(Connection connection) throws SQLException {
Statement statement = connection.createStatement();
statement.addBatch("insert into abc (xyz) values ('t')");
statement.addBatch("insert into abc (xyz) values ('u')");
statement.addBatch("insert into abc (xyz) values ('v')");
statement.executeBatch();
statement.close();
}
private static void setupHsqldb() throws Exception {
Connection connection = getConnection(new Callable<Connection>() {
@Override
public Connection call() throws Exception {
return getHsqldbConnection();
}
});
setup(connection);
connection.close();
}
private static void setupMysql() throws Exception {
Class.forName("com.mysql.jdbc.Driver");
Connection connection = getConnection(new Callable<Connection>() {
@Override
public Connection call() throws Exception {
Connection connection = getMysqlConnection();
testConnection(connection, "select 1");
return connection;
}
});
setup(connection);
connection.close();
}
private static void setupPostgres() throws Exception {
Class.forName("org.postgresql.Driver");
Connection connection = getConnection(new Callable<Connection>() {
@Override
public Connection call() throws Exception {
Connection connection = getPostgresConnection();
testConnection(connection, "select 1");
return connection;
}
});
setup(connection);
connection.close();
}
private static void setupSqlServer() throws Exception {
Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver");
Connection connection = getConnection(new Callable<Connection>() {
@Override
public Connection call() throws Exception {
Connection connection = getSqlServerConnection();
testConnection(connection, "select 1");
return connection;
}
});
setup(connection);
connection.close();
}
private static void setupOracle() throws Exception {
Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver");
Connection connection = getConnection(new Callable<Connection>() {
@Override
public Connection call() throws Exception {
Connection connection = getOracleConnection();
testConnection(connection, "select 1 from dual");
return connection;
}
});
setup(connection);
connection.close();
}
private static Connection getHsqldbConnection() throws Exception {
return JDBCDriver.getConnection("jdbc:hsqldb:mem:test", null);
}
private static Connection getMysqlConnection() throws Exception {
String hostname = System.getenv("MYSQL");
return DriverManager.getConnection("jdbc:mysql://" + hostname + "/mysql?autoReconnect=true&useSSL=true&verifyServerCertificate=false", "root", "password");
}
private static Connection getPostgresConnection() throws Exception {
String hostname = System.getenv("POSTGRES");
return DriverManager.getConnection("jdbc:postgresql://" + hostname + "/postgres", "postgres", "passw0rd2");
}
private static Connection getSqlServerConnection() throws Exception {
String hostname = System.getenv("SQLSERVER");
return DriverManager.getConnection("jdbc:sqlserver://" + hostname, "sa", "Password1");
}
private static Connection getOracleConnection() throws Exception {
String hostname = System.getenv("ORACLE");
return DriverManager.getConnection("jdbc:oracle:thin:@" + hostname, "system", "password");
}
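    // Retry the supplied connection factory for up to 30 seconds, since the database
    // containers may still be starting when this servlet initializes.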
private static Connection getConnection(Callable<Connection> callable) throws Exception {
Exception exception;
Stopwatch stopwatch = Stopwatch.createStarted();
do {
try {
return callable.call();
} catch (Exception e) {
exception = e;
}
} while (stopwatch.elapsed(TimeUnit.SECONDS) < 30);
throw exception;
}
private static void testConnection(Connection connection, String sql) throws SQLException {
Statement statement = connection.createStatement();
try {
statement.execute(sql);
} finally {
statement.close();
}
}
private static void setup(Connection connection) throws SQLException {
Statement statement = connection.createStatement();
try {
statement.execute("create table abc (xyz varchar(10))");
statement.execute("insert into abc (xyz) values ('x')");
statement.execute("insert into abc (xyz) values ('y')");
statement.execute("insert into abc (xyz) values ('z')");
} finally {
statement.close();
}
}
}
|
[
"\"MYSQL\"",
"\"POSTGRES\"",
"\"SQLSERVER\"",
"\"MYSQL\"",
"\"POSTGRES\"",
"\"SQLSERVER\"",
"\"ORACLE\""
] |
[] |
[
"MYSQL",
"POSTGRES",
"SQLSERVER",
"ORACLE"
] |
[]
|
["MYSQL", "POSTGRES", "SQLSERVER", "ORACLE"]
|
java
| 4 | 0 | |
components/measurement.py
|
import pandas as pd
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import math
class Measurer:
def __init__(self,procedures):
self.procedures = procedures
self.alg_cnt = Counter([item[4:] for item in list(self.procedures.keys())])
def get_start_end(self):
start_end_list=[]
for k,v in self.procedures.items():
start_end_list.append(tuple(v.start_end))
return start_end_list
def get_batch_procedure(self):
procedure_names = list(self.procedures.keys())
procedure_names.sort()
length = len(procedure_names)
i=0
while i < length: # all procedures # multi-threading here
j = i
while procedure_names[j][:3] == procedure_names[i][:3] : # batch procedures
j += 1
if j >= length:
break
# batch_procedures = self.procedures[i:j]
batch_procedures = [v for k, v in self.procedures.items() if k in procedure_names[i:j]]
i=j
yield batch_procedures
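# Procedure bundles one simulated run: a description dict, the time-series data,
# tks = [start, handover instants..., end], and pds = exp(-mean(data)) over each
# interval between consecutive tks.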
class Procedure:
def __init__(self,procedure_name=None,procedure_dict=None,procedure_value=None,default_num_calls=100):
self.name = procedure_name
self.description = procedure_dict
        self.data = procedure_value
self.default_num_calls = default_num_calls
self.tks = self.handover_instants.copy()
        self.tks.insert(0, self.start_end[0])
        self.tks.append(self.start_end[-1])
self.num_tks = len(self.tks)
self.pds = self.hand_pds()
def __getattr__(self, item):
return self.description[item]
def hand_pds(self):
hand_values =[]
for i in range(self.num_tks-1):
s,e = self.tks[i],self.tks[i+1]
hand_value = float(self.data.loc[s:e].sum())
hand_value/=(e-s)
hand_values.append(hand_value)
return list(np.exp(-np.array(hand_values)))
def inject(self,batch_calls):
calls_start_instants_pd=np.ones_like(batch_calls[:,0])
calls_end_instants_pd=np.ones_like(batch_calls[:,0])
#approach 1
# for call in batch_calls:
# for i in range(0,self.num_tks-1):
# if self.tks[i]<= call[0] and call[0] <self.tks[i+1]:
# calls_start_instants_pd.append(i)# if i-1 ==-1, call start is over started
# break
# i+=1
# for j in range(0,self.num_tks-1):
# if self.tks[j]<=call[1] and call[1]<self.tks[j+1]:
# calls_end_instants_pd.append(j+1)
# break
# j+=1
#
# calls_instants_pd = np.array([calls_start_instants_pd,calls_end_instants_pd]).transpose([1,0])
#approach 2
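        # Vectorised bucketing: for each interval [tks[i], tks[i+1]) mark which call start
        # and end instants fall inside it.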
for i in range(self.num_tks-1):
mask = (self.tks[i]<=batch_calls[:,0]) &( batch_calls[:,0]< self.tks[i+1])
calls_start_instants_pd[mask] = i
mask = (self.tks[i]<= batch_calls[:,1] )&( batch_calls[:,1]< self.tks[i+1])
calls_end_instants_pd[mask] = i+1
calls_instants_pd = np.array([calls_start_instants_pd,calls_end_instants_pd]).T[:-1]
Ph = []
for instant in calls_instants_pd:
tmp = 1 - np.array(self.pds[instant[0]:instant[1]])
ret = 1
for item in tmp:
ret *= item
Ph.append(ret)
return 1 - np.array(Ph),calls_instants_pd
def get_desc_vec(pds,calls):
length = len(calls)
ret_01 = np.ones_like([1, length])
pass
def get_df(procedures):
instance_df = pd.DataFrame()
instance_df['procedure_name'] = np.array([v.name for k,v in procedures.items()])
instance_df['algorithm'] = [v.algorithm for k,v in procedures.items()]
instance_df['num_handovers'] = [v.num_handovers for k,v in procedures.items()]
instance_df['sim_duration'] = [v.sim_duration for k,v in procedures.items()]
instance_df['avg_signal'] = [v.avg_signal for k,v in procedures.items()]
instance_df['avg_hand_duration'] = [v.avg_hand_duration for k,v in procedures.items()]
return instance_df
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
registry/kubernetes/client/client.go
|
package client
import (
"crypto/tls"
"errors"
"io/ioutil"
"net/http"
"os"
"path"
log "github.com/micro/go-micro/v2/logger"
"github.com/micro/go-plugins/registry/kubernetes/v2/client/api"
"github.com/micro/go-plugins/registry/kubernetes/v2/client/watch"
)
var (
serviceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
ErrReadNamespace = errors.New("Could not read namespace from service account secret")
)
// Client ...
type client struct {
opts *api.Options
}
// ListPods ...
func (c *client) ListPods(labels map[string]string) (*PodList, error) {
var pods PodList
err := api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Do().Into(&pods)
return &pods, err
}
// UpdatePod ...
func (c *client) UpdatePod(name string, p *Pod) (*Pod, error) {
var pod Pod
err := api.NewRequest(c.opts).Patch().Resource("pods").Name(name).Body(p).Do().Into(&pod)
return &pod, err
}
// WatchPods ...
func (c *client) WatchPods(labels map[string]string) (watch.Watch, error) {
return api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Watch()
}
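// detectNamespace reads this pod's namespace from the mounted service account secret.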
func detectNamespace() (string, error) {
nsPath := path.Join(serviceAccountPath, "namespace")
// Make sure it's a file and we can read it
if s, e := os.Stat(nsPath); e != nil {
return "", e
} else if s.IsDir() {
return "", ErrReadNamespace
}
// Read the file, and cast to a string
if ns, e := ioutil.ReadFile(nsPath); e != nil {
return string(ns), e
} else {
return string(ns), nil
}
}
// NewClientByHost sets up a client by host
func NewClientByHost(host string) Kubernetes {
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: true,
}
c := &http.Client{
Transport: tr,
}
return &client{
opts: &api.Options{
Client: c,
Host: host,
Namespace: "default",
},
}
}
// NewClientInCluster should work similarly to the official api
// NewInClient by setting up a client configuration for use within
// a k8s pod.
func NewClientInCluster() Kubernetes {
host := "https://" + os.Getenv("KUBERNETES_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_SERVICE_PORT")
s, err := os.Stat(serviceAccountPath)
if err != nil {
log.Fatal(err)
}
if s == nil || !s.IsDir() {
log.Fatal(errors.New("no k8s service account found"))
}
token, err := ioutil.ReadFile(path.Join(serviceAccountPath, "token"))
if err != nil {
log.Fatal(err)
}
t := string(token)
ns, err := detectNamespace()
if err != nil {
log.Fatal(err)
}
crt, err := CertPoolFromFile(path.Join(serviceAccountPath, "ca.crt"))
if err != nil {
log.Fatal(err)
}
c := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: crt,
},
DisableCompression: true,
},
}
return &client{
opts: &api.Options{
Client: c,
Host: host,
Namespace: ns,
BearerToken: &t,
},
}
}
|
[
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"]
|
go
| 2 | 0 | |
src/pkg/caendr/caendr/services/cloud/lifesciences.py
|
import os
from logzero import logger
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from caendr.models.datastore import PipelineOperation, DatabaseOperation, NemascanMapping, IndelPrimer
from caendr.models.error import PipelineRunError
from caendr.services.cloud.datastore import query_ds_entities
from caendr.services.cloud.service_account import authenticate_google_service
from caendr.services.cloud.secret import get_secret
from caendr.utils.json import get_json_from_class
GOOGLE_CLOUD_PROJECT_NUMBER = os.environ.get('GOOGLE_CLOUD_PROJECT_NUMBER')
GOOGLE_CLOUD_REGION = os.environ.get('GOOGLE_CLOUD_REGION')
MODULE_API_PIPELINE_TASK_SERVICE_ACCOUNT_NAME = os.environ.get('MODULE_API_PIPELINE_TASK_SERVICE_ACCOUNT_NAME')
#sa_private_key_b64 = get_secret(MODULE_API_PIPELINE_TASK_SERVICE_ACCOUNT_NAME)
#gls_service = authenticate_google_service(sa_private_key_b64, None, 'lifesciences', 'v2beta')
gls_service = discovery.build('lifesciences', 'v2beta', credentials=GoogleCredentials.get_application_default())
parent_id = f"projects/{GOOGLE_CLOUD_PROJECT_NUMBER}/locations/{GOOGLE_CLOUD_REGION}"
def start_pipeline(pipeline_request):
req_body = get_json_from_class(pipeline_request)
logger.debug(f'Starting Pipeline Request: {req_body}')
try:
request = gls_service.projects().locations().pipelines().run(parent=parent_id, body=req_body)
response = request.execute()
logger.debug(f'Pipeline Response: {response}')
return response
except Exception as err:
raise PipelineRunError(err)
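# Persist a PipelineOperation entity for a Life Sciences run() response, keyed by the
# trailing segment of the operation name.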
def create_pipeline_operation_record(task, response):
if response is None:
raise PipelineRunError()
name = response.get('name')
metadata = response.get('metadata')
if name is None or metadata is None:
raise PipelineRunError('Pipeline start response missing expected properties')
id = name.rsplit('/', 1)[-1]
data = {
'id': id,
'operation': name,
'operation_kind': task.kind,
'metadata': metadata,
'report_path': None,
'done': False,
'error': False
}
op = PipelineOperation(id)
op.set_properties(**data)
op.save()
return PipelineOperation(id)
def get_pipeline_status(operation_name):
logger.debug(f'get_pipeline_status: operation_name:{operation_name}')
request = gls_service.projects().locations().operations().get(name=operation_name)
response = request.execute()
logger.debug(response)
return response
def update_pipeline_operation_record(operation_name):
logger.debug(f'update_pipeline_operation_record: operation_name:{operation_name}')
status = get_pipeline_status(operation_name)
id = operation_name.rsplit('/', 1)[-1]
data = {
'done': status.get('done'),
'error': status.get('error')
}
op = PipelineOperation(id)
op.set_properties(**data)
op.save()
return PipelineOperation(id)
def update_all_linked_status_records(kind, operation_name):
logger.debug(f'update_all_linked_status_records: kind:{kind} operation_name:{operation_name}')
status = get_pipeline_status(operation_name)
done = status.get('done')
error = status.get('error')
if done:
status = "COMPLETE"
if error:
status = "ERROR"
else:
status = "RUNNING"
filters=[("operation_name", "=", operation_name)]
ds_entities = query_ds_entities(kind, filters=filters, keys_only=True)
for entity in ds_entities:
if kind == DatabaseOperation.kind:
status_record = DatabaseOperation(entity.key.name)
elif kind == IndelPrimer.kind:
status_record = IndelPrimer(entity.key.name)
elif kind == NemascanMapping.kind:
status_record = NemascanMapping(entity.key.name)
status_record.set_properties(status=status)
status_record.save()
|
[] |
[] |
[
"GOOGLE_CLOUD_REGION",
"GOOGLE_CLOUD_PROJECT_NUMBER",
"MODULE_API_PIPELINE_TASK_SERVICE_ACCOUNT_NAME"
] |
[]
|
["GOOGLE_CLOUD_REGION", "GOOGLE_CLOUD_PROJECT_NUMBER", "MODULE_API_PIPELINE_TASK_SERVICE_ACCOUNT_NAME"]
|
python
| 3 | 0 | |
vendor/src/github.com/sendgrid/sendgrid-go/examples/geo/geo.go
|
package main
import (
"fmt"
"github.com/sendgrid/sendgrid-go"
"log"
"os"
)
///////////////////////////////////////////////////
// Retrieve email statistics by country and state/province.
// GET /geo/stats
func Retrieveemailstatisticsbycountryandstateprovince() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/geo/stats", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["end_date"] = "2016-04-01"
queryParams["country"] = "US"
queryParams["aggregated_by"] = "day"
queryParams["limit"] = "1"
queryParams["offset"] = "1"
queryParams["start_date"] = "2016-01-01"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
func main() {
// add your function calls here
}
|
[
"\"YOUR_SENDGRID_APIKEY\""
] |
[] |
[
"YOUR_SENDGRID_APIKEY"
] |
[]
|
["YOUR_SENDGRID_APIKEY"]
|
go
| 1 | 0 | |
cmd/pork/main.go
|
package main
import (
"fmt"
"os"
"github.com/katoozi/pork"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var rootCmd *cobra.Command
func main() {
rootCmd.Execute()
}
func init() {
rootCmd = &cobra.Command{
Use: "pork",
Short: "pork is a github tools for search, fork, pull request.",
}
rootCmd.AddCommand(pork.SearchCmd)
rootCmd.AddCommand(pork.DocsCmd)
rootCmd.AddCommand(pork.CloneCmd)
rootCmd.AddCommand(pork.ForkCmd)
rootCmd.AddCommand(pork.PullRequestCmd)
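	// The clone location defaults to $HOME; a "pork" config file in the current
	// directory can override it.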
viper.SetDefault("location", os.Getenv("HOME"))
viper.SetConfigName("pork")
viper.AddConfigPath(".")
if err := viper.ReadInConfig(); err != nil {
fmt.Println("Config file not found!!")
}
// viper.SetDefault("location", os.Getenv("HOME"))
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
gobblin-yarn/src/main/java/org/apache/gobblin/yarn/GobblinYarnTaskRunner.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.yarn;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.gobblin.util.logs.LogCopier;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.helix.NotificationContext;
import org.apache.helix.messaging.handling.HelixTaskResult;
import org.apache.helix.messaging.handling.MessageHandler;
import org.apache.helix.messaging.handling.MultiTypeMessageHandlerFactory;
import org.apache.helix.model.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.base.Strings;
import com.google.common.util.concurrent.Service;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;
import org.apache.gobblin.cluster.GobblinClusterConfigurationKeys;
import org.apache.gobblin.cluster.GobblinClusterUtils;
import org.apache.gobblin.cluster.GobblinTaskRunner;
import org.apache.gobblin.util.JvmUtils;
import org.apache.gobblin.util.logs.Log4jConfigurationHelper;
import org.apache.gobblin.yarn.event.DelegationTokenUpdatedEvent;
public class GobblinYarnTaskRunner extends GobblinTaskRunner {
private static final Logger LOGGER = LoggerFactory.getLogger(GobblinTaskRunner.class);
public static final String HELIX_YARN_INSTANCE_NAME_PREFIX = GobblinYarnTaskRunner.class.getSimpleName();
public GobblinYarnTaskRunner(String applicationName, String applicationId, String helixInstanceName, ContainerId containerId, Config config,
Optional<Path> appWorkDirOptional) throws Exception {
super(applicationName, helixInstanceName, applicationId, getTaskRunnerId(containerId),
GobblinClusterUtils.addDynamicConfig(config.withValue(GobblinYarnConfigurationKeys.CONTAINER_NUM_KEY,
ConfigValueFactory.fromAnyRef(YarnHelixUtils.getContainerNum(containerId.toString())))), appWorkDirOptional);
}
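  // Beyond the base task-runner services, attach a LogCopier when a log sink root dir is
  // configured and the YARN log source is present, plus a YarnContainerSecurityManager
  // when Hadoop security (Kerberos) is enabled.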
@Override
public List<Service> getServices() {
List<Service> services = new ArrayList<>();
services.addAll(super.getServices());
LogCopier logCopier = null;
if (clusterConfig.hasPath(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY)) {
GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
String containerLogDir = clusterConfig.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
if (gobblinYarnLogSource.isLogSourcePresent()) {
try {
logCopier = gobblinYarnLogSource.buildLogCopier(this.clusterConfig, this.taskRunnerId, this.fs,
new Path(containerLogDir, GobblinClusterUtils.getAppWorkDirPath(this.applicationName, this.applicationId)));
services.add(logCopier);
} catch (Exception e) {
LOGGER.warn("Cannot add LogCopier service to the service manager due to", e);
}
}
}
if (UserGroupInformation.isSecurityEnabled()) {
LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
services.add(new YarnContainerSecurityManager(this.clusterConfig, this.fs, this.eventBus, logCopier));
}
return services;
}
@Override
public MultiTypeMessageHandlerFactory getUserDefinedMessageHandlerFactory() {
return new ParticipantUserDefinedMessageHandlerFactory();
}
/**
* A custom {@link MultiTypeMessageHandlerFactory} for {@link ParticipantUserDefinedMessageHandler}s that
* handle messages of type {@link org.apache.helix.model.Message.MessageType#USER_DEFINE_MSG}.
*/
private class ParticipantUserDefinedMessageHandlerFactory implements MultiTypeMessageHandlerFactory {
@Override
public MessageHandler createHandler(Message message, NotificationContext context) {
return new ParticipantUserDefinedMessageHandler(message, context);
}
@Override
public String getMessageType() {
return Message.MessageType.USER_DEFINE_MSG.toString();
}
public List<String> getMessageTypes() {
return Collections.singletonList(getMessageType());
}
@Override
public void reset() {
}
/**
* A custom {@link MessageHandler} for handling user-defined messages to the participants.
*
* <p>
* Currently it handles the following sub types of messages:
*
* <ul>
* <li>{@link org.apache.gobblin.cluster.HelixMessageSubTypes#TOKEN_FILE_UPDATED}</li>
* </ul>
* </p>
*/
private class ParticipantUserDefinedMessageHandler extends MessageHandler {
public ParticipantUserDefinedMessageHandler(Message message, NotificationContext context) {
super(message, context);
}
@Override
public HelixTaskResult handleMessage() {
String messageSubType = this._message.getMsgSubType();
if (messageSubType.equalsIgnoreCase(org.apache.gobblin.cluster.HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString())) {
LOGGER.info("Handling message " + org.apache.gobblin.cluster.HelixMessageSubTypes.TOKEN_FILE_UPDATED.toString());
eventBus.post(new DelegationTokenUpdatedEvent());
HelixTaskResult helixTaskResult = new HelixTaskResult();
helixTaskResult.setSuccess(true);
return helixTaskResult;
}
throw new IllegalArgumentException(String
.format("Unknown %s message subtype: %s", Message.MessageType.USER_DEFINE_MSG.toString(), messageSubType));
}
@Override
public void onError(Exception e, ErrorCode code, ErrorType type) {
LOGGER.error(
String.format("Failed to handle message with exception %s, error code %s, error type %s", e, code, type));
}
}
}
private static String getApplicationId(ContainerId containerId) {
return containerId.getApplicationAttemptId().getApplicationId().toString();
}
private static String getTaskRunnerId(ContainerId containerId) {
return containerId.toString();
}
public static void main(String[] args) throws Exception {
Options options = buildOptions();
try {
CommandLine cmd = new DefaultParser().parse(options, args);
if (!cmd.hasOption(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME) || !cmd
.hasOption(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME) || !cmd
.hasOption(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME)) {
printUsage(options);
System.exit(1);
}
Log4jConfigurationHelper.updateLog4jConfiguration(GobblinTaskRunner.class,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE,
GobblinYarnConfigurationKeys.GOBBLIN_YARN_LOG4J_CONFIGURATION_FILE);
LOGGER.info(JvmUtils.getJvmInputArguments());
ContainerId containerId =
ConverterUtils.toContainerId(System.getenv().get(ApplicationConstants.Environment.CONTAINER_ID.key()));
String applicationName = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_NAME_OPTION_NAME);
String applicationId = cmd.getOptionValue(GobblinClusterConfigurationKeys.APPLICATION_ID_OPTION_NAME);
String helixInstanceName = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_NAME_OPTION_NAME);
String helixInstanceTags = cmd.getOptionValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_OPTION_NAME);
Config config = ConfigFactory.load();
if (!Strings.isNullOrEmpty(helixInstanceTags)) {
config = config.withValue(GobblinClusterConfigurationKeys.HELIX_INSTANCE_TAGS_KEY, ConfigValueFactory.fromAnyRef(helixInstanceTags));
}
GobblinTaskRunner gobblinTaskRunner =
new GobblinYarnTaskRunner(applicationName, applicationId, helixInstanceName, containerId, config,
Optional.<Path>absent());
gobblinTaskRunner.start();
} catch (ParseException pe) {
printUsage(options);
System.exit(1);
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
cmd/provisioner/hostpath-provisioner.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"errors"
"flag"
"os"
"path"
"strings"
"syscall"
"golang.org/x/sys/unix"
"github.com/golang/glog"
"kubevirt.io/hostpath-provisioner/controller"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
defaultProvisionerName = "kubevirt.io/hostpath-provisioner"
annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"
)
var provisionerName string
type hostPathProvisioner struct {
pvDir string
identity string
nodeName string
useNamingPrefix bool
}
// Common allocation units
const (
KiB int64 = 1024
MiB int64 = 1024 * KiB
GiB int64 = 1024 * MiB
TiB int64 = 1024 * GiB
)
var provisionerID string
// NewHostPathProvisioner creates a new hostpath provisioner
func NewHostPathProvisioner() controller.Provisioner {
useNamingPrefix := false
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
glog.Fatal("env variable NODE_NAME must be set so that this provisioner can identify itself")
}
// note that the pvDir variable informs us *where* the provisioner should be writing backing files to
	// this needs to match the path specified in the volumes.hostPath spec of the deployment
pvDir := os.Getenv("PV_DIR")
if pvDir == "" {
glog.Fatal("env variable PV_DIR must be set so that this provisioner knows where to place its data")
}
if strings.ToLower(os.Getenv("USE_NAMING_PREFIX")) == "true" {
useNamingPrefix = true
}
glog.Infof("initiating kubevirt/hostpath-provisioner on node: %s\n", nodeName)
	provisionerName = defaultProvisionerName
return &hostPathProvisioner{
pvDir: pvDir,
identity: provisionerName,
nodeName: nodeName,
useNamingPrefix: useNamingPrefix,
}
}
var _ controller.Provisioner = &hostPathProvisioner{}
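// isCorrectNodeByBindingMode decides whether this provisioner instance should act on a
// claim: an explicit "kubevirt.io/provisionOnNode" annotation wins; otherwise, with
// WaitForFirstConsumer binding, the scheduler's "volume.kubernetes.io/selected-node"
// annotation is checked against this node.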
func isCorrectNodeByBindingMode(annotations map[string]string, nodeName string, bindingMode storage.VolumeBindingMode) bool {
glog.Infof("isCorrectNodeByBindingMode mode: %s", string(bindingMode))
if _, ok := annotations["kubevirt.io/provisionOnNode"]; ok {
if isCorrectNode(annotations, nodeName, "kubevirt.io/provisionOnNode") {
annotations[annStorageProvisioner] = defaultProvisionerName
return true
}
return false
} else if bindingMode == storage.VolumeBindingWaitForFirstConsumer {
return isCorrectNode(annotations, nodeName, "volume.kubernetes.io/selected-node")
}
return false
}
func isCorrectNode(annotations map[string]string, nodeName string, annotationName string) bool {
if val, ok := annotations[annotationName]; ok {
glog.Infof("claim included %s annotation: %s\n", annotationName, val)
if val == nodeName {
glog.Infof("matched %s: %s with this node: %s\n", annotationName, val, nodeName)
return true
}
glog.Infof("no match for %s: %s with this node: %s\n", annotationName, val, nodeName)
return false
}
glog.Infof("missing %s annotation, skipping operations for pvc", annotationName)
return false
}
func (p *hostPathProvisioner) ShouldProvision(pvc *v1.PersistentVolumeClaim, bindingMode *storage.VolumeBindingMode) bool {
shouldProvision := isCorrectNodeByBindingMode(pvc.GetAnnotations(), p.nodeName, *bindingMode)
if shouldProvision {
pvCapacity, err := calculatePvCapacity(p.pvDir)
if pvCapacity != nil && pvCapacity.Cmp(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]) < 0 {
glog.Error("PVC request size larger than total possible PV size")
shouldProvision = false
} else if err != nil {
glog.Errorf("Unable to determine pvCapacity %v", err)
shouldProvision = false
}
}
return shouldProvision
}
// Provision creates a storage asset and returns a PV object representing it.
func (p *hostPathProvisioner) Provision(options controller.ProvisionOptions) (*v1.PersistentVolume, error) {
vPath := path.Join(p.pvDir, options.PVName)
pvCapacity, err := calculatePvCapacity(p.pvDir)
if p.useNamingPrefix {
vPath = path.Join(p.pvDir, options.PVC.Name+"-"+options.PVName)
}
if pvCapacity != nil {
glog.Infof("creating backing directory: %v", vPath)
if err := os.MkdirAll(vPath, 0777); err != nil {
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: options.PVName,
Annotations: map[string]string{
"hostPathProvisionerIdentity": p.identity,
"kubevirt.io/provisionOnNode": p.nodeName,
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
AccessModes: options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): *pvCapacity,
},
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: vPath,
},
},
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpIn,
Values: []string{
p.nodeName,
},
},
},
},
},
},
},
},
}
return pv, nil
}
return nil, err
}
// Delete removes the storage asset that was created by Provision represented
// by the given PV.
func (p *hostPathProvisioner) Delete(volume *v1.PersistentVolume) error {
ann, ok := volume.Annotations["hostPathProvisionerIdentity"]
if !ok {
return errors.New("identity annotation not found on PV")
}
if ann != p.identity {
return &controller.IgnoredError{Reason: "identity annotation on PV does not match ours"}
}
if !isCorrectNode(volume.Annotations, p.nodeName, "kubevirt.io/provisionOnNode") {
return &controller.IgnoredError{Reason: "identity annotation on pvc does not match ours, not deleting PV"}
}
path := volume.Spec.PersistentVolumeSource.HostPath.Path
glog.Infof("removing backing directory: %v", path)
if err := os.RemoveAll(path); err != nil {
return err
}
return nil
}
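// calculatePvCapacity reports the size of the filesystem backing path (block count times
// block size), rounded down to an easy-to-read unit.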
func calculatePvCapacity(path string) (*resource.Quantity, error) {
statfs := &unix.Statfs_t{}
err := unix.Statfs(path, statfs)
if err != nil {
return nil, err
}
// Capacity is total block count * block size
quantity := resource.NewQuantity(int64(roundDownCapacityPretty(int64(statfs.Blocks)*statfs.Bsize)), resource.BinarySI)
return quantity, nil
}
// Round down the capacity to an easy to read value. Blatantly stolen from here: https://github.com/kubernetes-incubator/external-storage/blob/master/local-volume/provisioner/pkg/discovery/discovery.go#L339
func roundDownCapacityPretty(capacityBytes int64) int64 {
easyToReadUnitsBytes := []int64{GiB, MiB}
// Round down to the nearest easy to read unit
// such that there are at least 10 units at that size.
for _, easyToReadUnitBytes := range easyToReadUnitsBytes {
// Round down the capacity to the nearest unit.
size := capacityBytes / easyToReadUnitBytes
if size >= 10 {
return size * easyToReadUnitBytes
}
}
return capacityBytes
}
func main() {
syscall.Umask(0)
flag.Parse()
flag.Set("logtostderr", "true")
// Create an InClusterConfig and use it to create a client for the controller
// to use to communicate with Kubernetes
config, err := rest.InClusterConfig()
if err != nil {
glog.Fatalf("Failed to create config: %v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
// The controller needs to know what the server version is because out-of-tree
// provisioners aren't officially supported until 1.5
serverVersion, err := clientset.Discovery().ServerVersion()
if err != nil {
glog.Fatalf("Error getting server version: %v", err)
}
// Create the provisioner: it implements the Provisioner interface expected by
// the controller
hostPathProvisioner := NewHostPathProvisioner()
glog.Infof("creating provisioner controller with name: %s\n", provisionerName)
// Start the provision controller which will dynamically provision hostPath
// PVs
pc := controller.NewProvisionController(clientset, provisionerName, hostPathProvisioner, serverVersion.GitVersion)
pc.Run(wait.NeverStop)
}
|
[
"\"NODE_NAME\"",
"\"PV_DIR\"",
"\"USE_NAMING_PREFIX\""
] |
[] |
[
"USE_NAMING_PREFIX",
"PV_DIR",
"NODE_NAME"
] |
[]
|
["USE_NAMING_PREFIX", "PV_DIR", "NODE_NAME"]
|
go
| 3 | 0 | |
vendor/github.com/minio/cli/help.go
|
package cli
import (
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"text/template"
)
// AppHelpTemplate is the text template for the Default help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var AppHelpTemplate = `NAME:
{{.Name}}{{if .Usage}} - {{.Usage}}{{end}}
USAGE:
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
VERSION:
{{.Version}}{{end}}{{end}}{{if .Description}}
DESCRIPTION:
{{.Description}}{{end}}{{if len .Authors}}
AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
{{range $index, $author := .Authors}}{{if $index}}
{{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}}
COMMANDS:{{range .VisibleCategories}}{{if .Name}}
{{.Name}}:{{end}}{{range .VisibleCommands}}
{{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
GLOBAL FLAGS:
{{range $index, $option := .VisibleFlags}}{{if $index}}
{{end}}{{$option}}{{end}}{{end}}{{if .Copyright}}
COPYRIGHT:
{{.Copyright}}{{end}}
`
// CommandHelpTemplate is the text template for the command help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var CommandHelpTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
CATEGORY:
{{.Category}}{{end}}{{if .Description}}
DESCRIPTION:
{{.Description}}{{end}}{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
`
// SubcommandHelpTemplate is the text template for the subcommand help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var SubcommandHelpTemplate = `NAME:
{{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}}
USAGE:
{{.HelpName}} COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
COMMANDS:
{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
`
var helpCommand = Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
ShowAppHelp(c)
return nil
},
}
var helpSubcommand = Command{
Name: "help",
Aliases: []string{"h"},
Usage: "Shows a list of commands or help for one command",
ArgsUsage: "[command]",
Action: func(c *Context) error {
args := c.Args()
if args.Present() {
return ShowCommandHelp(c, args.First())
}
return ShowSubcommandHelp(c)
},
}
// Prints help for the App or Command
type helpPrinter func(w io.Writer, templ string, data interface{})
// Prints help for the App or Command with custom template function.
type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{})
// HelpPrinter is a function that writes the help output. If not set a default
// is used. The function signature is:
// func(w io.Writer, templ string, data interface{})
var HelpPrinter helpPrinter = printHelp
// HelpPrinterCustom is same as HelpPrinter but
// takes a custom function for template function map.
var HelpPrinterCustom helpPrinterCustom = printHelpCustom
// VersionPrinter prints the version for the App
var VersionPrinter = printVersion
// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code.
func ShowAppHelpAndExit(c *Context, exitCode int) {
ShowAppHelp(c)
os.Exit(exitCode)
}
// ShowAppHelp is an action that displays the help.
func ShowAppHelp(c *Context) (err error) {
if c.App.CustomAppHelpTemplate == "" {
HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
return
}
customAppData := func() map[string]interface{} {
if c.App.ExtraInfo == nil {
return nil
}
return map[string]interface{}{
"ExtraInfo": c.App.ExtraInfo,
}
}
HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData())
return nil
}
// DefaultAppComplete prints the list of subcommands as the default app completion method
func DefaultAppComplete(c *Context) {
for _, command := range c.App.Commands {
if command.Hidden {
continue
}
for _, name := range command.Names() {
fmt.Fprintln(c.App.Writer, name)
}
}
}
// ShowCommandHelpAndExit - exits with code after showing help
func ShowCommandHelpAndExit(c *Context, command string, code int) {
ShowCommandHelp(c, command)
os.Exit(code)
}
// ShowCommandHelp prints help for the given command
func ShowCommandHelp(ctx *Context, command string) error {
// show the subcommand help for a command with subcommands
if command == "" {
HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
return nil
}
for _, c := range ctx.App.Commands {
if c.HasName(command) {
if c.CustomHelpTemplate != "" {
HelpPrinter(ctx.App.Writer, c.CustomHelpTemplate, c)
} else {
HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
}
return nil
}
}
if ctx.App.CommandNotFound == nil {
return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
}
ctx.App.CommandNotFound(ctx, command)
return nil
}
// ShowSubcommandHelp prints help for the given subcommand
func ShowSubcommandHelp(c *Context) error {
return ShowCommandHelp(c, c.Command.Name)
}
// ShowVersion prints the version number of the App
func ShowVersion(c *Context) {
VersionPrinter(c)
}
func printVersion(c *Context) {
fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
}
// ShowCompletions prints the lists of commands within a given context
func ShowCompletions(c *Context) {
a := c.App
if a != nil && a.BashComplete != nil {
a.BashComplete(c)
}
}
// ShowCommandCompletions prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
c := ctx.App.Command(command)
if c != nil && c.BashComplete != nil {
c.BashComplete(ctx)
}
}
func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) {
funcMap := template.FuncMap{
"join": strings.Join,
}
if customFunc != nil {
for key, value := range customFunc {
funcMap[key] = value
}
}
w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
err := t.Execute(w, data)
if err != nil {
// If the writer is closed, t.Execute will fail, and there's nothing
// we can do to recover.
if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
}
return
}
w.Flush()
}
func printHelp(out io.Writer, templ string, data interface{}) {
printHelpCustom(out, templ, data, nil)
}
func checkVersion(c *Context) bool {
found := false
if VersionFlag.Name != "" {
eachName(VersionFlag.Name, func(name string) {
if c.GlobalBool(name) || c.Bool(name) {
found = true
}
})
}
return found
}
func checkHelp(c *Context) bool {
found := false
if HelpFlag.Name != "" {
eachName(HelpFlag.Name, func(name string) {
if c.GlobalBool(name) || c.Bool(name) {
found = true
}
})
}
return found
}
func checkCommandHelp(c *Context, name string) bool {
if c.Bool("h") || c.Bool("help") {
ShowCommandHelp(c, name)
return true
}
return false
}
func checkSubcommandHelp(c *Context) bool {
if c.Bool("h") || c.Bool("help") {
ShowSubcommandHelp(c)
return true
}
return false
}
func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
if !a.EnableBashCompletion {
return false, arguments
}
pos := len(arguments) - 1
lastArg := arguments[pos]
if lastArg != "--"+BashCompletionFlag.Name {
return false, arguments
}
return true, arguments[:pos]
}
func checkCompletions(c *Context) bool {
if !c.shellComplete {
return false
}
if args := c.Args(); args.Present() {
name := args.First()
if cmd := c.App.Command(name); cmd != nil {
// let the command handle the completion
return false
}
}
ShowCompletions(c)
return true
}
func checkCommandCompletions(c *Context, name string) bool {
if !c.shellComplete {
return false
}
ShowCommandCompletions(c, name)
return true
}
|
[
"\"CLI_TEMPLATE_ERROR_DEBUG\""
] |
[] |
[
"CLI_TEMPLATE_ERROR_DEBUG"
] |
[]
|
["CLI_TEMPLATE_ERROR_DEBUG"]
|
go
| 1 | 0 | |
pkg/executor/executor_test.go
|
//
// This test depends on several env vars:
//
// KUBECONFIG has to point at a kube config with a cluster. The test
// will use the default context from that config. Be careful,
// don't point this at your production environment. The test is
// skipped if KUBECONFIG is undefined.
//
// TEST_SPECIALIZE_URL
// TEST_FETCHER_URL
// These need to point at <node ip>:30001 and <node ip>:30002,
// where <node ip> is the address of any node in the test
// cluster.
//
// FETCHER_IMAGE
// Optional. Set this to a fetcher image; otherwise uses the
// default.
//
// Here's how I run this on my setup, with minikube:
// TEST_SPECIALIZE_URL=http://192.168.99.100:30002/specialize TEST_FETCHER_URL=http://192.168.99.100:30001 FETCHER_IMAGE=minikube/fetcher:testing KUBECONFIG=/Users/soam/.kube/config go test -v .
package executor
import (
"context"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"testing"
"time"
"go.uber.org/zap"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
fv1 "github.com/fission/fission/pkg/apis/fission.io/v1"
"github.com/fission/fission/pkg/crd"
"github.com/fission/fission/pkg/executor/client"
)
func panicIf(err error) {
if err != nil {
log.Panicf("Error: %v", err)
}
}
// return the number of pods in the given namespace matching the given labels
func countPods(kubeClient *kubernetes.Clientset, ns string, labelz map[string]string) int {
pods, err := kubeClient.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: labels.Set(labelz).AsSelector().String(),
})
if err != nil {
log.Panicf("Failed to list pods: %v", err)
}
return len(pods.Items)
}
func createTestNamespace(kubeClient *kubernetes.Clientset, ns string) {
_, err := kubeClient.CoreV1().Namespaces().Create(&apiv1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
},
})
if err != nil {
log.Panicf("failed to create ns %v: %v", ns, err)
}
log.Printf("Created namespace %v", ns)
}
// create a nodeport service
func createSvc(kubeClient *kubernetes.Clientset, ns string, name string, targetPort int, nodePort int32, labels map[string]string) *apiv1.Service {
svc, err := kubeClient.CoreV1().Services(ns).Create(&apiv1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: apiv1.ServiceSpec{
Type: apiv1.ServiceTypeNodePort,
Ports: []apiv1.ServicePort{
{
Protocol: apiv1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromInt(targetPort),
NodePort: nodePort,
},
},
Selector: labels,
},
})
if err != nil {
log.Panicf("Failed to create svc: %v", err)
}
return svc
}
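// httpGet fetches the given URL and returns the response body as a string, panicking on any error.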
func httpGet(url string) string {
resp, err := http.Get(url)
if err != nil {
log.Panicf("HTTP Get failed: URL %v: %v", url, err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Panicf("HTTP Get failed to read body: URL %v: %v", url, err)
}
return string(body)
}
func TestExecutor(t *testing.T) {
// run in a random namespace so we can have concurrent tests
// on a given cluster
rand.Seed(time.Now().UTC().UnixNano())
testId := rand.Intn(999)
fissionNs := fmt.Sprintf("test-%v", testId)
functionNs := fmt.Sprintf("test-function-%v", testId)
// skip test if no cluster available for testing
kubeconfig := os.Getenv("KUBECONFIG")
if len(kubeconfig) == 0 {
t.Skip("Skipping test, no kubernetes cluster")
return
}
// connect to k8s
// and get CRD client
fissionClient, kubeClient, apiExtClient, err := crd.MakeFissionClient()
if err != nil {
log.Panicf("failed to connect: %v", err)
}
// create the test's namespaces
createTestNamespace(kubeClient, fissionNs)
defer kubeClient.CoreV1().Namespaces().Delete(fissionNs, nil)
createTestNamespace(kubeClient, functionNs)
defer kubeClient.CoreV1().Namespaces().Delete(functionNs, nil)
logger, err := zap.NewDevelopment()
panicIf(err)
// make sure CRD types exist on cluster
err = crd.EnsureFissionCRDs(logger, apiExtClient)
if err != nil {
log.Panicf("failed to ensure crds: %v", err)
}
err = fissionClient.WaitForCRDs()
if err != nil {
log.Panicf("failed to wait crds: %v", err)
}
// create an env on the cluster
env, err := fissionClient.Environments(fissionNs).Create(&fv1.Environment{
Metadata: metav1.ObjectMeta{
Name: "nodejs",
Namespace: fissionNs,
},
Spec: fv1.EnvironmentSpec{
Version: 1,
Runtime: fv1.Runtime{
Image: "fission/node-env",
},
Builder: fv1.Builder{},
},
})
if err != nil {
log.Panicf("failed to create env: %v", err)
}
// create poolmgr
port := 9999
err = StartExecutor(logger, functionNs, "fission-builder", port)
if err != nil {
log.Panicf("failed to start poolmgr: %v", err)
}
// connect poolmgr client
poolmgrClient := client.MakeClient(logger, fmt.Sprintf("http://localhost:%v", port))
// Wait for pool to be created (we don't actually need to do
// this, since the API should do the right thing in any case).
// waitForPool(functionNs, "nodejs")
time.Sleep(6 * time.Second)
envRef := fv1.EnvironmentReference{
Namespace: env.Metadata.Namespace,
Name: env.Metadata.Name,
}
deployment := fv1.Archive{
Type: fv1.ArchiveTypeLiteral,
Literal: []byte(`module.exports = async function(context) { return { status: 200, body: "Hello, world!\n" }; }`),
}
// create a package
p := &fv1.Package{
Metadata: metav1.ObjectMeta{
Name: "hello",
Namespace: fissionNs,
},
Spec: fv1.PackageSpec{
Environment: envRef,
Deployment: deployment,
},
}
p, err = fissionClient.Packages(fissionNs).Create(p)
if err != nil {
log.Panicf("failed to create package: %v", err)
}
// create a function
f := &fv1.Function{
Metadata: metav1.ObjectMeta{
Name: "hello",
Namespace: fissionNs,
},
Spec: fv1.FunctionSpec{
Environment: envRef,
Package: fv1.FunctionPackageRef{
PackageRef: fv1.PackageRef{
Namespace: p.Metadata.Namespace,
Name: p.Metadata.Name,
ResourceVersion: p.Metadata.ResourceVersion,
},
},
},
}
_, err = fissionClient.Functions(fissionNs).Create(f)
if err != nil {
log.Panicf("failed to create function: %v", err)
}
// create a service to call fetcher and the env container
labels := map[string]string{"functionName": f.Metadata.Name}
var fetcherPort int32 = 30001
fetcherSvc := createSvc(kubeClient, functionNs, fmt.Sprintf("%v-%v", f.Metadata.Name, "fetcher"), 8000, fetcherPort, labels)
defer kubeClient.CoreV1().Services(functionNs).Delete(fetcherSvc.ObjectMeta.Name, nil)
var funcSvcPort int32 = 30002
functionSvc := createSvc(kubeClient, functionNs, f.Metadata.Name, 8888, funcSvcPort, labels)
defer kubeClient.CoreV1().Services(functionNs).Delete(functionSvc.ObjectMeta.Name, nil)
// the main test: get a service for a given function
t1 := time.Now()
svc, err := poolmgrClient.GetServiceForFunction(context.Background(), &f.Metadata)
if err != nil {
log.Panicf("failed to get func svc: %v", err)
}
log.Printf("svc for function created at: %v (in %v)", svc, time.Since(t1))
// ensure that a pod with the label functionName=f.Metadata.Name exists
podCount := countPods(kubeClient, functionNs, map[string]string{"functionName": f.Metadata.Name})
if podCount != 1 {
log.Panicf("expected 1 function pod, found %v", podCount)
}
// call the service to ensure it works
// wait for a bit
// tap service to simulate calling it again
// make sure the same pod is still there
// wait for idleTimeout to ensure the pod is removed
// remove env
// wait for pool to be destroyed
// that's it
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
qa/rpc-tests/p2p-acceptblock.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("DASHD", "firelynxcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
[] |
[] |
[
"DASHD"
] |
[]
|
["DASHD"]
|
python
| 1 | 0 | |
adapter/config/yaml/yaml_test.go
|
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"fmt"
"os"
"testing"
"github.com/beego/beego/v2/adapter/config"
)
func TestYaml(t *testing.T) {
var (
yamlcontext = `
"appname": beeapi
"httpport": 8080
"mysqlport": 3600
"PI": 3.1415976
"runmode": dev
"autorender": false
"copyrequestbody": true
"PATH": GOPATH
"path1": ${GOPATH}
"path2": ${GOPATH||/home/go}
"empty": ""
`
keyValue = map[string]interface{}{
"appname": "beeapi",
"httpport": 8080,
"mysqlport": int64(3600),
"PI": 3.1415976,
"runmode": "dev",
"autorender": false,
"copyrequestbody": true,
"PATH": "GOPATH",
"path1": os.Getenv("GOPATH"),
"path2": os.Getenv("GOPATH"),
"error": "",
"emptystrings": []string{},
}
)
f, err := os.Create("testyaml.conf")
if err != nil {
t.Fatal(err)
}
_, err = f.WriteString(yamlcontext)
if err != nil {
f.Close()
t.Fatal(err)
}
f.Close()
defer os.Remove("testyaml.conf")
yamlconf, err := config.NewConfig("yaml", "testyaml.conf")
if err != nil {
t.Fatal(err)
}
if yamlconf.String("appname") != "beeapi" {
t.Fatal("appname not equal to beeapi")
}
for k, v := range keyValue {
var (
value interface{}
err error
)
switch v.(type) {
case int:
value, err = yamlconf.Int(k)
case int64:
value, err = yamlconf.Int64(k)
case float64:
value, err = yamlconf.Float(k)
case bool:
value, err = yamlconf.Bool(k)
case []string:
value = yamlconf.Strings(k)
case string:
value = yamlconf.String(k)
default:
value, err = yamlconf.DIY(k)
}
if err != nil {
t.Errorf("get key %q value fatal,%v err %s", k, v, err)
} else if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", value) {
t.Errorf("get key %q value, want %v got %v .", k, v, value)
}
}
if err = yamlconf.Set("name", "astaxie"); err != nil {
t.Fatal(err)
}
if yamlconf.String("name") != "astaxie" {
t.Fatal("get name error")
}
}
|
[
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
docs/conf.py
|
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
import time
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
version = release = tornado.version
language = 'zh_CN'
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [
"tornado.platform.asyncio",
"tornado.platform.caresresolver",
"tornado.platform.twisted",
]
# I wish this could go in a per-module file...
coverage_ignore_classes = [
# tornado.concurrent
"TracebackFuture",
# tornado.gen
"Runner",
# tornado.ioloop
"PollIOLoop",
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
"WebSocketProtocol76",
]
coverage_ignore_functions = [
# various modules
"doctests",
"main",
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
]
html_favicon = 'favicon.ico'
latex_documents = [
('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {
'current_tarball': (
'https://pypi.org/packages/source/t/tornado/tornado-%s.tar.g%%s' % version,
'tornado-%s.tar.g' % version),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
tests/test_cufft.py
|
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: Alexey Kazantsev <[email protected]>
"""
"""
Tests some of the api in cuda4py.cufft package.
"""
import cuda4py as cu
import cuda4py.cufft as cufft
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.DEBUG)
self.old_env = os.environ.get("CUDA_DEVICE")
if self.old_env is None:
os.environ["CUDA_DEVICE"] = "0"
self.ctx = cu.Devices().create_some_context()
self.path = os.path.dirname(__file__)
if not len(self.path):
self.path = "."
def tearDown(self):
if self.old_env is None:
del os.environ["CUDA_DEVICE"]
else:
os.environ["CUDA_DEVICE"] = self.old_env
del self.old_env
del self.ctx
gc.collect()
def test_constants(self):
self.assertEqual(cufft.CUFFT_SUCCESS, 0)
self.assertEqual(cufft.CUFFT_INVALID_PLAN, 1)
self.assertEqual(cufft.CUFFT_ALLOC_FAILED, 2)
self.assertEqual(cufft.CUFFT_INVALID_TYPE, 3)
self.assertEqual(cufft.CUFFT_INVALID_VALUE, 4)
self.assertEqual(cufft.CUFFT_INTERNAL_ERROR, 5)
self.assertEqual(cufft.CUFFT_EXEC_FAILED, 6)
self.assertEqual(cufft.CUFFT_SETUP_FAILED, 7)
self.assertEqual(cufft.CUFFT_INVALID_SIZE, 8)
self.assertEqual(cufft.CUFFT_UNALIGNED_DATA, 9)
self.assertEqual(cufft.CUFFT_INCOMPLETE_PARAMETER_LIST, 10)
self.assertEqual(cufft.CUFFT_INVALID_DEVICE, 11)
self.assertEqual(cufft.CUFFT_PARSE_ERROR, 12)
self.assertEqual(cufft.CUFFT_NO_WORKSPACE, 13)
self.assertEqual(cufft.CUFFT_R2C, 0x2a)
self.assertEqual(cufft.CUFFT_C2R, 0x2c)
self.assertEqual(cufft.CUFFT_C2C, 0x29)
self.assertEqual(cufft.CUFFT_D2Z, 0x6a)
self.assertEqual(cufft.CUFFT_Z2D, 0x6c)
self.assertEqual(cufft.CUFFT_Z2Z, 0x69)
self.assertEqual(cufft.CUFFT_FORWARD, -1)
self.assertEqual(cufft.CUFFT_INVERSE, 1)
def test_errors(self):
idx = cu.CU.ERRORS[cufft.CUFFT_INVALID_PLAN].find(" | ")
self.assertGreater(idx, 0)
def test_version(self):
fft = cufft.CUFFT(self.ctx)
ver = fft.version
logging.debug("cuFFT version is %d", ver)
self.assertTrue(ver == int(ver))
def test_auto_allocation(self):
fft = cufft.CUFFT(self.ctx)
self.assertTrue(fft.auto_allocation)
fft.auto_allocation = False
self.assertFalse(fft.auto_allocation)
fft.auto_allocation = True
self.assertTrue(fft.auto_allocation)
def test_make_plan_many(self):
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C)
logging.debug(
"make_plan_many (default layout) for 256x128 x8 returned %d", sz)
logging.debug("size is %d", fft.size)
self.assertEqual(fft.execute, fft.exec_c2c)
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many((256, 128), 8, cufft.CUFFT_C2C,
(256, 128), 1, 256 * 128,
(256, 128), 1, 256 * 128)
logging.debug(
"make_plan_many (tight layout) for 256x128 x8 returned is %d", sz)
logging.debug("size is %d", fft.size)
def _test_exec(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x[:] = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones((x.shape[0], x.shape[1] // 2 + 1),
dtype={numpy.float32: numpy.complex64,
numpy.float64: numpy.complex128}[dtype])
x_gold = x.copy()
try:
y_gold = numpy.fft.rfft2(x)
except TypeError:
y_gold = None # for pypy
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
# Forward transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_R2C,
numpy.float64: cufft.CUFFT_D2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_r2c,
numpy.float64: fft.exec_d2z}[dtype])
fft.execute(xbuf, ybuf)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
# Inverse transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.float32: cufft.CUFFT_C2R,
numpy.float64: cufft.CUFFT_Z2D}[dtype])
fft.workarea = cu.MemAlloc(self.ctx, sz)
y /= x.size # correct scale before inverting
ybuf.to_device_async(y)
xbuf.memset32_async(0) # reset the resulting vector
self.assertEqual(fft.execute,
{numpy.float32: fft.exec_c2r,
numpy.float64: fft.exec_z2d}[dtype])
fft.execute(ybuf, xbuf)
xbuf.to_host(x)
max_diff = numpy.fabs(x - x_gold).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.float32: 1.0e-3,
numpy.float64: 1.0e-6}[dtype])
def test_exec_float(self):
logging.debug("ENTER: test_exec_float")
self._test_exec(numpy.float32)
logging.debug("EXIT: test_exec_float")
def test_exec_double(self):
logging.debug("ENTER: test_exec_double")
self._test_exec(numpy.float64)
logging.debug("EXIT: test_exec_double")
def _test_exec_complex(self, dtype):
x = numpy.zeros([32, 64], dtype=dtype)
x.real = numpy.random.rand(x.size).reshape(x.shape) - 0.5
x.imag = numpy.random.rand(x.size).reshape(x.shape) - 0.5
y = numpy.ones_like(x)
x_gold = x.copy()
try:
y_gold = numpy.fft.fft2(x)
except TypeError:
y_gold = None # for pypy
xbuf = cu.MemAlloc(self.ctx, x)
ybuf = cu.MemAlloc(self.ctx, y)
# Forward transform
fft = cufft.CUFFT(self.ctx)
fft.auto_allocation = False
sz = fft.make_plan_many(x.shape, 1,
{numpy.complex64: cufft.CUFFT_C2C,
numpy.complex128: cufft.CUFFT_Z2Z}[dtype])
tmp = cu.MemAlloc(self.ctx, sz)
fft.workarea = tmp
self.assertEqual(fft.workarea, tmp)
self.assertEqual(fft.execute, {numpy.complex64: fft.exec_c2c,
numpy.complex128: fft.exec_z2z}[dtype])
fft.execute(xbuf, ybuf, cufft.CUFFT_FORWARD)
ybuf.to_host(y)
if y_gold is not None:
delta = y - y_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Forward max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
# Inverse transform
y /= x.size # correct scale before inverting
ybuf.to_device_async(y)
xbuf.memset32_async(0) # reset the resulting vector
fft.execute(ybuf, xbuf, cufft.CUFFT_INVERSE)
xbuf.to_host(x)
delta = x - x_gold
max_diff = numpy.fabs(numpy.sqrt(delta.real * delta.real +
delta.imag * delta.imag)).max()
logging.debug("Inverse max_diff is %.6e", max_diff)
self.assertLess(max_diff, {numpy.complex64: 1.0e-3,
numpy.complex128: 1.0e-6}[dtype])
def test_exec_complex_float(self):
logging.debug("ENTER: test_exec_complex_float")
self._test_exec_complex(numpy.complex64)
logging.debug("EXIT: test_exec_complex_float")
def test_exec_complex_double(self):
logging.debug("ENTER: test_exec_complex_double")
self._test_exec_complex(numpy.complex128)
logging.debug("EXIT: test_exec_complex_double")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
[] |
[] |
[
"CUDA_DEVICE"
] |
[]
|
["CUDA_DEVICE"]
|
python
| 1 | 0 | |
chain/store/store.go
|
package store
import (
"context"
"encoding/json"
"errors"
"os"
"strconv"
"strings"
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"go.uber.org/multierr"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/pubsub"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/metrics"
)
var log = logging.Logger("chainstore")
var (
chainHeadKey = dstore.NewKey("head")
checkpointKey = dstore.NewKey("/chain/checks")
blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
)
var DefaultTipSetCacheSize = 8192
var DefaultMsgMetaCacheSize = 2048
var ErrNotifeeDone = errors.New("notifee is done and should be removed")
func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
}
DefaultTipSetCacheSize = tscs
}
if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
mmcs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
}
DefaultMsgMetaCacheSize = mmcs
}
}
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee = func(rev, app []*types.TipSet) error
// Journal event types.
const (
evtTypeHeadChange = iota
)
type HeadChangeEvt struct {
From types.TipSetKey
FromHeight abi.ChainEpoch
To types.TipSetKey
ToHeight abi.ChainEpoch
RevertCount int
ApplyCount int
}
type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error)
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct {
chainBlockstore bstore.Blockstore
stateBlockstore bstore.Blockstore
metadataDs dstore.Batching
weight WeightFunc
chainLocalBlockstore bstore.Blockstore
heaviestLk sync.RWMutex
heaviest *types.TipSet
checkpoint *types.TipSet
bestTips *pubsub.PubSub
pubLk sync.Mutex
tstLk sync.Mutex
tipsets map[abi.ChainEpoch][]cid.Cid
cindex *ChainIndex
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid)
tsCache *lru.ARCCache
evtTypes [1]journal.EventType
journal journal.Journal
cancelFn context.CancelFunc
wg sync.WaitGroup
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
ctx, cancel := context.WithCancel(context.Background())
// unwraps the fallback store in case one is configured.
// some methods _need_ to operate on a local blockstore only.
localbs, _ := bstore.UnwrapFallbackStore(chainBs)
cs := &ChainStore{
chainBlockstore: chainBs,
stateBlockstore: stateBs,
chainLocalBlockstore: localbs,
weight: weight,
metadataDs: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
mmCache: c,
tsCache: tsc,
cancelFn: cancel,
journal: j,
}
cs.evtTypes = [1]journal.EventType{
evtTypeHeadChange: j.RegisterEventType("sync", "head_change"),
}
ci := NewChainIndex(cs.LoadTipSet)
cs.cindex = ci
hcnf := func(rev, app []*types.TipSet) error {
cs.pubLk.Lock()
defer cs.pubLk.Unlock()
notif := make([]*api.HeadChange, len(rev)+len(app))
for i, r := range rev {
notif[i] = &api.HeadChange{
Type: HCRevert,
Val: r,
}
}
for i, r := range app {
notif[i+len(rev)] = &api.HeadChange{
Type: HCApply,
Val: r,
}
}
cs.bestTips.Pub(notif, "headchange")
return nil
}
hcmetric := func(rev, app []*types.TipSet) error {
for _, r := range app {
stats.Record(context.Background(), metrics.ChainNodeHeight.M(int64(r.Height())))
}
return nil
}
cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(ctx, []ReorgNotifee{hcnf, hcmetric})
return cs
}
func (cs *ChainStore) Close() error {
cs.cancelFn()
cs.wg.Wait()
return nil
}
func (cs *ChainStore) Load(ctx context.Context) error {
if err := cs.loadHead(ctx); err != nil {
return err
}
if err := cs.loadCheckpoint(ctx); err != nil {
return err
}
return nil
}
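// loadHead reads the persisted chain head from the metadata datastore and sets it as the
// in-memory heaviest tipset.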
func (cs *ChainStore) loadHead(ctx context.Context) error {
head, err := cs.metadataDs.Get(ctx, chainHeadKey)
if err == dstore.ErrNotFound {
log.Warn("no previous chain state found")
return nil
}
if err != nil {
return xerrors.Errorf("failed to load chain state from datastore: %w", err)
}
var tscids []cid.Cid
if err := json.Unmarshal(head, &tscids); err != nil {
return xerrors.Errorf("failed to unmarshal stored chain head: %w", err)
}
ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...))
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.heaviest = ts
return nil
}
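// loadCheckpoint reads the persisted checkpoint tipset key, if any, and caches the
// corresponding tipset in memory.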
func (cs *ChainStore) loadCheckpoint(ctx context.Context) error {
tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey)
if err == dstore.ErrNotFound {
return nil
}
if err != nil {
return xerrors.Errorf("failed to load checkpoint from datastore: %w", err)
}
var tsk types.TipSetKey
err = json.Unmarshal(tskBytes, &tsk)
if err != nil {
return err
}
ts, err := cs.LoadTipSet(ctx, tsk)
if err != nil {
return xerrors.Errorf("loading tipset: %w", err)
}
cs.checkpoint = ts
return nil
}
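// writeHead persists the CIDs of the given tipset as the current chain head in the metadata datastore.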
func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error {
data, err := json.Marshal(ts.Cids())
if err != nil {
return xerrors.Errorf("failed to marshal tipset: %w", err)
}
if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil {
return xerrors.Errorf("failed to write chain head to datastore: %w", err)
}
return nil
}
const (
HCRevert = "revert"
HCApply = "apply"
HCCurrent = "current"
)
func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange {
cs.pubLk.Lock()
subch := cs.bestTips.Sub("headchange")
head := cs.GetHeaviestTipSet()
cs.pubLk.Unlock()
out := make(chan []*api.HeadChange, 16)
out <- []*api.HeadChange{{
Type: HCCurrent,
Val: head,
}}
go func() {
defer func() {
// Tell the caller we're done first, the following may block for a bit.
close(out)
// Unsubscribe.
cs.bestTips.Unsub(subch)
// Drain the channel.
for range subch {
}
}()
for {
select {
case val, ok := <-subch:
if !ok {
// Shutting down.
return
}
select {
case out <- val.([]*api.HeadChange):
default:
log.Errorf("closing head change subscription due to slow reader")
return
}
if len(out) > 5 {
log.Warnf("head change sub is slow, has %d buffered entries", len(out))
}
case <-ctx.Done():
return
}
}
}()
return out
}
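// SubscribeHeadChanges registers an additional ReorgNotifee with the reorg worker; it will be
// invoked on every subsequent head change.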
func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.reorgNotifeeCh <- f
}
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.metadataDs.Has(ctx, key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
}
func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.metadataDs.Delete(ctx, key); err != nil {
return xerrors.Errorf("removing from valid block cache: %w", err)
}
return nil
}
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
return err
}
if err := cs.PutTipSet(ctx, ts); err != nil {
return err
}
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
}
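// PutTipSet persists the tipset's block headers, expands the tipset against the tipset tracker,
// and promotes the result to the new head if it is heavier.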
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
for _, b := range ts.Blocks() {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
}
expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
if err != nil {
return xerrors.Errorf("errored while expanding tipset: %w", err)
}
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
}
return nil
}
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head and does not exceed the maximum fork length.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
for {
cs.heaviestLk.Lock()
if len(cs.reorgCh) < reorgChBuf/2 {
break
}
cs.heaviestLk.Unlock()
log.Errorf("reorg channel is heavily backlogged, waiting a bit before trying to take process new tipsets")
select {
case <-time.After(time.Second / 2):
case <-ctx.Done():
return ctx.Err()
}
}
defer cs.heaviestLk.Unlock()
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
if err != nil {
return err
}
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
if err != nil {
return err
}
heavier := w.GreaterThan(heaviestW)
if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
heavier = breakWeightTie(ts, cs.heaviest)
}
if heavier {
// TODO: don't do this for initial sync. Now that we don't have a
// difference between 'bootstrap sync' and 'caught up' sync, we need
// some other heuristic.
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
if err != nil {
return err
}
if exceeds {
return nil
}
return cs.takeHeaviestTipSet(ctx, ts)
}
return nil
}
// Check if the two tipsets have a fork length above `ForkLengthThreshold`.
// `synced` is the head of the chain we are currently synced to and `external`
// is the incoming tipset potentially belonging to a forked chain. It assumes
the external chain has already been validated and is available in the ChainStore.
// The "fast forward" case is covered in this logic as a valid fork of length 0.
//
// FIXME: We may want to replace some of the logic in `syncFork()` with this.
// `syncFork()` counts the length on both sides of the fork at the moment (we
// need to settle on that) but here we just enforce it on the `synced` side.
func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) {
if synced == nil || external == nil {
// FIXME: If `cs.heaviest` is nil we should just bypass the entire
// `MaybeTakeHeavierTipSet` logic (instead of each of the called
// functions having to handle the nil case on their own).
return false, nil
}
var err error
// `forkLength`: number of tipsets we need to walk back from our `synced`
// chain to the common ancestor with the new `external` head in order to
// adopt the fork.
for forkLength := 0; forkLength < int(build.ForkLengthThreshold); forkLength++ {
// First walk back as many tipsets in the external chain to match the
// `synced` height to compare them. If we go past the `synced` height
// the subsequent match will fail but it will still be useful to get
// closer to the `synced` head parent's height in the next loop.
for external.Height() > synced.Height() {
if external.Height() == 0 {
// We reached the genesis of the external chain without a match;
// this is considered a fork outside the allowed limit (of "infinite"
// length).
return true, nil
}
external, err = cs.LoadTipSet(ctx, external.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err)
}
}
// Now check if we arrived at the common ancestor.
if synced.Equals(external) {
return false, nil
}
// Now check to see if we've walked back to the checkpoint.
if synced.Equals(cs.checkpoint) {
return true, nil
}
// If we didn't, go back *one* tipset on the `synced` side (incrementing
// the `forkLength`).
if synced.Height() == 0 {
// Same check as the `external` side, if we reach the start (genesis)
// there is no common ancestor.
return true, nil
}
synced, err = cs.LoadTipSet(ctx, synced.Parents())
if err != nil {
return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err)
}
}
// We traversed the fork length allowed without finding a common ancestor.
return true, nil
}
// ForceHeadSilent forces a chain head tipset without triggering a reorg
// operation.
//
// CAUTION: Use it only for testing, such as to teleport the chain to a
// particular tipset to carry out a benchmark, verification, etc. on a chain
// segment.
func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error {
log.Warnf("(!!!) forcing a new head silently; new head: %s", ts)
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
cs.heaviest = ts
err := cs.writeHead(ctx, ts)
if err != nil {
err = xerrors.Errorf("failed to write chain head: %s", err)
}
return err
}
type reorg struct {
old *types.TipSet
new *types.TipSet
}
const reorgChBuf = 32
func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
out := make(chan reorg, reorgChBuf)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
cs.wg.Add(1)
go func() {
defer cs.wg.Done()
defer log.Warn("reorgWorker quit")
for {
select {
case n := <-cs.reorgNotifeeCh:
notifees = append(notifees, n)
case r := <-out:
revert, apply, err := cs.ReorgOps(ctx, r.old, r.new)
if err != nil {
log.Error("computing reorg ops failed: ", err)
continue
}
cs.journal.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} {
return HeadChangeEvt{
From: r.old.Key(),
FromHeight: r.old.Height(),
To: r.new.Key(),
ToHeight: r.new.Height(),
RevertCount: len(revert),
ApplyCount: len(apply),
}
})
// reverse the apply array
for i := len(apply)/2 - 1; i >= 0; i-- {
opp := len(apply) - 1 - i
apply[i], apply[opp] = apply[opp], apply[i]
}
var toremove map[int]struct{}
for i, hcf := range notifees {
err := hcf(revert, apply)
switch err {
case nil:
case ErrNotifeeDone:
if toremove == nil {
toremove = make(map[int]struct{})
}
toremove[i] = struct{}{}
default:
log.Error("head change func errored (BAD): ", err)
}
}
if len(toremove) > 0 {
newNotifees := make([]ReorgNotifee, 0, len(notifees)-len(toremove))
for i, hcf := range notifees {
_, remove := toremove[i]
if remove {
continue
}
newNotifees = append(newNotifees, hcf)
}
notifees = newNotifees
}
case <-ctx.Done():
return
}
}
}()
return out
}
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}
span.AddAttributes(trace.BoolAttribute("newHead", true))
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
cs.heaviest = ts
if err := cs.writeHead(ctx, ts); err != nil {
log.Errorf("failed to write chain head: %s", err)
return nil
}
return nil
}
// FlushValidationCache removes all results of block validation from the
// chain metadata store. Usually the first step after a new chain import.
func (cs *ChainStore) FlushValidationCache(ctx context.Context) error {
return FlushValidationCache(ctx, cs.metadataDs)
}
func FlushValidationCache(ctx context.Context, ds dstore.Batching) error {
log.Infof("clearing block validation cache...")
dsWalk, err := ds.Query(ctx, query.Query{
// Potential TODO: the validation cache is not a namespace on its own
// but is rather constructed as prefixed-key `foo:bar` via .Instance(), which
// in turn does not work with the filter, which can match only on `foo/bar`
//
// If this is addressed (blockcache goes into its own sub-namespace) then
// strings.HasPrefix(...) below can be skipped
//
//Prefix: blockValidationCacheKeyPrefix.String()
KeysOnly: true,
})
if err != nil {
return xerrors.Errorf("failed to initialize key listing query: %w", err)
}
allKeys, err := dsWalk.Rest()
if err != nil {
return xerrors.Errorf("failed to run key listing query: %w", err)
}
batch, err := ds.Batch(ctx)
if err != nil {
return xerrors.Errorf("failed to open a DS batch: %w", err)
}
delCnt := 0
for _, k := range allKeys {
if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
delCnt++
batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck
}
}
if err := batch.Commit(ctx); err != nil {
return xerrors.Errorf("failed to commit the DS batch: %w", err)
}
log.Infof("%d block validation entries cleared.", delCnt)
return nil
}
// SetHead sets the chainstores current 'best' head node.
// This should only be called if something is broken and needs fixing.
//
// This function will bypass and remove any checkpoints.
func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if err := cs.removeCheckpoint(ctx); err != nil {
return err
}
return cs.takeHeaviestTipSet(context.TODO(), ts)
}
// RemoveCheckpoint removes the current checkpoint.
func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.removeCheckpoint(ctx)
}
func (cs *ChainStore) removeCheckpoint(ctx context.Context) error {
if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil {
return err
}
cs.checkpoint = nil
return nil
}
// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
//
// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
tskBytes, err := json.Marshal(ts.Key())
if err != nil {
return err
}
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
if ts.Height() > cs.heaviest.Height() {
return xerrors.Errorf("cannot set a checkpoint in the future")
}
// Otherwise, this operation could get _very_ expensive.
if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
}
if !ts.Equals(cs.heaviest) {
anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest)
if err != nil {
return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err)
}
if !anc {
return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
}
}
err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
if err != nil {
return err
}
cs.checkpoint = ts
return nil
}
func (cs *ChainStore) GetCheckpoint() *types.TipSet {
cs.heaviestLk.RLock()
chkpt := cs.checkpoint
cs.heaviestLk.RUnlock()
return chkpt
}
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() {
has, err := cs.chainBlockstore.Has(ctx, c)
if err != nil {
return false, err
}
if !has {
return false, nil
}
}
return true, nil
}
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) {
var blk *types.BlockHeader
err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) {
blk, err = types.DecodeBlock(b)
return err
})
return blk, err
}
func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
}
// Fetch tipset block headers from blockstore in parallel
var eg errgroup.Group
cids := tsk.Cids()
blks := make([]*types.BlockHeader, len(cids))
for i, c := range cids {
i, c := i, c
eg.Go(func() error {
b, err := cs.GetBlock(ctx, c)
if err != nil {
return xerrors.Errorf("get block %s: %w", c, err)
}
blks[i] = b
return nil
})
}
err := eg.Wait()
if err != nil {
return nil, err
}
ts, err := types.NewTipSet(blks)
if err != nil {
return nil, err
}
cs.tsCache.Add(tsk, ts)
return ts, nil
}
// IsAncestorOf returns true if 'a' is an ancestor of 'b'
func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) {
if b.Height() <= a.Height() {
return false, nil
}
cur := b
for !a.Equals(cur) && cur.Height() > a.Height() {
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return false, err
}
cur = next
}
return cur.Equals(a), nil
}
func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) {
l, _, err := cs.ReorgOps(ctx, a, b)
if err != nil {
return nil, err
}
return cs.LoadTipSet(ctx, l[len(l)-1].Parents())
}
// ReorgOps takes two tipsets (which can be at different heights), and walks
// their corresponding chains backwards one step at a time until we find
// a common ancestor. It then returns the respective chain segments that fork
// from the identified ancestor, in reverse order, where the first element of
// each slice is the supplied tipset, and the last element is the common
// ancestor.
//
// If an error happens along the way, we return the error with nil slices.
func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(ctx, cs.LoadTipSet, a, b)
}
func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
left := a
right := b
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(ctx, left.Parents())
if err != nil {
return nil, nil, err
}
left = par
} else {
rightChain = append(rightChain, right)
par, err := lts(ctx, right.Parents())
if err != nil {
log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err
}
right = par
}
}
return leftChain, rightChain, nil
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
cs.heaviestLk.RLock()
ts = cs.heaviest
cs.heaviestLk.RUnlock()
return
}
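// AddToTipSetTracker records a block header in the per-height tipset tracker so it can later be
// expanded into a tipset, warning when multiple blocks from the same miner appear at one height
// and pruning entries older than the finality window.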
func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
tss := cs.tipsets[b.Height]
for _, oc := range tss {
if oc == b.Cid() {
log.Debug("tried to add block to tipset tracker that was already there")
return nil
}
h, err := cs.GetBlock(ctx, oc)
if err == nil && h != nil {
if h.Miner == b.Miner {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid())
}
}
}
// This function is called 5 times per epoch on average
// It is also called with tipsets that are done with initial validation
// so they cannot be from the future.
// We are guaranteed not to use tipsets older than 900 epochs (fork limit)
// This means that we ideally want to keep only most recent 900 epochs in here
// Golang's map iteration starts at a random point in a map.
// With 5 tries per epoch, and 900 entries to keep, on average we will have
// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
// Seems good enough to me
for height := range cs.tipsets {
if height < b.Height-build.Finality {
delete(cs.tipsets, height)
}
break
}
cs.tipsets[b.Height] = append(tss, b.Cid())
return nil
}
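// PersistBlockHeaders converts the given headers to storage blocks and writes them to the local
// chain blockstore in batches of 256.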
func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error {
sbs := make([]block.Block, len(b))
for i, header := range b {
var err error
sbs[i], err = header.ToStorageBlock()
if err != nil {
return err
}
}
batchSize := 256
calls := len(b) / batchSize
var err error
for i := 0; i <= calls; i++ {
start := batchSize * i
end := start + batchSize
if end > len(b) {
end = len(b)
}
err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end]))
}
return err
}
func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
// Hold lock for the whole function for now, if it becomes a problem we can
// fix pretty easily
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
all := []*types.BlockHeader{b}
tsets, ok := cs.tipsets[b.Height]
if !ok {
return types.NewTipSet(all)
}
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
for _, bhc := range tsets {
if bhc == b.Cid() {
continue
}
h, err := cs.GetBlock(ctx, bhc)
if err != nil {
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
if cid, found := inclMiners[h.Miner]; found {
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
continue
}
if types.CidArrsEqual(h.Parents, b.Parents) {
all = append(all, h)
inclMiners[h.Miner] = bhc
}
}
// TODO: other validation...?
return types.NewTipSet(all)
}
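// AddBlock persists a single block header, expands it into a tipset with any known siblings, and
// takes the result as the new head if it is heavier.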
func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error {
if err := cs.PersistBlockHeaders(ctx, b); err != nil {
return err
}
ts, err := cs.expandTipset(ctx, b)
if err != nil {
return err
}
if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil {
return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err)
}
return nil
}
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0"))
if err != nil {
return nil, err
}
c, err := cid.Cast(data)
if err != nil {
return nil, err
}
return cs.GetBlock(ctx, c)
}
// GetPath returns the sequence of atomic head change operations that
// need to be applied in order to switch the head of the chain from the `from`
// tipset to the `to` tipset.
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
fts, err := cs.LoadTipSet(ctx, from)
if err != nil {
return nil, xerrors.Errorf("loading from tipset %s: %w", from, err)
}
tts, err := cs.LoadTipSet(ctx, to)
if err != nil {
return nil, xerrors.Errorf("loading to tipset %s: %w", to, err)
}
revert, apply, err := cs.ReorgOps(ctx, fts, tts)
if err != nil {
return nil, xerrors.Errorf("error getting tipset branches: %w", err)
}
path := make([]*api.HeadChange, len(revert)+len(apply))
for i, r := range revert {
path[i] = &api.HeadChange{Type: HCRevert, Val: r}
}
for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 {
path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]}
}
return path, nil
}
// ChainBlockstore returns the chain blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) ChainBlockstore() bstore.Blockstore {
return cs.chainBlockstore
}
// StateBlockstore returns the state blockstore. Currently the chain and state
// stores are both backed by the same physical store, albeit with different
// caching policies, but in the future they will segregate.
func (cs *ChainStore) StateBlockstore() bstore.Blockstore {
return cs.stateBlockstore
}
func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store {
return adt.WrapStore(ctx, cbor.NewCborStore(bs))
}
func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store {
return ActorStore(ctx, cs.stateBlockstore)
}
func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) {
var out []*types.FullBlock
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := cs.MessagesForBlock(ctx, b)
if err != nil {
// TODO: check for 'not found' errors, and only return nil if this
// is actually a 'not found' error
return nil, nil
}
fb := &types.FullBlock{
Header: b,
BlsMessages: bmsgs,
SecpkMessages: smsgs,
}
out = append(out, fb)
}
return NewFullTipSet(out), nil
}
// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
// height. In the case that the given height is a null round, the 'prev' flag
// selects the tipset before the null round if true, and the tipset following
// the null round if false.
func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
if h > ts.Height() {
return nil, xerrors.Errorf("looking for tipset with height greater than start point")
}
if h == ts.Height() {
return ts, nil
}
lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h)
if err != nil {
return nil, err
}
if lbts.Height() < h {
log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h)
lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h)
if err != nil {
return nil, err
}
}
if lbts.Height() == h || !prev {
return lbts, nil
}
return cs.LoadTipSet(ctx, lbts.Parents())
}
func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove
return cs.weight(ctx, cs.StateBlockstore(), hts)
}
// true if ts1 wins according to the filecoin tie-break rule
func breakWeightTie(ts1, ts2 *types.TipSet) bool {
s := len(ts1.Blocks())
if s > len(ts2.Blocks()) {
s = len(ts2.Blocks())
}
// blocks are already sorted by ticket
for i := 0; i < s; i++ {
if ts1.Blocks()[i].Ticket.Less(ts2.Blocks()[i].Ticket) {
log.Infof("weight tie broken in favour of %s", ts1.Key())
return true
}
}
log.Infof("weight tie left unbroken, default to %s", ts2.Key())
return false
}
func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
if tsk.IsEmpty() {
return cs.GetHeaviestTipSet(), nil
}
return cs.LoadTipSet(ctx, tsk)
}
func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts
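// Walk back through at most 20 tipsets looking for the most recent beacon entry.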
for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries
if len(cbe) > 0 {
return &cbe[len(cbe)-1], nil
}
if cur.Height() == 0 {
return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
}
next, err := cs.LoadTipSet(ctx, cur.Parents())
if err != nil {
return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
}
cur = next
}
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return &types.BeaconEntry{
Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9},
}, nil
}
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
|
[
"\"LOTUS_CHAIN_TIPSET_CACHE\"",
"\"LOTUS_CHAIN_MSGMETA_CACHE\"",
"\"LOTUS_IGNORE_DRAND\""
] |
[] |
[
"LOTUS_CHAIN_TIPSET_CACHE",
"LOTUS_IGNORE_DRAND",
"LOTUS_CHAIN_MSGMETA_CACHE"
] |
[]
|
["LOTUS_CHAIN_TIPSET_CACHE", "LOTUS_IGNORE_DRAND", "LOTUS_CHAIN_MSGMETA_CACHE"]
|
go
| 3 | 0 | |
vendor/github.com/go-kit/kit/sd/etcdv3/integration_test.go
|
// +build flaky_integration
package etcdv3
import (
"context"
"io"
"os"
"testing"
"time"
"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/sd"
)
func runIntegration(settings integrationSettings, client Client, service Service, t *testing.T) {
// Verify test data is initially empty.
entries, err := client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no instance entries, got %d", settings.key, len(entries))
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
// Instantiate a new Registrar, passing in test data.
registrar := NewRegistrar(
client,
service,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "registrar"),
)
// Register our instance.
registrar.Register()
t.Log("Registered")
// Retrieve entries from etcd manually.
entries, err = client.GetEntries(settings.key)
if err != nil {
t.Fatalf("client.GetEntries(%q): %v", settings.key, err)
}
if want, have := 1, len(entries); want != have {
t.Fatalf("client.GetEntries(%q): want %d, have %d", settings.key, want, have)
}
if want, have := settings.value, entries[0]; want != have {
t.Fatalf("want %q, have %q", want, have)
}
instancer, err := NewInstancer(
client,
settings.prefix,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
)
if err != nil {
t.Fatalf("NewInstancer: %v", err)
}
t.Log("Constructed Instancer OK")
defer instancer.Stop()
endpointer := sd.NewEndpointer(
instancer,
func(string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, nil, nil },
log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
)
t.Log("Constructed Endpointer OK")
defer endpointer.Close()
if !within(time.Second, func() bool {
endpoints, err := endpointer.Endpoints()
return err == nil && len(endpoints) == 1
}) {
t.Fatal("Endpointer didn't see Register in time")
}
t.Log("Endpointer saw Register OK")
// Deregister first instance of test data.
registrar.Deregister()
t.Log("Deregistered")
// Check it was deregistered.
if !within(time.Second, func() bool {
endpoints, err := endpointer.Endpoints()
t.Logf("Checking Deregister: len(endpoints) = %d, err = %v", len(endpoints), err)
return err == nil && len(endpoints) == 0
}) {
t.Fatalf("Endpointer didn't see Deregister in time")
}
// Verify test data no longer exists in etcd.
entries, err = client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no entries, got %v", settings.key, entries)
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
}
type integrationSettings struct {
addr string
prefix string
instance string
key string
value string
}
func testIntegrationSettings(t *testing.T) integrationSettings {
var settings integrationSettings
settings.addr = os.Getenv("ETCD_ADDR")
if settings.addr == "" {
t.Skip("ETCD_ADDR not set; skipping integration test")
}
settings.prefix = "/services/foosvc/" // known at compile time
settings.instance = "1.2.3.4:8080" // taken from runtime or platform, somehow
settings.key = settings.prefix + settings.instance
settings.value = "http://" + settings.instance // based on our transport
return settings
}
// Package sd/etcd provides a wrapper around the etcd key/value store. This
// example assumes the user has an instance of etcd installed and running
// locally on port 2379.
func TestIntegration(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
}
runIntegration(settings, client, service, t)
}
func TestIntegrationTTL(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
TTL: NewTTLOption(time.Second*3, time.Second*10),
}
defer client.Deregister(service)
runIntegration(settings, client, service, t)
}
func TestIntegrationRegistrarOnly(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
TTL: NewTTLOption(time.Second*3, time.Second*10),
}
defer client.Deregister(service)
// Verify test data is initially empty.
entries, err := client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no instance entries, got %d", settings.key, len(entries))
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
// Instantiate a new Registrar, passing in test data.
registrar := NewRegistrar(
client,
service,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "registrar"),
)
// Register our instance.
registrar.Register()
t.Log("Registered")
// Deregister our instance. (so we test registrar only scenario)
registrar.Deregister()
t.Log("Deregistered")
}
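// within polls f roughly every d/10 and reports whether it returned true before the deadline d elapsed.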
func within(d time.Duration, f func() bool) bool {
deadline := time.Now().Add(d)
for time.Now().Before(deadline) {
if f() {
return true
}
time.Sleep(d / 10)
}
return false
}
|
[
"\"ETCD_ADDR\""
] |
[] |
[
"ETCD_ADDR"
] |
[]
|
["ETCD_ADDR"]
|
go
| 1 | 0 | |
cmd/gardenlet/app/gardenlet.go
|
// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/gardener/gardener/cmd/utils"
gardencore "github.com/gardener/gardener/pkg/apis/core"
gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
gardencoreinformers "github.com/gardener/gardener/pkg/client/core/informers/externalversions"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap"
clientmapbuilder "github.com/gardener/gardener/pkg/client/kubernetes/clientmap/builder"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap/keys"
"github.com/gardener/gardener/pkg/features"
"github.com/gardener/gardener/pkg/gardenlet/apis/config"
configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
configvalidation "github.com/gardener/gardener/pkg/gardenlet/apis/config/validation"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap/certificate"
"github.com/gardener/gardener/pkg/gardenlet/controller"
seedcontroller "github.com/gardener/gardener/pkg/gardenlet/controller/seed"
gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features"
"github.com/gardener/gardener/pkg/healthz"
"github.com/gardener/gardener/pkg/logger"
"github.com/gardener/gardener/pkg/server"
gardenerutils "github.com/gardener/gardener/pkg/utils"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/secrets"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/informers"
kubeinformers "k8s.io/client-go/informers"
kubernetesclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
)
// Options has all the context and parameters needed to run a Gardenlet.
type Options struct {
// ConfigFile is the location of the Gardenlet's configuration file.
ConfigFile string
config *config.GardenletConfiguration
scheme *runtime.Scheme
codecs serializer.CodecFactory
}
// AddFlags adds flags for a specific Gardenlet to the specified FlagSet.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
}
// NewOptions returns a new Options object.
func NewOptions() (*Options, error) {
o := &Options{
config: new(config.GardenletConfiguration),
}
o.scheme = runtime.NewScheme()
o.codecs = serializer.NewCodecFactory(o.scheme)
if err := config.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := configv1alpha1.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencore.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencorev1beta1.AddToScheme(o.scheme); err != nil {
return nil, err
}
return o, nil
}
// loadConfigFromFile loads the content of file and decodes it as a
// GardenletConfiguration object.
func (o *Options) loadConfigFromFile(file string) (*config.GardenletConfiguration, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
return o.decodeConfig(data)
}
// decodeConfig decodes data as a GardenletConfiguration object.
func (o *Options) decodeConfig(data []byte) (*config.GardenletConfiguration, error) {
gardenletConfig := &config.GardenletConfiguration{}
if _, _, err := o.codecs.UniversalDecoder().Decode(data, nil, gardenletConfig); err != nil {
return nil, err
}
return gardenletConfig, nil
}
func (o *Options) configFileSpecified() error {
if len(o.ConfigFile) == 0 {
return fmt.Errorf("missing Gardenlet config file")
}
return nil
}
// Validate validates all the required options.
func (o *Options) validate(args []string) error {
if len(args) != 0 {
return errors.New("arguments are not supported")
}
return nil
}
func run(ctx context.Context, o *Options) error {
if len(o.ConfigFile) > 0 {
c, err := o.loadConfigFromFile(o.ConfigFile)
if err != nil {
return fmt.Errorf("unable to read the configuration file: %v", err)
}
if errs := configvalidation.ValidateGardenletConfiguration(c, nil, false); len(errs) > 0 {
return fmt.Errorf("errors validating the configuration: %+v", errs)
}
o.config = c
}
// Add feature flags
if err := gardenletfeatures.FeatureGate.SetFromMap(o.config.FeatureGates); err != nil {
return err
}
kubernetes.UseCachedRuntimeClients = gardenletfeatures.FeatureGate.Enabled(features.CachedRuntimeClients)
if gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN) &&
(!gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI) ||
gardenletfeatures.FeatureGate.Enabled(features.KonnectivityTunnel)) {
return fmt.Errorf("inconsistent feature gate: APIServerSNI is required for ReversedVPN (APIServerSNI: %t, ReversedVPN: %t) and ReversedVPN is not compatible with KonnectivityTunnel (KonnectivityTunnel: %t)",
gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI), gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN), gardenletfeatures.FeatureGate.Enabled(features.KonnectivityTunnel))
}
gardenlet, err := NewGardenlet(ctx, o.config)
if err != nil {
return err
}
return gardenlet.Run(ctx)
}
// NewCommandStartGardenlet creates a *cobra.Command object with default parameters
func NewCommandStartGardenlet() *cobra.Command {
opts, err := NewOptions()
if err != nil {
panic(err)
}
cmd := &cobra.Command{
Use: "gardenlet",
Short: "Launch the Gardenlet",
Long: `In essence, the Gardener is an extension API server along with a bundle
of Kubernetes controllers which introduce new API objects in an existing Kubernetes
cluster (which is called Garden cluster) in order to use them for the management of
further Kubernetes clusters (which are called Shoot clusters).
To do that reliably and to offer a certain quality of service, it requires to control
the main components of a Kubernetes cluster (etcd, API server, controller manager, scheduler).
These so-called control plane components are hosted in Kubernetes clusters themselves
(which are called Seed clusters).`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
if err := opts.configFileSpecified(); err != nil {
return err
}
if err := opts.validate(args); err != nil {
return err
}
return run(cmd.Context(), opts)
},
}
flags := cmd.Flags()
verflag.AddFlags(flags)
opts.AddFlags(flags)
return cmd
}
// Gardenlet represents all the parameters required to start the
// Gardenlet.
type Gardenlet struct {
Config *config.GardenletConfiguration
Identity *gardencorev1beta1.Gardener
GardenClusterIdentity string
ClientMap clientmap.ClientMap
K8sGardenCoreInformers gardencoreinformers.SharedInformerFactory
KubeInformerFactory informers.SharedInformerFactory
Logger *logrus.Logger
Recorder record.EventRecorder
LeaderElection *leaderelection.LeaderElectionConfig
HealthManager healthz.Manager
CertificateManager *certificate.Manager
}
// NewGardenlet is the main entry point of instantiating a new Gardenlet.
func NewGardenlet(ctx context.Context, cfg *config.GardenletConfiguration) (*Gardenlet, error) {
if cfg == nil {
return nil, errors.New("config is required")
}
// Initialize logger
logger := logger.NewLogger(*cfg.LogLevel)
logger.Info("Starting Gardenlet...")
logger.Infof("Feature Gates: %s", gardenletfeatures.FeatureGate.String())
if flag := flag.Lookup("v"); flag != nil {
if err := flag.Value.Set(fmt.Sprintf("%d", cfg.KubernetesLogLevel)); err != nil {
return nil, err
}
}
// Prepare a Kubernetes client object for the Garden cluster which contains all the Clientsets
// that can be used to access the Kubernetes API.
if kubeconfig := os.Getenv("GARDEN_KUBECONFIG"); kubeconfig != "" {
cfg.GardenClientConnection.Kubeconfig = kubeconfig
}
if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
cfg.SeedClientConnection.Kubeconfig = kubeconfig
}
var (
kubeconfigFromBootstrap []byte
csrName string
seedName string
err error
)
// constructs a seed client for `SeedClientConnection.kubeconfig` or if not set,
// creates a seed client based on the service account token mounted into the gardenlet container running in Kubernetes
// when running outside of Kubernetes, `SeedClientConnection.kubeconfig` has to be set either directly or via the environment variable "KUBECONFIG"
seedClient, err := kubernetes.NewClientFromFile(
"",
cfg.SeedClientConnection.ClientConnectionConfiguration.Kubeconfig,
kubernetes.WithClientConnectionOptions(cfg.SeedClientConnection.ClientConnectionConfiguration),
kubernetes.WithDisabledCachedClient(),
)
if err != nil {
return nil, err
}
if cfg.GardenClientConnection.KubeconfigSecret != nil {
kubeconfigFromBootstrap, csrName, seedName, err = bootstrapKubeconfig(ctx, logger, seedClient.Client(), cfg)
if err != nil {
return nil, err
}
} else {
logger.Info("No kubeconfig secret given in the configuration under `.gardenClientConnection.kubeconfigSecret`. Skipping the kubeconfig bootstrap process and certificate rotation.")
}
if kubeconfigFromBootstrap == nil {
logger.Info("Falling back to the kubeconfig specified in the configuration under `.gardenClientConnection.kubeconfig`")
if len(cfg.GardenClientConnection.Kubeconfig) == 0 {
return nil, fmt.Errorf("the configuration file needs to either specify a Garden API Server kubeconfig under `.gardenClientConnection.kubeconfig` or provide bootstrapping information. " +
"To configure the Gardenlet for bootstrapping, provide the secret containing the bootstrap kubeconfig under `.gardenClientConnection.kubeconfigSecret` and also the secret name where the created kubeconfig should be stored for further use via`.gardenClientConnection.kubeconfigSecret`")
}
}
restCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.GardenClientConnection.ClientConnectionConfiguration, kubeconfigFromBootstrap)
if err != nil {
return nil, err
}
gardenClientMapBuilder := clientmapbuilder.NewGardenClientMapBuilder().
WithRESTConfig(restCfg).
// gardenlet does not have the required RBAC permissions for listing/watching the following resources, so let's prevent any
// attempts to cache them
WithUncached(
&gardencorev1beta1.Project{},
&gardencorev1alpha1.ShootState{},
)
if seedConfig := cfg.SeedConfig; seedConfig != nil {
gardenClientMapBuilder = gardenClientMapBuilder.ForSeed(seedConfig.Name)
}
seedClientMapBuilder := clientmapbuilder.NewSeedClientMapBuilder().
WithInCluster(cfg.SeedSelector == nil).
WithClientConnectionConfig(&cfg.SeedClientConnection.ClientConnectionConfiguration)
shootClientMapBuilder := clientmapbuilder.NewShootClientMapBuilder().
WithClientConnectionConfig(&cfg.ShootClientConnection.ClientConnectionConfiguration)
clientMap, err := clientmapbuilder.NewDelegatingClientMapBuilder().
WithGardenClientMapBuilder(gardenClientMapBuilder).
WithSeedClientMapBuilder(seedClientMapBuilder).
WithShootClientMapBuilder(shootClientMapBuilder).
WithLogger(logger).
Build()
if err != nil {
return nil, fmt.Errorf("failed to build ClientMap: %w", err)
}
k8sGardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())
if err != nil {
return nil, fmt.Errorf("failed to get garden client: %w", err)
}
// Delete bootstrap auth data if certificate was newly acquired
if len(csrName) > 0 && len(seedName) > 0 {
logger.Infof("Deleting bootstrap authentication data used to request a certificate")
if err := bootstrap.DeleteBootstrapAuth(ctx, k8sGardenClient.APIReader(), k8sGardenClient.Client(), csrName, seedName); err != nil {
return nil, err
}
}
// Set up leader election if enabled and prepare event recorder.
var (
leaderElectionConfig *leaderelection.LeaderElectionConfig
recorder = utils.CreateRecorder(k8sGardenClient.Kubernetes(), "gardenlet")
)
if cfg.LeaderElection.LeaderElect {
seedRestCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.SeedClientConnection.ClientConnectionConfiguration, nil)
if err != nil {
return nil, err
}
k8sSeedClientLeaderElection, err := kubernetesclientset.NewForConfig(seedRestCfg)
if err != nil {
return nil, fmt.Errorf("failed to create client for leader election: %w", err)
}
leaderElectionConfig, err = utils.MakeLeaderElectionConfig(
cfg.LeaderElection.LeaderElectionConfiguration,
*cfg.LeaderElection.LockObjectNamespace,
*cfg.LeaderElection.LockObjectName,
k8sSeedClientLeaderElection,
utils.CreateRecorder(k8sSeedClientLeaderElection, "gardenlet"),
)
if err != nil {
return nil, err
}
}
identity, err := determineGardenletIdentity()
if err != nil {
return nil, err
}
gardenClusterIdentity := &corev1.ConfigMap{}
if err := k8sGardenClient.APIReader().Get(ctx, kutil.Key(metav1.NamespaceSystem, v1beta1constants.ClusterIdentity), gardenClusterIdentity); err != nil {
return nil, fmt.Errorf("unable to get Gardener`s cluster-identity ConfigMap: %v", err)
}
clusterIdentity, ok := gardenClusterIdentity.Data[v1beta1constants.ClusterIdentity]
if !ok {
return nil, errors.New("unable to extract Gardener`s cluster identity from cluster-identity ConfigMap")
}
// create the certificate manager to schedule certificate rotations
var certificateManager *certificate.Manager
if cfg.GardenClientConnection.KubeconfigSecret != nil {
certificateManager = certificate.NewCertificateManager(clientMap, seedClient.Client(), cfg)
}
return &Gardenlet{
Identity: identity,
GardenClusterIdentity: clusterIdentity,
Config: cfg,
Logger: logger,
Recorder: recorder,
ClientMap: clientMap,
K8sGardenCoreInformers: gardencoreinformers.NewSharedInformerFactory(k8sGardenClient.GardenCore(), 0),
KubeInformerFactory: kubeinformers.NewSharedInformerFactory(k8sGardenClient.Kubernetes(), 0),
LeaderElection: leaderElectionConfig,
CertificateManager: certificateManager,
}, nil
}
// Run runs the Gardenlet. This should never exit.
func (g *Gardenlet) Run(ctx context.Context) error {
controllerCtx, controllerCancel := context.WithCancel(ctx)
defer controllerCancel()
// Initialize /healthz manager.
g.HealthManager = healthz.NewPeriodicHealthz(seedcontroller.LeaseResyncGracePeriodSeconds * time.Second)
if g.CertificateManager != nil {
g.CertificateManager.ScheduleCertificateRotation(controllerCtx, controllerCancel, g.Recorder)
}
// Start HTTPS server.
if g.Config.Server.HTTPS.TLS == nil {
g.Logger.Info("No TLS server certificates provided... self-generating them now...")
_, _, tempDir, err := secrets.SelfGenerateTLSServerCertificate("gardenlet", []string{
"gardenlet",
fmt.Sprintf("gardenlet.%s", v1beta1constants.GardenNamespace),
fmt.Sprintf("gardenlet.%s.svc", v1beta1constants.GardenNamespace),
}, nil)
if err != nil {
return err
}
g.Config.Server.HTTPS.TLS = &config.TLSServer{
ServerCertPath: filepath.Join(tempDir, secrets.DataKeyCertificate),
ServerKeyPath: filepath.Join(tempDir, secrets.DataKeyPrivateKey),
}
g.Logger.Info("TLS server certificates successfully self-generated.")
}
go server.
NewBuilder().
WithBindAddress(g.Config.Server.HTTPS.BindAddress).
WithPort(g.Config.Server.HTTPS.Port).
WithTLS(g.Config.Server.HTTPS.TLS.ServerCertPath, g.Config.Server.HTTPS.TLS.ServerKeyPath).
WithHandler("/metrics", promhttp.Handler()).
WithHandlerFunc("/healthz", healthz.HandlerFunc(g.HealthManager)).
Build().
Start(ctx)
// Prepare a reusable run function.
run := func(ctx context.Context) error {
g.HealthManager.Start()
return g.startControllers(ctx)
}
leaderElectionCtx, leaderElectionCancel := context.WithCancel(context.Background())
// If leader election is enabled, run via LeaderElector until done and exit.
if g.LeaderElection != nil {
g.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
g.Logger.Info("Acquired leadership, starting controllers.")
if err := run(controllerCtx); err != nil {
g.Logger.Errorf("failed to run gardenlet controllers: %v", err)
}
leaderElectionCancel()
},
OnStoppedLeading: func() {
g.Logger.Info("Lost leadership, terminating.")
controllerCancel()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*g.LeaderElection)
if err != nil {
return fmt.Errorf("couldn't create leader elector: %v", err)
}
leaderElector.Run(leaderElectionCtx)
return nil
}
// Leader election is disabled, thus run directly until done.
leaderElectionCancel()
err := run(controllerCtx)
if err != nil {
g.Logger.Errorf("failed to run gardenlet controllers: %v", err)
}
return err
}
func (g *Gardenlet) startControllers(ctx context.Context) error {
return controller.NewGardenletControllerFactory(
g.ClientMap,
g.K8sGardenCoreInformers,
g.KubeInformerFactory,
g.Config,
g.Identity,
g.GardenClusterIdentity,
g.Recorder,
g.HealthManager,
).Run(ctx)
}
// We want to determine the Docker container id of the currently running Gardenlet because
// we need to identify for still ongoing operations whether another Gardenlet instance is
// still operating the respective Shoots. When running locally, we generate a random string because
// there is no container id.
func determineGardenletIdentity() (*gardencorev1beta1.Gardener, error) {
var (
validID = regexp.MustCompile(`([0-9a-f]{64})`)
gardenletID string
gardenletName string
err error
)
gardenletName, err = os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// If running inside a Kubernetes cluster (as container) we can read the container id from the proc file system.
// Otherwise generate a random string for the gardenletID
if cGroupFile, err := os.Open("/proc/self/cgroup"); err == nil {
defer cGroupFile.Close()
reader := bufio.NewReader(cGroupFile)
var cgroupV1 string
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
// Store cgroup-v1 result for fall back
if strings.HasPrefix(line, "1:name=systemd") {
cgroupV1 = line
}
// Always prefer cgroup-v2
if strings.HasPrefix(line, "0::") {
if id := extractID(line); validID.MatchString(id) {
gardenletID = id
break
}
}
}
// Fall-back to cgroup-v1 if possible
if len(gardenletID) == 0 && len(cgroupV1) > 0 {
gardenletID = extractID(cgroupV1)
}
}
if gardenletID == "" {
gardenletID, err = gardenerutils.GenerateRandomString(64)
if err != nil {
return nil, fmt.Errorf("unable to generate gardenletID: %v", err)
}
}
return &gardencorev1beta1.Gardener{
ID: gardenletID,
Name: gardenletName,
Version: version.Get().GitVersion,
}, nil
}
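// extractID returns the last path segment of a cgroup line, stripping the ".scope"
// suffix and "docker-" prefix so a bare container id is returned for both cgroup v1 and v2 layouts.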
func extractID(line string) string {
var (
id string
splitBySlash = strings.Split(line, "/")
)
if len(splitBySlash) == 0 {
return ""
}
id = strings.TrimSpace(splitBySlash[len(splitBySlash)-1])
id = strings.TrimSuffix(id, ".scope")
id = strings.TrimPrefix(id, "docker-")
return id
}
|
[
"\"GARDEN_KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG",
"GARDEN_KUBECONFIG"
] |
[]
|
["KUBECONFIG", "GARDEN_KUBECONFIG"]
|
go
| 2 | 0 | |
selfdrive/crash.py
|
"""Install exception handler for process crash."""
import os
import sys
import capnp
import requests
import threading
import traceback
from common.params import Params
from selfdrive.version import version, dirty, origin, branch
from selfdrive.swaglog import cloudlog
from common.android import ANDROID
def save_exception(exc_text):
i = 0
log_file = '{}/{}'.format(CRASHES_DIR, datetime.now().strftime('%d-%m-%Y--%I:%M%p.log'))
if os.path.exists(log_file):
while os.path.exists(log_file + str(i)):
i += 1
log_file += str(i)
with open(log_file, 'w') as f:
f.write(exc_text)
print('Logged current crash to {}'.format(log_file))
if os.getenv("NOLOG") or os.getenv("NOCRASH") or not ANDROID:
def capture_exception(*args, **kwargs):
pass
def bind_user(**kwargs):
pass
def bind_extra(**kwargs):
pass
# no-op stubs so callers see the same API when crash reporting is disabled
def capture_warning(warning_string):
pass
def capture_info(info_string):
pass
def install():
pass
else:
from raven import Client
from raven.transport.http import HTTPTransport
from common.op_params import opParams
from datetime import datetime
COMMUNITY_DIR = '/data/community'
CRASHES_DIR = '{}/crashes'.format(COMMUNITY_DIR)
if not os.path.exists(COMMUNITY_DIR):
os.mkdir(COMMUNITY_DIR)
if not os.path.exists(CRASHES_DIR):
os.mkdir(CRASHES_DIR)
params = Params()
try:
dongle_id = params.get("DongleId").decode('utf8')
except AttributeError:
dongle_id = "None"
try:
distance_traveled = params.get("DistanceTraveled").decode('utf8')
except AttributeError:
distance_traveled = "None"
try:
distance_traveled_engaged = params.get("DistanceTraveledEngaged").decode('utf8')
except AttributeError:
distance_traveled_engaged = "None"
try:
distance_traveled_override = params.get("DistanceTraveledOverride").decode('utf8')
except AttributeError:
distance_traveled_override = "None"
try:
ipaddress = requests.get('https://checkip.amazonaws.com/').text.strip()
except:
ipaddress = "255.255.255.255"
error_tags = {'dirty': dirty, 'dongle_id': dongle_id, 'branch': branch, 'remote': origin, 'distance_traveled': distance_traveled, 'distance_traveled_engaged': distance_traveled_engaged, 'distance_traveled_override': distance_traveled_override}
#uniqueID = op_params.get('uniqueID')
username = opParams().get('username')
if username is None or not isinstance(username, str):
username = 'undefined'
error_tags['username'] = username
u_tag = []
if isinstance(username, str):
u_tag.append(username)
#if isinstance(uniqueID, str):
#u_tag.append(uniqueID)
if len(u_tag) > 0:
error_tags['username'] = ''.join(u_tag)
client = Client('https://137e8e621f114f858f4c392c52e18c6d:[email protected]/1404547',
install_sys_hook=False, transport=HTTPTransport, release=version, tags=error_tags)
def capture_exception(*args, **kwargs):
save_exception(traceback.format_exc())
exc_info = sys.exc_info()
if exc_info[0] is not capnp.lib.capnp.KjException:
client.captureException(*args, **kwargs)
cloudlog.error("crash", exc_info=kwargs.get('exc_info', 1))
def bind_user(**kwargs):
client.user_context(kwargs)
def capture_warning(warning_string):
bind_user(id=dongle_id, ip_address=ipaddress)
client.captureMessage(warning_string, level='warning')
def capture_info(info_string):
bind_user(id=dongle_id, ip_address=ipaddress)
client.captureMessage(info_string, level='info')
def bind_extra(**kwargs):
client.extra_context(kwargs)
def install():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
Source: https://stackoverflow.com/a/31622038
"""
# installs a sys.excepthook
__excepthook__ = sys.excepthook
def handle_exception(*exc_info):
if exc_info[0] not in (KeyboardInterrupt, SystemExit):
capture_exception()
__excepthook__(*exc_info)
sys.excepthook = handle_exception
init_original = threading.Thread.__init__
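# Wrap Thread.__init__ so every thread's run() reports uncaught exceptions through sys.excepthook.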
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
|
[] |
[] |
[
"NOCRASH",
"NOLOG"
] |
[]
|
["NOCRASH", "NOLOG"]
|
python
| 2 | 0 | |
internal/log/log.go
|
package log
import (
"os"
"github.com/sirupsen/logrus"
)
const (
// GitalyLogDirEnvKey defines the environment variable used to specify the Gitaly log directory
GitalyLogDirEnvKey = "GITALY_LOG_DIR"
// LogTimestampFormat defines the timestamp format in log files
LogTimestampFormat = "2006-01-02T15:04:05.000Z"
)
var (
defaultLogger = logrus.StandardLogger()
grpcGo = logrus.New()
// Loggers is convenient when you want to apply configuration to all
// loggers
Loggers = []*logrus.Logger{defaultLogger, grpcGo}
)
func init() {
// This ensures that any log statements that occur before
// the configuration has been loaded will be written to
// stdout instead of stderr
for _, l := range Loggers {
l.Out = os.Stdout
}
}
// Configure sets the format and level on all loggers. It applies level
// mapping to the GrpcGo logger.
func Configure(loggers []*logrus.Logger, format string, level string) {
var formatter logrus.Formatter
switch format {
case "json":
formatter = &logrus.JSONFormatter{TimestampFormat: LogTimestampFormat}
case "text":
formatter = &logrus.TextFormatter{TimestampFormat: LogTimestampFormat}
case "":
// Just stick with the default
default:
logrus.WithField("format", format).Fatal("invalid logger format")
}
logrusLevel, err := logrus.ParseLevel(level)
if err != nil {
logrusLevel = logrus.InfoLevel
}
for _, l := range loggers {
if l == grpcGo {
l.SetLevel(mapGrpcLogLevel(logrusLevel))
} else {
l.SetLevel(logrusLevel)
}
if formatter != nil {
l.Formatter = formatter
}
}
}
func mapGrpcLogLevel(level logrus.Level) logrus.Level {
// Honor grpc-go's debug settings: https://github.com/grpc/grpc-go#how-to-turn-on-logging
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
if logLevel != "" {
switch logLevel {
case "ERROR", "error":
return logrus.ErrorLevel
case "WARNING", "warning":
return logrus.WarnLevel
case "INFO", "info":
return logrus.InfoLevel
}
}
// grpc-go is too verbose at level 'info'. So when config.toml requests
// level info, we tell grpc-go to log at 'warn' instead.
if level == logrus.InfoLevel {
return logrus.WarnLevel
}
return level
}
// Default is the default logrus logger
func Default() *logrus.Entry { return defaultLogger.WithField("pid", os.Getpid()) }
// GrpcGo is a dedicated logrus logger for the grpc-go library. We use it
// to control the library's chattiness.
func GrpcGo() *logrus.Entry { return grpcGo.WithField("pid", os.Getpid()) }
|
[
"\"GRPC_GO_LOG_SEVERITY_LEVEL\""
] |
[] |
[
"GRPC_GO_LOG_SEVERITY_LEVEL"
] |
[]
|
["GRPC_GO_LOG_SEVERITY_LEVEL"]
|
go
| 1 | 0 | |
scripts/run_mots_depth_inference.py
|
""" Script for running depth inference assuming MOTS dataset structure """
import logging
import os
import sys
from pathlib import Path, PurePath
import click
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
from IPython.core import ultratb
from PIL import Image
import diw
from diw.model import Model, get_vars_to_save_and_restore
sys.excepthook = ultratb.FormattedTB(mode="Verbose", color_scheme="Linux", call_pdb=1)
_logger = logging.getLogger(__name__)
def load_image(img_file):
"""Load image from disk. Output value range: [0,255]."""
return Image.open(img_file).convert("RGB")
def resize_img(img, img_shape):
""" resizes an image """
return img.resize(img_shape, Image.LANCZOS).convert("RGB")
def plot_image(image, image_type="RGB"):
""" plots image with matplotlib """
plt.figure()
color_map = None
if image_type != "RGB":
color_map = plt.cm.get_cmap("plasma").reversed()
plt.imshow(image, cmap=color_map)
plt.show() # display it
return plt
@click.command()
@click.option(
"--checkpoint_dir",
"checkpoint_dir",
default="./data/checkpoints/test",
type=click.Path(exists=True),
help="Path to the model checkpoint",
)
@click.option(
"--data_dir",
"data_dir",
default="./data/test/mots_data",
type=click.Path(exists=True),
help="Path to MOTS data",
)
@click.option(
"--save_img",
"save_img",
flag_value=True,
help="Flag to whether save the image of the depth (besides numpy array)",
)
@click.version_option(diw.__version__)
def main(data_dir, checkpoint_dir, save_img):
if save_img:
plt.figure()
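# Fixed input resolution (height, width) expected by the depth model; every frame is resized to it before inference.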
height, width = 128, 416
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" # to fix CUDA bug
inference_model = Model(
is_training=False, batch_size=1, img_height=height, img_width=width
)
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
vars_to_restore = get_vars_to_save_and_restore(checkpoint)
saver = tf.train.Saver(vars_to_restore)
with tf.Session() as sess:
saver.restore(sess, checkpoint)
sequence_paths = [p for p in Path(data_dir).glob("*") if p.is_dir()]
for seq_path in sequence_paths:
model_name = PurePath(checkpoint_dir).parts[-1]
(seq_path / model_name).mkdir(parents=True, exist_ok=True)
if save_img:
(seq_path / (model_name + "_depth_images")).mkdir(
parents=True, exist_ok=True
)
img_paths = sorted(
[p for p in (seq_path / "img1").glob("*") if p.is_file()],
key=lambda path: str(path),
)
for img_path in img_paths:
img_name = img_path.parts[-1].split(".")[0]
print("Processing sequence: {}, image: {}".format(seq_path, img_name))
image = load_image(str(img_path))
image = resize_img(image, (width, height))
image = np.array(image)
image = image[None, ...]
depth = inference_model.inference_depth(image, sess)
depth = depth[0, :, :, 0]
np.save(str(seq_path / model_name / img_name), depth)
if save_img:
plt.imshow(depth, plt.cm.get_cmap("plasma").reversed())
plt.savefig(
str(seq_path / (model_name + "_depth_images"))
+ "/"
+ (img_name + ".png")
)
plt.clf()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"TF_FORCE_GPU_ALLOW_GROWTH"
] |
[]
|
["TF_FORCE_GPU_ALLOW_GROWTH"]
|
python
| 1 | 0 | |
cmd/get-hardware-details/main.go
|
// get-hardware-details is a tool that can be used to convert raw Ironic introspection data into the HardwareDetails
// type used by Metal3.
package main
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/gophercloud/gophercloud/openstack/baremetalintrospection/v1/introspection"
"github.com/metal3-io/baremetal-operator/pkg/ironic/clients"
"github.com/metal3-io/baremetal-operator/pkg/ironic/hardwaredetails"
)
type options struct {
Endpoint string
AuthConfig clients.AuthConfig
NodeID string
}
func main() {
opts := getOptions()
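// TLS behaviour toward the Ironic inspector is driven by the environment: an optional CA bundle file and an insecure-skip-verify toggle.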
ironicTrustedCAFile := os.Getenv("IRONIC_CACERT_FILE")
ironicInsecureStr := os.Getenv("IRONIC_INSECURE")
ironicInsecure := false
if strings.ToLower(ironicInsecureStr) == "true" {
ironicInsecure = true
}
tlsConf := clients.TLSConfig{
TrustedCAFile: ironicTrustedCAFile,
InsecureSkipVerify: ironicInsecure,
}
inspector, err := clients.InspectorClient(opts.Endpoint, opts.AuthConfig, tlsConf)
if err != nil {
fmt.Printf("could not get inspector client: %s", err)
os.Exit(1)
}
introData := introspection.GetIntrospectionData(inspector, opts.NodeID)
data, err := introData.Extract()
if err != nil {
fmt.Printf("could not get introspection data: %s", err)
os.Exit(1)
}
json, err := json.MarshalIndent(hardwaredetails.GetHardwareDetails(data), "", "\t")
if err != nil {
fmt.Printf("could not convert introspection data: %s", err)
os.Exit(1)
}
fmt.Println(string(json))
}
func getOptions() (o options) {
if len(os.Args) != 3 {
fmt.Println("Usage: get-hardware-details <inspector URI> <node UUID>")
os.Exit(1)
}
var err error
o.Endpoint, o.AuthConfig, err = clients.ConfigFromEndpointURL(os.Args[1])
if err != nil {
fmt.Printf("Error: %s\n", err)
os.Exit(1)
}
o.NodeID = os.Args[2]
return
}
|
[
"\"IRONIC_CACERT_FILE\"",
"\"IRONIC_INSECURE\""
] |
[] |
[
"IRONIC_INSECURE",
"IRONIC_CACERT_FILE"
] |
[]
|
["IRONIC_INSECURE", "IRONIC_CACERT_FILE"]
|
go
| 2 | 0 | |
default_pfx.py
|
#!/usr/bin/env python3
# usage: default_pfx.py path/to/default_pfx_dir path/to/dist
"Helper module for building the default prefix"
import os
import subprocess
def file_is_wine_builtin_dll(path):
if not os.path.exists(path):
return False
try:
sfile = open(path, "rb")
sfile.seek(0x40)
tag = sfile.read(20)
return tag.startswith((b"Wine placeholder DLL", b"Wine builtin DLL"))
except IOError:
return False
def little_endian_bytes_to_uint(b):
result = 0
multiplier = 1
for i in b:
result += i * multiplier
multiplier <<= 8
return result
def dll_bitness(path):
if not os.path.exists(path):
return 0
try:
sfile = open(path, "rb")
sfile.seek(0x3c)
ntheader_ofs = little_endian_bytes_to_uint(sfile.read(4))
sfile.seek(0x18 + ntheader_ofs)
magic = sfile.read(2)
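# PE optional-header magic (stored little-endian): 0x10b marks a 32-bit PE32 image, 0x20b marks a 64-bit PE32+ image.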
if magic == bytes((11, 1)):
return 32
if magic == bytes((11, 2)):
return 64
return 0
except IOError:
return 0
def make_relative_symlink(target, linkname):
target = os.path.abspath(target)
linkname = os.path.abspath(linkname)
rel = os.path.relpath(target, os.path.dirname(linkname))
os.symlink(rel, linkname)
def setup_dll_symlinks(default_pfx_dir, dist_dir):
for walk_dir, dirs, files in os.walk(default_pfx_dir):
for file_ in files:
filename = os.path.join(walk_dir, file_)
if os.path.isfile(filename) and file_is_wine_builtin_dll(filename):
bitness = dll_bitness(filename)
if bitness == 32:
libdir = os.path.join(dist_dir, 'lib/wine')
elif bitness == 64:
libdir = os.path.join(dist_dir, 'lib64/wine')
else:
continue
if os.path.exists(os.path.join(libdir, file_)):
target = os.path.join(libdir, file_)
elif os.path.exists(os.path.join(libdir, 'fakedlls', file_)):
target = os.path.join(libdir, 'fakedlls', file_)
else:
continue
os.unlink(filename)
make_relative_symlink(target, filename)
def make_default_pfx(default_pfx_dir, dist_dir, runtime):
local_env = dict(os.environ)
ld_path = dist_dir + "/lib64:" + dist_dir + "/lib"
if runtime is None:
local_env["LD_LIBRARY_PATH"] = ld_path
local_env["WINEPREFIX"] = default_pfx_dir
local_env["WINEDEBUG"] = "-all"
runtime_args = []
else:
#the runtime clears the environment, so we pass it in on the CL via env
runtime_args = runtime + ["env",
"LD_LIBRARY_PATH=" + ld_path,
"WINEPREFIX=" + default_pfx_dir,
"WINEDEBUG=-all"]
subprocess.run(runtime_args + ["/bin/bash", "-c",
os.path.join(dist_dir, 'bin', 'wine') + " wineboot && " +
os.path.join(dist_dir, 'bin', 'wineserver') + " -w"],
env=local_env, check=True)
setup_dll_symlinks(default_pfx_dir, dist_dir)
if __name__ == '__main__':
import sys
if len(sys.argv) > 3:
make_default_pfx(sys.argv[1], sys.argv[2], sys.argv[3:])
else:
make_default_pfx(sys.argv[1], sys.argv[2], None)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bot/__main__.py
|
import datetime
from dotenv import load_dotenv
from discord.ext import tasks, commands
import discord
from bot import mongo, rss
import os
load_dotenv(verbose=True)
# Discord Variables
TOKEN = os.environ.get("TOKEN")
COMMAND_PREFIX = os.environ.get("COMMAND_PREFIX")
CRAWL_INTERVAL_MINUTES = int(os.environ.get("CRAWL_INTERVAL_MINUTES"))
bot = commands.Bot(command_prefix=COMMAND_PREFIX)
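# Resolve a channel by id; if it no longer exists, its subscriptions are purged from storage.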
def get_channel(channel_id: int):
channel = bot.get_channel(channel_id)
if channel is None:
mongo.delete_channel_subscriptions(channel_id)
return channel
async def exec(url: str):
feeds = rss.get_feeds(url)
channels = [get_channel(channel_id) for channel_id in mongo.get_channel_ids(url)]
for (title, url) in feeds:
if mongo.feed(title, url):
for channel in channels:
if channel is None:
continue
await channel.send(url)
def log(message: str):
print(f"[{datetime.datetime.today()}] {message}")
@tasks.loop(minutes=CRAWL_INTERVAL_MINUTES)
async def loop():
for url in mongo.get_all_subscription_urls():
log(f"Start crawl {url}")
await exec(url)
log("Crawl completed.")
@bot.event
async def on_ready():
log("Ready to crawl")
loop.start()
@bot.command()
async def subscribe(context: commands.Context, url: str):
title = rss.get_title(url)
if title is None:
await context.send(f"Invalid URL: {url}")
else:
mongo.subscribe(context.channel.id, title, url)
await context.send(f"Success")
result = "failed" if title is None else "succeeded"
log(f"Subscribe command {result}. (USER ID: {context.author.id},URL: {url})")
@bot.command(name="subscriptions")
async def get_subscriptions(context: commands.Context):
subscriptions = [f"{s[mongo.TITLE]}({s[mongo.URL]})" for s in mongo.get_subscriptions(context.channel.id)]
if len(subscriptions) == 0:
await context.send("No subscriptions.")
else:
message = "\n".join(subscriptions)
await context.send(message)
@bot.command()
async def unsbscribe(context: commands.Context, url: str):
if mongo.unsibscribe(context.channel.id, url):
await context.send("Success")
else:
await context.send("Subscription not found.")
def run():
bot.run(TOKEN)
if __name__ == "__main__":
print("Start Discord RSS")
print(f"[MongoDB Address] {mongo.MONGO_ADDRESS}")
print(f"[Bot command prefix] {COMMAND_PREFIX}")
print(f"[Bot crawl interval] {CRAWL_INTERVAL_MINUTES}(min)")
run()
|
[] |
[] |
[
"CRAWL_INTERVAL_MINUTES",
"TOKEN",
"COMMAND_PREFIX"
] |
[]
|
["CRAWL_INTERVAL_MINUTES", "TOKEN", "COMMAND_PREFIX"]
|
python
| 3 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goliath.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vmworkstation/provider_test.go
|
package vmworkstation
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"example": testAccProvider,
}
}
func TestProvider(t *testing.T) {
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("VMWS_USER"); v == "" {
t.Fatal("VMWS_USER must be set for acceptance tests")
}
if v := os.Getenv("VMWS_PASSWORD"); v == "" {
t.Fatal("VMWS_PASSWORD must be set for acceptance tests")
}
if v := os.Getenv("VMWS_URL"); v == "" {
t.Fatal("VMWS_URL must be set for acceptance tests")
}
}
|
[
"\"VMWS_USER\"",
"\"VMWS_PASSWORD\"",
"\"VMWS_URL\""
] |
[] |
[
"VMWS_URL",
"VMWS_USER",
"VMWS_PASSWORD"
] |
[]
|
["VMWS_URL", "VMWS_USER", "VMWS_PASSWORD"]
|
go
| 3 | 0 | |
command/media/defaultreceiver/vimeo/vimeo_test.go
|
package vimeo
import (
"io"
"os"
"strings"
"testing"
)
func TestIframeExtraction(t *testing.T) {
cc := []struct {
body io.Reader
expected string
}{
{
body: strings.NewReader(`
<component is="lesson-view" inline-template>
<div>
<div class="video-player-wrap">
<div class="video-player" v-cloak>
<video-player lesson="1148" vimeo-id="231780045" inline-template>
<div id="laracasts-video" class="container"></div>
</video-player>
<div class="next-lesson-arrow previous" v-cloak>`),
expected: "https://player.vimeo.com/video/231780045"},
}
for _, c := range cc {
got, err := extractIframeFromPage(c.body)
if got != c.expected {
t.Errorf("got '%s', expected '%s'", got, c.expected)
}
if err != nil {
t.Errorf("got unexpected error: %w", err)
}
}
}
func TestMp4Extraction(t *testing.T) {
cc := []struct {
body io.Reader
expected string
}{
{
body: strings.NewReader(`,"default_cdn":"akfire_interconnect_quic","cdns":{"akfire_interconnect_quic":{"url":"https://46skyfiregce-vimeo.akamaized.net/exp=1529961460~acl=%2F231780045%2F%2A~hmac=3e9c6fb6936f69d51b891d6a4213bec94f4efa6e2640dd41acad57344861b3af/231780045/video/820169965,820170024,820170021,820169954/master.m3u8","origin":"gcs"},"fastly_skyfire":{"url":"https://skyfire.vimeocdn.com/1529961460-0x1e04e1b6f2d57441c4e46b0eb86067758218cb97/231780045/video/820169965,820170024,820170021,820169954/master.m3u8","origin":"gcs"}}},"progressive":[{"profile":174,"width":1280,"mime":"video/mp4","fps":30,"url":"https://gcs-vimeo.akamaized.net/exp=1529961460~acl=%2A%2F820170024.mp4%2A~hmac=1b0c809bc92d1924a50ae061b2cc633f08087ccfafdedfbf53e5200e3250cce9/vimeo-prod-skyfire-std-us/01/1356/9/231780045/820170024.mp4","cdn":"akamai_interconnect","quality":"720p","id":820170024,"origin":"gcs","height":720},{"profile":175,"width":1920,"mime":"video/mp4","fps":30,"url":"https://gcs-vimeo.akamaized.net/exp=1529961460~acl=%2A%2F820170021.mp4%2A~hmac=8cc4c2cb65f4269a693c4de059bd74d5f2797057ed73bc8cd7d9b0c4dc0582df/vimeo-prod-skyfire-std-us/01/1356/9/231780045/820170021.mp4","cdn":"akamai_interconnect","quality":"1080p","id":820170021,"origin":"gcs","height":1080},{"profile":164,"width":640,"mime":"video/mp4","fps":30,"url":"https://gcs-vimeo.akamaized.net/exp=1529961460~acl=%2A%2F820169965.mp4%2A~hmac=1486b9fbe03f66d405e8e2c3b651518185d06d282477c7d2c2c715d9fa1a6e52/vimeo-prod-skyfire-std-us/01/1356/9/231780045/820169965.mp4","cdn":"akamai_interconnect","quality":"360p","id":820169965,"origin":"gcs","height":360},{"profile":165,"width":960,"mime":"video/mp4","fps":30,"url":"https://gcs-vimeo.akamaized.net/exp=1529961460~acl=%2A%2F820169954.mp4%2A~hmac=df91aed8aa7cc15851e44ecbdbf3baa4906d6c9213539d51f6e7d5e6e73bf88c/vimeo-prod-skyfire-std-us/01/1356/9/231780045/820169954.mp4","cdn":"akamai_interconnect","quality":"540p","id":820169954,"origin":"gcs","height":540}]},"lang":"en","sentry":{"url":"https://[email protected]/2","enabled":false,"debug_enab`),
expected: "https://46skyfiregce-vimeo.akamaized.net/exp=1529961460~acl=%2F231780045%2F%2A~hmac=3e9c6fb6936f69d51b891d6a4213bec94f4efa6e2640dd41acad57344861b3af/231780045/video/820169965,820170024,820170021,820169954/master.m3u8",
},
{
body: strings.NewReader(`8","origin":"gcs"},"fastly_skyfire":{"url":"https://skyfire.vimeocdn.com/1529964151-0x626e27b83ed503bb96417a8a85643ad5106742a3/238821524/video/853435837,853435913,853435901,853435825/master.m3u8?token","origin":"gcs"}}},"progressive":[{"profile":175,"width":1920,"mime":"video/mp4","fps":30,"url":"https://fpdl.vimeocdn.com/vimeo-prod-skyfire-std-us/01/2764/9/238821524/853435913.mp4?token=1529964151-0x901607a1e3b3243c38d31be12c27d71548916b7b","cdn":"fastly","quality":"1080p","id":853435913,"origin":"gcs","height":1080},{"profile":165,"width":960,"mime":"video/mp4","fps":30,`),
expected: "https://skyfire.vimeocdn.com/1529964151-0x626e27b83ed503bb96417a8a85643ad5106742a3/238821524/video/853435837,853435913,853435901,853435825/master.m3u8?token",
},
}
for _, c := range cc {
got, err := extractM3u8FromIframe(c.body)
if got != c.expected {
t.Errorf("got '%s', expected '%s'", got, c.expected)
}
if err != nil {
t.Errorf("got unexpected error: %w", err)
}
}
}
func TestPageRequest(t *testing.T) {
if os.Getenv("TEST_ONLINE") == "" {
t.Skip("online test skipped")
}
cc := []struct {
url string
iframe string
}{
{
url: "https://laracasts.com/series/whats-new-in-laravel-5-5/episodes/20",
iframe: "https://player.vimeo.com/video/231780045",
},
}
for _, c := range cc {
got, err := ExtractIframe(c.url)
if got != c.iframe {
t.Errorf("got '%s', expected '%s' for '%s'", got, c.iframe, c.url)
}
if err != nil {
t.Errorf("got unexpected error: %w", err)
}
}
}
func TestIframeRequest(t *testing.T) {
if os.Getenv("TEST_ONLINE") == "" {
t.Skip("online test skipped")
}
cc := []struct {
url string
iframe string
mp4prefix string
}{
{
url: "https://laracasts.com/series/whats-new-in-laravel-5-5/episodes/20",
iframe: "https://player.vimeo.com/video/231780045",
mp4prefix: "https://gcs-vimeo.akamaized.net/",
},
}
for _, c := range cc {
got, err := ExtractM3u8(c.url, c.iframe)
if !strings.HasPrefix(got, c.mp4prefix) {
t.Errorf("got '%s', expected '%s' prefix for '%s'", got, c.mp4prefix, c.url)
}
if err != nil {
t.Errorf("got unexpected error: %w", err)
}
}
}
|
[
"\"TEST_ONLINE\"",
"\"TEST_ONLINE\""
] |
[] |
[
"TEST_ONLINE"
] |
[]
|
["TEST_ONLINE"]
|
go
| 1 | 0 | |
api/manage.py
|
#!/usr/bin/env python3
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rps.settings.settings_dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cloud/pkg/leaderelection/leaderelection.go
|
package leaderelection
import (
"context"
"encoding/json"
"fmt"
"os"
"syscall"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"github.com/kubeedge/beehive/pkg/core"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/cloud/pkg/common/client"
"github.com/kubeedge/kubeedge/cloud/pkg/common/informers"
"github.com/kubeedge/kubeedge/cloud/pkg/devicecontroller/utils"
config "github.com/kubeedge/kubeedge/pkg/apis/componentconfig/cloudcore/v1alpha1"
)
func Run(cfg *config.CloudCoreConfig, readyzAdaptor *ReadyzAdaptor) {
// To help debugging, immediately log config for LeaderElection
klog.Infof("Config for LeaderElection : %v", *cfg.LeaderElection)
// Init Context for leaderElection
beehiveContext.InitContext(beehiveContext.MsgCtxTypeChannel)
// Init podReadinessGate to false at the begin of Run
if err := TryToPatchPodReadinessGate(corev1.ConditionFalse); err != nil {
klog.Errorf("Error init pod readinessGate: %v", err)
}
coreBroadcaster := record.NewBroadcaster()
cli := client.GetKubeEdgeClient()
if err := CreateNamespaceIfNeeded(cli, "kubeedge"); err != nil {
klog.Warningf("Create Namespace kubeedge failed with error: %s", err)
return
}
coreRecorder := coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "CloudCore"})
leaderElectionConfig, err := makeLeaderElectionConfig(*cfg.LeaderElection, cli, coreRecorder)
if err != nil {
klog.Errorf("couldn't create leaderElectorConfig: %v", err)
return
}
leaderElectionConfig.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
// Start all modules,
core.StartModules()
informers.GetGlobalInformers().Start(beehiveContext.Done())
// Patch PodReadinessGate if program run in pod
err := TryToPatchPodReadinessGate(corev1.ConditionTrue)
if err != nil {
// Terminate the program gracefully
klog.Errorf("Error patching pod readinessGate: %v", err)
TriggerGracefulShutdown()
}
},
OnStoppedLeading: func() {
// TODO: is it necessary to terminate the program gracefully?
//klog.Fatalf("leaderelection lost, rudely terminate program")
klog.Errorf("leaderelection lost, gracefully terminate program")
// Reset PodReadinessGate to false if cloudcore stop
err := TryToPatchPodReadinessGate(corev1.ConditionFalse)
if err != nil {
klog.Errorf("Error reset pod readinessGate: %v", err)
}
// Trigger core.GracefulShutdown()
TriggerGracefulShutdown()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*leaderElectionConfig)
// Set readyzAdaptor manually
readyzAdaptor.SetLeaderElection(leaderElector)
if err != nil {
klog.Errorf("couldn't create leader elector: %v", err)
return
}
// Start leader election and block until becoming leader; terminate the program if leadership is lost or the context is cancelled
go leaderElector.Run(beehiveContext.GetContext())
// Monitor system signals and shut down gracefully; this should run in the main goroutine
core.GracefulShutdown()
}
// makeLeaderElectionConfig builds a leader election configuration. It will
// create a new resource lock associated with the configuration.
func makeLeaderElectionConfig(config componentbaseconfig.LeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id := hostname + "_" + string(uuid.NewUUID())
rl, err := resourcelock.New(config.ResourceLock,
config.ResourceNamespace,
config.ResourceName,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorder,
})
if err != nil {
return nil, fmt.Errorf("couldn't create resource lock: %v", err)
}
return &leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: config.LeaseDuration.Duration,
RenewDeadline: config.RenewDeadline.Duration,
RetryPeriod: config.RetryPeriod.Duration,
WatchDog: nil,
Name: "cloudcore",
}, nil
}
// TryToPatchPodReadinessGate tries to patch the PodReadinessGate if the program runs in a pod
func TryToPatchPodReadinessGate(status corev1.ConditionStatus) error {
podname, isInPod := os.LookupEnv("CLOUDCORE_POD_NAME")
if isInPod {
namespace := os.Getenv("CLOUDCORE_POD_NAMESPACE")
klog.Infof("CloudCore is running in pod %v/%v, try to patch PodReadinessGate", namespace, podname)
//TODO: use specific clients
cli, err := utils.KubeClient()
if err != nil {
return fmt.Errorf("create kube client for patching podReadinessGate failed with error: %v", err)
}
// Create patchBytes
getPod, err := cli.CoreV1().Pods(namespace).Get(context.Background(), podname, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod %q: %v", podname, err)
}
originalJSON, err := json.Marshal(getPod)
if err != nil {
return fmt.Errorf("failed to marshal modified pod %q into JSON: %v", podname, err)
}
//Todo: Read PodReadinessGate from CloudCore configuration or env
condition := corev1.PodCondition{Type: "kubeedge.io/CloudCoreIsLeader", Status: status}
podutil.UpdatePodCondition(&getPod.Status, &condition)
newJSON, err := json.Marshal(getPod)
if err != nil {
return fmt.Errorf("failed to marshal updated pod %q into JSON: %v", podname, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSON, newJSON, corev1.Pod{})
if err != nil {
return fmt.Errorf("failed to create two way merge patch: %v", err)
}
var maxRetries = 3
var isPatchSuccess = false
for i := 1; i <= maxRetries; i++ {
_, err = cli.CoreV1().Pods(namespace).Patch(context.Background(), podname, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if err == nil {
isPatchSuccess = true
klog.Infof("Successfully patching podReadinessGate: kubeedge.io/CloudCoreIsLeader to pod %q through apiserver", podname)
break
}
if errors.IsConflict(err) {
// If the patch failed due to an update conflict, retry
if i >= maxRetries {
klog.Errorf("updateMaxRetries(%d) has reached, failed to patching podReadinessGate: kubeedge.io/CloudCoreIsLeader because of update conflict", maxRetries)
}
continue
}
break
}
if !isPatchSuccess {
return err
}
} else {
klog.Infoln("CloudCore is not running in pod")
}
return nil
}
// Trigger core.GracefulShutdown()
func TriggerGracefulShutdown() {
if beehiveContext.GetContext().Err() != nil {
klog.Errorln("Program is in gracefully shutdown")
return
}
klog.Errorln("Trigger graceful shutdown!")
p, err := os.FindProcess(syscall.Getpid())
if err != nil {
klog.Errorf("Failed to find self process: %v", err)
}
err = p.Signal(os.Interrupt)
if err != nil {
klog.Errorf("Failed to trigger graceful shutdown: %v", err)
}
}
func CreateNamespaceIfNeeded(cli client.KubeEdgeClient, ns string) error {
c := cli.CoreV1()
if _, err := c.Namespaces().Get(context.Background(), ns, metav1.GetOptions{}); err == nil {
// the namespace already exists
return nil
}
newNs := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
Namespace: "",
},
}
_, err := c.Namespaces().Create(context.Background(), newNs, metav1.CreateOptions{})
if err != nil && errors.IsAlreadyExists(err) {
err = nil
}
return err
}
|
[
"\"CLOUDCORE_POD_NAMESPACE\""
] |
[] |
[
"CLOUDCORE_POD_NAMESPACE"
] |
[]
|
["CLOUDCORE_POD_NAMESPACE"]
|
go
| 1 | 0 | |
misc/run-valgrind.py
|
#!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Run code with Nuitka compiled and put that through valgrind.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0, os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
)
# isort:start
import shutil
import tempfile
from nuitka.tools.testing.Valgrind import getBinarySizes, runValgrind
input_file = sys.argv[1]
nuitka_binary = os.environ.get(
"NUITKA_BINARY", os.path.join(os.path.dirname(__file__), "../bin/nuitka")
)
nuitka_binary = os.path.normpath(nuitka_binary)
basename = os.path.basename(input_file)
tempdir = tempfile.mkdtemp(
prefix=basename + "-", dir=None if not os.path.exists("/var/tmp") else "/var/tmp"
)
output_binary = os.path.join(
tempdir, (basename[:-3] if input_file.endswith(".py") else basename) + ".bin"
)
os.environ["PYTHONHASHSEED"] = "0"
# Make the compiled program run well despite the "-S" flag, for things that need
# the site module to expand sys.path.
os.environ["PYTHONPATH"] = os.pathsep.join(sys.path)
os.system(
"%s %s --python-flag=-S --no-progress --output-dir=%s %s %s %s %s"
% (
sys.executable,
nuitka_binary,
tempdir,
"--unstripped",
"--quiet",
os.environ.get("NUITKA_EXTRA_OPTIONS", ""),
input_file,
)
)
if not os.path.exists(output_binary):
sys.exit("Seeming failure of Nuitka to compile, no %r." % output_binary)
log_base = basename[:-3] if input_file.endswith(".py") else basename
if "number" in sys.argv or "numbers" in sys.argv:
log_file = log_base + ".log"
else:
log_file = None
log_file = log_base + ".log"
sys.stdout.flush()
ticks = runValgrind(
None, "callgrind", [output_binary], include_startup=False, save_logfilename=log_file
)
if "number" in sys.argv or "numbers" in sys.argv:
sizes = getBinarySizes(output_binary)
print("SIZE=%d" % (sizes[0] + sizes[1]))
print("TICKS=%s" % ticks)
print("BINARY=%s" % nuitka_binary)
max_mem = runValgrind(None, "massif", [output_binary], include_startup=True)
print("MEM=%s" % max_mem)
shutil.rmtree(tempdir)
else:
os.system("kcachegrind 2>/dev/null 1>/dev/null %s &" % log_file)
|
[] |
[] |
[
"NUITKA_BINARY",
"NUITKA_EXTRA_OPTIONS",
"PYTHONPATH",
"PYTHONHASHSEED"
] |
[]
|
["NUITKA_BINARY", "NUITKA_EXTRA_OPTIONS", "PYTHONPATH", "PYTHONHASHSEED"]
|
python
| 4 | 0 | |
vendor/github.com/jackc/pgx/conn.go
|
package pgx
import (
"bufio"
"crypto/md5"
"crypto/tls"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
"os/user"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
)
// DialFunc is a function that can be used to connect to a PostgreSQL server
type DialFunc func(network, addr string) (net.Conn, error)
// ConnConfig contains all the options used to establish a connection.
type ConnConfig struct {
Host string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp)
Port uint16 // default: 5432
Database string
User string // default: OS user name
Password string
TLSConfig *tls.Config // config for TLS connection -- nil disables TLS
UseFallbackTLS bool // Try FallbackTLSConfig if connecting with TLSConfig fails. Used for preferring TLS, but allowing unencrypted, or vice-versa
FallbackTLSConfig *tls.Config // config for fallback TLS connection (only used if UseFallBackTLS is true)-- nil disables TLS
Logger Logger
LogLevel int
Dial DialFunc
RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
}
// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage.
// Use ConnPool to manage access to multiple database connections from multiple
// goroutines.
type Conn struct {
conn net.Conn // the underlying TCP or unix domain socket connection
lastActivityTime time.Time // the last time the connection was used
reader *bufio.Reader // buffered reader to improve read performance
wbuf [1024]byte
writeBuf WriteBuf
Pid int32 // backend pid
SecretKey int32 // key to use to send a cancel query message to the server
RuntimeParams map[string]string // parameters that have been reported by the server
PgTypes map[Oid]PgType // oids to PgTypes
config ConnConfig // config used when establishing this connection
TxStatus byte
preparedStatements map[string]*PreparedStatement
channels map[string]struct{}
notifications []*Notification
alive bool
causeOfDeath error
logger Logger
logLevel int
mr msgReader
fp *fastpath
pgsql_af_inet byte
pgsql_af_inet6 byte
busy bool
poolResetCount int
preallocatedRows []Rows
}
// PreparedStatement is a description of a prepared statement
type PreparedStatement struct {
Name string
SQL string
FieldDescriptions []FieldDescription
ParameterOids []Oid
}
// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system
type Notification struct {
Pid int32 // backend pid that sent the notification
Channel string // channel from which notification was received
Payload string
}
// PgType is information about PostgreSQL type and how to encode and decode it
type PgType struct {
Name string // name of type e.g. int4, text, date
DefaultFormat int16 // default format (text or binary) this type will be requested in
}
// CommandTag is the result of an Exec function
type CommandTag string
// RowsAffected returns the number of rows affected. If the CommandTag was not
// for a row affecting command (such as "CREATE TABLE") then it returns 0
func (ct CommandTag) RowsAffected() int64 {
s := string(ct)
index := strings.LastIndex(s, " ")
if index == -1 {
return 0
}
n, _ := strconv.ParseInt(s[index+1:], 10, 64)
return n
}
// ErrNoRows occurs when rows are expected but none are returned.
var ErrNoRows = errors.New("no rows in result set")
// ErrNotificationTimeout occurs when WaitForNotification times out.
var ErrNotificationTimeout = errors.New("notification timeout")
// ErrDeadConn occurs on an attempt to use a dead connection
var ErrDeadConn = errors.New("conn is dead")
// ErrTLSRefused occurs when the connection attempt requires TLS and the
// PostgreSQL server refuses to use TLS
var ErrTLSRefused = errors.New("server refused TLS connection")
// ErrConnBusy occurs when the connection is busy (for example, in the middle of
// reading query results) and another action is attempted.
var ErrConnBusy = errors.New("conn is busy")
// ErrInvalidLogLevel occurs on attempt to set an invalid log level.
var ErrInvalidLogLevel = errors.New("invalid log level")
// ProtocolError occurs when unexpected data is received from PostgreSQL
type ProtocolError string
func (e ProtocolError) Error() string {
return string(e)
}
// Connect establishes a connection with a PostgreSQL server using config.
// config.Host must be specified. config.User will default to the OS user name.
// Other config fields are optional.
func Connect(config ConnConfig) (c *Conn, err error) {
c = new(Conn)
c.config = config
if c.config.LogLevel != 0 {
c.logLevel = c.config.LogLevel
} else {
// Preserve pre-LogLevel behavior by defaulting to LogLevelDebug
c.logLevel = LogLevelDebug
}
c.logger = c.config.Logger
c.mr.log = c.log
c.mr.shouldLog = c.shouldLog
if c.config.User == "" {
user, err := user.Current()
if err != nil {
return nil, err
}
c.config.User = user.Username
if c.shouldLog(LogLevelDebug) {
c.log(LogLevelDebug, "Using default connection config", "User", c.config.User)
}
}
if c.config.Port == 0 {
c.config.Port = 5432
if c.shouldLog(LogLevelDebug) {
c.log(LogLevelDebug, "Using default connection config", "Port", c.config.Port)
}
}
network := "tcp"
address := fmt.Sprintf("%s:%d", c.config.Host, c.config.Port)
// See if host is a valid path, if yes connect with a socket
if _, err := os.Stat(c.config.Host); err == nil {
// For backward compatibility accept socket file paths -- but directories are now preferred
network = "unix"
address = c.config.Host
if !strings.Contains(address, "/.s.PGSQL.") {
address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(c.config.Port), 10)
}
}
if c.config.Dial == nil {
c.config.Dial = (&net.Dialer{KeepAlive: 5 * time.Minute}).Dial
}
err = c.connect(config, network, address, config.TLSConfig)
if err != nil && config.UseFallbackTLS {
err = c.connect(config, network, address, config.FallbackTLSConfig)
}
if err != nil {
return nil, err
}
return c, nil
}
func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tls.Config) (err error) {
if c.shouldLog(LogLevelInfo) {
c.log(LogLevelInfo, fmt.Sprintf("Dialing PostgreSQL server at %s address: %s", network, address))
}
c.conn, err = c.config.Dial(network, address)
if err != nil {
if c.shouldLog(LogLevelError) {
c.log(LogLevelError, fmt.Sprintf("Connection failed: %v", err))
}
return err
}
defer func() {
if c != nil && err != nil {
c.conn.Close()
c.alive = false
if c.shouldLog(LogLevelError) {
c.log(LogLevelError, err.Error())
}
}
}()
c.RuntimeParams = make(map[string]string)
c.preparedStatements = make(map[string]*PreparedStatement)
c.channels = make(map[string]struct{})
c.alive = true
c.lastActivityTime = time.Now()
if tlsConfig != nil {
if c.shouldLog(LogLevelDebug) {
c.log(LogLevelDebug, "Starting TLS handshake")
}
if err := c.startTLS(tlsConfig); err != nil {
if c.shouldLog(LogLevelError) {
c.log(LogLevelError, fmt.Sprintf("TLS failed: %v", err))
}
return err
}
}
c.reader = bufio.NewReader(c.conn)
c.mr.reader = c.reader
msg := newStartupMessage()
// Default to disabling TLS renegotiation.
//
// Go does not support (https://github.com/golang/go/issues/5742)
// PostgreSQL recommends disabling (http://www.postgresql.org/docs/9.4/static/runtime-config-connection.html#GUC-SSL-RENEGOTIATION-LIMIT)
if tlsConfig != nil {
msg.options["ssl_renegotiation_limit"] = "0"
}
// Copy default run-time params
for k, v := range config.RuntimeParams {
msg.options[k] = v
}
msg.options["user"] = c.config.User
if c.config.Database != "" {
msg.options["database"] = c.config.Database
}
if err = c.txStartupMessage(msg); err != nil {
return err
}
for {
var t byte
var r *msgReader
t, r, err = c.rxMsg()
if err != nil {
return err
}
switch t {
case backendKeyData:
c.rxBackendKeyData(r)
case authenticationX:
if err = c.rxAuthenticationX(r); err != nil {
return err
}
case readyForQuery:
c.rxReadyForQuery(r)
if c.shouldLog(LogLevelInfo) {
c.log(LogLevelInfo, "Connection established")
}
err = c.loadPgTypes()
if err != nil {
return err
}
err = c.loadInetConstants()
if err != nil {
return err
}
return nil
default:
if err = c.processContextFreeMsg(t, r); err != nil {
return err
}
}
}
}
func (c *Conn) loadPgTypes() error {
rows, err := c.Query("select t.oid, t.typname from pg_type t where t.typtype='b'")
if err != nil {
return err
}
c.PgTypes = make(map[Oid]PgType, 128)
for rows.Next() {
var oid Oid
var t PgType
rows.Scan(&oid, &t.Name)
// The zero value is text format so we ignore any types without a default type format
t.DefaultFormat, _ = DefaultTypeFormats[t.Name]
c.PgTypes[oid] = t
}
return rows.Err()
}
// Family is needed for binary encoding of inet/cidr. The constant is based on
// the server's definition of AF_INET. In theory, this could differ between
// platforms, so request an IPv4 and an IPv6 inet and get the family from that.
func (c *Conn) loadInetConstants() error {
var ipv4, ipv6 []byte
err := c.QueryRow("select '127.0.0.1'::inet, '1::'::inet").Scan(&ipv4, &ipv6)
if err != nil {
return err
}
c.pgsql_af_inet = ipv4[0]
c.pgsql_af_inet6 = ipv6[0]
return nil
}
// Close closes a connection. It is safe to call Close on an already closed
// connection.
func (c *Conn) Close() (err error) {
if !c.IsAlive() {
return nil
}
wbuf := newWriteBuf(c, 'X')
wbuf.closeMsg()
_, err = c.conn.Write(wbuf.buf)
c.die(errors.New("Closed"))
if c.shouldLog(LogLevelInfo) {
c.log(LogLevelInfo, "Closed connection")
}
return err
}
// ParseURI parses a database URI into ConnConfig
//
// Query parameters not used by the connection process are parsed into ConnConfig.RuntimeParams.
func ParseURI(uri string) (ConnConfig, error) {
var cp ConnConfig
url, err := url.Parse(uri)
if err != nil {
return cp, err
}
if url.User != nil {
cp.User = url.User.Username()
cp.Password, _ = url.User.Password()
}
parts := strings.SplitN(url.Host, ":", 2)
cp.Host = parts[0]
if len(parts) == 2 {
p, err := strconv.ParseUint(parts[1], 10, 16)
if err != nil {
return cp, err
}
cp.Port = uint16(p)
}
cp.Database = strings.TrimLeft(url.Path, "/")
err = configSSL(url.Query().Get("sslmode"), &cp)
if err != nil {
return cp, err
}
ignoreKeys := map[string]struct{}{
"sslmode": struct{}{},
}
cp.RuntimeParams = make(map[string]string)
for k, v := range url.Query() {
if _, ok := ignoreKeys[k]; ok {
continue
}
cp.RuntimeParams[k] = v[0]
}
return cp, nil
}
var dsn_regexp = regexp.MustCompile(`([a-zA-Z_]+)=((?:"[^"]+")|(?:[^ ]+))`)
// ParseDSN parses a database DSN (data source name) into a ConnConfig
//
// e.g. ParseDSN("user=username password=password host=1.2.3.4 port=5432 dbname=mydb sslmode=disable")
//
// Any options not used by the connection process are parsed into ConnConfig.RuntimeParams.
//
// e.g. ParseDSN("application_name=pgxtest search_path=admin user=username password=password host=1.2.3.4 dbname=mydb")
//
// ParseDSN tries to match libpq behavior with regard to sslmode. See comments
// for ParseEnvLibpq for more information on the security implications of
// sslmode options.
func ParseDSN(s string) (ConnConfig, error) {
var cp ConnConfig
m := dsn_regexp.FindAllStringSubmatch(s, -1)
var sslmode string
cp.RuntimeParams = make(map[string]string)
for _, b := range m {
switch b[1] {
case "user":
cp.User = b[2]
case "password":
cp.Password = b[2]
case "host":
cp.Host = b[2]
case "port":
if p, err := strconv.ParseUint(b[2], 10, 16); err != nil {
return cp, err
} else {
cp.Port = uint16(p)
}
case "dbname":
cp.Database = b[2]
case "sslmode":
sslmode = b[2]
default:
cp.RuntimeParams[b[1]] = b[2]
}
}
err := configSSL(sslmode, &cp)
if err != nil {
return cp, err
}
return cp, nil
}
// ParseEnvLibpq parses the environment like libpq does into a ConnConfig
//
// See http://www.postgresql.org/docs/9.4/static/libpq-envars.html for details
// on the meaning of environment variables.
//
// ParseEnvLibpq currently recognizes the following environment variables:
// PGHOST
// PGPORT
// PGDATABASE
// PGUSER
// PGPASSWORD
// PGSSLMODE
// PGAPPNAME
//
// Important TLS Security Notes:
// ParseEnvLibpq tries to match libpq behavior with regard to PGSSLMODE. This
// includes defaulting to "prefer" behavior if no environment variable is set.
//
// See http://www.postgresql.org/docs/9.4/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION
// for details on what level of security each sslmode provides.
//
// "require" and "verify-ca" modes currently are treated as "verify-full". e.g.
// They have stronger security guarantees than they would with libpq. Do not
// rely on this behavior as it may be possible to match libpq in the future. If
// you need full security use "verify-full".
//
// Several of the PGSSLMODE options (including the default behavior of "prefer")
// will set UseFallbackTLS to true and FallbackTLSConfig to a disabled or
// weakened TLS mode. This means that if ParseEnvLibpq is used, but TLSConfig is
// later set from a different source that UseFallbackTLS MUST be set false to
// avoid the possibility of falling back to weaker or disabled security.
func ParseEnvLibpq() (ConnConfig, error) {
var cc ConnConfig
cc.Host = os.Getenv("PGHOST")
if pgport := os.Getenv("PGPORT"); pgport != "" {
if port, err := strconv.ParseUint(pgport, 10, 16); err == nil {
cc.Port = uint16(port)
} else {
return cc, err
}
}
cc.Database = os.Getenv("PGDATABASE")
cc.User = os.Getenv("PGUSER")
cc.Password = os.Getenv("PGPASSWORD")
sslmode := os.Getenv("PGSSLMODE")
err := configSSL(sslmode, &cc)
if err != nil {
return cc, err
}
cc.RuntimeParams = make(map[string]string)
if appname := os.Getenv("PGAPPNAME"); appname != "" {
cc.RuntimeParams["application_name"] = appname
}
return cc, nil
}
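// Illustrative sketch (not part of pgx): per the security note on ParseEnvLibpq, if TLSConfig is
// later replaced from another source, UseFallbackTLS must be set false so the connection cannot
// silently fall back to weaker or disabled TLS. customTLSConfig is a placeholder for configuration
// obtained elsewhere.
func exampleOverrideTLSFromEnv(customTLSConfig *tls.Config) (ConnConfig, error) {
	cc, err := ParseEnvLibpq()
	if err != nil {
		return cc, err
	}
	cc.TLSConfig = customTLSConfig
	cc.UseFallbackTLS = false // required when overriding TLSConfig, see ParseEnvLibpq docs
	cc.FallbackTLSConfig = nil
	return cc, nil
}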
func configSSL(sslmode string, cc *ConnConfig) error {
// Match libpq default behavior
if sslmode == "" {
sslmode = "prefer"
}
switch sslmode {
case "disable":
case "allow":
cc.UseFallbackTLS = true
cc.FallbackTLSConfig = &tls.Config{InsecureSkipVerify: true}
case "prefer":
cc.TLSConfig = &tls.Config{InsecureSkipVerify: true}
cc.UseFallbackTLS = true
cc.FallbackTLSConfig = nil
case "require", "verify-ca", "verify-full":
cc.TLSConfig = &tls.Config{
ServerName: cc.Host,
}
default:
return errors.New("sslmode is invalid")
}
return nil
}
// Prepare creates a prepared statement with name and sql. sql can contain placeholders
// for bound parameters. These placeholders are referenced positionally as $1, $2, etc.
//
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
// concern for if the statement has already been prepared.
func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) {
if name != "" {
if ps, ok := c.preparedStatements[name]; ok && ps.SQL == sql {
return ps, nil
}
}
if c.shouldLog(LogLevelError) {
defer func() {
if err != nil {
c.log(LogLevelError, fmt.Sprintf("Prepare `%s` as `%s` failed: %v", name, sql, err))
}
}()
}
// parse
wbuf := newWriteBuf(c, 'P')
wbuf.WriteCString(name)
wbuf.WriteCString(sql)
wbuf.WriteInt16(0)
// describe
wbuf.startMsg('D')
wbuf.WriteByte('S')
wbuf.WriteCString(name)
// sync
wbuf.startMsg('S')
wbuf.closeMsg()
_, err = c.conn.Write(wbuf.buf)
if err != nil {
c.die(err)
return nil, err
}
ps = &PreparedStatement{Name: name, SQL: sql}
var softErr error
for {
var t byte
var r *msgReader
t, r, err := c.rxMsg()
if err != nil {
return nil, err
}
switch t {
case parseComplete:
case parameterDescription:
ps.ParameterOids = c.rxParameterDescription(r)
if len(ps.ParameterOids) > 65535 && softErr == nil {
softErr = fmt.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOids))
}
case rowDescription:
ps.FieldDescriptions = c.rxRowDescription(r)
for i := range ps.FieldDescriptions {
t, _ := c.PgTypes[ps.FieldDescriptions[i].DataType]
ps.FieldDescriptions[i].DataTypeName = t.Name
ps.FieldDescriptions[i].FormatCode = t.DefaultFormat
}
case noData:
case readyForQuery:
c.rxReadyForQuery(r)
if softErr == nil {
c.preparedStatements[name] = ps
}
return ps, softErr
default:
if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
softErr = e
}
}
}
}
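// Illustrative sketch (not part of pgx): because Prepare is idempotent, a request handler can
// prepare and query in the same code path without tracking whether the statement already exists.
// The statement name, SQL, and users table are only examples.
func exampleGetUserName(c *Conn, id int32) (string, error) {
	if _, err := c.Prepare("getUserName", "select name from users where id = $1"); err != nil {
		return "", err
	}
	var name string
	err := c.QueryRow("getUserName", id).Scan(&name)
	return name, err
}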
// Deallocate releases a prepared statement
func (c *Conn) Deallocate(name string) (err error) {
delete(c.preparedStatements, name)
// close
wbuf := newWriteBuf(c, 'C')
wbuf.WriteByte('S')
wbuf.WriteCString(name)
// flush
wbuf.startMsg('H')
wbuf.closeMsg()
_, err = c.conn.Write(wbuf.buf)
if err != nil {
c.die(err)
return err
}
for {
var t byte
var r *msgReader
t, r, err := c.rxMsg()
if err != nil {
return err
}
switch t {
case closeComplete:
return nil
default:
err = c.processContextFreeMsg(t, r)
if err != nil {
return err
}
}
}
}
// Listen establishes a PostgreSQL listen/notify to channel
func (c *Conn) Listen(channel string) error {
_, err := c.Exec("listen " + quoteIdentifier(channel))
if err != nil {
return err
}
c.channels[channel] = struct{}{}
return nil
}
// Unlisten unsubscribes from a listen channel
func (c *Conn) Unlisten(channel string) error {
_, err := c.Exec("unlisten " + quoteIdentifier(channel))
if err != nil {
return err
}
delete(c.channels, channel)
return nil
}
// WaitForNotification waits for a PostgreSQL notification for up to timeout.
// If the timeout occurs it returns pgx.ErrNotificationTimeout
func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error) {
// Return already received notification immediately
if len(c.notifications) > 0 {
notification := c.notifications[0]
c.notifications = c.notifications[1:]
return notification, nil
}
stopTime := time.Now().Add(timeout)
for {
now := time.Now()
if now.After(stopTime) {
return nil, ErrNotificationTimeout
}
// If there has been no activity on this connection for a while send a nop message just to ensure
// the connection is alive
nextEnsureAliveTime := c.lastActivityTime.Add(15 * time.Second)
if nextEnsureAliveTime.Before(now) {
// If the server can't respond to a nop in 15 seconds, assume it's dead
err := c.conn.SetReadDeadline(now.Add(15 * time.Second))
if err != nil {
return nil, err
}
_, err = c.Exec("--;")
if err != nil {
return nil, err
}
c.lastActivityTime = now
}
var deadline time.Time
if stopTime.Before(nextEnsureAliveTime) {
deadline = stopTime
} else {
deadline = nextEnsureAliveTime
}
notification, err := c.waitForNotification(deadline)
if err != ErrNotificationTimeout {
return notification, err
}
}
}
func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) {
var zeroTime time.Time
for {
// Use SetReadDeadline to implement the timeout. SetReadDeadline will
// cause operations to fail with a *net.OpError that has a Timeout()
// of true. Because the normal pgx rxMsg path considers any error to
// have potentially corrupted the state of the connection, it dies
// on any errors. So to avoid timeout errors in rxMsg we set the
// deadline and peek into the reader. If a timeout error occurs there
// we don't break the pgx connection. If the Peek returns that data
// is available then we turn off the read deadline before the rxMsg.
err := c.conn.SetReadDeadline(deadline)
if err != nil {
return nil, err
}
// Wait until there is a byte available before continuing onto the normal msg reading path
_, err = c.reader.Peek(1)
if err != nil {
c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possible error from SetReadDeadline
if err, ok := err.(*net.OpError); ok && err.Timeout() {
return nil, ErrNotificationTimeout
}
return nil, err
}
err = c.conn.SetReadDeadline(zeroTime)
if err != nil {
return nil, err
}
var t byte
var r *msgReader
if t, r, err = c.rxMsg(); err == nil {
if err = c.processContextFreeMsg(t, r); err != nil {
return nil, err
}
} else {
return nil, err
}
if len(c.notifications) > 0 {
notification := c.notifications[0]
c.notifications = c.notifications[1:]
return notification, nil
}
}
}
func (c *Conn) IsAlive() bool {
return c.alive
}
func (c *Conn) CauseOfDeath() error {
return c.causeOfDeath
}
func (c *Conn) sendQuery(sql string, arguments ...interface{}) (err error) {
if ps, present := c.preparedStatements[sql]; present {
return c.sendPreparedQuery(ps, arguments...)
}
return c.sendSimpleQuery(sql, arguments...)
}
func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error {
if len(args) == 0 {
wbuf := newWriteBuf(c, 'Q')
wbuf.WriteCString(sql)
wbuf.closeMsg()
_, err := c.conn.Write(wbuf.buf)
if err != nil {
c.die(err)
return err
}
return nil
}
ps, err := c.Prepare("", sql)
if err != nil {
return err
}
return c.sendPreparedQuery(ps, args...)
}
func (c *Conn) sendPreparedQuery(ps *PreparedStatement, arguments ...interface{}) (err error) {
if len(ps.ParameterOids) != len(arguments) {
return fmt.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOids), len(arguments))
}
// bind
wbuf := newWriteBuf(c, 'B')
wbuf.WriteByte(0)
wbuf.WriteCString(ps.Name)
wbuf.WriteInt16(int16(len(ps.ParameterOids)))
for i, oid := range ps.ParameterOids {
switch arg := arguments[i].(type) {
case Encoder:
wbuf.WriteInt16(arg.FormatCode())
case string, *string:
wbuf.WriteInt16(TextFormatCode)
default:
switch oid {
case BoolOid, ByteaOid, Int2Oid, Int4Oid, Int8Oid, Float4Oid, Float8Oid, TimestampTzOid, TimestampTzArrayOid, TimestampOid, TimestampArrayOid, DateOid, BoolArrayOid, ByteaArrayOid, Int2ArrayOid, Int4ArrayOid, Int8ArrayOid, Float4ArrayOid, Float8ArrayOid, TextArrayOid, VarcharArrayOid, OidOid, InetOid, CidrOid, InetArrayOid, CidrArrayOid:
wbuf.WriteInt16(BinaryFormatCode)
default:
wbuf.WriteInt16(TextFormatCode)
}
}
}
wbuf.WriteInt16(int16(len(arguments)))
for i, oid := range ps.ParameterOids {
if err := Encode(wbuf, oid, arguments[i]); err != nil {
return err
}
}
wbuf.WriteInt16(int16(len(ps.FieldDescriptions)))
for _, fd := range ps.FieldDescriptions {
wbuf.WriteInt16(fd.FormatCode)
}
// execute
wbuf.startMsg('E')
wbuf.WriteByte(0)
wbuf.WriteInt32(0)
// sync
wbuf.startMsg('S')
wbuf.closeMsg()
_, err = c.conn.Write(wbuf.buf)
if err != nil {
c.die(err)
}
return err
}
// Exec executes sql. sql can be either a prepared statement name or an SQL string.
// arguments should be referenced positionally from the sql string as $1, $2, etc.
func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
if err = c.lock(); err != nil {
return commandTag, err
}
startTime := time.Now()
c.lastActivityTime = startTime
defer func() {
if err == nil {
if c.shouldLog(LogLevelInfo) {
endTime := time.Now()
c.log(LogLevelInfo, "Exec", "sql", sql, "args", logQueryArgs(arguments), "time", endTime.Sub(startTime), "commandTag", commandTag)
}
} else {
if c.shouldLog(LogLevelError) {
c.log(LogLevelError, "Exec", "sql", sql, "args", logQueryArgs(arguments), "error", err)
}
}
if unlockErr := c.unlock(); unlockErr != nil && err == nil {
err = unlockErr
}
}()
if err = c.sendQuery(sql, arguments...); err != nil {
return
}
var softErr error
for {
var t byte
var r *msgReader
t, r, err = c.rxMsg()
if err != nil {
return commandTag, err
}
switch t {
case readyForQuery:
c.rxReadyForQuery(r)
return commandTag, softErr
case rowDescription:
case dataRow:
case bindComplete:
case commandComplete:
commandTag = CommandTag(r.readCString())
default:
if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
softErr = e
}
}
}
}
// Processes messages that are not exclusive to one context such as
// authentication or query response. The response to these messages
// is the same regardless of when they occur.
func (c *Conn) processContextFreeMsg(t byte, r *msgReader) (err error) {
switch t {
case 'S':
c.rxParameterStatus(r)
return nil
case errorResponse:
return c.rxErrorResponse(r)
case noticeResponse:
return nil
case emptyQueryResponse:
return nil
case notificationResponse:
c.rxNotificationResponse(r)
return nil
default:
return fmt.Errorf("Received unknown message type: %c", t)
}
}
func (c *Conn) rxMsg() (t byte, r *msgReader, err error) {
if !c.alive {
return 0, nil, ErrDeadConn
}
t, err = c.mr.rxMsg()
if err != nil {
c.die(err)
}
c.lastActivityTime = time.Now()
if c.shouldLog(LogLevelTrace) {
c.log(LogLevelTrace, "rxMsg", "type", string(t), "msgBytesRemaining", c.mr.msgBytesRemaining)
}
return t, &c.mr, err
}
func (c *Conn) rxAuthenticationX(r *msgReader) (err error) {
switch r.readInt32() {
case 0: // AuthenticationOk
case 3: // AuthenticationCleartextPassword
err = c.txPasswordMessage(c.config.Password)
case 5: // AuthenticationMD5Password
salt := r.readString(4)
digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+salt)
err = c.txPasswordMessage(digestedPassword)
default:
err = errors.New("Received unknown authentication message")
}
return
}
func hexMD5(s string) string {
hash := md5.New()
io.WriteString(hash, s)
return hex.EncodeToString(hash.Sum(nil))
}
func (c *Conn) rxParameterStatus(r *msgReader) {
key := r.readCString()
value := r.readCString()
c.RuntimeParams[key] = value
}
func (c *Conn) rxErrorResponse(r *msgReader) (err PgError) {
for {
switch r.readByte() {
case 'S':
err.Severity = r.readCString()
case 'C':
err.Code = r.readCString()
case 'M':
err.Message = r.readCString()
case 'D':
err.Detail = r.readCString()
case 'H':
err.Hint = r.readCString()
case 'P':
s := r.readCString()
n, _ := strconv.ParseInt(s, 10, 32)
err.Position = int32(n)
case 'p':
s := r.readCString()
n, _ := strconv.ParseInt(s, 10, 32)
err.InternalPosition = int32(n)
case 'q':
err.InternalQuery = r.readCString()
case 'W':
err.Where = r.readCString()
case 's':
err.SchemaName = r.readCString()
case 't':
err.TableName = r.readCString()
case 'c':
err.ColumnName = r.readCString()
case 'd':
err.DataTypeName = r.readCString()
case 'n':
err.ConstraintName = r.readCString()
case 'F':
err.File = r.readCString()
case 'L':
s := r.readCString()
n, _ := strconv.ParseInt(s, 10, 32)
err.Line = int32(n)
case 'R':
err.Routine = r.readCString()
case 0: // End of error message
if err.Severity == "FATAL" {
c.die(err)
}
return
default: // Ignore other error fields
r.readCString()
}
}
}
func (c *Conn) rxBackendKeyData(r *msgReader) {
c.Pid = r.readInt32()
c.SecretKey = r.readInt32()
}
func (c *Conn) rxReadyForQuery(r *msgReader) {
c.TxStatus = r.readByte()
}
func (c *Conn) rxRowDescription(r *msgReader) (fields []FieldDescription) {
fieldCount := r.readInt16()
fields = make([]FieldDescription, fieldCount)
for i := int16(0); i < fieldCount; i++ {
f := &fields[i]
f.Name = r.readCString()
f.Table = r.readOid()
f.AttributeNumber = r.readInt16()
f.DataType = r.readOid()
f.DataTypeSize = r.readInt16()
f.Modifier = r.readInt32()
f.FormatCode = r.readInt16()
}
return
}
func (c *Conn) rxParameterDescription(r *msgReader) (parameters []Oid) {
// Internally, PostgreSQL supports greater than 64k parameters to a prepared
// statement. But the parameter description uses a 16-bit integer for the
// count of parameters. If there are more than 64K parameters, this count is
// wrong. So read the count, ignore it, and compute the proper value from
// the size of the message.
r.readInt16()
parameterCount := r.msgBytesRemaining / 4
parameters = make([]Oid, 0, parameterCount)
for i := int32(0); i < parameterCount; i++ {
parameters = append(parameters, r.readOid())
}
return
}
func (c *Conn) rxNotificationResponse(r *msgReader) {
n := new(Notification)
n.Pid = r.readInt32()
n.Channel = r.readCString()
n.Payload = r.readCString()
c.notifications = append(c.notifications, n)
}
func (c *Conn) startTLS(tlsConfig *tls.Config) (err error) {
err = binary.Write(c.conn, binary.BigEndian, []int32{8, 80877103})
if err != nil {
return
}
response := make([]byte, 1)
if _, err = io.ReadFull(c.conn, response); err != nil {
return
}
if response[0] != 'S' {
return ErrTLSRefused
}
c.conn = tls.Client(c.conn, tlsConfig)
return nil
}
func (c *Conn) txStartupMessage(msg *startupMessage) error {
_, err := c.conn.Write(msg.Bytes())
return err
}
func (c *Conn) txPasswordMessage(password string) (err error) {
wbuf := newWriteBuf(c, 'p')
wbuf.WriteCString(password)
wbuf.closeMsg()
_, err = c.conn.Write(wbuf.buf)
return err
}
func (c *Conn) die(err error) {
c.alive = false
c.causeOfDeath = err
c.conn.Close()
}
func (c *Conn) lock() error {
if c.busy {
return ErrConnBusy
}
c.busy = true
return nil
}
func (c *Conn) unlock() error {
if !c.busy {
return errors.New("unlock conn that is not busy")
}
c.busy = false
return nil
}
func (c *Conn) shouldLog(lvl int) bool {
return c.logger != nil && c.logLevel >= lvl
}
func (c *Conn) log(lvl int, msg string, ctx ...interface{}) {
if c.Pid != 0 {
ctx = append(ctx, "pid", c.Pid)
}
switch lvl {
case LogLevelTrace:
c.logger.Debug(msg, ctx...)
case LogLevelDebug:
c.logger.Debug(msg, ctx...)
case LogLevelInfo:
c.logger.Info(msg, ctx...)
case LogLevelWarn:
c.logger.Warn(msg, ctx...)
case LogLevelError:
c.logger.Error(msg, ctx...)
}
}
// SetLogger replaces the current logger and returns the previous logger.
func (c *Conn) SetLogger(logger Logger) Logger {
oldLogger := c.logger
c.logger = logger
return oldLogger
}
// SetLogLevel replaces the current log level and returns the previous log
// level.
func (c *Conn) SetLogLevel(lvl int) (int, error) {
oldLvl := c.logLevel
if lvl < LogLevelNone || lvl > LogLevelTrace {
return oldLvl, ErrInvalidLogLevel
}
c.logLevel = lvl
return lvl, nil
}
func quoteIdentifier(s string) string {
return `"` + strings.Replace(s, `"`, `""`, -1) + `"`
}
|
[
"\"PGHOST\"",
"\"PGPORT\"",
"\"PGDATABASE\"",
"\"PGUSER\"",
"\"PGPASSWORD\"",
"\"PGSSLMODE\"",
"\"PGAPPNAME\""
] |
[] |
[
"PGAPPNAME",
"PGPORT",
"PGDATABASE",
"PGUSER",
"PGSSLMODE",
"PGHOST",
"PGPASSWORD"
] |
[]
|
["PGAPPNAME", "PGPORT", "PGDATABASE", "PGUSER", "PGSSLMODE", "PGHOST", "PGPASSWORD"]
|
go
| 7 | 0 | |
main.go
|
package main
import (
"net/http"
"os"
"strings"
)
func init() {
accessToken := os.Getenv("NATUREREMO_TOKEN")
applianceName := os.Getenv("APPLIANCE")
signalNames := strings.Split(os.Getenv("SIGNALS"), ",")
s, err := NewServer(accessToken, applianceName, signalNames)
if err != nil {
panic(err)
}
http.Handle("/change", s)
http.Handle("/", http.FileServer(http.Dir("static")))
}
|
[
"\"NATUREREMO_TOKEN\"",
"\"APPLIANCE\"",
"\"SIGNALS\""
] |
[] |
[
"NATUREREMO_TOKEN",
"SIGNALS",
"APPLIANCE"
] |
[]
|
["NATUREREMO_TOKEN", "SIGNALS", "APPLIANCE"]
|
go
| 3 | 0 | |
demo_managing_keys.py
|
from dotenv import load_dotenv
load_dotenv()
import os
password = os.getenv('PASSWORD')
print(password)
# The code in this file will be published, but the .env file holds a series of key-value pairs that we
# don't want published at all, so passwords, keys, and connection strings go into that file (.env).
# How do we then manage keys, passwords, etc. for a web app deployed to a server or web host
# such as Azure?
# In Azure, once the app is deployed, open the web service's Configuration page: the key-value pairs
# are listed with their names displayed but their values hidden.
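# A minimal sketch (not part of the original file): the same lookup with an explicit fallback,
# so a local run without a .env file still gets a harmless placeholder while the deployed app
# picks the real value up from the host's application settings (e.g. Azure configuration).
import os
password = os.getenv('PASSWORD', 'local-dev-placeholder')
if password == 'local-dev-placeholder':
    print('Warning: PASSWORD is not set; using a local development placeholder')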
|
[] |
[] |
[
"PASSWORD"
] |
[]
|
["PASSWORD"]
|
python
| 1 | 0 |