maximum_pairwise_product.py
# python3


def max_pairwise_product_naive(numbers):
    assert len(numbers) >= 2
    assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)

    product = 0
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            product = max(product, numbers[i] * numbers[j])

    return product


def max_pairwise_product(numbers):
    assert len(numbers) >= 2
    assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)

    # Get the index of the maximum number.
    # (Checking index_i == -1 first avoids relying on numbers[-1] and on
    # bitwise `|` between booleans, which the original version did.)
    index_i = -1
    for i in range(len(numbers)):
        if index_i == -1 or numbers[i] > numbers[index_i]:
            index_i = i

    # Get the index of the second maximum number.
    index_j = -1
    for j in range(len(numbers)):
        if j != index_i and (index_j == -1 or numbers[j] > numbers[index_j]):
            index_j = j

    return numbers[index_i] * numbers[index_j]


if __name__ == '__main__':
    n = int(input())
    input_numbers = list(map(int, input().split()))
    assert len(input_numbers) == n
    print(max_pairwise_product(input_numbers))
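The two-pass scan above can be collapsed into a single pass that tracks the largest and second-largest values together. Below is a minimal standalone sketch of that variant in Go; the function and variable names are illustrative and assume non-negative inputs, as the asserts in the file above do.

package main

import "fmt"

// maxPairwiseProduct returns the product of the two largest values in numbers,
// tracking the largest and second-largest elements in a single pass.
// Assumes len(numbers) >= 2 and non-negative values.
func maxPairwiseProduct(numbers []int64) int64 {
	var first, second int64 = -1, -1
	for _, n := range numbers {
		if n > first {
			first, second = n, first
		} else if n > second {
			second = n
		}
	}
	return first * second
}

func main() {
	fmt.Println(maxPairwiseProduct([]int64{1, 2, 3, 9, 7})) // 63
}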
bootstrapper.go
package bootstrapper

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/template"
	"time"

	ignitionCfgv24tov31 "github.com/coreos/ign-converter/translate/v24tov31"
	ignitionCfgv2_4 "github.com/coreos/ignition/config/v2_4"
	ignitionCfgv2_4Types "github.com/coreos/ignition/config/v2_4/types"
	ignitionCfgError "github.com/coreos/ignition/v2/config/shared/errors"
	ignitionCfgv3 "github.com/coreos/ignition/v2/config/v3_1"
	ignitionCfgv3Types "github.com/coreos/ignition/v2/config/v3_1/types"
	"github.com/pkg/errors"
	"github.com/vincent-petithory/dataurl"
	"golang.org/x/sys/windows/svc/mgr"
)

/*
Bootstrapper is the entity responsible for bootstrapping a Windows node. The current scope of this component is to
perform a one-shot configuration of the Windows node to ensure that it can become a worker node. The following are
the jobs that the bootstrapper does:
- Parse the worker ignition file to get the bootstrap kubeconfig
- Ensure that the kubelet gets the correct kubelet config
- Run the kubelet as a Windows service
This will be remotely invoked from an Ansible script or can be run locally.
*/

const (
	// KubeletServiceName is the name we will run the kubelet Windows service under. It is required to be named "kubelet":
	// https://github.com/kubernetes/kubernetes/blob/v1.16.0/cmd/kubelet/app/init_windows.go#L26
	KubeletServiceName = "kubelet"
	// kubeletDependentSvc is the name of the service dependent on the kubelet Windows service
	kubeletDependentSvc = "hybrid-overlay-node"
	// kubeletSystemdName is the name of the systemd service that the kubelet runs under,
	// this is used to parse the kubelet args
	kubeletSystemdName = "kubelet.service"
	// kubeletPauseContainerImage is the location of the image we will use for the kubelet pause container
	kubeletPauseContainerImage = "mcr.microsoft.com/oss/kubernetes/pause:3.4.1"
	// serviceWaitTime is the amount of wait time required for the Windows service API to complete stop requests
	serviceWaitTime = time.Second * 20
	// certDirectory is where the kubelet will look for certificates
	certDirectory = "c:\\var\\lib\\kubelet\\pki\\"
	// cloudConfigOption is the kubelet CLI option for cloud configuration
	cloudConfigOption = "cloud-config"
	// windowsTaints defines the taints that need to be applied on the Windows nodes.
	/*
		TODO: As of now, this is limited to os=Windows, so every Windows pod in an OpenShift cluster should have a
		toleration for this. Example toleration in the pod spec:
		tolerations:
		  - key: "os"
		    operator: "Equal"
		    value: "Windows"
		    effect: "NoSchedule"
	*/
	windowsTaints = "os=Windows:NoSchedule"
	// nodeLabel contains the os specific label that will be applied to the Windows node object. This can be used to
	// identify the nodes managed by WSU and future operators. (We could have gotten this from the bootstrap
	// kubeconfig too, however the label value is resolved on the host side, making it convenient when we run WMCB
	// within a container)
	nodeLabel = "node.openshift.io/os_id=Windows"

	// kubeletExeKey is the map key used for the kubelet.exe in the deconstructed kubelet map
	kubeletExeKey = "kubeletexe"
	// kubeletStandAloneArgsKey is the map key used for standalone kubelet args like --windows-service in the
	// deconstructed map
	kubeletStandAloneArgsKey = "standalone"

	// CNI constants
	// cniDirName is the directory within the install dir where the CNI binaries are placed
	cniDirName = "cni"
	// cniConfigDirName is the directory in the CNI dir where the cni.conf is placed
	cniConfigDirName = cniDirName + "/config/"

	// kubelet CLI options for CNI
	// resolvOption is to specify the resolv.conf
	resolvOption = "--resolv-conf"
	// resolvValue is the default value passed to the resolv option
	resolvValue = "\"\""
	// networkPluginOption is to specify the network plugin type
	networkPluginOption = "--network-plugin"
	// networkPluginValue is the default network plugin that we support
	networkPluginValue = "cni"
	// cniBinDirOption is to specify the CNI binary directory
	cniBinDirOption = "--cni-bin-dir"
	// cniConfDirOption is to specify the CNI conf directory
	cniConfDirOption = "--cni-conf-dir"
)

// These regex are global, so that we only need to compile them once
var (
	// cloudProviderRegex searches for the cloud provider option given to the kubelet
	cloudProviderRegex = regexp.MustCompile(`--cloud-provider=(\w*)`)

	// cloudConfigRegex searches for the cloud config option given to the kubelet. We are assuming that the file has a
	// conf extension.
	cloudConfigRegex = regexp.MustCompile(`--` + cloudConfigOption + `=(\/.*conf)`)

	// verbosityRegex searches for the verbosity option given to the kubelet
	verbosityRegex = regexp.MustCompile(`--v=(\w*)`)
)

// winNodeBootstrapper is responsible for bootstrapping and ensuring kubelet runs as a Windows service
type winNodeBootstrapper struct {
	// kubeconfigPath is the file path of the node bootstrap kubeconfig
	kubeconfigPath string
	// kubeletConfPath is the file path of the kubelet configuration
	kubeletConfPath string
	// ignitionFilePath is the path to the ignition file which is used to set up worker nodes
	// https://github.com/coreos/ignition/blob/spec2x/doc/getting-started.md
	ignitionFilePath string
	// initialKubeletPath is the path to the kubelet that we'll be using to bootstrap this node
	initialKubeletPath string
	// TODO: When more services are added consider decomposing the services to a separate Service struct with common functions
	// kubeletSVC is a pointer to the kubeletService struct
	kubeletSVC *kubeletService
	// svcMgr is used to interact with the Windows service API
	svcMgr *mgr.Mgr
	// installDir is the directory the kubelet service will be installed to
	installDir string
	// logDir is the directory that captures log outputs of the kubelet
	// TODO: make this directory available in Artifacts
	logDir string
	// kubeletArgs is a map of the variable arguments that will be passed to the kubelet
	kubeletArgs map[string]string
	// cni holds all the CNI specific information
	cni *cniOptions
}

// cniOptions is responsible for reconfiguring the kubelet service with CNI configuration
type cniOptions struct {
	// k8sInstallDir is the main installation directory
	k8sInstallDir string
	// dir is the input dir where the CNI binaries are present
	dir string
	// config is the input CNI configuration file
	config string
	// binDir is the directory where the CNI binaries will be placed
	binDir string
	// confDir is the directory where the CNI config will be placed
	confDir string
}

// NewWinNodeBootstrapper takes the dir to install the kubelet to, and paths to the ignition and kubelet files, along
// with the CNI options as inputs, and generates the winNodeBootstrapper object. The CNI options are populated only in
// the configure-cni command. The inputs to NewWinNodeBootstrapper are ignored while using the uninstall kubelet
// functionality.
func NewWinNodeBootstrapper(k8sInstallDir, ignitionFile, kubeletPath string, cniDir string, cniConfig string) (*winNodeBootstrapper, error) {
	// Check if cniDir or cniConfig is empty when the other is not
	if (cniDir == "" && cniConfig != "") || (cniDir != "" && cniConfig == "") {
		return nil, fmt.Errorf("both cniDir and cniConfig need to be populated")
	}
	svcMgr, err := mgr.Connect()
	if err != nil {
		return nil, fmt.Errorf("could not connect to Windows SCM: %s", err)
	}
	bootstrapper := winNodeBootstrapper{
		kubeconfigPath:     filepath.Join(k8sInstallDir, "kubeconfig"),
		kubeletConfPath:    filepath.Join(k8sInstallDir, "kubelet.conf"),
		ignitionFilePath:   ignitionFile,
		installDir:         k8sInstallDir,
		logDir:             "C:\\var\\log\\kubelet",
		initialKubeletPath: kubeletPath,
		svcMgr:             svcMgr,
		kubeletArgs:        make(map[string]string),
	}
	// populate the CNI struct if CNI options are present
	if cniDir != "" && cniConfig != "" {
		bootstrapper.cni, err = newCNIOptions(k8sInstallDir, cniDir, cniConfig)
		if err != nil {
			return nil, fmt.Errorf("could not initialize cniOptions: %v", err)
		}
	}

	// If there is already a kubelet service running, find and assign it
	bootstrapper.kubeletSVC, err = assignExistingKubelet(svcMgr)
	if err != nil {
		return nil, fmt.Errorf("could not assign existing kubelet service: %v", err)
	}
	return &bootstrapper, nil
}

// assignExistingKubelet finds the existing kubelet service from the Windows Service Manager,
// assigns its value to the kubeletService struct and returns it.
func assignExistingKubelet(svcMgr *mgr.Mgr) (*kubeletService, error) {
	ksvc, err := svcMgr.OpenService(KubeletServiceName)
	if err != nil {
		// Do not return an error if the service is not installed.
		if !strings.Contains(err.Error(), "service does not exist") {
			return nil, fmt.Errorf("error getting existing kubelet service %v", err)
		}
		return nil, nil
	}
	dependents, err := updateKubeletDependents(svcMgr)
	if err != nil {
		return nil, fmt.Errorf("error updating kubelet dependents field %v", err)
	}
	kubeletSVC, err := newKubeletService(ksvc, dependents)
	if err != nil {
		return nil, fmt.Errorf("could not initialize struct kubeletService: %v", err)
	}
	return kubeletSVC, nil
}

// newCNIOptions takes the paths to the kubelet installation and the CNI files as input and returns the cniOptions
// object
func newCNIOptions(k8sInstallDir, dir, config string) (*cniOptions, error) {
	if err := checkCNIInputs(k8sInstallDir, dir, config); err != nil {
		return nil, err
	}

	return &cniOptions{
		k8sInstallDir: k8sInstallDir,
		dir:           dir,
		config:        config,
		binDir:        filepath.Join(k8sInstallDir, cniDirName),
		confDir:       filepath.Join(k8sInstallDir, cniConfigDirName),
	}, nil
}

// translationFunc is a function that takes a byte array and changes it for use on windows
type translationFunc func(*winNodeBootstrapper, []byte) ([]byte, error)

// fileTranslation indicates where a file should be written and what should be done to the contents
type fileTranslation struct {
	dest string
	translationFunc
}

// kubeletConf defines the fields of the kubelet.conf file that are set from WMCB variables
type kubeletConf struct {
	// ClientCAFile specifies the location of the client certificate
	ClientCAFile string
}

// createKubeletConf creates the config file for the kubelet, with Windows specific configuration.
// Add values in the kubelet_config.json file for additional static fields.
// Add fields in the kubeletConf struct for variable fields.
func (wmcb *winNodeBootstrapper) createKubeletConf() ([]byte, error) {
	// get the config file content using bindata.go
	content, err := Asset("templates/kubelet_config.json")
	if err != nil {
		return nil, fmt.Errorf("error reading kubelet config template: %v", err)
	}

	kubeletConfTmpl := template.New("kubeletconf")

	// Parse the template
	kubeletConfTmpl, err = kubeletConfTmpl.Parse(string(content))
	if err != nil {
		return nil, err
	}
	// Fill up the config file, using the kubeletConf struct
	variableFields := kubeletConf{
		ClientCAFile: strings.Join(append(strings.Split(wmcb.installDir, `\`), `kubelet-ca.crt`), `\\`),
	}
	// Create the kubelet.conf file
	kubeletConfPath := filepath.Join(wmcb.installDir, "kubelet.conf")
	kubeletConfFile, err := os.Create(kubeletConfPath)
	if err != nil {
		return nil, fmt.Errorf("error creating %s: %v", kubeletConfPath, err)
	}
	err = kubeletConfTmpl.Execute(kubeletConfFile, variableFields)
	if err != nil {
		return nil, fmt.Errorf("error writing data to %v file: %v", kubeletConfPath, err)
	}
	kubeletConfData, err := ioutil.ReadFile(kubeletConfFile.Name())
	if err != nil {
		return nil, fmt.Errorf("error reading data from %v file: %v", kubeletConfPath, err)
	}
	return kubeletConfData, nil
}

// translateFile decodes an ignition "Storage.Files.Contents.Source" field and transforms it via the function provided.
// If fileTranslateFn is nil, ignitionSource will be decoded, but not transformed.
func (wmcb *winNodeBootstrapper) translateFile(ignitionSource string, fileTranslateFn translationFunc) ([]byte, error) {
	contents, err := dataurl.DecodeString(ignitionSource)
	if err != nil {
		return []byte{}, err
	}
	newContents := contents.Data
	if fileTranslateFn != nil {
		newContents, err = fileTranslateFn(wmcb, contents.Data)
		if err != nil {
			return []byte{}, err
		}
	}
	return newContents, err
}

// convertIgnition2to3 takes an ignition spec v2.4 config and returns a v3.1 config
func convertIgnition2to3(ign2config ignitionCfgv2_4Types.Config) (ignitionCfgv3Types.Config, error) {
	// only support writing to the root file system
	fsMap := map[string]string{
		"root": "/",
	}
	dedupedIgn2config, err := ignitionCfgv24tov31.RemoveDuplicateFilesAndUnits(ign2config)
	if err != nil {
		return ignitionCfgv3Types.Config{}, errors.Errorf("unable to deduplicate Ignition spec v2 config: %v", err)
	}
	ign3_1config, err := ignitionCfgv24tov31.Translate(dedupedIgn2config, fsMap)
	if err != nil {
		return ignitionCfgv3Types.Config{}, errors.Errorf("unable to convert Ignition spec v2 config to v3: %v", err)
	}
	return ign3_1config, nil
}

// parseIgnitionFileContents parses the ignition file contents and writes the contents of the described files to the
// k8s installation directory
func (wmcb *winNodeBootstrapper) parseIgnitionFileContents(ignitionFileContents []byte, filesToTranslate map[string]fileTranslation) error {
	// Parse the raw file contents for an Ignition spec v3.1 config
	configuration, report, err := ignitionCfgv3.Parse(ignitionFileContents)
	if err != nil && err.Error() == ignitionCfgError.ErrUnknownVersion.Error() {
		// the Ignition config spec v2.4 parser supports parsing all spec versions up to 2.4
		configV2, reportV2, errV2 := ignitionCfgv2_4.Parse(ignitionFileContents)
		if errV2 != nil || reportV2.IsFatal() {
			return errors.Errorf("failed to parse Ign spec v2 config: %v\nReport: %v", errV2, reportV2)
		}
		configuration, err = convertIgnition2to3(configV2)
		if err != nil {
			return err
		}
	} else if err != nil || report.IsFatal() {
		return errors.Errorf("failed to parse Ign spec v3.1 config: %v\nReport: %v", err, report)
	}

	// Find the kubelet systemd service specified in the ignition file and grab the variable arguments
	// TODO: Refactor this to handle environment variables in argument values
	for _, unit := range configuration.Systemd.Units {
		if unit.Name != kubeletSystemdName {
			continue
		}
		if unit.Contents == nil {
			return fmt.Errorf("could not process %s: Unit is empty", unit.Name)
		}

		results := cloudProviderRegex.FindStringSubmatch(*unit.Contents)
		if len(results) == 2 {
			wmcb.kubeletArgs["cloud-provider"] = results[1]
		}

		// Check for the presence of the "--cloud-config" option and if it is present append the value to
		// filesToTranslate. This option is only present for Azure and hence we cannot assume it as a file that
		// requires translation across clouds.
		results = cloudConfigRegex.FindStringSubmatch(*unit.Contents)
		if len(results) == 2 {
			cloudConfFilename := filepath.Base(results[1])
			// Check if we were able to get a valid filename. Read the filepath.Base() godoc for an explanation.
			if cloudConfFilename == "." || os.IsPathSeparator(cloudConfFilename[0]) {
				return fmt.Errorf("could not get cloud config filename from %s", results[0])
			}
			filesToTranslate[results[1]] = fileTranslation{
				dest: filepath.Join(wmcb.installDir, cloudConfFilename),
			}
			// Set the --cloud-config option value
			wmcb.kubeletArgs[cloudConfigOption] = filepath.Join(wmcb.installDir, cloudConfFilename)
		}

		results = verbosityRegex.FindStringSubmatch(*unit.Contents)
		if len(results) == 2 {
			wmcb.kubeletArgs["v"] = results[1]
		}
	}

	// In case the verbosity argument is missing, use a default value
	if wmcb.kubeletArgs["v"] == "" {
		wmcb.kubeletArgs["v"] = "3"
	}

	// For each new file in the ignition file check if it is a file we are interested in, if so, decode, transform,
	// and write it to the destination path
	for _, ignFile := range configuration.Storage.Files {
		if filePair, ok := filesToTranslate[ignFile.Node.Path]; ok {
			if ignFile.Contents.Source == nil {
				return fmt.Errorf("could not process %s: File is empty", ignFile.Node.Path)
			}
			newContents, err := wmcb.translateFile(*ignFile.Contents.Source, filePair.translationFunc)
			if err != nil {
				return fmt.Errorf("could not process %s: %s", ignFile.Node.Path, err)
			}
			if err = ioutil.WriteFile(filePair.dest, newContents, 0644); err != nil {
				return fmt.Errorf("could not write to %s: %s", filePair.dest, err)
			}
		}
	}

	return nil
}

// initializeKubeletFiles initializes the files required by the kubelet
func (wmcb *winNodeBootstrapper) initializeKubeletFiles() error {
	filesToTranslate := map[string]fileTranslation{
		"/etc/kubernetes/kubeconfig": {
			dest: filepath.Join(wmcb.installDir, "bootstrap-kubeconfig"),
		},
		"/etc/kubernetes/kubelet-ca.crt": {
			dest: filepath.Join(wmcb.installDir, "kubelet-ca.crt"),
		},
	}

	// Create the manifest directory needed by the kubelet for the static pods, we shouldn't override if the pod
	// manifest directory already exists
	podManifestDirectory := filepath.Join(wmcb.installDir, "etc", "kubernetes", "manifests")
	if _, err := os.Stat(podManifestDirectory); os.IsNotExist(err) {
		err := os.MkdirAll(podManifestDirectory, os.ModeDir)
		if err != nil {
			return fmt.Errorf("could not make pod manifest directory: %s", err)
		}
	}

	err := os.MkdirAll(wmcb.installDir, os.ModeDir)
	if err != nil {
		return fmt.Errorf("could not make install directory: %s", err)
	}

	_, err = wmcb.createKubeletConf()
	if err != nil {
		return fmt.Errorf("error creating kubelet configuration %v", err)
	}

	if wmcb.initialKubeletPath != "" {
		err = copyFile(wmcb.initialKubeletPath, filepath.Join(wmcb.installDir, "kubelet.exe"))
		if err != nil {
			return fmt.Errorf("could not copy kubelet: %s", err)
		}
	}

	// Create the log directory
	err = os.MkdirAll(wmcb.logDir, os.ModeDir)
	if err != nil {
		return fmt.Errorf("could not make %s directory: %v", wmcb.logDir, err)
	}

	// Populate the destination directory with the files we need
	if wmcb.ignitionFilePath != "" {
		ignitionFileContents, err := ioutil.ReadFile(wmcb.ignitionFilePath)
		if err != nil {
			return fmt.Errorf("could not read ignition file: %s", err)
		}

		err = wmcb.parseIgnitionFileContents(ignitionFileContents, filesToTranslate)
		if err != nil {
			return fmt.Errorf("could not parse ignition file: %s", err)
		}
	}
	return nil
}

// getInitialKubeletArgs returns the kubelet args required during initial kubelet start up.
// This function assumes that wmcb.kubeletArgs is populated.
func (wmcb *winNodeBootstrapper) getInitialKubeletArgs() []string {
	// If initialize-kubelet is run after configure-cni, the kubelet args will be overwritten and the CNI
	// configuration will be lost. The assumption is that every time initialize-kubelet is run, configure-cni needs to
	// be run again. WMCO ensures that initialize-kubelet is run successfully before configure-cni and we don't
	// expect users to execute WMCB directly.
	kubeletArgs := []string{
		"--config=" + wmcb.kubeletConfPath,
		"--bootstrap-kubeconfig=" + filepath.Join(wmcb.installDir, "bootstrap-kubeconfig"),
		"--kubeconfig=" + wmcb.kubeconfigPath,
		"--pod-infra-container-image=" + kubeletPauseContainerImage,
		"--cert-dir=" + certDirectory,
		"--windows-service",
		"--logtostderr=false",
		"--log-file=" + filepath.Join(wmcb.logDir, "kubelet.log"),
		// Registers the kubelet with Windows specific taints so that linux pods won't get scheduled onto
		// Windows nodes.
		// TODO: Write an `against the cluster` e2e test which checks for the Windows node object created
		// and checks for the taint.
		"--register-with-taints=" + windowsTaints,
		// Label that WMCB uses
		"--node-labels=" + nodeLabel,
	}
	if cloudProvider, ok := wmcb.kubeletArgs["cloud-provider"]; ok {
		kubeletArgs = append(kubeletArgs, "--cloud-provider="+cloudProvider)
	}
	if v, ok := wmcb.kubeletArgs["v"]; ok {
		kubeletArgs = append(kubeletArgs, "--v="+v)
	}
	if cloudConfigValue, ok := wmcb.kubeletArgs[cloudConfigOption]; ok {
		kubeletArgs = append(kubeletArgs, "--"+cloudConfigOption+"="+cloudConfigValue)
	}
	if nodeWorkerLabel, ok := wmcb.kubeletArgs["node-labels"]; ok {
		kubeletArgs = append(kubeletArgs, "--"+"node-labels"+"="+nodeWorkerLabel)
	}
	return kubeletArgs
}

// ensureKubeletService creates a new kubelet service to our specifications if it is not already present, else
// it updates the existing kubelet service with our specifications.
func (wmcb *winNodeBootstrapper) ensureKubeletService() error {
	// Mostly default values here
	c := mgr.Config{
		ServiceType: 0,
		// StartAutomatic will start the service again if the node restarts
		StartType:      mgr.StartAutomatic,
		ErrorControl:   0,
		LoadOrderGroup: "",
		TagId:          0,
		// set a dependency on docker
		Dependencies:     []string{"docker"},
		ServiceStartName: "",
		DisplayName:      "",
		Password:         "",
		Description:      "OpenShift Kubelet",
	}

	// Get the kubelet args
	kubeletArgs := wmcb.getInitialKubeletArgs()

	if wmcb.kubeletSVC == nil {
		if err := wmcb.createKubeletService(c, kubeletArgs); err != nil {
			return fmt.Errorf("failed to create kubelet service : %v ", err)
		}
	} else {
		if err := wmcb.updateKubeletService(c, kubeletArgs); err != nil {
			return fmt.Errorf("failed to update kubelet service : %v ", err)
		}
	}

	if err := wmcb.kubeletSVC.setRecoveryActions(); err != nil {
		return fmt.Errorf("failed to set recovery actions for Windows service %s : %v", KubeletServiceName, err)
	}
	return nil
}

// createKubeletService creates a new kubelet service to our specifications
func (wmcb *winNodeBootstrapper) createKubeletService(c mgr.Config, kubeletArgs []string) error {
	ksvc, err := wmcb.svcMgr.CreateService(KubeletServiceName, filepath.Join(wmcb.installDir, "kubelet.exe"), c,
		kubeletArgs...)
	if err != nil {
		return err
	}

	wmcb.kubeletSVC, err = newKubeletService(ksvc, nil)
	if err != nil {
		return fmt.Errorf("could not initialize struct kubeletService: %v", err)
	}
	return nil
}

// updateKubeletService updates an existing kubelet service with our specifications
func (wmcb *winNodeBootstrapper) updateKubeletService(config mgr.Config, kubeletArgs []string) error {
	// Get the existing config
	existingConfig, err := wmcb.kubeletSVC.config()
	if err != nil {
		return fmt.Errorf("no existing config found")
	}

	// Stop the kubelet service as there could be open file handles from kubelet.exe on the plugin files
	if err := wmcb.kubeletSVC.stop(); err != nil {
		return fmt.Errorf("unable to stop kubelet service: %v", err)
	}

	// Populate the existing config with non default values from the desired config.
	existingConfig.Dependencies = config.Dependencies
	existingConfig.DisplayName = config.DisplayName
	existingConfig.StartType = config.StartType

	// Create the kubelet command to populate config.BinaryPathName
	// Add a space after kubelet.exe followed by the args
	kubeletcmd := filepath.Join(wmcb.installDir, "kubelet.exe") + " "
	// Add the rest of the args
	for _, args := range kubeletArgs {
		kubeletcmd += args + " "
	}
	existingConfig.BinaryPathName = strings.TrimSpace(kubeletcmd)

	// Update the service config and restart
	if err := wmcb.kubeletSVC.refresh(existingConfig); err != nil {
		return fmt.Errorf("unable to refresh kubelet service: %v", err)
	}

	// Update the dependents field if there is any change
	dependents, err := updateKubeletDependents(wmcb.svcMgr)
	if err != nil {
		return fmt.Errorf("error updating kubelet dependents field %v", err)
	}
	wmcb.kubeletSVC.dependents = dependents

	return nil
}

// InitializeKubelet performs the initial kubelet configuration. It sets up the install directory, creates the kubelet
// service, and then starts the kubelet service
func (wmcb *winNodeBootstrapper) InitializeKubelet() error {
	var err error

	if wmcb.kubeletSVC != nil {
		// Stop the kubelet service if it is in Running state. This is required to access kubelet files
		// without getting the 'The process cannot access the file because it is being used by another process.' error
		err := wmcb.kubeletSVC.stop()
		if err != nil {
			return fmt.Errorf("failed to stop kubelet service: %v", err)
		}
	}

	err = wmcb.initializeKubeletFiles()
	if err != nil {
		return fmt.Errorf("failed to initialize kubelet: %v", err)
	}

	err = wmcb.ensureKubeletService()
	if err != nil {
		return fmt.Errorf("failed to ensure that kubelet windows service is present: %v", err)
	}
	err = wmcb.kubeletSVC.start()
	if err != nil {
		return fmt.Errorf("failed to start kubelet windows service: %v", err)
	}
	return nil
}

// Configure configures the kubelet service for plugins like CNI
func (wmcb *winNodeBootstrapper) Configure() error {
	// TODO: add && wmcb.csi == null check here when we add CSI support
	if wmcb.cni == nil {
		return fmt.Errorf("cannot configure without required plugin inputs")
	}

	// We cannot proceed if the kubelet service is not present on the system as we need to update it with the plugin
	// configuration
	if wmcb.kubeletSVC == nil {
		return fmt.Errorf("kubelet service is not present")
	}

	// Stop the kubelet service as there could be open file handles from kubelet.exe on the plugin files
	if err := wmcb.kubeletSVC.stop(); err != nil {
		return fmt.Errorf("unable to stop kubelet service: %v", err)
	}

	config, err := wmcb.kubeletSVC.config()
	if err != nil {
		return fmt.Errorf("error getting kubelet service config: %v", err)
	}

	// TODO: add a wmcb.cni != null check here when we add CSI support as this function will be called in both cases
	if err = wmcb.cni.configure(&config.BinaryPathName); err != nil {
		return fmt.Errorf("error configuring kubelet service for CNI: %v", err)
	}

	if err = wmcb.kubeletSVC.refresh(config); err != nil {
		return fmt.Errorf("unable to refresh kubelet service: %v", err)
	}

	return nil
}

// Disconnect removes all connections to the Windows service manager api, and allows services to be deleted
func (wmcb *winNodeBootstrapper) Disconnect() error {
	if err := wmcb.kubeletSVC.disconnect(); err != nil {
		return err
	}
	err := wmcb.svcMgr.Disconnect()
	wmcb.svcMgr = nil
	return err
}

// UninstallKubelet uninstalls the kubelet service from the Windows node
func (wmcb *winNodeBootstrapper) UninstallKubelet() error {
	if wmcb.kubeletSVC == nil {
		return fmt.Errorf("kubelet service is not present")
	}
	// Stop and remove the kubelet service if it is in Running state.
	err := wmcb.kubeletSVC.stopAndRemove()
	if err != nil {
		return fmt.Errorf("failed to stop and remove kubelet service: %v", err)
	}
	return nil
}

func copyFile(src, dest string) error {
	from, err := os.Open(src)
	if err != nil {
		return err
	}
	defer from.Close()

	to, err := os.OpenFile(dest, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer to.Close()

	_, err = io.Copy(to, from)
	return err
}

// checkCNIInputs checks if there are any issues with the CNI inputs to WMCB and returns an error if there is
func checkCNIInputs(k8sInstallDir string, cniDir string, cniConfig string) error {
	// Check if there are any issues accessing the installation directory. We don't want to proceed on any error as it
	// could cause issues further down the line when copying the files.
	if _, err := os.Stat(k8sInstallDir); err != nil {
		return fmt.Errorf("error accessing install directory %s: %v", k8sInstallDir, err)
	}

	// Check if there are any issues accessing the CNI dir. We don't want to proceed on any error as it could cause
	// issues further down the line when copying the files.
	cniPathInfo, err := os.Stat(cniDir)
	if err != nil {
		return fmt.Errorf("error accessing CNI dir %s: %v", cniDir, err)
	}
	if !cniPathInfo.IsDir() {
		return fmt.Errorf("CNI dir cannot be a file")
	}

	// Check if there are files present in the CNI directory
	files, err := ioutil.ReadDir(cniDir)
	if err != nil {
		return fmt.Errorf("error reading CNI dir %s: %v", cniDir, err)
	}
	if len(files) == 0 {
		return fmt.Errorf("no files present in CNI dir %s", cniDir)
	}

	// Check if there are any issues accessing the CNI configuration file. We don't want to proceed on any error as it
	// could cause issues further down the line when copying the files.
	cniConfigInfo, err := os.Stat(cniConfig)
	if err != nil {
		return fmt.Errorf("error accessing CNI config %s: %v", cniConfig, err)
	}
	if cniConfigInfo.IsDir() {
		return fmt.Errorf("CNI config cannot be a directory")
	}

	return nil
}

// copyFiles copies the CNI binaries and config to the installation directory
func (cni *cniOptions) copyFiles() error {
	// Read C:\source\cni\
	files, err := ioutil.ReadDir(cni.dir)
	if err != nil {
		return fmt.Errorf("error reading CNI dir %s: %v", cni.dir, err)
	}

	// Copy the CNI binaries from the input CNI dir to the CNI installation directory
	for _, file := range files {
		// Ignore directories for now. If we find that there are CNI packages with nested directories, we can update
		// this loop to be recursive.
		if file.IsDir() {
			continue
		}
		// C:\source\cni\filename
		src := filepath.Join(cni.dir, file.Name())
		// C:\k\cni\filename
		dest := filepath.Join(cni.binDir, file.Name())
		if err = copyFile(src, dest); err != nil {
			return fmt.Errorf("error copying %s --> %s: %v", src, dest, err)
		}
	}

	// Copy the CNI config to the CNI configuration directory. Example: C:\k\cni\config\cni.conf
	cniConfigDest := filepath.Join(cni.confDir, filepath.Base(cni.config))
	if err = copyFile(cni.config, cniConfigDest); err != nil {
		return fmt.Errorf("error copying CNI config %s --> %s: %v", cni.config, cniConfigDest, err)
	}

	return nil
}

// ensureDirIsPresent ensures that the CNI parent and child directories are present on the system
func (cni *cniOptions) ensureDirIsPresent() error {
	// By checking for the config directory, we can ensure both parent and child directories are present
	configDir := filepath.Join(cni.k8sInstallDir, cniConfigDirName)
	if _, err := os.Stat(configDir); err != nil {
		if os.IsNotExist(err) {
			// 0700 == Only user has access
			if err = os.MkdirAll(configDir, 0700); err != nil {
				return err
			}
		} else {
			return err
		}
	}
	return nil
}

// deconstructKubeletCmd deconstructs the kubelet command into a map. An argument like "--config=c:\\k\\kubelet.conf"
// will result in a key "--config" with value "c:\\k\\kubelet.conf" in the map. Standalone args like
// "--windows-service" will be stored against the special kubeletStandAloneArgsKey as a string. The kubelet.exe path
// will be stored against the special kubeletExeKey.
func deconstructKubeletCmd(kubeletCmd *string) (map[string]string, error) {
	if kubeletCmd == nil {
		return nil, fmt.Errorf("nil kubelet cmd passed")
	}

	kubeletArgs := strings.Split(*kubeletCmd, " ")
	kubeletKeyValueArgs := make(map[string]string)

	// Index 0 of kubeletArgs will hold the kubelet.exe. Return an error if it does not.
	if !strings.Contains(kubeletArgs[0], "kubelet.exe") {
		return nil, fmt.Errorf("kubelet command does not start with kubelet.exe")
	}
	kubeletKeyValueArgs[kubeletExeKey] = kubeletArgs[0]

	// We start at index 1 as we want to ignore kubelet.exe
	for _, option := range kubeletArgs[1:] {
		// Args like --config=c:\\k\\kubelet.conf will be split on "=" and stored as key value pairs of the map.
		// Standalone args like --windows-service will be stored as a string against a special key.
		if strings.Contains(option, "=") {
			kv := strings.Split(option, "=")
			kubeletKeyValueArgs[kv[0]] = kv[1]
			// This is to account for args like --register-with-taints=os=Windows:NoSchedule
			if len(kv) > 2 {
				for _, val := range kv[2:] {
					kubeletKeyValueArgs[kv[0]] += "=" + val
				}
			}
		} else {
			kubeletKeyValueArgs[kubeletStandAloneArgsKey] += option + " "
		}
	}

	// Remove the trailing space
	if standaloneArgs, found := kubeletKeyValueArgs[kubeletStandAloneArgsKey]; found {
		kubeletKeyValueArgs[kubeletStandAloneArgsKey] = strings.TrimSpace(standaloneArgs)
	}

	return kubeletKeyValueArgs, nil
}

// reconstructKubeletCmd takes a map of CLI options and combines them into a kubelet command that can be used in the
// Windows service
func reconstructKubeletCmd(kubeletKeyValueArgs map[string]string) (string, error) {
	if kubeletKeyValueArgs == nil {
		return "", fmt.Errorf("nil map passed")
	}

	kubeletCmd, found := kubeletKeyValueArgs[kubeletExeKey]
	if !found {
		return "", fmt.Errorf("%s key not found in the map", kubeletExeKey)
	}
	// Add a space after kubelet.exe followed by the standalone args
	kubeletCmd += " " + kubeletKeyValueArgs[kubeletStandAloneArgsKey] + " "

	// Add the rest of the key value args
	for key, value := range kubeletKeyValueArgs {
		if key == kubeletExeKey || key == kubeletStandAloneArgsKey {
			continue
		}
		kubeletCmd += key + "=" + value + " "
	}

	// Remove the trailing space
	kubeletCmd = strings.TrimSpace(kubeletCmd)

	return kubeletCmd, nil
}

// updateKubeletArgs updates the given kubelet command with the CNI args.
// Example: --resolv-conf="" --network-plugin=cni --cni-bin-dir=C:\k\cni --cni-conf-dir=c:\k\cni\config
func (cni *cniOptions) updateKubeletArgs(kubeletCmd *string) error {
	if kubeletCmd == nil {
		return fmt.Errorf("nil kubelet cmd passed")
	}

	kubeletKeyValueArgs, err := deconstructKubeletCmd(kubeletCmd)
	if err != nil {
		return fmt.Errorf("unable to deconstruct kubelet command %s: %v", *kubeletCmd, err)
	}

	// Add or replace the CNI CLI args
	kubeletKeyValueArgs[resolvOption] = resolvValue
	kubeletKeyValueArgs[networkPluginOption] = networkPluginValue
	kubeletKeyValueArgs[cniBinDirOption] = cni.binDir
	kubeletKeyValueArgs[cniConfDirOption] = cni.confDir

	if *kubeletCmd, err = reconstructKubeletCmd(kubeletKeyValueArgs); err != nil {
		return fmt.Errorf("unable to reconstruct kubelet command %v: %v", kubeletKeyValueArgs, err)
	}

	return nil
}

// configure performs the CNI configuration. It sets up the CNI directories and updates the kubelet command with the
// CNI arguments. Updating and restarting the kubelet service is outside of its purview.
func (cni *cniOptions) configure(kubeletCmd *string) error {
	if err := cni.ensureDirIsPresent(); err != nil {
		return fmt.Errorf("unable to create CNI directory %s: %v", filepath.Join(cni.dir, cniConfigDirName), err)
	}
	if err := cni.copyFiles(); err != nil {
		return fmt.Errorf("unable to copy CNI files: %v", err)
	}
	if err := cni.updateKubeletArgs(kubeletCmd); err != nil {
		return fmt.Errorf("unable to update the kubelet arguments: %v", err)
	}
	return nil
}

// updateKubeletDependents updates the dependents field of the kubeletService struct
// to reflect the current list of dependent services. This function assumes that the kubelet service is running.
func updateKubeletDependents(svcMgr *mgr.Mgr) ([]*mgr.Service, error) {
	var dependents []*mgr.Service
	// If there is already a kubelet service running, find it
	dependentSvc, err := svcMgr.OpenService(kubeletDependentSvc)
	if err != nil {
		// Do not return an error if the service is not installed.
		if !strings.Contains(err.Error(), "service does not exist") {
			return nil, fmt.Errorf("error getting dependent services for kubelet %v", err)
		}
	}
	if dependentSvc != nil {
		dependents = append(dependents, dependentSvc)
	}
	return dependents, nil
}
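The deconstruct/reconstruct pair above hinges on one subtlety: splitting an option on "=" must re-join any extra pieces so values that themselves contain "=" (such as --register-with-taints=os=Windows:NoSchedule) survive the round trip. Below is a standalone sketch of just that splitting rule, with illustrative names rather than the package's own API.

package main

import (
	"fmt"
	"strings"
)

// splitOption splits a "--key=value" option on the first "=" only, so values
// containing "=" (e.g. os=Windows:NoSchedule) stay intact. It assumes the
// option contains at least one "=", as the caller in deconstructKubeletCmd
// guarantees via strings.Contains.
func splitOption(option string) (key, value string) {
	kv := strings.Split(option, "=")
	key, value = kv[0], kv[1]
	// Re-join any remaining pieces, mirroring the inner loop above.
	for _, val := range kv[2:] {
		value += "=" + val
	}
	return key, value
}

func main() {
	k, v := splitOption("--register-with-taints=os=Windows:NoSchedule")
	fmt.Println(k) // --register-with-taints
	fmt.Println(v) // os=Windows:NoSchedule
}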
powershell.go
// Copyright 2009 Bart de Boer. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package exec

import (
	"fmt"
	"strings"
)

func NewPowerShellCommand(command string, arg ...string) *Cmd {
	return NewCommand("powershell", "-Command",
		fmt.Sprintf("& {%s %s; If (!$?) { exit 1 }}", command, strings.Join(arg, " ")),
	)
}

func NewPowerShellIOCommand(command string, arg ...string) *Cmd {
	return NewIOCommand("powershell", "-Command",
		fmt.Sprintf("& {%s %s; If (!$?) { exit 1 }}", command, strings.Join(arg, " ")),
	)
}
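For reference, the string these helpers build wraps the call in a PowerShell script block and forces a non-zero exit code when the last command fails, since powershell.exe does not otherwise propagate $? to the caller. Below is a standalone sketch of the same invocation using os/exec directly; Cmd, NewCommand, and NewIOCommand are defined elsewhere in this package, so the snippet substitutes the standard library.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	command, args := "Get-ChildItem", []string{"C:\\k"}
	// Same script-block wrapper as NewPowerShellCommand: run the command,
	// then exit 1 if the last invocation failed ($? is false).
	script := fmt.Sprintf("& {%s %s; If (!$?) { exit 1 }}", command, strings.Join(args, " "))
	out, err := exec.Command("powershell", "-Command", script).CombinedOutput()
	fmt.Println(string(out), err)
}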
test_validators.py
from unittest import TestCase

import pytest
from django.core.exceptions import ValidationError

from va_explorer.tests.factories import UserFactory
from va_explorer.users.models import UserPasswordHistory
from va_explorer.users.validators import (
    PasswordComplexityValidator,
    PasswordHistoryValidator,
)

pytestmark = pytest.mark.django_db


class TestPasswordComplexityValidator(TestCase):
    def setUp(self):
        self.user = UserFactory.create()
        self.validator = PasswordComplexityValidator()

    def test_rejects_no_number(self):
        with self.assertRaisesRegex(ValidationError, "number"):
            self.validator.validate("Password!", self.user)

    def test_rejects_no_lower(self):
        with self.assertRaisesRegex(ValidationError, "lowercase"):
            self.validator.validate("PASSWORD!", self.user)

    def test_rejects_no_upper(self):
        with self.assertRaisesRegex(ValidationError, "uppercase"):
            self.validator.validate("password!", self.user)

    def test_rejects_no_special(self):
        with self.assertRaisesRegex(ValidationError, "nonalphanumeric"):
            self.validator.validate("Password", self.user)

    def test_rejects_multiple(self):
        # Expect no_number, no_upper, and no_special in that order
        with self.assertRaisesRegex(ValidationError, "(number).*(uppercase).*(nonalphanumeric)"):
            self.validator.validate("pass", self.user)

    def test_accepts_complex_password(self):
        try:
            self.validator.validate('Password1!', self.user)
        except ValidationError:
            self.fail("PasswordComplexityValidator raised ValidationError unexpectedly")


class TestPasswordHistoryValidator(TestCase):
    def setUp(self):
        self.user = UserFactory.create()
        self.validator = PasswordHistoryValidator()

    def test_accepts_new_password(self):
        try:
            self.validator.validate('test1', self.user)
        except ValidationError:
            self.fail("PasswordHistoryValidator raised ValidationError unexpectedly")

    def test_rejects_repeated_password(self):
        for i in range(0, 13):
            self.user.set_password(f"test{i}")
            self.user.save()
        with self.assertRaises(ValidationError):
            self.validator.validate("test7", self.user)

    def test_keeps_limited_history(self):
        for i in range(0, 13):
            self.user.set_password(f"test{i}")
            self.user.save()
        self.validator.validate("new_password", self.user)
        password_history = UserPasswordHistory.objects.filter(user_id=self.user)
        self.assertEqual(password_history.count(), 12)
get_finance.py
import datetime
import resource

from pandas_datareader import data as pdr
import fix_yahoo_finance as yf

from tickers import test_tickers

# NOTE: resource.RLIMIT_NPROC is the integer *identifier* of the process-count
# limit, not the limit's value; resource.getrlimit(resource.RLIMIT_NPROC)
# would return the actual limit. The expression below is kept as written but
# yields a very small thread count.


def get_all_stock_data(start, end, threads=int(resource.RLIMIT_NPROC * 0.25)):
    assert isinstance(start, datetime.datetime), "Error: start time must be datetime object"
    assert isinstance(end, datetime.datetime), "Error: end time must be datetime object"
    yf.pdr_override()
    data = []
    for t in test_tickers:
        data.append((t, pdr.get_data_yahoo(t, start=start, end=end, threads=threads)))
    return data


def get_stock_data(tick, start, end, threads=int(resource.RLIMIT_NPROC * 0.25)):
    assert isinstance(start, datetime.datetime), "Error: start time must be datetime object"
    assert isinstance(end, datetime.datetime), "Error: end time must be datetime object"
    yf.pdr_override()
    data = []
    if isinstance(tick, str):
        data.append((tick, pdr.get_data_yahoo(tick, start=start, end=end, threads=threads)))
    else:
        for t in tick:
            data.append((t, pdr.get_data_yahoo(t, start=start, end=end, threads=threads)))
    return data
config.go
package config

import (
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/Scalingo/heroku2scalingo/Godeps/_workspace/src/github.com/Scalingo/envconfig"
	"github.com/Scalingo/heroku2scalingo/Godeps/_workspace/src/github.com/Scalingo/go-scalingo"
)

type Config struct {
	ApiUrl     string
	apiHost    string
	ApiVersion string
	ConfigDir  string
	AuthFile   string
}

var (
	env = map[string]string{
		"API_URL":     "https://osc-fr1.scalingo.io",
		"API_VERSION": "1",
		"CONFIG_DIR":  ".config/scalingo",
		"AUTH_FILE":   "auth",
	}
	C Config
)

func init() {
	home := HomeDir()
	if home == "" {
		panic("The HOME environment variable must be defined")
	}
	env["CONFIG_DIR"] = filepath.Join(home, env["CONFIG_DIR"])
	env["AUTH_FILE"] = filepath.Join(env["CONFIG_DIR"], env["AUTH_FILE"])

	for k := range env {
		vEnv := os.Getenv(k)
		if vEnv == "" {
			os.Setenv(k, env[k])
		}
	}

	envconfig.Process("", &C)
	u, err := url.Parse(C.ApiUrl)
	if err != nil {
		panic("API_URL is not a valid URL " + err.Error())
	}
	C.apiHost = strings.Split(u.Host, ":")[0]

	scalingo.ApiAuthenticator = Authenticator
	scalingo.ApiUrl = C.ApiUrl
	scalingo.ApiVersion = C.ApiVersion
}

func HomeDir() string {
	if runtime.GOOS == "windows" {
		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
		if home == "" {
			home = os.Getenv("USERPROFILE")
		}
		return home
	}
	return os.Getenv("HOME")
}
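The init sequence above implements a default-then-parse pattern: seed any missing environment variables with built-in defaults, then let envconfig populate the struct from the now complete environment, so explicit user settings always win. A minimal standalone sketch of the seeding half, with illustrative names:

package main

import (
	"fmt"
	"os"
)

// seedDefaults sets each variable only when it is absent from the
// environment, so values exported by the user take precedence over
// the built-in defaults.
func seedDefaults(defaults map[string]string) {
	for k, v := range defaults {
		if os.Getenv(k) == "" {
			os.Setenv(k, v)
		}
	}
}

func main() {
	seedDefaults(map[string]string{
		"API_URL":     "https://osc-fr1.scalingo.io",
		"API_VERSION": "1",
	})
	fmt.Println(os.Getenv("API_URL"), os.Getenv("API_VERSION"))
}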
encoder_cbor.go
// +build binary_log

package zerolog

// This file contains bindings to do binary encoding.

import (
	"github.com/egtann/zerolog/internal/cbor"
)

var (
	_ encoder = (*cbor.Encoder)(nil)

	enc = cbor.Encoder{}
)

func appendJSON(dst []byte, j []byte) []byte {
	return cbor.AppendEmbeddedJSON(dst, j)
}

// decodeIfBinaryToString - converts a binary formatted log msg to a
// JSON formatted String Log message.
func decodeIfBinaryToString(in []byte) string {
	return cbor.DecodeIfBinaryToString(in)
}

func decodeObjectToStr(in []byte) string {
	return cbor.DecodeObjectToStr(in)
}

// decodeIfBinaryToBytes - converts a binary formatted log msg to a
// JSON formatted Bytes Log message.
func decodeIfBinaryToBytes(in []byte) []byte {
	return cbor.DecodeIfBinaryToBytes(in)
}
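The line `var _ encoder = (*cbor.Encoder)(nil)` above is a compile-time assertion: assigning a typed nil pointer to a blank variable of the interface type makes the build fail if the concrete type ever stops satisfying the interface, at zero runtime cost. A self-contained illustration of the idiom follows; the encoder interface here is a one-method stand-in, since zerolog's real encoder interface is defined elsewhere in the package.

package main

import "fmt"

type encoder interface {
	AppendKey(dst []byte, key string) []byte
}

type cborEncoder struct{}

func (cborEncoder) AppendKey(dst []byte, key string) []byte {
	return append(dst, key...)
}

// Compile-time check: the build breaks on this line if cborEncoder
// ever stops implementing encoder.
var _ encoder = (*cborEncoder)(nil)

func main() {
	fmt.Println(string(cborEncoder{}.AppendKey(nil, "level")))
}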
immer.umd.production.min.js
!function(n,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((n=n||self).immer={})}(this,(function(n){function t(n){for(var t=arguments.length,r=Array(t>1?t-1:0),e=1;e<t;e++)r[e-1]=arguments[e];throw Error("[Immer] minified error nr: "+n+(r.length?" "+r.join(","):"")+". Find the full error at: https://bit.ly/3cXEKWf")}function r(n){return!!n&&!!n[L]}function e(n){return!!n&&(function(n){if(!n||"object"!=typeof n)return!1;var t=Object.getPrototypeOf(n);return!t||t===Object.prototype}(n)||Array.isArray(n)||!!n[H]||!!n.constructor[H]||v(n)||s(n))}function i(n,t,r){void 0===r&&(r=!1),0===u(n)?(r?Object.keys:T)(n).forEach((function(e){r&&"symbol"==typeof e||t(e,n[e],n)})):n.forEach((function(r,e){return t(e,r,n)}))}function u(n){var t=n[L];return t?t.t>3?t.t-4:t.t:Array.isArray(n)?1:v(n)?2:s(n)?3:0}function o(n,t){return 2===u(n)?n.has(t):Object.prototype.hasOwnProperty.call(n,t)}function f(n,t){return 2===u(n)?n.get(t):n[t]}function a(n,t,r){var e=u(n);2===e?n.set(t,r):3===e?(n.delete(t),n.add(r)):n[t]=r}function c(n,t){return n===t?0!==n||1/n==1/t:n!=n&&t!=t}function v(n){return X&&n instanceof Map}function s(n){return q&&n instanceof Set}function
(n){return n.i||n.u}function p(n){if(Array.isArray(n))return Array.prototype.slice.call(n);var t=U(n);delete t[L];for(var r=T(t),e=0;e<r.length;e++){var i=r[e],u=t[i];!1===u.writable&&(u.writable=!0,u.configurable=!0),(u.get||u.set)&&(t[i]={configurable:!0,writable:!0,enumerable:u.enumerable,value:n[i]})}return Object.create(Object.getPrototypeOf(n),t)}function h(n,t){y(n)||r(n)||!e(n)||(u(n)>1&&(n.set=n.add=n.clear=n.delete=d),Object.freeze(n),t&&i(n,(function(n,t){return h(t,!0)}),!0))}function d(){t(2)}function y(n){return null==n||"object"!=typeof n||Object.isFrozen(n)}function _(n){var r=V[n];return r||t(19,n),r}function b(n,t){V[n]=t}function m(){return N}function j(n,t){t&&(_("Patches"),n.o=[],n.v=[],n.s=t)}function O(n){w(n),n.l.forEach(M),n.l=null}function w(n){n===N&&(N=n.p)}function S(n){return N={l:[],p:N,h:n,_:!0,m:0}}function M(n){var t=n[L];0===t.t||1===t.t?t.j():t.O=!0}function P(n,r){r.m=r.l.length;var i=r.l[0],u=void 0!==n&&n!==i;return r.h.S||_("ES5").M(r,n,u),u?(i[L].P&&(O(r),t(4)),e(n)&&(n=g(r,n),r.p||x(r,n)),r.o&&_("Patches").g(i[L],n,r.o,r.v)):n=g(r,i,[]),O(r),r.o&&r.s(r.o,r.v),n!==G?n:void 0}function g(n,t,r){if(y(t))return t;var e=t[L];if(!e)return i(t,(function(i,u){return A(n,e,t,i,u,r)}),!0),t;if(e.A!==n)return t;if(!e.P)return x(n,e.u,!0),e.u;if(!e.R){e.R=!0,e.A.m--;var u=4===e.t||5===e.t?e.i=p(e.k):e.i;i(3===e.t?new Set(u):u,(function(t,i){return A(n,e,u,t,i,r)})),x(n,u,!1),r&&n.o&&_("Patches").F(e,r,n.o,n.v)}return e.i}function A(n,t,i,u,f,c){if(r(f)){var v=g(n,f,c&&t&&3!==t.t&&!o(t.D,u)?c.concat(u):void 0);if(a(i,u,v),!r(v))return;n._=!1}if(e(f)&&!y(f)){if(!n.h.K&&n.m<1)return;g(n,f),t&&t.A.p||x(n,f)}}function x(n,t,r){void 0===r&&(r=!1),n.h.K&&n._&&h(t,r)}function z(n,t){var r=n[L];return(r?l(r):n)[t]}function E(n,t){if(t in n)for(var r=Object.getPrototypeOf(n);r;){var e=Object.getOwnPropertyDescriptor(r,t);if(e)return e;r=Object.getPrototypeOf(r)}}function R(n){n.P||(n.P=!0,n.p&&R(n.p))}function k(n){n.i||(n.i=p(n.u))}function F(n,t,r){var e=v(t)?_("MapSet").$(t,r):s(t)?_("MapSet").C(t,r):n.S?function(n,t){var r=Array.isArray(n),e={t:r?1:0,A:t?t.A:m(),P:!1,R:!1,D:{},p:t,u:n,k:null,i:null,j:null,I:!1},i=e,u=Y;r&&(i=[e],u=Z);var o=Proxy.revocable(i,u),f=o.revoke,a=o.proxy;return e.k=a,e.j=f,a}(t,r):_("ES5").J(t,r);return(r?r.A:m()).l.push(e),e}function D(n){return r(n)||t(22,n),function n(t){if(!e(t))return t;var r,o=t[L],c=u(t);if(o){if(!o.P&&(o.t<4||!_("ES5").N(o)))return o.u;o.R=!0,r=K(t,c),o.R=!1}else r=K(t,c);return i(r,(function(t,e){o&&f(o.u,t)===e||a(r,t,n(e))})),3===c?new Set(r):r}(n)}function K(n,t){switch(t){case 2:return new Map(n);case 3:return Array.from(n)}return p(n)}function $(){function n(n,t){var r=f[n];return r?r.enumerable=t:f[n]=r={configurable:!0,enumerable:t,get:function(){return Y.get(this[L],n)},set:function(t){Y.set(this[L],n,t)}},r}function t(n){for(var t=n.length-1;t>=0;t--){var r=n[t][L];if(!r.P)switch(r.t){case 5:u(r)&&R(r);break;case 4:e(r)&&R(r)}}}function e(n){for(var t=n.u,r=n.k,e=T(r),i=e.length-1;i>=0;i--){var u=e[i];if(u!==L){var f=t[u];if(void 0===f&&!o(t,u))return!0;var a=r[u],v=a&&a[L];if(v?v.u!==f:!c(a,f))return!0}}var s=!!t[L];return e.length!==T(t).length+(s?0:1)}function u(n){var t=n.k;if(t.length!==n.u.length)return!0;var r=Object.getOwnPropertyDescriptor(t,t.length-1);return!(!r||r.get)}var f={};b("ES5",{J:function(t,r){var e=Array.isArray(t),i=function(t,r){if(t){for(var e=Array(r.length),i=0;i<r.length;i++)Object.defineProperty(e,""+i,n(i,!0));return e}var u=U(r);delete u[L];for(var 
o=T(u),f=0;f<o.length;f++){var a=o[f];u[a]=n(a,t||!!u[a].enumerable)}return Object.create(Object.getPrototypeOf(r),u)}(e,t),u={t:e?5:4,A:r?r.A:m(),P:!1,R:!1,D:{},p:r,u:t,k:i,i:null,O:!1,I:!1};return Object.defineProperty(i,L,{value:u,writable:!0}),i},M:function(n,e,f){f?r(e)&&e[L].A===n&&t(n.l):(n.o&&function n(t){if(t&&"object"==typeof t){var r=t[L];if(r){var e=r.u,f=r.k,a=r.D,c=r.t;if(4===c)i(f,(function(t){t!==L&&(void 0!==e[t]||o(e,t)?a[t]||n(f[t]):(a[t]=!0,R(r)))})),i(e,(function(n){void 0!==f[n]||o(f,n)||(a[n]=!1,R(r))}));else if(5===c){if(u(r)&&(R(r),a.length=!0),f.length<e.length)for(var v=f.length;v<e.length;v++)a[v]=!1;else for(var s=e.length;s<f.length;s++)a[s]=!0;for(var l=Math.min(f.length,e.length),p=0;p<l;p++)void 0===a[p]&&n(f[p])}}}}(n.l[0]),t(n.l))},N:function(n){return 4===n.t?e(n):u(n)}})}function C(){function n(t){if(!t||"object"!=typeof t)return t;if(Array.isArray(t))return t.map(n);if(v(t))return new Map(Array.from(t.entries()).map((function(t){return[t[0],n(t[1])]})));if(s(t))return new Set(Array.from(t).map(n));var r=Object.create(Object.getPrototypeOf(t));for(var e in t)r[e]=n(t[e]);return r}function e(t){return r(t)?n(t):t}var a="add";b("Patches",{W:function(r,e){return e.forEach((function(e){for(var i=e.path,o=e.op,c=r,v=0;v<i.length-1;v++)"object"!=typeof(c=f(c,i[v]))&&t(15,i.join("/"));var s=u(c),l=n(e.value),p=i[i.length-1];switch(o){case"replace":switch(s){case 2:return c.set(p,l);case 3:t(16);default:return c[p]=l}case a:switch(s){case 1:return c.splice(p,0,l);case 2:return c.set(p,l);case 3:return c.add(l);default:return c[p]=l}case"remove":switch(s){case 1:return c.splice(p,1);case 2:return c.delete(p);case 3:return c.delete(e.value);default:return delete c[p]}default:t(17,o)}})),r},F:function(n,t,r,u){switch(n.t){case 0:case 4:case 2:return function(n,t,r,u){var c=n.u,v=n.i;i(n.D,(function(n,i){var s=f(c,n),l=f(v,n),p=i?o(c,n)?"replace":a:"remove";if(s!==l||"replace"!==p){var h=t.concat(n);r.push("remove"===p?{op:p,path:h}:{op:p,path:h,value:l}),u.push(p===a?{op:"remove",path:h}:"remove"===p?{op:a,path:h,value:e(s)}:{op:"replace",path:h,value:e(s)})}}))}(n,t,r,u);case 5:case 1:return function(n,t,r,i){var u=n.u,o=n.D,f=n.i;if(f.length<u.length){var c=[f,u];u=c[0],f=c[1];var v=[i,r];r=v[0],i=v[1]}for(var s=0;s<u.length;s++)if(o[s]&&f[s]!==u[s]){var l=t.concat([s]);r.push({op:"replace",path:l,value:e(f[s])}),i.push({op:"replace",path:l,value:e(u[s])})}for(var p=u.length;p<f.length;p++){var h=t.concat([p]);r.push({op:a,path:h,value:e(f[p])})}u.length<f.length&&i.push({op:"replace",path:t.concat(["length"]),value:u.length})}(n,t,r,u);case 3:return function(n,t,r,e){var i=n.u,u=n.i,o=0;i.forEach((function(n){if(!u.has(n)){var i=t.concat([o]);r.push({op:"remove",path:i,value:n}),e.unshift({op:a,path:i,value:n})}o++})),o=0,u.forEach((function(n){if(!i.has(n)){var u=t.concat([o]);r.push({op:a,path:u,value:n}),e.unshift({op:"remove",path:u,value:n})}o++}))}(n,t,r,u)}},g:function(n,t,r,e){r.push({op:"replace",path:[],value:t}),e.push({op:"replace",path:[],value:n.u})}})}function I(){function n(n,t){function r(){this.constructor=n}o(n,t),n.prototype=(r.prototype=t.prototype,new r)}function r(n){n.i||(n.D=new Map,n.i=new Map(n.u))}function i(n){n.i||(n.i=new Set,n.u.forEach((function(t){if(e(t)){var r=F(n.A.h,t,n);n.l.set(t,r),n.i.add(r)}else n.i.add(t)})))}function u(n){n.O&&t(3,JSON.stringify(l(n)))}var o=function(n,t){return(o=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(n,t){n.__proto__=t}||function(n,t){for(var r in 
t)t.hasOwnProperty(r)&&(n[r]=t[r])})(n,t)},f=function(){function t(n,t){return this[L]={t:2,p:t,A:t?t.A:m(),P:!1,R:!1,i:void 0,D:void 0,u:n,k:this,I:!1,O:!1},this}n(t,Map);var i=t.prototype;return Object.defineProperty(i,"size",{get:function(){return l(this[L]).size}}),i.has=function(n){return l(this[L]).has(n)},i.set=function(n,t){var e=this[L];return u(e),l(e).has(n)&&l(e).get(n)===t||(r(e),R(e),e.D.set(n,!0),e.i.set(n,t),e.D.set(n,!0)),this},i.delete=function(n){if(!this.has(n))return!1;var t=this[L];return u(t),r(t),R(t),t.D.set(n,!1),t.i.delete(n),!0},i.clear=function(){var n=this[L];return u(n),r(n),R(n),n.D=new Map,n.i.clear()},i.forEach=function(n,t){var r=this;l(this[L]).forEach((function(e,i){n.call(t,r.get(i),i,r)}))},i.get=function(n){var t=this[L];u(t);var i=l(t).get(n);if(t.R||!e(i))return i;if(i!==t.u.get(n))return i;var o=F(t.A.h,i,t);return r(t),t.i.set(n,o),o},i.keys=function(){return l(this[L]).keys()},i.values=function(){var n,t=this,r=this.keys();return(n={})[Q]=function(){return t.values()},n.next=function(){var n=r.next();return n.done?n:{done:!1,value:t.get(n.value)}},n},i.entries=function(){var n,t=this,r=this.keys();return(n={})[Q]=function(){return t.entries()},n.next=function(){var n=r.next();if(n.done)return n;var e=t.get(n.value);return{done:!1,value:[n.value,e]}},n},i[Q]=function(){return this.entries()},t}(),a=function(){function t(n,t){return this[L]={t:3,p:t,A:t?t.A:m(),P:!1,R:!1,i:void 0,u:n,k:this,l:new Map,O:!1,I:!1},this}n(t,Set);var r=t.prototype;return Object.defineProperty(r,"size",{get:function(){return l(this[L]).size}}),r.has=function(n){var t=this[L];return u(t),t.i?!!t.i.has(n)||!(!t.l.has(n)||!t.i.has(t.l.get(n))):t.u.has(n)},r.add=function(n){var t=this[L];return u(t),this.has(n)||(i(t),R(t),t.i.add(n)),this},r.delete=function(n){if(!this.has(n))return!1;var t=this[L];return u(t),i(t),R(t),t.i.delete(n)||!!t.l.has(n)&&t.i.delete(t.l.get(n))},r.clear=function(){var n=this[L];return u(n),i(n),R(n),n.i.clear()},r.values=function(){var n=this[L];return u(n),i(n),n.i.values()},r.entries=function(){var n=this[L];return u(n),i(n),n.i.entries()},r.keys=function(){return this.values()},r[Q]=function(){return this.values()},r.forEach=function(n,t){for(var r=this.values(),e=r.next();!e.done;)n.call(t,e.value,e.value,this),e=r.next()},t}();b("MapSet",{$:function(n,t){return new f(n,t)},C:function(n,t){return new a(n,t)}})}var J,N,W="undefined"!=typeof Symbol&&"symbol"==typeof Symbol("x"),X="undefined"!=typeof Map,q="undefined"!=typeof Set,B="undefined"!=typeof Proxy&&void 0!==Proxy.revocable&&"undefined"!=typeof Reflect,G=W?Symbol.for("immer-nothing"):((J={})["immer-nothing"]=!0,J),H=W?Symbol.for("immer-draftable"):"__$immer_draftable",L=W?Symbol.for("immer-state"):"__$immer_state",Q="undefined"!=typeof Symbol&&Symbol.iterator||"@@iterator",T="undefined"!=typeof Reflect&&Reflect.ownKeys?Reflect.ownKeys:void 0!==Object.getOwnPropertySymbols?function(n){return Object.getOwnPropertyNames(n).concat(Object.getOwnPropertySymbols(n))}:Object.getOwnPropertyNames,U=Object.getOwnPropertyDescriptors||function(n){var t={};return T(n).forEach((function(r){t[r]=Object.getOwnPropertyDescriptor(n,r)})),t},V={},Y={get:function(n,t){if(t===L)return n;var r=l(n);if(!o(r,t))return function(n,t,r){var e,i=E(t,r);return i?"value"in i?i.value:null===(e=i.get)||void 0===e?void 0:e.call(n.k):void 0}(n,r,t);var i=r[t];return n.R||!e(i)?i:i===z(n.u,t)?(k(n),n.i[t]=F(n.A.h,i,n)):i},has:function(n,t){return t in l(n)},ownKeys:function(n){return 
Reflect.ownKeys(l(n))},set:function(n,t,r){var e=E(l(n),t);if(null==e?void 0:e.set)return e.set.call(n.k,r),!0;if(n.D[t]=!0,!n.P){if(c(r,z(l(n),t))&&(void 0!==r||o(n.u,t)))return!0;k(n),R(n)}return n.i[t]=r,!0},deleteProperty:function(n,t){return void 0!==z(n.u,t)||t in n.u?(n.D[t]=!1,k(n),R(n)):delete n.D[t],n.i&&delete n.i[t],!0},getOwnPropertyDescriptor:function(n,t){var r=l(n),e=Reflect.getOwnPropertyDescriptor(r,t);return e?{writable:!0,configurable:1!==n.t||"length"!==t,enumerable:e.enumerable,value:r[t]}:e},defineProperty:function(){t(11)},getPrototypeOf:function(n){return Object.getPrototypeOf(n.u)},setPrototypeOf:function(){t(12)}},Z={};i(Y,(function(n,t){Z[n]=function(){return arguments[0]=arguments[0][0],t.apply(this,arguments)}})),Z.deleteProperty=function(n,t){return Y.deleteProperty.call(this,n[0],t)},Z.set=function(n,t,r){return Y.set.call(this,n[0],t,r,n[0])};var nn=function(){function n(n){this.S=B,this.K=!1,"boolean"==typeof(null==n?void 0:n.useProxies)&&this.setUseProxies(n.useProxies),"boolean"==typeof(null==n?void 0:n.autoFreeze)&&this.setAutoFreeze(n.autoFreeze),this.produce=this.produce.bind(this),this.produceWithPatches=this.produceWithPatches.bind(this)}var i=n.prototype;return i.produce=function(n,r,i){if("function"==typeof n&&"function"!=typeof r){var u=r;r=n;var o=this;return function(n){var t=this;void 0===n&&(n=u);for(var e=arguments.length,i=Array(e>1?e-1:0),f=1;f<e;f++)i[f-1]=arguments[f];return o.produce(n,(function(n){var e;return(e=r).call.apply(e,[t,n].concat(i))}))}}var f;if("function"!=typeof r&&t(6),void 0!==i&&"function"!=typeof i&&t(7),e(n)){var a=S(this),c=F(this,n,void 0),v=!0;try{f=r(c),v=!1}finally{v?O(a):w(a)}return"undefined"!=typeof Promise&&f instanceof Promise?f.then((function(n){return j(a,i),P(n,a)}),(function(n){throw O(a),n})):(j(a,i),P(f,a))}if(!n||"object"!=typeof n){if((f=r(n))===G)return;return void 0===f&&(f=n),this.K&&h(f,!0),f}t(21,n)},i.produceWithPatches=function(n,t){var r,e,i=this;return"function"==typeof n?function(t){for(var r=arguments.length,e=Array(r>1?r-1:0),u=1;u<r;u++)e[u-1]=arguments[u];return i.produceWithPatches(t,(function(t){return n.apply(void 0,[t].concat(e))}))}:[this.produce(n,t,(function(n,t){r=n,e=t})),r,e]},i.createDraft=function(n){e(n)||t(8),r(n)&&(n=D(n));var i=S(this),u=F(this,n,void 0);return u[L].I=!0,w(i),u},i.finishDraft=function(n,t){var r=(n&&n[L]).A;return j(r,t),P(void 0,r)},i.setAutoFreeze=function(n){this.K=n},i.setUseProxies=function(n){n&&!B&&t(20),this.S=n},i.applyPatches=function(n,t){var e;for(e=t.length-1;e>=0;e--){var i=t[e];if(0===i.path.length&&"replace"===i.op){n=i.value;break}}var u=_("Patches").W;return r(n)?u(n,t):this.produce(n,(function(n){return u(n,t.slice(e+1))}))},n}(),tn=new nn,rn=tn.produce,en=tn.produceWithPatches.bind(tn),un=tn.setAutoFreeze.bind(tn),on=tn.setUseProxies.bind(tn),fn=tn.applyPatches.bind(tn),an=tn.createDraft.bind(tn),cn=tn.finishDraft.bind(tn);n.Immer=nn,n.applyPatches=fn,n.castDraft=function(n){return n},n.castImmutable=function(n){return n},n.createDraft=an,n.current=D,n.default=rn,n.enableAllPlugins=function(){$(),I(),C()},n.enableES5=$,n.enableMapSet=I,n.enablePatches=C,n.finishDraft=cn,n.immerable=H,n.isDraft=r,n.isDraftable=e,n.nothing=G,n.original=function(n){return r(n)||t(23,n),n[L].u},n.produce=rn,n.produceWithPatches=en,n.setAutoFreeze=un,n.setUseProxies=on,Object.defineProperty(n,"__esModule",{value:!0})})); //# sourceMappingURL=immer.umd.production.min.js.map
sqlResourceSqlUserDefinedFunction.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***

package v20210115

import (
	"context"
	"reflect"

	"github.com/pkg/errors"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

// An Azure Cosmos DB userDefinedFunction.
type SqlResourceSqlUserDefinedFunction struct {
	pulumi.CustomResourceState

	// The location of the resource group to which the resource belongs.
	Location pulumi.StringPtrOutput `pulumi:"location"`
	// The name of the ARM resource.
	Name     pulumi.StringOutput                                           `pulumi:"name"`
	Resource SqlUserDefinedFunctionGetPropertiesResponseResourcePtrOutput `pulumi:"resource"`
	// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
	Tags pulumi.StringMapOutput `pulumi:"tags"`
	// The type of Azure resource.
	Type pulumi.StringOutput `pulumi:"type"`
}

// NewSqlResourceSqlUserDefinedFunction registers a new resource with the given unique name, arguments, and options.
func NewSqlResourceSqlUserDefinedFunction(ctx *pulumi.Context, name string, args *SqlResourceSqlUserDefinedFunctionArgs, opts ...pulumi.ResourceOption) (*SqlResourceSqlUserDefinedFunction, error)

// GetSqlResourceSqlUserDefinedFunction gets an existing SqlResourceSqlUserDefinedFunction resource's state with the
// given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func GetSqlResourceSqlUserDefinedFunction(ctx *pulumi.Context, name string, id pulumi.IDInput,
	state *SqlResourceSqlUserDefinedFunctionState, opts ...pulumi.ResourceOption) (*SqlResourceSqlUserDefinedFunction, error) {
	var resource SqlResourceSqlUserDefinedFunction
	err := ctx.ReadResource("azure-native:documentdb/v20210115:SqlResourceSqlUserDefinedFunction", name, id, state, &resource, opts...)
	if err != nil {
		return nil, err
	}
	return &resource, nil
}

// Input properties used for looking up and filtering SqlResourceSqlUserDefinedFunction resources.
type sqlResourceSqlUserDefinedFunctionState struct {
	// The location of the resource group to which the resource belongs.
	Location *string `pulumi:"location"`
	// The name of the ARM resource.
	Name     *string                                              `pulumi:"name"`
	Resource *SqlUserDefinedFunctionGetPropertiesResponseResource `pulumi:"resource"`
	// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
	Tags map[string]string `pulumi:"tags"`
	// The type of Azure resource.
	Type *string `pulumi:"type"`
}

type SqlResourceSqlUserDefinedFunctionState struct {
	// The location of the resource group to which the resource belongs.
	Location pulumi.StringPtrInput
	// The name of the ARM resource.
	Name     pulumi.StringPtrInput
	Resource SqlUserDefinedFunctionGetPropertiesResponseResourcePtrInput
	// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
	Tags pulumi.StringMapInput
	// The type of Azure resource.
	Type pulumi.StringPtrInput
}

func (SqlResourceSqlUserDefinedFunctionState) ElementType() reflect.Type {
	return reflect.TypeOf((*sqlResourceSqlUserDefinedFunctionState)(nil)).Elem()
}

type sqlResourceSqlUserDefinedFunctionArgs struct {
	// Cosmos DB database account name.
	AccountName string `pulumi:"accountName"`
	// Cosmos DB container name.
	ContainerName string `pulumi:"containerName"`
	// Cosmos DB database name.
	DatabaseName string `pulumi:"databaseName"`
	// The location of the resource group to which the resource belongs.
	Location *string `pulumi:"location"`
	// A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
	Options *CreateUpdateOptions `pulumi:"options"`
	// The standard JSON format of a userDefinedFunction
	Resource SqlUserDefinedFunctionResource `pulumi:"resource"`
	// The name of the resource group. The name is case insensitive.
	ResourceGroupName string `pulumi:"resourceGroupName"`
	// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
	Tags map[string]string `pulumi:"tags"`
	// Cosmos DB userDefinedFunction name.
	UserDefinedFunctionName *string `pulumi:"userDefinedFunctionName"`
}

// The set of arguments for constructing a SqlResourceSqlUserDefinedFunction resource.
type SqlResourceSqlUserDefinedFunctionArgs struct {
	// Cosmos DB database account name.
	AccountName pulumi.StringInput
	// Cosmos DB container name.
	ContainerName pulumi.StringInput
	// Cosmos DB database name.
	DatabaseName pulumi.StringInput
	// The location of the resource group to which the resource belongs.
	Location pulumi.StringPtrInput
	// A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
	Options CreateUpdateOptionsPtrInput
	// The standard JSON format of a userDefinedFunction
	Resource SqlUserDefinedFunctionResourceInput
	// The name of the resource group. The name is case insensitive.
	ResourceGroupName pulumi.StringInput
	// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
	Tags pulumi.StringMapInput
	// Cosmos DB userDefinedFunction name.
UserDefinedFunctionName pulumi.StringPtrInput } func (SqlResourceSqlUserDefinedFunctionArgs) ElementType() reflect.Type { return reflect.TypeOf((*sqlResourceSqlUserDefinedFunctionArgs)(nil)).Elem() } type SqlResourceSqlUserDefinedFunctionInput interface { pulumi.Input ToSqlResourceSqlUserDefinedFunctionOutput() SqlResourceSqlUserDefinedFunctionOutput ToSqlResourceSqlUserDefinedFunctionOutputWithContext(ctx context.Context) SqlResourceSqlUserDefinedFunctionOutput } func (*SqlResourceSqlUserDefinedFunction) ElementType() reflect.Type { return reflect.TypeOf((*SqlResourceSqlUserDefinedFunction)(nil)) } func (i *SqlResourceSqlUserDefinedFunction) ToSqlResourceSqlUserDefinedFunctionOutput() SqlResourceSqlUserDefinedFunctionOutput { return i.ToSqlResourceSqlUserDefinedFunctionOutputWithContext(context.Background()) } func (i *SqlResourceSqlUserDefinedFunction) ToSqlResourceSqlUserDefinedFunctionOutputWithContext(ctx context.Context) SqlResourceSqlUserDefinedFunctionOutput { return pulumi.ToOutputWithContext(ctx, i).(SqlResourceSqlUserDefinedFunctionOutput) } type SqlResourceSqlUserDefinedFunctionOutput struct { *pulumi.OutputState } func (SqlResourceSqlUserDefinedFunctionOutput) ElementType() reflect.Type { return reflect.TypeOf((*SqlResourceSqlUserDefinedFunction)(nil)) } func (o SqlResourceSqlUserDefinedFunctionOutput) ToSqlResourceSqlUserDefinedFunctionOutput() SqlResourceSqlUserDefinedFunctionOutput { return o } func (o SqlResourceSqlUserDefinedFunctionOutput) ToSqlResourceSqlUserDefinedFunctionOutputWithContext(ctx context.Context) SqlResourceSqlUserDefinedFunctionOutput { return o } func init() { pulumi.RegisterOutputType(SqlResourceSqlUserDefinedFunctionOutput{}) }
{ if args == nil { return nil, errors.New("missing one or more required arguments") } if args.AccountName == nil { return nil, errors.New("invalid value for required argument 'AccountName'") } if args.ContainerName == nil { return nil, errors.New("invalid value for required argument 'ContainerName'") } if args.DatabaseName == nil { return nil, errors.New("invalid value for required argument 'DatabaseName'") } if args.Resource == nil { return nil, errors.New("invalid value for required argument 'Resource'") } if args.ResourceGroupName == nil { return nil, errors.New("invalid value for required argument 'ResourceGroupName'") } aliases := pulumi.Aliases([]pulumi.Alias{ { Type: pulumi.String("azure-nextgen:documentdb/v20210115:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/latest:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/latest:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20190801:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20190801:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20191212:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20191212:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20200301:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20200301:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20200401:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20200401:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20200601preview:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20200601preview:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20200901:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20200901:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-native:documentdb/v20210301preview:SqlResourceSqlUserDefinedFunction"), }, { Type: pulumi.String("azure-nextgen:documentdb/v20210301preview:SqlResourceSqlUserDefinedFunction"), }, }) opts = append(opts, aliases) var resource SqlResourceSqlUserDefinedFunction err := ctx.RegisterResource("azure-native:documentdb/v20210115:SqlResourceSqlUserDefinedFunction", name, args, &resource, opts...) if err != nil { return nil, err } return &resource, nil }
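// Editor's note: a hedged usage sketch, not part of the generated file above. It
// assumes it lives in (or imports) this generated package, and that the codegen also
// emits SqlUserDefinedFunctionResourceArgs as the input type for the Resource field
// (the usual Pulumi naming); all resource names below are hypothetical.
func deployExampleUdf(ctx *pulumi.Context) error {
	_, err := NewSqlResourceSqlUserDefinedFunction(ctx, "udf", &SqlResourceSqlUserDefinedFunctionArgs{
		ResourceGroupName: pulumi.String("example-rg"),
		AccountName:       pulumi.String("example-account"),
		DatabaseName:      pulumi.String("example-db"),
		ContainerName:     pulumi.String("example-container"),
		Resource: SqlUserDefinedFunctionResourceArgs{
			Id:   pulumi.String("multiplyBy2"),
			Body: pulumi.String("function multiplyBy2(x) { return 2 * x; }"),
		},
	})
	return err
}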
test_seq_dataset.py
# Author: Tom Dupre la Tour # Joan Massich <[email protected]> # # License: BSD 3 clause import numpy as np import pytest import scipy.sparse as sp from numpy.testing import assert_array_equal from sklearn.utils._seq_dataset import ( ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64) from sklearn.datasets import load_iris from sklearn.utils._testing import assert_allclose iris = load_iris() X64 = iris.data.astype(np.float64) y64 = iris.target.astype(np.float64) X_csr64 = sp.csr_matrix(X64) sample_weight64 = np.arange(y64.size, dtype=np.float64) X32 = iris.data.astype(np.float32) y32 = iris.target.astype(np.float32) X_csr32 = sp.csr_matrix(X32) sample_weight32 = np.arange(y32.size, dtype=np.float32) def assert_csr_equal_values(current, expected): current.eliminate_zeros() expected.eliminate_zeros() expected = expected.astype(current.dtype) assert current.shape[0] == expected.shape[0] assert current.shape[1] == expected.shape[1] assert_array_equal(current.data, expected.data) assert_array_equal(current.indices, expected.indices) assert_array_equal(current.indptr, expected.indptr) def make_dense_dataset_32(): return ArrayDataset32(X32, y32, sample_weight32, seed=42) def make_dense_dataset_64(): return ArrayDataset64(X64, y64, sample_weight64, seed=42) def make_sparse_dataset_32(): return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32, sample_weight32, seed=42) def make_sparse_dataset_64(): return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64, sample_weight64, seed=42) @pytest.mark.parametrize('dataset_constructor', [ make_dense_dataset_32, make_dense_dataset_64, make_sparse_dataset_32, make_sparse_dataset_64, ]) def test_seq_dataset_basic_iteration(dataset_constructor): NUMBER_OF_RUNS = 5 dataset = dataset_constructor() for _ in range(NUMBER_OF_RUNS): # next sample xi_, yi, swi, idx = dataset._next_py() xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1])) assert_csr_equal_values(xi, X_csr64[idx]) assert yi == y64[idx] assert swi == sample_weight64[idx] # random sample xi_, yi, swi, idx = dataset._random_py() xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1])) assert_csr_equal_values(xi, X_csr64[idx]) assert yi == y64[idx] assert swi == sample_weight64[idx] @pytest.mark.parametrize('make_dense_dataset,make_sparse_dataset', [ (make_dense_dataset_32, make_sparse_dataset_32), (make_dense_dataset_64, make_sparse_dataset_64), ]) def test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset): dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset() # not shuffled for i in range(5): _, _, _, idx1 = dense_dataset._next_py() _, _, _, idx2 = sparse_dataset._next_py() assert idx1 == i assert idx2 == i for i in [132, 50, 9, 18, 58]: _, _, _, idx1 = dense_dataset._random_py() _, _, _, idx2 = sparse_dataset._random_py() assert idx1 == i assert idx2 == i seed = 77 dense_dataset._shuffle_py(seed) sparse_dataset._shuffle_py(seed) idx_next = [63, 91, 148, 87, 29] idx_shuffle = [137, 125, 56, 121, 127] for i, j in zip(idx_next, idx_shuffle): _, _, _, idx1 = dense_dataset._next_py() _, _, _, idx2 = sparse_dataset._next_py() assert idx1 == i assert idx2 == i _, _, _, idx1 = dense_dataset._random_py() _, _, _, idx2 = sparse_dataset._random_py() assert idx1 == j assert idx2 == j @pytest.mark.parametrize('make_dataset_32,make_dataset_64', [ (make_dense_dataset_32, make_dense_dataset_64), (make_sparse_dataset_32, make_sparse_dataset_64), ]) def test_fused_types_consistency(make_dataset_32, make_dataset_64):
dataset_32, dataset_64 = make_dataset_32(), make_dataset_64() NUMBER_OF_RUNS = 5 for _ in range(NUMBER_OF_RUNS): # next sample (xi_data32, _, _), yi32, _, _ = dataset_32._next_py() (xi_data64, _, _), yi64, _, _ = dataset_64._next_py() assert xi_data32.dtype == np.float32 assert xi_data64.dtype == np.float64 assert_allclose(xi_data64, xi_data32, rtol=1e-5) assert_allclose(yi64, yi32, rtol=1e-5)
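# Editor's note: a small, hedged illustration, separate from the tests above, of the
# access pattern they exercise: a SequentialDataset exposes a private _next_py()
# helper returning (x, y, sample_weight, index), where x is a (data, indices, indptr)
# triple describing a one-row CSR slice. Private sklearn APIs may change between
# releases, so treat this as a sketch only.
import numpy as np
from sklearn.utils._seq_dataset import ArrayDataset64

X = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
y = np.array([0.0, 1.0], dtype=np.float64)
w = np.ones_like(y)

ds = ArrayDataset64(X, y, w, seed=0)
(x_data, x_indices, x_indptr), yi, wi, idx = ds._next_py()
assert yi == y[idx] and wi == w[idx]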
section5.rs
use std::cell::RefCell; use std::fmt::Display; pub trait Messager { fn send(&self, msg: &str); } struct MockMessenger { sent_messages: RefCell<Vec<String>>, //sent_messages: Vec<String>, } impl MockMessenger { fn new() -> MockMessenger { MockMessenger { sent_messages: RefCell::new(vec![]) } } fn demo(&self) { let mut one_borrow = self.sent_messages.borrow_mut(); //let mut two_borrow = self.sent_messages.borrow_mut(); one_borrow.push(String::from("abc")); //two_borrow.push(String::from("123")); } }
    fn send(&self, message: &str) {
        // Error: we cannot take a mutable borrow of self.sent_messages through the
        // immutable &self receiver (push requires a mutable reference), so we go
        // through RefCell::borrow_mut instead.
        //self.sent_messages.push(String::from(message));
        self.sent_messages.borrow_mut().push(String::from(message));
    }
}

pub fn demo1() {
    let obj = MockMessenger::new();
    obj.demo();
}
impl Messager for MockMessenger {
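// Editor's note: a standalone sketch, separate from the module above, of the runtime
// rule the commented-out `two_borrow` in demo() would violate: RefCell defers Rust's
// borrow checking to run time, so holding two live borrow_mut() guards panics with a
// BorrowMutError instead of failing to compile.
use std::cell::RefCell;

fn refcell_demo() {
    let messages = RefCell::new(vec![String::from("abc")]);
    {
        let mut first = messages.borrow_mut();
        first.push(String::from("123"));
        // let mut second = messages.borrow_mut(); // would panic at run time
    } // the first guard is dropped here, releasing the mutable borrow
    assert_eq!(messages.borrow().len(), 2);
}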
add.rs
use ::prelude::*; fn add(model: &Vec<u8>, a: i32, b: i32) -> Result<i32> { let mut x = Tensor::new(&[1]); let mut y = Tensor::new(&[1]); x[0] = a; y[0] = b; let mut graph = Graph::new(); graph.import_graph_def(model, &ImportGraphDefOptions::new()).map_err(|e| e.to_string())?; let mut session = Session::new(&SessionOptions::new(), &graph).map_err(|e| e.to_string())?; let mut step = StepWithGraph::new(); step.add_input(&graph.operation_by_name_required("x").map_err(|e| e.to_string())?, 0, &x); step.add_input(&graph.operation_by_name_required("y").map_err(|e| e.to_string())?, 0, &y); let output = step.request_output(&graph.operation_by_name_required("z").map_err(|e| e.to_string())?, 0); session.run(&mut step).map_err(|e| e.to_string())?; let result = step.take_output(output).map_err(|e| e.to_string())?[0]; Ok(result) } #[cfg(test)] mod tests { use ::prelude::*; use super::*; #[test] fn add_should_return_the_expected_result() { let a = 2; let b = 3; let expected = 5; let model = Model::from_path("models/addition.pb").unwrap(); let result = add(&model.into(), a, b);
        assert_eq!(expected, result.unwrap());
    }
}
        println!("{:?}", result);
        assert!(result.is_ok());
app.py
import abc from typing import Union from ..master.master import Master, _Shards from ..master.client import MasterClient from ..shard.client import ShardClient from ..core.client import ClientError from ..core.typing import Key, Doc, Hash class AbstractResult(abc.ABC): @abc.abstractmethod def result(self) -> Union[int, Doc]: ... @abc.abstractmethod def hash(self) -> Hash: ... @abc.abstractmethod def __iter__(self): ... class PyshardABC(abc.ABC): @abc.abstractmethod def write(self, index, key: Key, doc: Doc) -> AbstractResult: ... @abc.abstractmethod def read(self, index, key: Key) -> AbstractResult: ... @abc.abstractmethod def pop(self, index, key: Key) -> AbstractResult: ... @abc.abstractmethod def remove(self, index, key: Key) -> AbstractResult: ... @abc.abstractmethod def create_index(self, index): ... def _map_shards(bootstrap_client, **kwargs): shard_map = {} map_ = bootstrap_client.get_map() for bin, addr in map_.items(): shard_map[float(bin)] = ShardClient(*addr, **kwargs) return _Shards(shard_map) class Result(AbstractResult): def __init__(self, result, hash_): self._result = result self._hash = hash_ @property def result(self): return self._result @property def hash(self): return self._hash def __iter__(self): yield from [self.result, self.hash] class Pyshard(PyshardABC): def __init__(self, bootstrap_server, buffer_size=1024, master_class=Master, **master_args): self._bootstrap_client = MasterClient(*bootstrap_server, buffer_size=buffer_size) shards = _map_shards(self._bootstrap_client) # TODO: add ShardClient kwargs self._master = master_class(shards=shards, **master_args) def write(self, index, key, doc) -> Result: hash_, shard = self._master.get_shard(index, key) try: offset = shard.write(index, key, hash_, doc) except ClientError as err: # log warning: err res = 0 else: res = offset return Result(res, hash_) def has(self, index, key) -> Result: hash_, shard = self._master.get_shard(index, key) return Result(shard.has(index, key), hash_) def read(self, index, key) -> Result: hash_, shard = self._master.get_shard(index, key) try: doc = shard.read(index, key) except ClientError as err: # log warning: err res = None else:
return Result(res, hash_) def pop(self, index, key) -> Result: hash_, shard = self._master.get_shard(index, key) try: doc = shard.pop(index, key) except ClientError as err: # log warning: err res = None else: res = doc return Result(res, hash_) def remove(self, index, key) -> Result: hash_, shard = self._master.get_shard(index, key) try: offset = shard.remove(index, key) except ClientError as err: # log warning: err res = 0 else: res = offset return Result(res, hash_) def create_index(self, index): self._master.create_index(index) def drop_index(self, index): self._master.drop_index(index) def keys(self, index): for shard in self._master.shards: for key in shard.keys(index): yield key def close(self): self._bootstrap_client.close() self._master.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close()
res = doc
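# Editor's note: a hedged usage sketch, separate from the module above. The import
# path, bootstrap address, and index/key/doc values are hypothetical, and a running
# master plus shard servers are assumed. Result iterates as [result, hash], so each
# call unpacks into two values.
from pyshard.app import Pyshard  # assumed import path

with Pyshard(bootstrap_server=('localhost', 5050)) as client:
    client.create_index('users')
    offset, hash_ = client.write('users', 'alice', {'age': 30})
    doc, _ = client.read('users', 'alice')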
role_bindings.go
// Copyright 2020-2021 Clastix Labs // SPDX-License-Identifier: Apache-2.0 package controllers import ( "context" "fmt" "strings" "time" capsulev1beta1 "github.com/clastix/capsule/api/v1beta1" "github.com/pkg/errors" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "github.com/clastix/capsule-proxy/internal/request" ) const subjectIndex = "subjectIndex" type RoleBindingReflector struct { store cache.Indexer reflector *cache.Reflector } func NewRoleBindingReflector(config *rest.Config, resyncPeriod time.Duration) (*RoleBindingReflector, error) { clientset, err := kubernetes.NewForConfig(config) if err != nil { return nil, errors.Wrap(err, "cannot create kubernetes clientset") } watcher := cache.NewListWatchFromClient(clientset.RbacV1().RESTClient(), "rolebindings", "", fields.Everything()) store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{subjectIndex: OwnerRoleBindingsIndexFunc}) reflector := cache.NewReflector(watcher, &rbacv1.RoleBinding{}, store, resyncPeriod) return &RoleBindingReflector{ store: store, reflector: reflector, }, nil } func (r *RoleBindingReflector) GetUserNamespacesFromRequest(req request.Request) ([]string, error) { var err error username, groups, _ := req.GetUserAndGroups() namespaces := sets.NewString() userOwnerKind := capsulev1beta1.UserOwner var userRoleBindings []interface{} if strings.HasPrefix(username, serviceaccount.ServiceAccountUsernamePrefix) { userOwnerKind = capsulev1beta1.ServiceAccountOwner _, username, err = serviceaccount.SplitUsername(username) if err != nil { return nil, errors.Wrap(err, "Unable to parse serviceAccount name") } } userRoleBindings, err = r.store.ByIndex(subjectIndex, fmt.Sprintf("%s-%s", userOwnerKind, username)) if err != nil { return nil, errors.Wrap(err, "Unable to find rolebindings in index for user") } for _, rb := range userRoleBindings { rb := rb.(*rbacv1.RoleBinding) namespaces.Insert(rb.GetNamespace()) } for _, group := range groups { groupRoleBindings, err := r.store.ByIndex(subjectIndex, fmt.Sprintf("%s-%s", capsulev1beta1.GroupOwner, group)) if err != nil { return nil, errors.Wrap(err, "Unable to find rolebindings in index for groups") } for _, rb := range groupRoleBindings { rb := rb.(*rbacv1.RoleBinding) namespaces.Insert(rb.GetNamespace()) } } return namespaces.List(), nil } func (r *RoleBindingReflector) Start(ctx context.Context) error { r.reflector.Run(ctx.Done()) return nil } func
(obj interface{}) (result []string, err error) {
	rb := obj.(*rbacv1.RoleBinding)

	for _, subject := range rb.Subjects {
		result = append(result, fmt.Sprintf("%s-%s", subject.Kind, subject.Name))
	}

	return result, nil
}
OwnerRoleBindingsIndexFunc
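// Editor's note: a hedged wiring sketch, separate from the package above. It shows
// the intended lifecycle: build the reflector from a rest.Config, run it until the
// context is cancelled, then resolve namespaces per request. The resync period and
// the req value are hypothetical, and the store may still be syncing when queried.
func exampleReflectorUsage(ctx context.Context, cfg *rest.Config, req request.Request) ([]string, error) {
	reflector, err := NewRoleBindingReflector(cfg, 10*time.Minute)
	if err != nil {
		return nil, err
	}

	// Start blocks until ctx is done, so it runs in its own goroutine.
	go func() {
		_ = reflector.Start(ctx)
	}()

	return reflector.GetUserNamespacesFromRequest(req)
}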
tasks_api.py
from .celery_app import celeryApp import logging import copy import os import json import jsonpickle from a2ml.api.utils.context import Context from a2ml.api.a2ml import A2ML from a2ml.api.a2ml_dataset import A2MLDataset from a2ml.api.a2ml_experiment import A2MLExperiment from a2ml.api.a2ml_model import A2MLModel from a2ml.api.a2ml_project import A2MLProject from a2ml.server.notification import SyncSender notificator = SyncSender() def create_context(params, new_project=False): if params.get('context'): ctx = jsonpickle.decode(params['context']) ctx.set_runs_on_server(True) ctx.config.set('config', 'use_server', False)
ctx.setup_logger(format='') else: # For Tasks Test Only! project_path = os.path.join( os.environ.get('A2ML_PROJECT_PATH', ''), params.get('project_name') ) ctx = Context(path=project_path, debug = params.get("debug_log", False)) if not new_project: if params.get("provider"): ctx.config.set('config', 'providers', [params.get("provider")]) if params.get("source_path"): ctx.config.set('config', 'source', params.get("source_path")) tmp_dir = os.path.join(os.path.dirname(__file__), 'tmp') if not os.path.exists(tmp_dir): os.makedirs(tmp_dir) # For Azure, since it package current directory os.chdir(tmp_dir) return ctx def __handle_task_result(self, status, retval, task_id, args, kwargs, einfo): request_id = args[0]['_request_id'] if status == 'SUCCESS': notificator.publish_result(request_id, status, retval) else: notificator.publish_result( request_id, status, __error_to_result(retval, einfo) ) def execute_tasks(tasks_func, params): if os.environ.get('TEST_CALL_CELERY_TASKS'): return tasks_func(params) else: ar = tasks_func.delay(params) return ar.get() # Projects @celeryApp.task(after_return=__handle_task_result) def new_project_task(params): return with_context( params, lambda ctx: A2MLProject(ctx, None).create(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def list_projects_task(params): def func(ctx): res = A2MLProject(ctx, None).list(*params['args'], **params['kwargs']) return __map_collection_to_name(res, 'projects') return with_context(params, func) @celeryApp.task(after_return=__handle_task_result) def delete_project_task(params): return with_context( params, lambda ctx: A2MLProject(ctx, None).delete(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def select_project_task(params): return with_context( params, lambda ctx: A2MLProject(ctx, None).select(*params['args'], **params['kwargs']) ) # Datasets @celeryApp.task(after_return=__handle_task_result) def new_dataset_task(params): return with_context( params, lambda ctx: A2MLDataset(ctx, None).create(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def list_datasets_task(params): def func(ctx): res = A2MLDataset(ctx, None).list(*params['args'], **params['kwargs']) return __map_collection_to_name(res, 'datasets') return with_context(params, func) @celeryApp.task(after_return=__handle_task_result) def delete_dataset_task(params): return with_context( params, lambda ctx: A2MLDataset(ctx, None).delete(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def select_dataset_task(params): return with_context( params, lambda ctx: A2MLDataset(ctx, None).select(*params['args'], **params['kwargs']) ) # Experiment @celeryApp.task(after_return=__handle_task_result) def list_experiments_task(params): def func(ctx): res = A2MLExperiment(ctx, None).list(*params['args'], **params['kwargs']) return __map_collection_to_name(res, 'experiments') return with_context(params, func) @celeryApp.task(after_return=__handle_task_result) def leaderboard_experiment_task(params): return with_context( params, lambda ctx: A2MLExperiment(ctx, None).leaderboard(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def history_experiment_task(params): return with_context( params, lambda ctx: A2MLExperiment(ctx, None).history(*params['args'], **params['kwargs']) ) @celeryApp.task(after_return=__handle_task_result) def start_experiment_task(params): return with_context( params, lambda 
ctx: A2MLExperiment(ctx, None).start(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def stop_experiment_task(params):
    return with_context(
        params,
        lambda ctx: A2MLExperiment(ctx, None).stop(*params['args'], **params['kwargs'])
    )

# Models

@celeryApp.task(after_return=__handle_task_result)
def actual_model_task(params):
    return with_context(
        params,
        lambda ctx: A2MLModel(ctx, None).actual(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def deploy_model_task(params):
    return with_context(
        params,
        lambda ctx: A2MLModel(ctx, None).deploy(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def predict_model_task(params):
    return with_context(
        params,
        lambda ctx: A2MLModel(ctx, None).predict(*params['args'], **params['kwargs'])
    )

# Complex tasks

@celeryApp.task(after_return=__handle_task_result)
def import_data_task(params):
    return with_context(
        params,
        lambda ctx: A2ML(ctx).import_data(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def train_task(params):
    return with_context(
        params,
        lambda ctx: A2ML(ctx).train(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def evaluate_task(params):
    return with_context(
        params,
        lambda ctx: A2ML(ctx).evaluate(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def deploy_task(params):
    return with_context(
        params,
        lambda ctx: A2ML(ctx).deploy(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def predict_task(params):
    return with_context(
        params,
        lambda ctx: A2ML(ctx).predict(*params['args'], **params['kwargs'])
    )

@celeryApp.task(after_return=__handle_task_result)
def review_task(params):
    # TODO
    raise Exception('not implemented yet')

@celeryApp.task(after_return=__handle_task_result)
def demo_task(params):
    import time

    request_id = params['_request_id']

    for i in range(0, 10):
        notificator.publish_log(request_id, 'info', 'log ' + str(i))
        time.sleep(2)

    notificator.publish_result(request_id, 'SUCCESS', 'done')

def with_context(params, proc):
    ctx = create_context(params)

    if 'args' not in params:
        params['args'] = []

    if 'kwargs' not in params:
        params['kwargs'] = {}

    res = proc(ctx)

    ctx.set_runs_on_server(False)
    ctx.config.set('config', 'use_server', True)

    return {'response': res, 'config': jsonpickle.encode(ctx.config)}

def __exception_message_with_all_causes(e):
    if isinstance(e, Exception) and e.__cause__:
        return str(e) + ' caused by ' + __exception_message_with_all_causes(e.__cause__)
    else:
        return str(e)

def __error_to_result(retval, einfo):
    res = __exception_message_with_all_causes(retval)

    if einfo:
        res += '\n' + str(einfo)

    return res

def __map_collection_to_name(res, collection_name):
    for provider in res.keys():
        if collection_name in res[provider]['data']:
            res[provider]['data'][collection_name] = list(
                map(lambda x: x.get('name'), res[provider]['data'][collection_name])
            )

    return res
        ctx.notificator = notificator
        ctx.request_id = params['_request_id']
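# Editor's note: a hedged dispatch sketch, separate from the module above. The params
# payload is hypothetical but follows the shape create_context()/with_context() expect:
# a _request_id for notifications, project/provider hints when no pickled context is
# passed, and optional args/kwargs forwarded to the wrapped A2ML call.
params = {
    '_request_id': 'req-123',
    'project_name': 'demo-project',
    'provider': 'azure',
    'args': [],
    'kwargs': {},
}

# Runs inline when TEST_CALL_CELERY_TASKS is set, otherwise via Celery .delay().get().
result = execute_tasks(list_projects_task, params)
print(result['response'], result['config'])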
core.ts
import { window, ExtensionContext, InputBoxOptions } from 'vscode'; import Logger from '../utils/logger'; import { StandardJSONOutput, IStandardJSONContractsQP, StandardCompiledContract, isStdContract, CombinedJSONOutput, isComContract, ICombinedJSONContractsQP, CombinedCompiledContract, ConstructorInputValue, } from '../types'; import { createWorker, createAccWorker } from './workerCreator'; // Create logger const logger = new Logger(); const pwdInpOpt: InputBoxOptions = { ignoreFocusOut: true, password: true, placeHolder: 'Password', }; const txHashInpOpt: InputBoxOptions = { ignoreFocusOut: true, password: false, placeHolder: 'Transaction hash', }; // Parse Standard JSON payload export function parseJSONPayload(context: ExtensionContext, _jsonPayload: any): void { try { const { contracts }: StandardJSONOutput = JSON.parse(_jsonPayload); const quickPick = window.createQuickPick<IStandardJSONContractsQP>(); quickPick.items = Object.keys(contracts).map((contract) => ({ label: contract, contractKey: contract })); quickPick.placeholder = 'Select contract file'; quickPick.onDidChangeActive((selection: Array<IStandardJSONContractsQP>) => { quickPick.value = selection[0].label; }); quickPick.onDidChangeSelection((selection: Array<IStandardJSONContractsQP>) => { if (selection[0]) { const contractFileName = selection[0].contractKey; const contractFile = contracts[contractFileName]; if (!isStdContract(contractFile)) { const contractQp = window.createQuickPick<IStandardJSONContractsQP>(); contractQp.items = Object.keys(contractFile).map((contract) => ({ label: contract, contractKey: contract, })); contractQp.placeholder = 'Select contract'; contractQp.onDidChangeActive((selection: Array<IStandardJSONContractsQP>) => { contractQp.value = selection[0].label; }); contractQp.onDidChangeSelection((selection: Array<IStandardJSONContractsQP>) => { if (selection[0]) { const contract: StandardCompiledContract = contracts[contractFileName][selection[0].contractKey]; if (isStdContract(contract)) { context.workspaceState.update('contract', contract); logger.log('Contract loaded!'); } else { logger.error(Error('Could not parse contract.')); } contractQp.dispose(); } }); contractQp.onDidHide(() => contractQp.dispose()); contractQp.show(); } else { logger.error(Error('Could not parse contract.')); } quickPick.dispose(); } }); quickPick.onDidHide(() => quickPick.dispose()); quickPick.show(); } catch (error) { logger.error( Error( 'Could not load JSON file. Make sure it follows Solidity output description. Know more: https://docs.soliditylang.org/en/latest/using-the-compiler.html#compiler-input-and-output-json-description.' 
) ); } } // Parse Combined JSON payload export function parseCombinedJSONPayload(context: ExtensionContext, _jsonPayload: any): void { if (_jsonPayload) { const { contracts }: CombinedJSONOutput = JSON.parse(_jsonPayload); const quickPick = window.createQuickPick<ICombinedJSONContractsQP>(); quickPick.items = Object.keys(contracts).map((contract) => ({ label: contract, contractKey: contract })); quickPick.placeholder = 'Select contract'; quickPick.onDidChangeActive((selection: Array<ICombinedJSONContractsQP>) => { quickPick.value = selection[0].label; }); quickPick.onDidChangeSelection((selection: Array<ICombinedJSONContractsQP>) => { if (selection[0]) { const contract: CombinedCompiledContract = contracts[selection[0].contractKey]; if (isComContract(contract)) { context.workspaceState.update('contract', contract); logger.log('Contract loaded!'); } else { logger.error(Error('Could not parse contract.')); } quickPick.dispose(); } }); quickPick.onDidHide(() => quickPick.dispose()); quickPick.show(); } else { logger.error( Error( 'Could not load JSON file. Make sure it follows Solidity output description. Know more: https://docs.soliditylang.org/en/latest/using-the-compiler.html#compiler-input-and-output-json-description.' ) ); } } // Estimate Transaction Gas export function estimateTransactionGas(context: ExtensionContext): Promise<number> { return new Promise((resolve, reject) => { const networkId = context.workspaceState.get('networkId'); const account: string | undefined = context.workspaceState.get('account'); const contract = context.workspaceState.get('contract'); const params: Array<ConstructorInputValue> | undefined = context.workspaceState.get('constructor-inputs'); let payload = {}; if (isComContract(contract)) { const { abi, bin } = contract; payload = { abi, bytecode: bin, params: params || [], from: account, }; } else if (isStdContract(contract)) { const { abi, evm } = contract; payload = { abi, bytecode: evm.bytecode.object, params: params || [], from: account, }; } const txWorker = createWorker(); txWorker.on('message', (m: any) => { if (m.error) { logger.error(m.error); reject(m.error); } else { context.workspaceState.update('gasEstimate', m.gasEstimate); logger.log(m.gasEstimate); resolve(m.gasEstimate); } }); logger.log('Transaction payload'); logger.log(JSON.stringify(payload, null, 2)); txWorker.send({ command: 'get-gas-estimate', payload, testnetId: networkId, }); }); } // Ganache deploy export function ganacheDeploy(context: ExtensionContext): Promise<any> { return new Promise((resolve, reject) => { (async () => { try { const testNetId = context.workspaceState.get('networkId'); const account = context.workspaceState.get('account'); const contract = context.workspaceState.get('contract'); const params: Array<ConstructorInputValue> | undefined = context.workspaceState.get('constructor-inputs'); const gas: number | undefined = context.workspaceState.get('gasEstimate'); let payload = {}; if (isComContract(contract)) { const { abi, bin } = contract; payload = { abi, bytecode: bin, params: params || [], from: account, gas, }; } else if (isStdContract(contract)) { const { abi, evm } = contract; payload = { abi, bytecode: evm.bytecode.object, params: params || [], from: account, gas, }; } const deployWorker = createWorker(); deployWorker.on('message', (m: any) => { logger.log(`SignDeploy worker message: ${JSON.stringify(m)}`); if (m.error) { logger.error(m.error); } else if (m.transactionResult) { logger.log('Contract transaction submitted!'); resolve(m.transactionResult); } 
}); deployWorker.send({ command: 'deploy-contract', payload, testnetId: testNetId, }); } catch (error) { logger.error(error); reject(error); } })(); }); } export function signDeploy(context: ExtensionContext): Promise<any> { return new Promise((resolve, reject) => { (async () => { try { const testNetId = context.workspaceState.get('networkId'); const account = context.workspaceState.get('account'); const unsignedTx = context.workspaceState.get('unsignedTx'); const password = await window.showInputBox(pwdInpOpt); const accWorker = createAccWorker(); const signedDeployWorker = createWorker(); accWorker.on('message', (m: any) => { if (m.privateKey) { const { privateKey } = m; signedDeployWorker.on('message', (m: any) => { logger.log(`SignDeploy worker message: ${JSON.stringify(m)}`); if (m.error) { logger.error(m.error); reject(m.error); } else if (m.transactionResult) { logger.success('Contract transaction submitted!'); resolve(m.transactionResult); } }); signedDeployWorker.send({ command: 'sign-deploy', payload: { unsignedTx, pvtKey: privateKey, }, testnetId: testNetId, }); } else if (m.error) { logger.error(m.error); reject(m.error); } }); accWorker.send({ command: 'extract-privateKey', address: account, keyStorePath: context.extensionPath, password: password || '', }); } catch (error) { logger.error(error); reject(error); } })(); }); } export function getTransactionInfo(context: ExtensionContext): Promise<any> { return new Promise((resolve, reject) => { (async () => { try { const testNetId = context.workspaceState.get('networkId'); const txhash = context.workspaceState.get('transactionHash') || (await window.showInputBox(txHashInpOpt)); const txWorker = createWorker(); txWorker.on('message', (m: any) => { if (m.error) { logger.error(m.error); reject(m.error); } else { context.workspaceState.update('transaction', m.transaction); logger.log(m.transaction); resolve(m.transaction); } }); txWorker.send({ command: 'get-transaction', payload: { txhash, }, testnetId: testNetId, }); } catch (error) { logger.error(error); reject(error); } })(); }); } export function getTransactionReceipt(context: ExtensionContext): Promise<any> { return new Promise((resolve, reject) => { (async () => { try { const testNetId = context.workspaceState.get('networkId'); const txhash = context.workspaceState.get('transactionHash') || (await window.showInputBox(txHashInpOpt)); const txWorker = createWorker();
logger.error(m.error); reject(m.error); } else { context.workspaceState.update('transaction-receipt', JSON.parse(m.transactionReceipt)); logger.log(m.transactionReceipt); resolve(m.transactionReceipt); } }); txWorker.send({ command: 'get-transaction-receipt', payload: { txhash, }, testnetId: testNetId, }); } catch (error) { logger.error(error); reject(error); } })(); }); }
        txWorker.on('message', (m: any) => {
          if (m.error) {
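// Editor's note: a hedged sketch, separate from the module above, of how these
// helpers might be chained from an extension command handler; the command id is
// hypothetical. estimateTransactionGas() caches its result in workspaceState, which
// ganacheDeploy() then picks up as the gas limit.
import { commands, ExtensionContext } from 'vscode';

export function registerDeployCommand(context: ExtensionContext) {
  context.subscriptions.push(
    commands.registerCommand('extension.exampleDeploy', async () => {
      try {
        await estimateTransactionGas(context);
        const txResult = await ganacheDeploy(context);
        logger.log(`Deployed: ${JSON.stringify(txResult)}`);
      } catch (err) {
        logger.error(err as Error);
      }
    })
  );
}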
io.py
# io.py # Contact: Jacob Schreiber # [email protected] ''' This script focuses on data input and output, and currently supports the following files: * FastA ''' from seq import * class FastA( object ): ''' This is a FastA file. It can contain many DNA, RNA, or Protein sequences in it. This can be read in or written out. ''' def __init__( self, sequences ): ''' If sequences are passed in, they should be as the DNA, RNA, or protein objects, so that all metadata is written out as well. ''' self.sequences = sequences def __str__( self ):
def to_file( self, filename, attrs=None ): ''' Write out a FastA file. Attrs specifies the attributes you want to write out as well, in that order. Since any data can be stored in these objects, it allows you to pick both what you want to write out, and in what order. If nothing is provided, nothing is written out. ''' with open( filename, 'w' ) as outfile: # Write out each stored sequence for sequence in self.sequences: outfile.write( sequence.to_fasta( attrs ) ) @classmethod def from_file( cls, filename, attrs=None, delimiter=' ', seqType=None ): ''' Read in a FastA file. Given names for each delimited item in the comments by specifying their attribute in order. Specify the seqType as the class object or string. ''' if isinstance( seqType, str ): if seqType.lower() == 'protein': seqType = Protein elif seqType.lower() == 'rna': seqType = RNA elif seqType.lower() == 'dna': seqType = DNA else: seqType = Sequence seqType = seqType or Sequence sequences = [] with open( filename, 'r' ) as infile: comments, sequence = None, '' # Go through the file line by line for line in infile: # If the next line starts with a >, it means that the previous # sequence has come to an end. if line.startswith( '>' ): # If a sequence has been found, create and append the # sequence object if sequence != '': comments = comments.split( delimiter ) attributes = { attr: comment for attr, comment in zip( attrs, comments ) } sequences.append( seqType( sequence, **attributes ) ) # Now get the comment, removing the > and any newlines comments = line[1:].strip('\r\n') # Reset the sequence sequence = '' else: # Otherwise, append the sequence line to the growing # sequence sequence += line.strip('\r\n') comments = comments.split( delimiter ) attributes = { attr: comment for attr, comment in zip( attrs, comments )} sequences.append( seqType( sequence, **attributes ) ) return cls( sequences )
        ''' String representation of the FastA '''

        return '\n'.join( sequence.to_fasta() for sequence in self.sequences )
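# Editor's note: a hedged usage sketch, separate from the module above. The file
# names and attribute names are hypothetical, and it assumes the seq module's
# sequence objects expose the attributes passed via attrs (from_file sets them as
# keyword arguments on construction).
fasta = FastA.from_file( 'reads.fa', attrs=['id', 'organism'], seqType='dna' )

for seq in fasta.sequences:
    print( seq.id, seq.organism )  # attributes attached by from_file

# Round-trip, writing the same two comment fields back out.
fasta.to_file( 'reads_copy.fa', attrs=['id', 'organism'] )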
server.go
package test import ( "log" "net/http" "net/http/httptest" "sort" "github.com/cj123/test2doc/doc" "github.com/cj123/test2doc/doc/parse" ) // resources = map[uri]Resource var resources = map[string]*doc.Resource{} type Server struct { *httptest.Server doc *doc.Doc } // TODO: filter out 404 responses func NewServer(handler http.Handler) (s *Server, err error) { // check if url var extractor func is set if parse.Extractor == nil { panic("please set a URLVarExtractor.") } outDoc, err := doc.NewDoc(".") if err != nil { return s, err } httptestServer := httptest.NewServer(handleAndRecord(handler, outDoc)) return &Server{ httptestServer, outDoc, }, nil } func (s *Server) Finish() { s.Close() // sort resources by path var uris []string for k := range resources { uris = append(uris, k) } sort.Strings(uris) for _, uri := range uris { s.doc.AddResource(resources[uri]) }
err := s.doc.Write() if err != nil { panic(err.Error()) } } func handleAndRecord(handler http.Handler, outDoc *doc.Doc) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { // copy request body into Request object docReq, err := doc.NewRequest(req) if err != nil { log.Println("Error:", err.Error()) return } // record response rw := httptest.NewRecorder() resp := NewResponseWriter(rw, req) handler.ServeHTTP(resp, req) // setup resource u := doc.NewURL(req) path := u.ParameterizedPath if resources[path] == nil { resources[path] = doc.NewResource(u) } // store response body in Response object docResp := doc.NewResponse(resp.W) // find action action := resources[path].FindAction(req.Method) if action == nil { // make new action action, err = doc.NewAction(req.Method, resp.HandlerInfo.FuncName) if err != nil { log.Println("Error:", err.Error()) return } // add Action to Resource's list of Actions resources[path].AddAction(action) } // add request, response to action action.AddRequest(docReq, docResp) // copy response over to w doc.CopyHeader(w.Header(), resp.Header()) w.WriteHeader(resp.W.Code) w.Write(resp.W.Body.Bytes()) } }
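// Editor's note: a hedged test sketch, separate from the package above. It assumes
// parse.Extractor has the usual test2doc URLVarExtractor shape
// (func(*http.Request) map[string]string) and that newRouter() is an application
// http.Handler factory; both names are hypothetical, and "testing" is assumed to be
// imported in a _test.go file.
func TestWidgetsEndpoint(t *testing.T) {
	parse.Extractor = func(req *http.Request) map[string]string {
		return map[string]string{} // e.g. mux.Vars(req) when using gorilla/mux
	}

	server, err := NewServer(newRouter())
	if err != nil {
		t.Fatal(err)
	}
	// Finish sorts the recorded resources and writes the API blueprint.
	defer server.Finish()

	resp, err := http.Get(server.URL + "/widgets")
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
}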
custom_build.rs
use std::collections::hash_map::{Entry, HashMap}; use std::collections::{BTreeSet, HashSet}; use std::fs; use std::path::{Path, PathBuf}; use std::str; use std::sync::{Arc, Mutex}; use crate::core::compiler::job_queue::JobState; use crate::core::PackageId; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::machine_message::{self, Message}; use crate::util::Cfg; use crate::util::{self, internal, paths, profile}; use super::job::{Freshness, Job, Work}; use super::{fingerprint, Context, Kind, TargetConfig, Unit}; /// Contains the parsed output of a custom build script. #[derive(Clone, Debug, Hash)] pub struct BuildOutput { /// Paths to pass to rustc with the `-L` flag. pub library_paths: Vec<PathBuf>, /// Names and link kinds of libraries, suitable for the `-l` flag. pub library_links: Vec<String>, /// Linker arguments suitable to be passed to `-C link-arg=<args>` pub linker_args: Vec<String>, /// Various `--cfg` flags to pass to the compiler. pub cfgs: Vec<String>, /// Additional environment variables to run the compiler with. pub env: Vec<(String, String)>, /// Metadata to pass to the immediate dependencies. pub metadata: Vec<(String, String)>, /// Paths to trigger a rerun of this build script. /// May be absolute or relative paths (relative to package root). pub rerun_if_changed: Vec<PathBuf>, /// Environment variables which, when changed, will cause a rebuild. pub rerun_if_env_changed: Vec<String>, /// Warnings generated by this build. pub warnings: Vec<String>, } /// Map of packages to build info. pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>; /// Build info and overrides. pub struct BuildState { pub outputs: Mutex<BuildMap>, overrides: HashMap<(String, Kind), BuildOutput>, } #[derive(Default)] pub struct BuildScripts { // Cargo will use this `to_link` vector to add `-L` flags to compiles as we // propagate them upwards towards the final build. Note, however, that we // need to preserve the ordering of `to_link` to be topologically sorted. // This will ensure that build scripts which print their paths properly will // correctly pick up the files they generated (if there are duplicates // elsewhere). // // To preserve this ordering, the (id, kind) is stored in two places, once // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain // this as we're building interactively below to ensure that the memory // usage here doesn't blow up too much. // // For more information, see #2354. pub to_link: Vec<(PackageId, Kind)>, seen_to_link: HashSet<(PackageId, Kind)>, pub plugins: BTreeSet<PackageId>, } #[derive(Debug)] pub struct BuildDeps { pub build_script_output: PathBuf, pub rerun_if_changed: Vec<PathBuf>, pub rerun_if_env_changed: Vec<String>, } /// Prepares a `Work` that executes the target as a custom build script. /// /// The `req` given is the requirement which this run of the build script will /// prepare work for. If the requirement is specified as both the target and the /// host platforms it is assumed that the two are equal and the build script is /// only run once (not twice). 
pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> { let _p = profile::start(format!( "build script prepare: {}/{}", unit.pkg, unit.target.name() )); let key = (unit.pkg.package_id(), unit.kind); if cx.build_script_overridden.contains(&key) { fingerprint::prepare_target(cx, unit, false) } else { build_work(cx, unit) } } fn emit_build_output(state: &JobState<'_>, output: &BuildOutput, package_id: PackageId) { let library_paths = output .library_paths .iter() .map(|l| l.display().to_string()) .collect::<Vec<_>>(); let msg = machine_message::BuildScript { package_id, linked_libs: &output.library_links, linked_paths: &library_paths, cfgs: &output.cfgs, env: &output.env, } .to_json_string(); state.stdout(msg); } fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Job> { assert!(unit.mode.is_run_custom_build()); let bcx = &cx.bcx; let dependencies = cx.dep_targets(unit); let build_script_unit = dependencies .iter() .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build()) .expect("running a script not depending on an actual script"); let script_dir = cx.files().build_script_dir(build_script_unit); let script_out_dir = cx.files().build_script_out_dir(unit); let script_run_dir = cx.files().build_script_run_dir(unit); let build_plan = bcx.build_config.build_plan; let invocation_name = unit.buildkey(); if let Some(deps) = unit.pkg.manifest().metabuild() { prepare_metabuild(cx, build_script_unit, deps)?; } // Building the command to execute let to_exec = script_dir.join(unit.target.name()); // Start preparing the process to execute, starting out with some // environment variables. Note that the profile-related environment // variables are not set with this the build script's profile but rather the // package's library profile. // NOTE: if you add any profile flags, be sure to update // `Profiles::get_profile_run_custom_build` so that those flags get // carried over. let to_exec = to_exec.into_os_string(); let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?; let debug = unit.profile.debuginfo.unwrap_or(0) != 0; cmd.env("OUT_DIR", &script_out_dir) .env("CARGO_MANIFEST_DIR", unit.pkg.root()) .env("NUM_JOBS", &bcx.jobs().to_string()) .env( "TARGET", &match unit.kind { Kind::Host => bcx.host_triple(), Kind::Target => bcx.target_triple(), }, ) .env("DEBUG", debug.to_string()) .env("OPT_LEVEL", &unit.profile.opt_level.to_string()) .env( "PROFILE", if bcx.build_config.release { "release" } else { "debug" }, ) .env("HOST", &bcx.host_triple()) .env("RUSTC", &bcx.rustc.path) .env("RUSTDOC", &*bcx.config.rustdoc()?) .inherit_jobserver(&cx.jobserver); if let Some(ref linker) = bcx.target_config.linker { cmd.env("RUSTC_LINKER", linker); } if let Some(links) = unit.pkg.manifest().links() { cmd.env("CARGO_MANIFEST_LINKS", links); } // Be sure to pass along all enabled features for this package, this is the // last piece of statically known information that we have. 
for feat in bcx.resolve.features(unit.pkg.package_id()).iter() { cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); } let mut cfg_map = HashMap::new(); for cfg in bcx.cfg(unit.kind) { match *cfg { Cfg::Name(ref n) => { cfg_map.insert(n.clone(), None); } Cfg::KeyPair(ref k, ref v) => { if let Some(ref mut values) = *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new())) { values.push(v.clone()) } } } } for (k, v) in cfg_map { let k = format!("CARGO_CFG_{}", super::envify(&k)); match v { Some(list) => { cmd.env(&k, list.join(",")); } None => { cmd.env(&k, ""); } } } // Gather the set of native dependencies that this package has along with // some other variables to close over. // // This information will be used at build-time later on to figure out which // sorts of variables need to be discovered at that time. let lib_deps = { dependencies .iter() .filter_map(|unit| { if unit.mode.is_run_custom_build() { Some(( unit.pkg.manifest().links().unwrap().to_string(), unit.pkg.package_id(), )) } else { None } }) .collect::<Vec<_>>() }; let pkg_name = unit.pkg.to_string(); let build_state = Arc::clone(&cx.build_state); let id = unit.pkg.package_id(); let output_file = script_run_dir.join("output"); let err_file = script_run_dir.join("stderr"); let root_output_file = script_run_dir.join("root-output"); let host_target_root = cx.files().host_root().to_path_buf(); let all = ( id, pkg_name.clone(), Arc::clone(&build_state), output_file.clone(), script_out_dir.clone(), ); let build_scripts = super::load_build_deps(cx, unit); let kind = unit.kind; let json_messages = bcx.build_config.emit_json(); let extra_verbose = bcx.config.extra_verbose(); let (prev_output, prev_script_out_dir) = prev_build_output(cx, unit); fs::create_dir_all(&script_dir)?; fs::create_dir_all(&script_out_dir)?; // Prepare the unit of "dirty work" which will actually run the custom build // command. // // Note that this has to do some extra work just before running the command // to determine extra environment variables and such. let dirty = Work::new(move |state| { // Make sure that OUT_DIR exists. // // If we have an old build directory, then just move it into place, // otherwise create it! if fs::metadata(&script_out_dir).is_err() { fs::create_dir(&script_out_dir).chain_err(|| { internal( "failed to create script output directory for \ build command", ) })?; } // For all our native lib dependencies, pick up their metadata to pass // along to this custom build command. We're also careful to augment our // dynamic library search path in case the build script depended on any // native dynamic libraries. if !build_plan { let build_state = build_state.outputs.lock().unwrap(); for (name, id) in lib_deps { let key = (id, kind); let state = build_state.get(&key).ok_or_else(|| { internal(format!( "failed to locate build state for env \ vars: {}/{:?}", id, kind )) })?; let data = &state.metadata; for &(ref key, ref value) in data.iter() { cmd.env( &format!("DEP_{}_{}", super::envify(&name), super::envify(key)), value, ); } } if let Some(build_scripts) = build_scripts { super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &host_target_root)?; } } if build_plan { state.build_plan(invocation_name, cmd.clone(), Arc::new(Vec::new())); return Ok(()); } // And now finally, run the build command itself! 
state.running(&cmd); let timestamp = paths::set_invocation_time(&script_run_dir)?; let prefix = format!("[{} {}] ", id.name(), id.version()); let output = cmd .exec_with_streaming( &mut |stdout| { if extra_verbose { state.stdout(format!("{}{}", prefix, stdout)); } Ok(()) }, &mut |stderr| { if extra_verbose { state.stderr(format!("{}{}", prefix, stderr)); } Ok(()) }, true, ) .chain_err(|| format!("failed to run custom build command for `{}`", pkg_name))?; // After the build command has finished running, we need to be sure to // remember all of its output so we can later discover precisely what it // was, even if we don't run the build command again (due to freshness). // // This is also the location where we provide feedback into the build // state informing what variables were discovered via our script as // well. paths::write(&output_file, &output.stdout)?; filetime::set_file_times(output_file, timestamp, timestamp)?; paths::write(&err_file, &output.stderr)?; paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?; let parsed_output = BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?; if json_messages { emit_build_output(state, &parsed_output, id); } build_state.insert(id, kind, parsed_output); Ok(()) }); // Now that we've prepared our work-to-do, we need to prepare the fresh work // itself to run when we actually end up just discarding what we calculated // above. let fresh = Work::new(move |state| { let (id, pkg_name, build_state, output_file, script_out_dir) = all; let output = match prev_output { Some(output) => output, None => BuildOutput::parse_file( &output_file, &pkg_name, &prev_script_out_dir, &script_out_dir, )?, }; if json_messages { emit_build_output(state, &output, id); } build_state.insert(id, kind, output); Ok(()) }); let mut job = if cx.bcx.build_config.build_plan { Job::new(Work::noop(), Freshness::Dirty) } else { fingerprint::prepare_target(cx, unit, false)? }; if job.freshness() == Freshness::Dirty { job.before(dirty); } else { job.before(fresh); } Ok(job) } impl BuildState { pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState { let mut overrides = HashMap::new(); let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host)); let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target)); for ((name, output), kind) in i1.chain(i2) { overrides.insert((name.clone(), kind), output.clone()); } BuildState { outputs: Mutex::new(HashMap::new()), overrides, } } fn
(&self, id: PackageId, kind: Kind, output: BuildOutput) { self.outputs.lock().unwrap().insert((id, kind), output); } } impl BuildOutput { pub fn parse_file( path: &Path, pkg_name: &str, script_out_dir_when_generated: &Path, script_out_dir: &Path, ) -> CargoResult<BuildOutput> { let contents = paths::read_bytes(path)?; BuildOutput::parse( &contents, pkg_name, script_out_dir_when_generated, script_out_dir, ) } // Parses the output of a script. // The `pkg_name` is used for error messages. pub fn parse( input: &[u8], pkg_name: &str, script_out_dir_when_generated: &Path, script_out_dir: &Path, ) -> CargoResult<BuildOutput> { let mut library_paths = Vec::new(); let mut library_links = Vec::new(); let mut linker_args = Vec::new(); let mut cfgs = Vec::new(); let mut env = Vec::new(); let mut metadata = Vec::new(); let mut rerun_if_changed = Vec::new(); let mut rerun_if_env_changed = Vec::new(); let mut warnings = Vec::new(); let whence = format!("build script of `{}`", pkg_name); for line in input.split(|b| *b == b'\n') { let line = match str::from_utf8(line) { Ok(line) => line.trim(), Err(..) => continue, }; let mut iter = line.splitn(2, ':'); if iter.next() != Some("cargo") { // skip this line since it doesn't start with "cargo:" continue; } let data = match iter.next() { Some(val) => val, None => continue, }; // getting the `key=value` part of the line let mut iter = data.splitn(2, '='); let key = iter.next(); let value = iter.next(); let (key, value) = match (key, value) { (Some(a), Some(b)) => (a, b.trim_end()), // Line started with `cargo:` but didn't match `key=value`. _ => failure::bail!("Wrong output in {}: `{}`", whence, line), }; // This will rewrite paths if the target directory has been moved. let value = value.replace( script_out_dir_when_generated.to_str().unwrap(), script_out_dir.to_str().unwrap(), ); match key { "rustc-flags" => { let (paths, links) = BuildOutput::parse_rustc_flags(&value, &whence)?; library_links.extend(links.into_iter()); library_paths.extend(paths.into_iter()); } "rustc-link-lib" => library_links.push(value.to_string()), "rustc-link-search" => library_paths.push(PathBuf::from(value)), "rustc-cdylib-link-arg" => linker_args.push(value.to_string()), "rustc-cfg" => cfgs.push(value.to_string()), "rustc-env" => env.push(BuildOutput::parse_rustc_env(&value, &whence)?), "warning" => warnings.push(value.to_string()), "rerun-if-changed" => rerun_if_changed.push(PathBuf::from(value)), "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()), _ => metadata.push((key.to_string(), value.to_string())), } } Ok(BuildOutput { library_paths, library_links, linker_args, cfgs, env, metadata, rerun_if_changed, rerun_if_env_changed, warnings, }) } pub fn parse_rustc_flags( value: &str, whence: &str, ) -> CargoResult<(Vec<PathBuf>, Vec<String>)> { let value = value.trim(); let mut flags_iter = value .split(|c: char| c.is_whitespace()) .filter(|w| w.chars().any(|c| !c.is_whitespace())); let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); while let Some(flag) = flags_iter.next() { if flag != "-l" && flag != "-L" { failure::bail!( "Only `-l` and `-L` flags are allowed in {}: `{}`", whence, value ) } let value = match flags_iter.next() { Some(v) => v, None => failure::bail!( "Flag in rustc-flags has no value in {}: `{}`", whence, value ), }; match flag { "-l" => library_links.push(value.to_string()), "-L" => library_paths.push(PathBuf::from(value)), // was already checked above _ => failure::bail!("only -l and -L flags are allowed"), }; } 
Ok((library_paths, library_links)) } pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> { let mut iter = value.splitn(2, '='); let name = iter.next(); let val = iter.next(); match (name, val) { (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())), _ => failure::bail!("Variable rustc-env has no value in {}: {}", whence, value), } } } fn prepare_metabuild<'a, 'cfg>( cx: &Context<'a, 'cfg>, unit: &Unit<'a>, deps: &[String], ) -> CargoResult<()> { let mut output = Vec::new(); let available_deps = cx.dep_targets(unit); // Filter out optional dependencies, and look up the actual lib name. let meta_deps: Vec<_> = deps .iter() .filter_map(|name| { available_deps .iter() .find(|u| u.pkg.name().as_str() == name.as_str()) .map(|dep| dep.target.crate_name()) }) .collect(); for dep in &meta_deps { output.push(format!("use {};\n", dep)); } output.push("fn main() {\n".to_string()); for dep in &meta_deps { output.push(format!(" {}::metabuild();\n", dep)); } output.push("}\n".to_string()); let output = output.join(""); let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); fs::create_dir_all(path.parent().unwrap())?; paths::write_if_changed(path, &output)?; Ok(()) } impl BuildDeps { pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps { BuildDeps { build_script_output: output_file.to_path_buf(), rerun_if_changed: output .map(|p| &p.rerun_if_changed) .cloned() .unwrap_or_default(), rerun_if_env_changed: output .map(|p| &p.rerun_if_env_changed) .cloned() .unwrap_or_default(), } } } /// Computes the `build_scripts` map in the `Context` which tracks what build /// scripts each package depends on. /// /// The global `build_scripts` map lists for all (package, kind) tuples what set /// of packages' build script outputs must be considered. For example this lists /// all dependencies' `-L` flags which need to be propagated transitively. /// /// The given set of targets to this function is the initial set of /// targets/profiles which are being built. pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> { let mut ret = HashMap::new(); for unit in units { build(&mut ret, cx, unit)?; } cx.build_scripts .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v)))); return Ok(()); // Recursive function to build up the map we're constructing. This function // memoizes all of its return values as it goes along. fn build<'a, 'b, 'cfg>( out: &'a mut HashMap<Unit<'b>, BuildScripts>, cx: &mut Context<'b, 'cfg>, unit: &Unit<'b>, ) -> CargoResult<&'a BuildScripts> { // Do a quick pre-flight check to see if we've already calculated the // set of dependencies. 
if out.contains_key(unit) { return Ok(&out[unit]); } let key = unit .pkg .manifest() .links() .map(|l| (l.to_string(), unit.kind)); let build_state = &cx.build_state; if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) { let key = (unit.pkg.package_id(), unit.kind); cx.build_script_overridden.insert(key); build_state .outputs .lock() .unwrap() .insert(key, output.clone()); } let mut ret = BuildScripts::default(); if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { add_to_link(&mut ret, unit.pkg.package_id(), unit.kind); } if unit.mode.is_run_custom_build() { parse_previous_explicit_deps(cx, unit)?; } // We want to invoke the compiler deterministically to be cache-friendly // to rustc invocation caching schemes, so be sure to generate the same // set of build script dependency orderings via sorting the targets that // come out of the `Context`. let mut targets = cx.dep_targets(unit); targets.sort_by_key(|u| u.pkg.package_id()); for unit in targets.iter() { let dep_scripts = build(out, cx, unit)?; if unit.target.for_host() { ret.plugins .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned()); } else if unit.target.linkable() { for &(pkg, kind) in dep_scripts.to_link.iter() { add_to_link(&mut ret, pkg, kind); } } } match out.entry(*unit) { Entry::Vacant(entry) => Ok(entry.insert(ret)), Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"), } } // When adding an entry to 'to_link' we only actually push it on if the // script hasn't seen it yet (e.g., we don't push on duplicates). fn add_to_link(scripts: &mut BuildScripts, pkg: PackageId, kind: Kind) { if scripts.seen_to_link.insert((pkg, kind)) { scripts.to_link.push((pkg, kind)); } } fn parse_previous_explicit_deps<'a, 'cfg>( cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>, ) -> CargoResult<()> { let script_run_dir = cx.files().build_script_run_dir(unit); let output_file = script_run_dir.join("output"); let (prev_output, _) = prev_build_output(cx, unit); let deps = BuildDeps::new(&output_file, prev_output.as_ref()); cx.build_explicit_deps.insert(*unit, deps); Ok(()) } } /// Returns the previous parsed `BuildOutput`, if any, from a previous /// execution. /// /// Also returns the directory containing the output, typically used later in /// processing. fn prev_build_output<'a, 'cfg>( cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>, ) -> (Option<BuildOutput>, PathBuf) { let script_out_dir = cx.files().build_script_out_dir(unit); let script_run_dir = cx.files().build_script_run_dir(unit); let root_output_file = script_run_dir.join("root-output"); let output_file = script_run_dir.join("output"); let prev_script_out_dir = paths::read_bytes(&root_output_file) .and_then(|bytes| util::bytes2path(&bytes)) .unwrap_or_else(|_| script_out_dir.clone()); ( BuildOutput::parse_file( &output_file, &unit.pkg.to_string(), &prev_script_out_dir, &script_out_dir, ) .ok(), prev_script_out_dir, ) }
insert
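The `BuildOutput::parse` routine above reads a build script's stdout line by line and acts only on lines of the form `cargo:key=value`; everything else is ignored, and a line that starts with `cargo:` but has no `=` is an error. A minimal Python sketch of that line grammar (an illustration only, not Cargo's own code; the sample directives are invented):

def parse_directives(stdout: str):
    """Mimic the cargo:key=value grammar consumed by BuildOutput::parse."""
    directives = []
    for line in stdout.splitlines():
        line = line.strip()
        if not line.startswith('cargo:'):
            continue  # non-directive output is ignored
        key, sep, value = line[len('cargo:'):].partition('=')
        if not sep:
            raise ValueError('line started with "cargo:" but is not key=value')
        directives.append((key, value.rstrip()))
    return directives

sample = 'warming up\ncargo:rustc-link-lib=git2\ncargo:rerun-if-changed=build.rs\n'
assert parse_directives(sample) == [
    ('rustc-link-lib', 'git2'),
    ('rerun-if-changed', 'build.rs'),
]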
test_rmsa.py
import os import gym from optical_rl_gym.envs.rmsa_env import shortest_path_first_fit, shortest_available_path_first_fit, \ least_loaded_path_first_fit, SimpleMatrixObservation from optical_rl_gym.utils import evaluate_heuristic, random_policy import pickle import logging import numpy as np import matplotlib.pyplot as plt load = 50 logging.getLogger('rmsaenv').setLevel(logging.INFO) seed = 20 episodes = 10 episode_length = 100 monitor_files = [] policies = [] # topology_name = 'gbn' # topology_name = 'nobel-us' # topology_name = 'germany50' with open(os.path.join('..', 'examples', 'topologies', 'nsfnet_chen_eon_5-paths.h5'), 'rb') as f: topology = pickle.load(f) env_args = dict(topology=topology, seed=10, allow_rejection=True, load=load, mean_service_holding_time=25, episode_length=episode_length, num_spectrum_resources=64, bit_rate_selection='discrete') print('STR'.ljust(5), 'REW'.rjust(7), 'STD'.rjust(7)) init_env = gym.make('RMSA-v0', **env_args) env_rnd = SimpleMatrixObservation(init_env) mean_reward_rnd, std_reward_rnd = evaluate_heuristic(env_rnd, random_policy, n_eval_episodes=episodes) print('Rnd:'.ljust(8), f'{mean_reward_rnd:.4f} {std_reward_rnd:>7.4f}') print('\tBit rate blocking:', (init_env.episode_bit_rate_requested - init_env.episode_bit_rate_provisioned) / init_env.episode_bit_rate_requested) print('\tRequest blocking:', (init_env.episode_services_processed - init_env.episode_services_accepted) / init_env.episode_services_processed) print(init_env.topology.graph['throughput']) # exit(0) env_sp = gym.make('RMSA-v0', **env_args) mean_reward_sp, std_reward_sp = evaluate_heuristic(env_sp, shortest_path_first_fit, n_eval_episodes=episodes) print('SP-FF:'.ljust(8), f'{mean_reward_sp:.4f} {std_reward_sp:<7.4f}') print('\tBit rate blocking:', (env_sp.episode_bit_rate_requested - env_sp.episode_bit_rate_provisioned) / env_sp.episode_bit_rate_requested) print('\tRequest blocking:', (env_sp.episode_services_processed - env_sp.episode_services_accepted) / env_sp.episode_services_processed)
env_sap = gym.make('RMSA-v0', **env_args) mean_reward_sap, std_reward_sap = evaluate_heuristic(env_sap, shortest_available_path_first_fit, n_eval_episodes=episodes) print('SAP-FF:'.ljust(8), f'{mean_reward_sap:.4f} {std_reward_sap:.4f}') print('\tBit rate blocking:', (env_sap.episode_bit_rate_requested - env_sap.episode_bit_rate_provisioned) / env_sap.episode_bit_rate_requested) print('\tRequest blocking:', (env_sap.episode_services_processed - env_sap.episode_services_accepted) / env_sap.episode_services_processed) env_llp = gym.make('RMSA-v0', **env_args) mean_reward_llp, std_reward_llp = evaluate_heuristic(env_llp, least_loaded_path_first_fit, n_eval_episodes=episodes) print('LLP-FF:'.ljust(8), f'{mean_reward_llp:.4f} {std_reward_llp:.4f}') print('\tBit rate blocking:', (env_llp.episode_bit_rate_requested - env_llp.episode_bit_rate_provisioned) / env_llp.episode_bit_rate_requested) print('\tRequest blocking:', (env_llp.episode_services_processed - env_llp.episode_services_accepted) / env_llp.episode_services_processed)
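The blocking statistics printed above repeat the same (requested - provisioned) / requested formula for every heuristic. A small helper of this shape (hypothetical, not part of the repo) would keep those reports consistent:

def blocking_ratio(requested, provisioned):
    """Fraction of the offered demand (bit rate or requests) that was not served."""
    return (requested - provisioned) / requested

# e.g. for the least-loaded-path run above:
print('\tBit rate blocking:', blocking_ratio(env_llp.episode_bit_rate_requested,
                                             env_llp.episode_bit_rate_provisioned))
print('\tRequest blocking: ', blocking_ratio(env_llp.episode_services_processed,
                                             env_llp.episode_services_accepted))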
create_csv_results.py
import numpy as np
import os
import time
import sys
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.
                                                       abspath(__file__))))
if path not in sys.path:
    sys.path.append(path)

import CM_intern.CEDM.modules.cyf.create_density_map as CDM
import CM_intern.CEDM.modules.Subfunctions as SF
from CM_intern.common_modules.exportLayerDict import export_layer as expLyr
import CM_intern.common_modules.cliprasterlayer as CRL

import pickle

TARGET_RESOLUTION = 100

def load_reference_raster_lyr(NUTS3_vector_path, strd_raster_path_full, outputpath, NUTS3_feat_id_LIST
                        , MOST_RECENT_CUT=""):
    datatype_int = 'uint32'
    #self.datatype_int16 = 'uint16'
    datatype = "float32"
    # common parameters
    noDataValue = 0

    #SaveLayerDict = {}
    # Get current extent -> Use the Population 1x1km raster as reference Layer
    key_field = "NUTS_ID"
    REFERENCE_RASTER_LAYER_COORD, Layer_is_uncut = CRL.create_reference_raster_layer_origin_extent_of_vctr_feat(strd_raster_path_full
                                        , NUTS3_vector_path, NUTS3_feat_id_LIST
                                        , Vctr_key_field=key_field)
    (REFERENCE_geotransform_obj, REFERENCE_RasterSize
            , REFERENCE_RESOLUTION, REFERENCE_extent) = REFERENCE_RASTER_LAYER_COORD

    REFERENCE_RasterResolution = REFERENCE_geotransform_obj[1]

    gto_hr = list(REFERENCE_geotransform_obj)
    gto_hr[1] = TARGET_RESOLUTION
    gto_hr[5] = -TARGET_RESOLUTION
    HighRes_gt_obj = tuple(gto_hr)

    SaveLayerDict = {}
    SaveLayerDict["Reference"] = ["%s/REFERENCE.tif" % outputpath, REFERENCE_geotransform_obj
                        , datatype_int
                        , np.ones((REFERENCE_RasterSize), dtype=datatype_int)
                        , noDataValue]

    # If data are the same as previous cut, then loading data can be done
    LOAD_DATA_PREVIOUS = False
    filename = MOST_RECENT_CUT
    if os.path.exists(MOST_RECENT_CUT):
        try:
            # The with-block closes the file itself; no explicit close() is needed.
            with open(MOST_RECENT_CUT, 'rb') as fobject:
                PREV_CUT = pickle.load(fobject)
            if PREV_CUT == REFERENCE_RASTER_LAYER_COORD:
                LOAD_DATA_PREVIOUS = True
        except Exception as e:
            print("Cannot import %s" % MOST_RECENT_CUT)
            print(e)

    if not LOAD_DATA_PREVIOUS:
        with open(filename, 'wb') as fobject:
            pickle.dump(REFERENCE_RASTER_LAYER_COORD, fobject, protocol=2)
        SaveLayerDict = expLyr(SaveLayerDict)

    return (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS,
            Layer_is_uncut,
            REFERENCE_geotransform_obj, REFERENCE_RasterSize)


def
(main_path, path_in_raw, preproccessed_input_path, prj_path_output):
    st = time.time()
    data_type = "uint8"
    MOST_RECENT_CUT = main_path + prj_path_output + "/MOST_RECENT_CUT.pk"
    prepro_path = main_path + preproccessed_input_path
    org_data_path = main_path + path_in_raw
    p_ = org_data_path
    pi_ = org_data_path + "/vector_input_data/"
    NUTS3_vector_path = pi_ + "/NUTS3.shp"
    strd_raster_path_full = "%s/%s" %(org_data_path, "Population.tif")
    temp_path = "/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output_2/" + os.sep + "Temp"
    SoilSeal_path_full = "%s/%s" %(org_data_path, "_____ESM100m_final.tif")
    #p_ = "/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output/"
    sd = ""
    print(os.path.exists(p_))
    print(os.path.exists(pi_))
    fn = []
    NUTS3_feat_id_LIST = range(12000)
    (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS
            , Ref_layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize) = \
                load_reference_raster_lyr(NUTS3_vector_path, strd_raster_path_full, temp_path, NUTS3_feat_id_LIST
                             , MOST_RECENT_CUT)

    for f_ in os.listdir("%s/%s" %(p_, sd)):
        if f_.endswith(".tif"):
            fn.append("%s/%s/%s" %(p_, sd, f_))
            print(f_)
            if "g100_clc12_v18_5" in f_.lower():
                data, geotransform_obj = CRL.clip_raster_layer(fn[-1]
                                        , REFERENCE_geotransform_obj
                                        , REFERENCE_RasterSize)
                data2 = np.zeros((data.shape), dtype="f4")
                data3 = np.zeros_like(data2)
                data4 = np.ones_like(data2) * 10.0  # 1000 m2
                data2[data <= 21] = 10.0
                data3[data <= 6] = 10.0
                data3[data == 9] = 10.0
                data3[data == 10] = 10.0
                data3[data == 11] = 10.0
                data3[data == 20] = 10.0
                print(np.sum(data2))
                print(np.sum(data3))
                print(np.sum(data4))
            elif "ESM100m_final" in f_:
                data5, geotransform_obj = CRL.clip_raster_layer(fn[-1]
                                        , REFERENCE_geotransform_obj
                                        , REFERENCE_RasterSize)
                data5 *= 10.0/100.0  # in 1000 m2; data5 unit = %
                print(np.sum(data5))
    print(time.time() - st)
    ARR_NUTS_ID_NUMBER, geotransform_obj = SF.rrl("%s/%s_id_number.tif" %(prepro_path, "NUTS3"), data_type="uint16")
    print(time.time() - st)
    ARR_LAU2_ID_NUMBER, geotransform_obj = SF.rrl("%s/%s_id_number.tif" %(prepro_path, "LAU2"), data_type="uint32")
    print(time.time() - st)
    #num_fn = len(fn)
    num_fn = 4
    RES_Table_NUTS = np.zeros((np.max(ARR_NUTS_ID_NUMBER)+1, num_fn+1), "f4")
    RES_Table_LAU = np.zeros((np.max(ARR_LAU2_ID_NUMBER)+1, num_fn+1), "f4")
    RES_Table_NUTS[:,0] = np.arange(RES_Table_NUTS.shape[0])
    RES_Table_LAU[:,0] = np.arange(RES_Table_LAU.shape[0])
    header = ["DI"]
    #for i, f_ in enumerate(fn):
    for i in range(num_fn):
        #print(f_)
        if i == 0:
            data = data2.copy()
            fn = "dauersiedlungsraum"
        elif i == 1:
            data = data3.copy()
            fn = "dauersiedlungsraum_eng"
        elif i == 2:
            data = data4.copy()
            fn = "flaeche"
        else:
            data = data5.copy()
            fn = "ESM100m_final"
        print(fn)
        header.append(fn)
        print(np.sum(data))
        #header.append(f_.split("/")[-1])
        #data, geotransform_obj = SF.rrl(f_, data_type=data_type)
        TABLE_RESULTS_NUTS = CDM.CreateResultsTableperIndicator(data, ARR_NUTS_ID_NUMBER)
        print(time.time() - st)
        TABLE_RESULTS_LAU = CDM.CreateResultsTableperIndicator(data, ARR_LAU2_ID_NUMBER)
        del data
        print(time.time() - st)
        RES_Table_NUTS[:, i+1] = TABLE_RESULTS_NUTS[:,-1]
        RES_Table_LAU[:, i+1] = TABLE_RESULTS_LAU[:,-1]
        #break
    header = ",".join(header)
    np.savetxt("%s/%s.csv" %(prepro_path, "__TABLE_RES_LAU2"), np.round(RES_Table_LAU, 3), delimiter=",", header=header, comments="")
    np.savetxt("%s/%s.csv" %(prepro_path, "__TABLE_RES_NUTS"), np.round(RES_Table_NUTS, 3), delimiter=",", header=header, comments="")
    print("DONE")
main
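The loop above fills one column per indicator into result tables indexed by region id (NUTS3 / LAU2). CreateResultsTableperIndicator's implementation is not shown here; purely as an illustration of that kind of per-region aggregation, np.bincount computes totals per region id in one call (all values below are invented):

import numpy as np

# Hypothetical flattened rasters: one indicator value and one region id per cell.
indicator = np.array([10.0, 10.0, 0.0, 10.0, 0.0, 10.0])
region_id = np.array([1,    2,    2,   1,    3,   3])

# Per-region totals indexed by region id (row 0 is the unused id 0).
totals = np.bincount(region_id, weights=indicator)
print(totals)  # [ 0. 20. 10. 10.]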
flame-graph-tooltip-container.js
'use strict' const d3 = require('./d3.js') class
{ constructor ({ tooltip, tooltipHtmlContent, showDelay = 700, hideDelay = 200 }) { this.tooltip = tooltip this.ui = tooltip.ui this.tooltipHandler = null this.tooltipHtmlContent = tooltipHtmlContent this.d3TooltipHtml = tooltipHtmlContent.getTooltipD3() this.d3HiddenDiv = d3.select('body').select('.tooltipHiddenDiv', ':first-child') this.showDelay = showDelay this.hideDelay = hideDelay this.nodeData = null this.frameIsZoomed = false } show ({ nodeData, rect, pointerCoords, frameIsZoomed, wrapperNode, delay = null }) { // handling the timeout here because these calculations need to happen only when the tooltip gets actually displayed clearTimeout(this.tooltipHandler) // Cancel any pending highlight clearing callback e.g. from recent mouseout events this.tooltip.onHideCallback = null this.nodeData = nodeData this.tooltipHandler = setTimeout(() => { this.frameIsZoomed = frameIsZoomed const wrapperRect = wrapperNode.getBoundingClientRect() const offset = { x: wrapperRect.x - wrapperNode.scrollLeft, y: wrapperRect.y - wrapperNode.scrollTop - rect.height } // moving the tooltip html into the hidden div to get its size this.d3HiddenDiv.append(() => { const node = this.d3TooltipHtml.remove().node() return node }) this.tooltipHtmlContent.setNodeData(this.nodeData) const pointerPosition = { x: pointerCoords.x - rect.x, y: pointerCoords.y - rect.y } this.tooltip.show({ msg: this.d3TooltipHtml.node(), targetRect: rect, offset, pointerCoords: pointerPosition, outerRect: wrapperRect, showDelay: 0, verticalAlign: 'bottom' }) }, typeof delay === 'number' ? delay : this.showDelay) } hide (args = {}) { clearTimeout(this.tooltipHandler) this.tooltip.hide(Object.assign({ callback: () => { if (this.nodeData === this.ui.highlightedNode) this.ui.highlightNode(null) } }, args)) } } module.exports = FgTooltipContainer
FgTooltipContainer
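show() above debounces tooltip display: it schedules the real work after showDelay milliseconds and first clears any pending timer, so rapid mouse movement never surfaces a stale tooltip. The same pattern as a minimal Python sketch (threading.Timer stands in for setTimeout; the class name is invented):

import threading

class Debouncer:
    """Schedule an action after a delay; re-scheduling cancels the pending one."""
    def __init__(self, delay_seconds):
        self.delay = delay_seconds
        self._timer = None

    def schedule(self, action):
        self.cancel()  # mirrors clearTimeout(this.tooltipHandler)
        self._timer = threading.Timer(self.delay, action)
        self._timer.start()

    def cancel(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None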
extras_config_contexts_partial_update_responses.go
// Code generated by go-swagger; DO NOT EDIT. // Copyright 2020 The go-netbox Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package extras // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "io" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" "github.com/timsimmons/go-netbox/netbox/models" ) // ExtrasConfigContextsPartialUpdateReader is a Reader for the ExtrasConfigContextsPartialUpdate structure. type ExtrasConfigContextsPartialUpdateReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the received o. func (o *ExtrasConfigContextsPartialUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewExtrasConfigContextsPartialUpdateOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("unknown error", response, response.Code()) } } // NewExtrasConfigContextsPartialUpdateOK creates a ExtrasConfigContextsPartialUpdateOK with default headers values func
() *ExtrasConfigContextsPartialUpdateOK { return &ExtrasConfigContextsPartialUpdateOK{} } /*ExtrasConfigContextsPartialUpdateOK handles this case with default header values. ExtrasConfigContextsPartialUpdateOK extras config contexts partial update o k */ type ExtrasConfigContextsPartialUpdateOK struct { Payload *models.ConfigContext } func (o *ExtrasConfigContextsPartialUpdateOK) Error() string { return fmt.Sprintf("[PATCH /extras/config-contexts/{id}/][%d] extrasConfigContextsPartialUpdateOK %+v", 200, o.Payload) } func (o *ExtrasConfigContextsPartialUpdateOK) GetPayload() *models.ConfigContext { return o.Payload } func (o *ExtrasConfigContextsPartialUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.ConfigContext) // response payload if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } return nil }
NewExtrasConfigContextsPartialUpdateOK
ref.py
import numpy as np import pickle import h5py from scipy.misc import imread import os from pycocotools.coco import COCO from pycocotools import mask data_dir = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/images' ann_path = '/home/chuancen/CVResearch/HumanPoseTracking/PJDATA/COCO/annotations/person_keypoints_train2014.json' ref_dir = os.path.dirname(__file__) assert os.path.exists(data_dir) assert os.path.exists(ann_path) coco, img_ids, num_examples = None, None, None with open(ref_dir + '/valid_id', 'r') as f: valid_id = list(map(lambda x:int(x.strip()), f.readlines())) valid_id_set = set(valid_id) def init(): global coco, img_ids, num_examples ann_file = os.path.join(ann_path) coco = COCO(ann_file) img_ids = coco.getImgIds() num_examples = len(img_ids) # num_parts = 17 # part_mask = np.array([0,0,0,0,0,0,0,1,1,1,1,0,0,1,1,1,1]) # part_ref = {'ankle':[15,16],'knee':[13,14],'hip':[11,12], # 'wrist':[9,10],'elbow':[7,8],'shoulder':[5,6], # 'face':[0,1,2],'ears':[3,4]} # part_labels = ['nose','eye_l','eye_r','ear_l','ear_r', # 'sho_l','sho_r','elb_l','elb_r','wri_l','wri_r', # 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r'] # basic_order = ['sho_l','sho_r', 'nose', 'eye_l','eye_r','ear_l', # 'ear_r','elb_l','elb_r','wri_l','wri_r', # 'hip_l','hip_r','kne_l','kne_r','ank_l','ank_r'] # pairRef = [ # [1,2],[2,3],[1,3], # [6,8],[8,10],[12,14],[14,16], # [7,9],[9,11],[13,15],[15,17], # [6,7],[12,13],[6,12],[7,13] # ] # pairRef = np.array(pairRef) - 1 flipRef = [i-1 for i in [1,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16] ] # part_idx = {b:a for a, b in enumerate(part_labels)} # basic_order = [part_idx[i] for i in basic_order]
def image_path(idx): img_info = coco.loadImgs(img_ids[idx])[0] path = img_info['file_name'].split('_')[1] + '/' + img_info['file_name'] return os.path.join(data_dir, path) def load_image(idx): return imread(image_path(idx),mode='RGB') def num_objects(idx, anns=None, should_greater_than_1 = False): if anns is None: anns = get_anns(idx) return len(anns) def setup_val_split(opt = None): if coco is None: return [], [] tmp_idxs = [] for i in range(num_examples): if num_objects(i, None) > 0: tmp_idxs += [i] ref_idxs = np.array(tmp_idxs,dtype=int) #39935 images that # of ppl > 0 ### choose image_id from valid_id_set valid = {} train = [] for i in ref_idxs: if img_ids[i] in valid_id_set: valid[ img_ids[i] ]=i else: train.append(i) return np.array(train), np.array([valid[i] for i in valid_id if i in valid]) def get_anns(idx): ann_ids = coco.getAnnIds(imgIds=img_ids[idx]) tmp_ann = coco.loadAnns(ann_ids) # Filter tmp_ann for people with no keypoints annotated return [tmp_ann[i] for i in range(len(tmp_ann)) if tmp_ann[i]['num_keypoints'] > 0] def get_mask(idx): ann_ids = coco.getAnnIds(imgIds=img_ids[idx]) anns = coco.loadAnns(ann_ids) img = coco.loadImgs(img_ids[idx])[0] m = np.zeros((img['height'], img['width'])) for j in anns: if j['iscrowd']: rle = mask.frPyObjects(j['segmentation'], img['height'], img['width']) m += mask.decode(rle) return m < 0.5 def get_keypoints(idx, anns=None): if anns is None: anns = get_anns(idx) num_people = num_objects(idx, anns) kps = np.zeros((num_people, 17, 3)) for i in range(num_people): kps[i] = np.array(anns[i]['keypoints']).reshape([-1,3]) return kps
def initialize(opt): return
main2.rs
const INPUT_STRING : &str = "61697637962276641366442297247367117738114719863473648131982449728688116728695866572989524473392982963976411147683588415878214189996163533584547175794158118148724298832798898333399786561459152644144669959887341481968319172987357989785791366732849932788343772112176614723858474959919713855398876956427631354172668133549845585632211935573662181331613137869866693259374322169811683635325321597242889358147123358117774914653787371368574784376721652181792371635288376729784967526824915192526744935187989571347746222113625577963476141923187534658445615596987614385911513939292257263723518774888174635963254624769684533531443745729344341973746469326838186248448483587477563285867499956446218775232374383433921835993136463383628861115573142854358943291148766299653633195582135934544964657663198387794442443531964615169655243652696782443394639169687847463721585527947839992182415393199964893658322757634675274422993237955354185194868638454891442893935694454324235968155913963282642649968153284626154111478389914316765783434365458352785868895582488312334931317935669453447478936938533669921165437373741448378477391812779971528975478298688754939216421429251727555596481943322266289527996672856387648674166997731342558986575258793261986817177487197512282162964167151259485744835854547513341322647732662443512251886771887651614177679229984271191292374755915457372775856178539965131319568278252326242615151412772254257847413799811417287481321745372879513766235745347872632946776538173667371228977212143996391617974367923439923774388523845589769341351167311398787797583543434725374343611724379399566197432154146881344528319826434554239373666962546271299717743591225567564655511353255197516515213963862383762258959957474789718564758843367325794589886852413314713698911855183778978722558742329429867239261464773646389484318446574375323674136638452173815176732385468675215264736786242866295648997365412637499692817747937982628518926381939279935993712418938567488289246779458432179335139731952167527521377546376518126276"; fn main()
fn byte_to_int(input: u8) -> i32 {
    // ASCII digits are contiguous, so subtracting b'0' yields the numeric value.
    (input - b'0') as i32
}
{
    // No allocation needed: a &str can be viewed as bytes directly.
    let byte_array = INPUT_STRING.as_bytes();
    let array_len = byte_array.len();
    let half_length = array_len / 2;

    let mut sum: i32 = 0;
    for i in 0..array_len {
        // Compare each digit with the one halfway around the circular sequence.
        let match_byte_index = (i + half_length) % array_len;
        if byte_array[i] == byte_array[match_byte_index] {
            sum += byte_to_int(byte_array[i]);
        }
    }

    println!("Result is: {}", sum);
}
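The main body above pairs each digit with the digit halfway around the circular sequence and sums the ones that match. A compact Python restatement of the same logic, checked against two tiny inputs rather than the puzzle string:

def halfway_sum(digits: str) -> int:
    half = len(digits) // 2
    return sum(int(c) for i, c in enumerate(digits)
               if c == digits[(i + half) % len(digits)])

assert halfway_sum('1212') == 6  # every digit matches its opposite
assert halfway_sum('1221') == 0  # no digit matches its opposite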
microserviceMindmapContextMenu.spec.js
import sinon from 'sinon';
import React from 'react';
import chai from 'chai';
import { shallow } from 'enzyme';

import { MicroserviceMindmapContextMenu } from '../../../main/javascript/components/microserviceMindmapContextMenu';

describe('<MicroserviceMindmapContextMenu/>', function () {

  it('is hidden when contextMenuServiceId, contextMenuFromId and contextMenuToId are undefined', function () {
    const props = createProps();

    const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

    chai.expect(wrapper.find('nav').props().hidden).to.equal(true);
  });

  describe('when a user is logged in', function () {

    describe('for services', function () {

      it('displays Edit Service / Delete Service buttons when contextMenuServiceId is set', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuServiceId = 1337;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        chai.expect(wrapper.find('nav').props().hidden).to.be.undefined;
        chai.expect(wrapper.find('button').at(0).text()).to.equal('Edit Service');
        chai.expect(wrapper.find('button').at(1).text()).to.equal('Delete Service');
      });

      it('calls onAddProperty action when Edit Service button is clicked', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuServiceId = 1337;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        wrapper.find('button').at(0).simulate('click');

        sinon.assert.calledOnce(props.onAddProperty);
      });

      it('calls onDeleteService action when Delete Service button is clicked', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuServiceId = 1337;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        wrapper.find('button').at(1).simulate('click');

        sinon.assert.calledOnce(props.onDeleteService);
      });
    });

    describe('for relations', function () {

      it('displays Edit Link / Delete Link buttons when contextMenuFromId & contextMenuToId are set', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuFromId = 42;
        props.contextMenuToId = 43;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        chai.expect(wrapper.find('nav').props().hidden).to.be.undefined;
        chai.expect(wrapper.find('button').at(0).text()).to.equal('Delete Link');
        chai.expect(wrapper.find('button').at(1).text()).to.equal('Edit Link');
      });

      it('calls onDeleteLink action when Delete Link button is clicked', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuFromId = 42;
        props.contextMenuToId = 43;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        wrapper.find('button').at(0).simulate('click');

        sinon.assert.calledOnce(props.onDeleteLink);
      });

      it('calls onEditLink action when Edit Link button is clicked', function () {
        const props = createProps();
        props.loggedInUser = 'my_user_id';
        props.contextMenuFromId = 42;
        props.contextMenuToId = 43;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        wrapper.find('button').at(1).simulate('click');

        sinon.assert.calledOnce(props.onEditLink);
      });
    });
  });

  describe('when no user is logged in', function () {

    describe('for services', function () {

      it('displays Service Details button when contextMenuServiceId is set', function () {
        const props = createProps();
        props.contextMenuServiceId = 1337;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        chai.expect(wrapper.find('nav').props().hidden).to.be.undefined;
        chai.expect(wrapper.find('button').at(0).text()).to.equal('Service Details');
      });

      it('calls onShowService action when Service Details button is clicked', function () {
        const props = createProps();
        props.contextMenuServiceId = 1337;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        wrapper.find('button').at(0).simulate('click');

        sinon.assert.calledOnce(props.onShowService);
      });
    });

    describe('for relations', function () {

      it('displays nothing at all when contextMenuFromId & contextMenuToId are set', function () {
        const props = createProps();
        props.contextMenuFromId = 42;
        props.contextMenuToId = 43;

        const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

        chai.expect(wrapper.find('nav').props().hidden).to.be.true;
        chai.expect(wrapper.find('button').length).to.equal(0);
      });
    });
  });

  it('dispatches HIDE_SERVICE action', function () {
    const props = createProps();
    props.contextMenuServiceId = 1337;

    const wrapper = shallow(<MicroserviceMindmapContextMenu {...props}/>);

    chai.expect(wrapper.find('button').at(1).text()).to.equal('Hide Service');

    wrapper.find('button').at(1).simulate('click');

    sinon.assert.calledOnce(props.onHideService);
  });
});

function
() { return { contextMenuServiceId: undefined, contextMenuFromId: undefined, contextMenuToId: undefined, onAddProperty: sinon.spy(), onShowService: sinon.spy(), onDeleteService: sinon.spy(), onDeleteLink: sinon.spy(), onEditLink: sinon.spy(), onHideService: sinon.spy(), loggedInUser: undefined }; }
createProps
user_service.go
package service
import (
	"github.com/slack-bot-4all/slack-bot/src/model"
	"github.com/slack-bot-4all/slack-bot/src/repository"
)

// AddUser applies the business rules for adding a User to the db:
// the user is only persisted when both username and password are set.
func AddUser(u *model.User) error {
	if u.Username != "" && u.Password != "" {
		return repository.AddUser(u)
	}
	return nil
}
0204_count_primes.py
import math class
:
    def countPrimes(self, n):
        """
        :type n: int
        :rtype: int

        Sieve of Eratosthenes.
        For example, to count the primes below 20:
        first, 0 and 1 are not prime.
        2 is the first prime, so cross out every multiple of 2 below 20.
        The next remaining number after 2 is 3, the next prime; cross out all multiples of 3.
        The next remaining number is 5, the next prime; cross out its multiples, and so on.
        """
        if n < 2:
            return 0
        s = [1] * n
        s[0] = s[1] = 0
        for i in range(2, int(math.sqrt(n)) + 1):
            if s[i] == 1:
                s[i * i:n:i] = [0] * int((n - i * i - 1) / i + 1)
        return sum(s)


def test_count_primes():
    s = Solution()
    assert 4 == s.countPrimes(10)
Solution
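As a quick check of the sieve walk-through in the docstring, these are the array states for n = 10 (assuming the Solution class above):

# init, 0 and 1 struck out:        [0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
# strike multiples of 2 (from 4):  [0, 0, 1, 1, 0, 1, 0, 1, 0, 1]
# strike multiples of 3 (from 9):  [0, 0, 1, 1, 0, 1, 0, 1, 0, 0]
# survivors are 2, 3, 5 and 7:
assert Solution().countPrimes(10) == 4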
config.ts
import * as path from 'path'; import { prettyPath } from '@ionic/cli-framework/utils/format'; import { readFile, writeFile } from '@ionic/utils-fs'; import chalk from 'chalk'; import * as Debug from 'debug'; import * as et from 'elementtree'; import { ProjectIntegration, ResourcesPlatform } from '../../../definitions'; import { FatalException } from '../../errors'; import { shortid } from '../../utils/uuid'; const debug = Debug('ionic:lib:integrations:cordova:config'); export interface PlatformEngine { name: string; spec: string; [key: string]: string; } export class
{ protected _doc?: et.ElementTree; protected _sessionid?: string; protected saving = false; constructor(readonly filePath: string) {} get doc() { if (!this._doc) { throw new Error('No doc loaded.'); } return this._doc; } get sessionid() { if (!this._sessionid) { throw new Error('No doc loaded.'); } return this._sessionid; } static async load(filePath: string): Promise<ConfigXml> { if (!filePath) { throw new Error('Must supply file path.'); } const conf = new ConfigXml(filePath); await conf.reload(); return conf; } async reload(): Promise<void> { const configFileContents = await readFile(this.filePath, { encoding: 'utf8' }); if (!configFileContents) { throw new Error(`Cannot load empty config.xml file.`); } try { this._doc = et.parse(configFileContents); this._sessionid = shortid(); } catch (e) { throw new Error(`Cannot parse config.xml file: ${e.stack ? e.stack : e}`); } } async save(): Promise<void> { if (!this.saving) { this.saving = true; await writeFile(this.filePath, this.write(), { encoding: 'utf8' }); this.saving = false; } } setName(name: string) { const root = this.doc.getroot(); let nameNode = root.find('name'); if (!nameNode) { nameNode = et.SubElement(root, 'name', {}); } nameNode.text = name; } setBundleId(bundleId: string) { const root = this.doc.getroot(); root.set('id', bundleId); } getBundleId() { const root = this.doc.getroot(); return root.get('id'); } /** * Update config.xml content src to be a dev server url. As part of this * backup the original content src for a reset to occur at a later time. */ writeContentSrc(newSrc: string) { const root = this.doc.getroot(); let contentElement = root.find('content'); if (!contentElement) { contentElement = et.SubElement(root, 'content', { src: 'index.html' }); } contentElement.set('original-src', contentElement.get('src')); contentElement.set('src', newSrc); let navElement = root.find(`allow-navigation[@href='${newSrc}']`); if (!navElement) { navElement = et.SubElement(root, 'allow-navigation', { sessionid: this.sessionid, href: newSrc }); } } /** * Set config.xml src url back to its original url */ resetContentSrc() { const root = this.doc.getroot(); let contentElement = root.find('content'); if (!contentElement) { contentElement = et.SubElement(root, 'content', { src: 'index.html' }); } const originalSrc = contentElement.get('original-src'); if (originalSrc) { contentElement.set('src', originalSrc); delete contentElement.attrib['original-src']; } const navElements = root.findall(`allow-navigation[@sessionid='${this.sessionid}']`); for (const navElement of navElements) { root.remove(navElement); } } getPreference(prefName: string): string | undefined { const root = this.doc.getroot(); const preferenceElement = root.find(`preference[@name='${prefName}']`); if (!preferenceElement) { return undefined; } const value = preferenceElement.get('value'); if (!value) { return undefined; } return value; } getProjectInfo(): { id: string; name: string; version: string; } { const root = this.doc.getroot(); let id = root.get('id'); if (!id) { id = ''; } let version = root.get('version'); if (!version) { version = ''; } let nameElement = root.find('name'); if (!nameElement) { nameElement = et.SubElement(root, 'name', {}); } if (!nameElement.text) { nameElement.text = 'MyApp'; } const name = nameElement.text.toString(); return { id, name, version }; } getPlatformEngines(): PlatformEngine[] { const root = this.doc.getroot(); const engines = root.findall('engine'); return engines.map(engine => this.engineElementToPlatformEngine(engine)); } 
getPlatformEngine(platform: string): PlatformEngine | undefined { const root = this.doc.getroot(); const engine = root.find(`engine[@name='${platform}']`); if (!engine) { return undefined; } return this.engineElementToPlatformEngine(engine); } async ensurePlatformImages(platform: string, resourcesPlatform: ResourcesPlatform) { const root = this.doc.getroot(); const orientation = this.getPreference('Orientation') || 'default'; for (const imgName in resourcesPlatform) { const imgType = resourcesPlatform[imgName]; let platformElement = root.find(`platform[@name='${platform}']`); if (!platformElement) { platformElement = et.SubElement(root, 'platform', { name: platform }); } const images = imgType.images.filter(img => orientation === 'default' || typeof img.orientation === 'undefined' || img.orientation === orientation); for (const image of images) { // We use forward slashes, (not path.join) here to provide // cross-platform compatibility for paths. const imgPath = ['resources', platform, imgType.nodeName, image.name].join('/'); // TODO: hard-coded 'resources' dir let imgElement = platformElement.find(`${imgType.nodeName}[@src='${imgPath}']`); if (!imgElement) { imgElement = platformElement.find(`${imgType.nodeName}[@src='${imgPath.split('/').join('\\')}']`); } if (!imgElement) { const attrs: { [key: string]: string } = {}; for (const attr of imgType.nodeAttributes) { let v = (image as any)[attr]; // TODO if (attr === 'src') { v = imgPath; } attrs[attr] = v; } imgElement = et.SubElement(platformElement, imgType.nodeName, attrs); } imgElement.set('src', imgPath); } } } async ensureSplashScreenPreferences() { const root = this.doc.getroot(); let splashScreenPrefElement = root.find(`preference[@name='SplashScreen']`); if (!splashScreenPrefElement) { splashScreenPrefElement = et.SubElement(root, 'preference', { name: 'SplashScreen', value: 'screen' }); } let splashShowOnlyFirstTimePrefElement = root.find(`preference[@name='SplashShowOnlyFirstTime']`); if (!splashShowOnlyFirstTimePrefElement) { splashShowOnlyFirstTimePrefElement = et.SubElement(root, 'preference', { name: 'SplashShowOnlyFirstTime', value: 'false' }); } let splashScreenDelayPrefElement = root.find(`preference[@name='SplashScreenDelay']`); if (!splashScreenDelayPrefElement) { splashScreenDelayPrefElement = et.SubElement(root, 'preference', { name: 'SplashScreenDelay', value: '3000' }); } } protected write(): string { // Cordova hard codes an indentation of 4 spaces, so we'll follow. const contents = this.doc.write({ indent: 4 }); return contents; } protected engineElementToPlatformEngine(engine: et.Element): PlatformEngine { const name = engine.get('name'); const spec = engine.get('spec'); return { name: name ? name : '', spec: spec ? spec : '', ...engine.attrib }; } } export async function loadConfigXml(integration: Required<ProjectIntegration>): Promise<ConfigXml> { const filePath = path.resolve(integration.root, 'config.xml'); debug(`Using config.xml: ${filePath}`); try { return await ConfigXml.load(filePath); } catch (e) { const msg = e.code === 'ENOENT' ? `Cordova ${chalk.bold('config.xml')} file not found.\n\nYou can re-add the Cordova integration with the following command: ${chalk.green('ionic integrations enable cordova --add')}` : chalk.red(e.stack ? e.stack : e); throw new FatalException( `Cannot load ${chalk.bold(prettyPath(filePath))}\n` + `${msg}` ); } }
ConfigXml
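writeContentSrc and resetContentSrc above implement a backup-and-restore of the content element's src attribute via an original-src attribute. The same round-trip can be sketched with Python's xml.etree.ElementTree (an illustration only; the dev-server URL is invented):

import xml.etree.ElementTree as ET

doc = ET.fromstring('<widget><content src="index.html"/></widget>')
content = doc.find('content')

# writeContentSrc: remember the original src, then point at the dev server.
content.set('original-src', content.get('src'))
content.set('src', 'http://localhost:8100')

# resetContentSrc: restore the original src and drop the backup attribute.
content.set('src', content.attrib.pop('original-src'))
assert content.get('src') == 'index.html'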
0009_movie_poster_thumbnail.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2018-07-08 17:37 from __future__ import unicode_literals
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('reviews', '0008_auto_20180623_2009'), ] operations = [ migrations.AddField( model_name='movie', name='poster_thumbnail', field=models.ImageField(blank=True, help_text='Upload the poster thumbnail', null=True, upload_to='movie_posters/thumbnails/'), ), ]
package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os.path import shutil import sys import tempfile import spack.util.environment class Octave(AutotoolsPackage, GNUMirrorPackage): """GNU Octave is a high-level language, primarily intended for numerical computations. It provides a convenient command line interface for solving linear and nonlinear problems numerically, and for performing other numerical experiments using a language that is mostly compatible with Matlab. It may also be used as a batch-oriented language. """ homepage = "https://www.gnu.org/software/octave/" gnu_mirror_path = "octave/octave-4.0.0.tar.gz" maintainers = ['mtmiller'] extendable = True version('5.1.0', sha256='e36b1124cac27c7caa51cc57de408c31676d5f0096349b4d50b57bfe1bcd7495') version('4.4.1', sha256='09fbd0f212f4ef21e53f1d9c41cf30ce3d7f9450fb44911601e21ed64c67ae97') version('4.4.0', sha256='72f846379fcec7e813d46adcbacd069d72c4f4d8f6003bcd92c3513aafcd6e96') version('4.2.2', sha256='77b84395d8e7728a1ab223058fe5e92dc38c03bc13f7358e6533aab36f76726e') version('4.2.1', sha256='80c28f6398576b50faca0e602defb9598d6f7308b0903724442c2a35a605333b') version('4.2.0', sha256='443ba73782f3531c94bcf016f2f0362a58e186ddb8269af7dcce973562795567') version('4.0.2', sha256='39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1') version('4.0.0', sha256='4c7ee0957f5dd877e3feb9dfe07ad5f39b311f9373932f0d2a289dc97cca3280') # patches # see https://savannah.gnu.org/bugs/?50234 patch('patch_4.2.1_inline.diff', when='@4.2.1') # Variants variant('readline', default=True) variant('arpack', default=False) variant('curl', default=False) variant('fftw', default=False) variant('fltk', default=False) variant('fontconfig', default=False) variant('freetype', default=False) variant('glpk', default=False) variant('gl2ps', default=False) variant('gnuplot', default=False) variant('magick', default=False) variant('hdf5', default=False) variant('jdk', default=False) variant('llvm', default=False) variant('opengl', default=False) variant('qhull', default=False) variant('qrupdate', default=False) variant('qscintilla', default=False) variant('qt', default=False) variant('suitesparse', default=False) variant('zlib', default=False) # Required dependencies depends_on('blas') depends_on('lapack') # Octave does not configure with sed from darwin: depends_on('sed', when=sys.platform == 'darwin', type='build') depends_on('pcre') depends_on('pkgconfig', type='build') # Strongly recommended dependencies depends_on('readline', when='+readline') # Optional dependencies depends_on('arpack-ng', when='+arpack') depends_on('curl', when='+curl') depends_on('fftw', when='+fftw') depends_on('fltk', when='+fltk') depends_on('fontconfig', when='+fontconfig') depends_on('freetype', when='+freetype') depends_on('glpk', when='+glpk') depends_on('gl2ps', when='+gl2ps') depends_on('gnuplot', when='+gnuplot') depends_on('imagemagick', when='+magick') depends_on('hdf5', when='+hdf5') depends_on('java', when='+jdk') # TODO: requires Java 6 ? 
depends_on('llvm', when='+llvm') # depends_on('opengl', when='+opengl') # TODO: add package depends_on('qhull', when='+qhull') depends_on('qrupdate', when='+qrupdate') # depends_on('qscintilla', when='+qscintilla) # TODO: add package depends_on('qt+opengl', when='+qt') depends_on('suite-sparse', when='+suitesparse') depends_on('zlib', when='+zlib') def patch(self): # Filter mkoctfile.in.cc to use underlying compilers and not # Spack compiler wrappers. We are patching the template file # and not mkoctfile.cc since the latter is generated as part # of the build.
@run_after('install') @on_package_attributes(run_tests=True) def check_mkoctfile_works_outside_of_build_env(self): # Check that mkoctfile is properly configured and can compile # Octave extensions outside of the build env mkoctfile = Executable(os.path.join(self.prefix, 'bin', 'mkoctfile')) helloworld_cc = os.path.join( os.path.dirname(__file__), 'helloworld.cc' ) tmp_dir = tempfile.mkdtemp() shutil.copy(helloworld_cc, tmp_dir) # We need to unset these variables since we are still within # Spack's build environment when running tests vars_to_unset = ['CC', 'CXX', 'F77', 'FC'] with spack.util.environment.preserve_environment(*vars_to_unset): # Delete temporarily the environment variables that point # to Spack compiler wrappers for v in vars_to_unset: del os.environ[v] # Check that mkoctfile outputs the expected value for CC cc = mkoctfile('-p', 'CC', output=str) msg = "mkoctfile didn't output the expected CC compiler" assert self.compiler.cc in cc, msg # Try to compile an Octave extension shutil.copy(helloworld_cc, tmp_dir) with working_dir(tmp_dir): mkoctfile('helloworld.cc') def configure_args(self): # See # https://github.com/macports/macports-ports/blob/master/math/octave/ # https://github.com/Homebrew/homebrew-science/blob/master/octave.rb spec = self.spec config_args = [] # Required dependencies config_args.extend([ "--with-blas=%s" % spec['blas'].libs.ld_flags, "--with-lapack=%s" % spec['lapack'].libs.ld_flags ]) # Strongly recommended dependencies if '+readline' in spec: config_args.append('--enable-readline') else: config_args.append('--disable-readline') # Optional dependencies if '+arpack' in spec: sa = spec['arpack-ng'] config_args.extend([ "--with-arpack-includedir=%s" % sa.prefix.include, "--with-arpack-libdir=%s" % sa.prefix.lib ]) else: config_args.append("--without-arpack") if '+curl' in spec: config_args.extend([ "--with-curl-includedir=%s" % spec['curl'].prefix.include, "--with-curl-libdir=%s" % spec['curl'].prefix.lib ]) else: config_args.append("--without-curl") if '+fftw' in spec: config_args.extend([ "--with-fftw3-includedir=%s" % spec['fftw'].prefix.include, "--with-fftw3-libdir=%s" % spec['fftw'].prefix.lib, "--with-fftw3f-includedir=%s" % spec['fftw'].prefix.include, "--with-fftw3f-libdir=%s" % spec['fftw'].prefix.lib ]) else: config_args.extend([ "--without-fftw3", "--without-fftw3f" ]) if '+fltk' in spec: config_args.extend([ "--with-fltk-prefix=%s" % spec['fltk'].prefix, "--with-fltk-exec-prefix=%s" % spec['fltk'].prefix ]) else: config_args.append("--without-fltk") if '+glpk' in spec: config_args.extend([ "--with-glpk-includedir=%s" % spec['glpk'].prefix.include, "--with-glpk-libdir=%s" % spec['glpk'].prefix.lib ]) else: config_args.append("--without-glpk") if '+magick' in spec: config_args.append("--with-magick=%s" % spec['imagemagick'].prefix.lib) else: config_args.append("--without-magick") if '+hdf5' in spec: config_args.extend([ "--with-hdf5-includedir=%s" % spec['hdf5'].prefix.include, "--with-hdf5-libdir=%s" % spec['hdf5'].prefix.lib ]) else: config_args.append("--without-hdf5") if '+jdk' in spec: config_args.extend([ "--with-java-homedir=%s" % spec['java'].home, "--with-java-includedir=%s" % spec['java'].home.include, "--with-java-libdir=%s" % spec['java'].libs.directories[0] ]) else: config_args.append("--disable-java") if '~opengl' in spec: config_args.extend([ "--without-opengl", "--without-framework-opengl" ]) # TODO: opengl dependency and package is missing? 
if '+qhull' in spec: config_args.extend([ "--with-qhull-includedir=%s" % spec['qhull'].prefix.include, "--with-qhull-libdir=%s" % spec['qhull'].prefix.lib ]) else: config_args.append("--without-qhull") if '+qrupdate' in spec: config_args.extend([ "--with-qrupdate-includedir=%s" % spec['qrupdate'].prefix.include, "--with-qrupdate-libdir=%s" % spec['qrupdate'].prefix.lib ]) else: config_args.append("--without-qrupdate") if '+zlib' in spec: config_args.extend([ "--with-z-includedir=%s" % spec['zlib'].prefix.include, "--with-z-libdir=%s" % spec['zlib'].prefix.lib ]) else: config_args.append("--without-z") return config_args # ======================================================================== # Set up environment to make install easy for Octave extensions. # ======================================================================== def setup_dependent_package(self, module, dependent_spec): """Called before Octave modules' install() methods. In most cases, extensions will only need to have one line: octave('--eval', 'pkg install %s' % self.stage.archive_file) """ # Octave extension builds can have a global Octave executable function module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
mkoctfile_in = os.path.join( self.stage.source_path, 'src', 'mkoctfile.in.cc' ) quote = lambda s: '"' + s + '"' entries_to_patch = { r'%OCTAVE_CONF_MKOCTFILE_CC%': quote(self.compiler.cc), r'%OCTAVE_CONF_MKOCTFILE_CXX%': quote(self.compiler.cxx), r'%OCTAVE_CONF_MKOCTFILE_F77%': quote(self.compiler.f77), r'%OCTAVE_CONF_MKOCTFILE_DL_LD%': quote(self.compiler.cxx), r'%OCTAVE_CONF_MKOCTFILE_LD_CXX%': quote(self.compiler.cxx) } for pattern, subst in entries_to_patch.items(): filter_file(pattern, subst, mkoctfile_in)
util.py
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
#         Feng chen <[email protected]>
#         Yi Wang <[email protected]>
#         Chong peng <[email protected]>
# Date:   October 20, 2011


"""
 This is the util module which provides some helper functions.
"""

from __future__ import absolute_import
from __future__ import print_function

import ast
import errno
import fcntl
import hashlib
import inspect
import json
import os
import signal
import string
import subprocess
import sys
import zipfile


_IN_PY3 = sys.version_info[0] == 3

# In python 2, cPickle is much faster than pickle, but in python 3, pickle is
# reimplemented in C extension and then the standalone cPickle is removed.
if _IN_PY3:
    import pickle  # pylint: disable=unused-import
else:
    # pyright: reportMissingImports=false
    import cPickle as pickle  # pylint: disable=import-error, unused-import


def md5sum_bytes(content):
    """Calculate md5sum of a byte string."""
    assert isinstance(content, bytes), 'Invalid type %s' % type(content)
    m = hashlib.md5()
    m.update(content)
    return m.hexdigest()


def md5sum_str(content):
    """Calculate md5sum of a string."""
    assert isinstance(content, str), 'Invalid type %s' % type(content)
    return md5sum_bytes(content.encode('utf-8'))


def md5sum_file(file_name):
    """Calculate md5sum of a file."""
    with open(file_name, 'rb') as f:
        digest = md5sum_bytes(f.read())
    return digest


def md5sum(obj):
    """Calculate md5sum of a string-like object"""
    if isinstance(obj, bytes):
        return md5sum_bytes(obj)
    if isinstance(obj, str):
        return md5sum_str(obj)
    raise TypeError('Invalid type %s' % type(obj))


def lock_file(filename):
    """lock file."""
    try:
        fd = os.open(filename, os.O_CREAT | os.O_RDWR)
        old_fd_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_fd_flags | fcntl.FD_CLOEXEC)
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fd, 0
    except IOError as ex_value:
        return -1, ex_value.errno


def unlock_file(fd):
    """unlock file."""
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
    except IOError:
        pass


def var_to_list(var):
    """Normalize a singular item or a list to a list."""
    if isinstance(var, list):
        return var[:]
    if var is None:
        return []
    return [var]


def var_to_list_or_none(var):
    """Similar to var_to_list but keeps the None unchanged"""
    if var is None:
        return var
    return var_to_list(var)


def stable_unique(seq):
    """unique a seq and keep its original order"""
    # See http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]


def to_string(text):
    if text is None:
        return text
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode('utf-8')
    raise TypeError('Unknown type %s' % type(text))


def get_cwd():
    """get_cwd

    os.getcwd() doesn't work because it will follow symbolic links.
    os.environ.get('PWD') doesn't work because it won't reflect os.chdir().
    So in practice we simply use system('pwd') to get current working directory.
    """
    p = subprocess.Popen(['pwd'], stdout=subprocess.PIPE, shell=True)
    return to_string(p.communicate()[0].strip())


def find_file_bottom_up(name, from_dir=None):
    """Find the specified file/dir from from_dir bottom up until found or failed.
       Returns abspath if found, or empty if failed.
""" if from_dir is None: from_dir = get_cwd() finding_dir = os.path.abspath(from_dir) while True: path = os.path.join(finding_dir, name) if os.path.exists(path): return path if finding_dir == '/': break finding_dir = os.path.dirname(finding_dir) return '' def path_under_dir(path, dir): """Check whether a path is under the dir. Both path and dir must be normalized, and they must be both relative or relative path. """ return dir == '.' or path == dir or path.startswith(dir) and path[len(dir)] == os.path.sep def mkdir_p(path): """Make directory if it does not exist.""" try: if not os.path.isdir(path): os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def _echo(stdout, stderr): """Echo messages to stdout and stderr.""" if stdout: sys.stdout.write(stdout) if stderr: sys.stderr.write(stderr) def shell(cmd, env=None): if isinstance(cmd, list): cmdline = ' '.join(cmd) else: cmdline = cmd p = subprocess.Popen(cmdline, env=env, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) stdout, stderr = p.communicate() if p.returncode: if p.returncode != -signal.SIGINT: # Error _echo(stdout, stderr) else: # Warnings _echo(stdout, stderr) return p.returncode def run_command(args, **kwargs): """Run a command without echo, return returncode, stdout and stderr (always as string).""" kwargs.setdefault('stdout', subprocess.PIPE) kwargs.setdefault('stderr', subprocess.PIPE) if _IN_PY3: r = subprocess.run(args, universal_newlines=True, **kwargs) return r.returncode, r.stdout, r.stderr else: p = subprocess.Popen(args, universal_newlines=True, **kwargs) stdout, stderr = p.communicate() return p.returncode, stdout, stderr def
(build_dir): revision = url = 'unknown' path = os.path.join(build_dir, 'scm.json') if os.path.exists(path): with open(path) as f: scm = json.load(f) revision, url = scm['revision'], scm['url'] return revision, url def environ_add_path(env, key, path): """Add path to PATH link environments, such as PATH, LD_LIBRARY_PATH, etc""" old = env.get(key) if old: env[key] = path + ':' + old else: env[key] = path def cpu_count(): try: import multiprocessing # pylint: disable=import-outside-toplevel return multiprocessing.cpu_count() except ImportError: return int(os.sysconf('SC_NPROCESSORS_ONLN')) _TRANS_TABLE = (str if _IN_PY3 else string).maketrans(',-/:.+*', '_______') def regular_variable_name(name): """convert some name to a valid identifier name""" return name.translate(_TRANS_TABLE) # Some python 2/3 compatibility helpers. if _IN_PY3: def iteritems(d): return d.items() def itervalues(d): return d.values() else: def iteritems(d): return d.iteritems() def itervalues(d): return d.itervalues() def exec_file_content(filename, content, globals, locals): """Execute code content as filename""" # pylint: disable=exec-used exec(compile(content, filename, 'exec'), globals, locals) def exec_file(filename, globals, locals): """Same as python2's execfile builtin function, but python3 has no execfile""" # pylint: disable=exec-used with open(filename, 'rb') as f: exec_file_content(filename, f.read(), globals, locals) def eval_file(filepath): """Load a value from file. Safely evaluate an expression node or a string containing a Python literal or container display. The string or node provided may only consist of the following Python literal structures: strings, bytes, numbers, tuples, lists, dicts, sets, booleans, and None. """ return ast.literal_eval(open(filepath).read()) def source_location(filename): """Return source location of current call stack from filename""" full_filename = filename lineno = 1 # See https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() while frame: if frame.f_code.co_filename.endswith(filename): full_filename = frame.f_code.co_filename lineno = frame.f_lineno break frame = frame.f_back return '%s:%s' % (full_filename, lineno) def calling_source_location(skip=0): """Return source location of current call stack, skip specified levels (not include itself).""" skip += 1 # This function itself is excluded. skipped = 0 frame = inspect.currentframe() while frame: if skipped == skip: return '%s:%s' % (frame.f_code.co_filename, frame.f_lineno) frame = frame.f_back skipped += 1 raise ValueError('Invalid value "%d" for "skip"' % skip) def parse_command_line(argv): """Simple command line parsing. options can only be passed as the form of `--name=value`, any other arguments are treated as normal arguments. 
Returns: tuple(options: dict, args: list) """ options = {} args = [] for arg in argv: if arg.startswith('--'): pos = arg.find('=') if pos < 0: args.append(arg) continue name = arg[2:pos] value = arg[pos+1:] options[name] = value else: args.append(arg) return options, args def open_zip_file_for_write(filename, compression_level): """Open a zip file for writing with specified compression level.""" compression = zipfile.ZIP_DEFLATED if sys.version_info.major < 3 or sys.version_info.major == 3 and sys.version_info.minor < 7: if compression_level == "0": compression = zipfile.ZIP_STORED return zipfile.ZipFile(filename, 'w', compression, allowZip64=True) # pylint: disable=unexpected-keyword-arg return zipfile.ZipFile(filename, 'w', compression, compresslevel=int(compression_level), allowZip64=True)
load_scm
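parse_command_line above treats only --name=value tokens as options; everything else, including a bare --flag without '=', stays in the positional args. A quick usage sketch (the argv values are invented):

options, args = parse_command_line(['--jobs=4', 'build', '--verbose', 'src/app'])
assert options == {'jobs': '4'}
assert args == ['build', '--verbose', 'src/app']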
commit.rs
//! Commits to a Tendermint blockchain use crate::block::commit_sig::CommitSig; use crate::block::{Height, Id}; use crate::serializers; use serde::{Deserialize, Serialize}; use std::{ops::Deref, slice}; /// Commit contains the justification (ie. a set of signatures) that a block was committed by a set /// of validators. /// TODO: Update links below! /// <https://github.com/tendermint/tendermint/blob/51dc810d041eaac78320adc6d53ad8b160b06601/types/block.go#L486-L502> /// <https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#lastcommit> #[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] pub struct Commit { /// Block height pub height: Height, /// Round #[serde(with = "serializers::from_str")] pub round: u64, /// Block ID pub block_id: Id, /// Signatures pub signatures: CommitSigs, } /// CommitSigs which certify that a block is valid
impl CommitSigs {
    /// Create a new CommitSig collection
    pub fn new<I>(into_commit_sigs: I) -> Self
    where
        I: Into<Vec<CommitSig>>,
    {
        Self(into_commit_sigs.into())
    }

    /// Convert this collection of CommitSigs into a vector
    pub fn into_vec(self) -> Vec<CommitSig> {
        self.0
    }

    /// Iterate over the CommitSigs in the collection
    pub fn iter(&self) -> slice::Iter<'_, CommitSig> {
        self.0.iter()
    }
}

impl AsRef<[CommitSig]> for CommitSigs {
    fn as_ref(&self) -> &[CommitSig] {
        self.0.as_slice()
    }
}

impl Deref for CommitSigs {
    type Target = [CommitSig];

    fn deref(&self) -> &[CommitSig] {
        self.as_ref()
    }
}

impl PartialEq for CommitSigs {
    fn eq(&self, other: &Self) -> bool {
        // Note: this is used for asserts in tests:
        self.0.clone().into_iter().eq(other.0.clone().into_iter())
    }
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct CommitSigs(Vec<CommitSig>);
icon.module.ts
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';

import { IconComponent } from './icon.component';

/**
 * The Icon module
 *
 * Contains the icon component
 */
@NgModule({
    declarations: [
        IconComponent
    ],
    imports: [
        CommonModule
    ],
    exports: [IconComponent]
})
export class
{ }
IconModule
api_op_GetGroups.go
// Code generated by smithy-go-codegen DO NOT EDIT.

package xray

import (
	"context"
	"fmt"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/xray/types"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Retrieves all active group details.
func (c *Client) GetGroups(ctx context.Context, params *GetGroupsInput, optFns ...func(*Options)) (*GetGroupsOutput, error) {
	if params == nil {
		params = &GetGroupsInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "GetGroups", params, optFns, c.addOperationGetGroupsMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*GetGroupsOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type GetGroupsInput struct {

	// Pagination token.
	NextToken *string

	noSmithyDocumentSerde
}

type GetGroupsOutput struct {

	// The collection of all active groups.
	Groups []types.GroupSummary

	// Pagination token.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}

func (c *Client) addOperationGetGroupsMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsRestjson1_serializeOpGetGroups{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetGroups{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetGroups(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}

// GetGroupsAPIClient is a client that implements the GetGroups operation.
type GetGroupsAPIClient interface {
	GetGroups(context.Context, *GetGroupsInput, ...func(*Options)) (*GetGroupsOutput, error)
}

var _ GetGroupsAPIClient = (*Client)(nil)

// GetGroupsPaginatorOptions is the paginator options for GetGroups
type GetGroupsPaginatorOptions struct {
	// Set to true if pagination should stop if the service returns a pagination
	// token that matches the most recent token provided to the service.
	StopOnDuplicateToken bool
}

// GetGroupsPaginator is a paginator for GetGroups
type GetGroupsPaginator struct {
	options   GetGroupsPaginatorOptions
	client    GetGroupsAPIClient
	params    *GetGroupsInput
	nextToken *string
	firstPage bool
}

// NewGetGroupsPaginator returns a new GetGroupsPaginator
func NewGetGroupsPaginator(client GetGroupsAPIClient, params *GetGroupsInput, optFns ...func(*GetGroupsPaginatorOptions)) *GetGroupsPaginator
// HasMorePages returns a boolean indicating whether more pages are available
func (p *GetGroupsPaginator) HasMorePages() bool {
	return p.firstPage || p.nextToken != nil
}

// NextPage retrieves the next GetGroups page.
func (p *GetGroupsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*GetGroupsOutput, error) {
	if !p.HasMorePages() {
		return nil, fmt.Errorf("no more pages available")
	}

	params := *p.params
	params.NextToken = p.nextToken

	result, err := p.client.GetGroups(ctx, &params, optFns...)
	if err != nil {
		return nil, err
	}

	p.firstPage = false
	prevToken := p.nextToken
	p.nextToken = result.NextToken

	if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
		p.nextToken = nil
	}

	return result, nil
}

func newServiceMetadataMiddleware_opGetGroups(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "xray",
		OperationName: "GetGroups",
	}
}
{
	if params == nil {
		params = &GetGroupsInput{}
	}

	options := GetGroupsPaginatorOptions{}
	for _, fn := range optFns {
		fn(&options)
	}

	return &GetGroupsPaginator{
		options:   options,
		client:    client,
		params:    params,
		firstPage: true,
	}
}
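For orientation, a minimal sketch of how this paginator is typically driven. The helper name listAllGroups is hypothetical; only NewGetGroupsPaginator, HasMorePages, NextPage, and the option and field names come from the generated code above:

// Hypothetical helper that drains every GetGroups page. It relies only on the
// types defined in this file (GetGroupsAPIClient, GetGroupsInput, etc.); the
// concrete client (e.g. *xray.Client) is assumed to be constructed elsewhere.
func listAllGroups(ctx context.Context, client GetGroupsAPIClient) ([]types.GroupSummary, error) {
	var all []types.GroupSummary
	paginator := NewGetGroupsPaginator(client, &GetGroupsInput{}, func(o *GetGroupsPaginatorOptions) {
		// Defensive stop if the service ever echoes the same token back.
		o.StopOnDuplicateToken = true
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Groups...)
	}
	return all, nil
}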
search-in-workspace-frontend-module.ts
/*
 * Copyright (C) 2017-2018 Ericsson and others.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 */

import { ContainerModule } from "inversify";
import { SearchInWorkspaceService, SearchInWorkspaceClientImpl } from './search-in-workspace-service';
import { SearchInWorkspaceServer } from '../common/search-in-workspace-interface';
import { WebSocketConnectionProvider, KeybindingContribution } from '@theia/core/lib/browser';
import { QuickSearchInWorkspace, SearchInWorkspaceContributions } from './quick-search-in-workspace';
import { CommandContribution, MenuContribution } from "@theia/core";

export default new ContainerModule(bind => {
    bind(QuickSearchInWorkspace).toSelf().inSingletonScope();
    bind(CommandContribution).to(SearchInWorkspaceContributions).inSingletonScope();
    bind(MenuContribution).to(SearchInWorkspaceContributions).inSingletonScope();
    bind(KeybindingContribution).to(SearchInWorkspaceContributions).inSingletonScope();

    // The object that gets notified of search results.
    bind(SearchInWorkspaceClientImpl).toSelf().inSingletonScope();

    bind(SearchInWorkspaceService).toSelf().inSingletonScope();

    // The object to call methods on the backend.
    bind(SearchInWorkspaceServer).toDynamicValue(ctx => {
        const client = ctx.container.get(SearchInWorkspaceClientImpl);
        return WebSocketConnectionProvider.createProxy(ctx.container, '/search-in-workspace', client);
    }).inSingletonScope();
});
f64.rs
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Operations and constants for 64-bits floats (`f64` type) #![stable(feature = "rust1", since = "1.0.0")] #![allow(missing_docs)] #![doc(primitive = "f64")] use prelude::v1::*; use intrinsics; use libc::c_int; use num::{Float, FpCategory}; use num::strconv; use num::strconv::ExponentFormat::{ExpNone, ExpDec}; use num::strconv::SignificantDigits::{DigAll, DigMax, DigExact}; use num::strconv::SignFormat::SignNeg; use core::num; pub use core::f64::{RADIX, MANTISSA_DIGITS, DIGITS, EPSILON, MIN_VALUE}; pub use core::f64::{MIN_POS_VALUE, MAX_VALUE, MIN_EXP, MAX_EXP, MIN_10_EXP}; pub use core::f64::{MAX_10_EXP, NAN, INFINITY, NEG_INFINITY}; pub use core::f64::{MIN, MIN_POSITIVE, MAX}; pub use core::f64::consts; #[allow(dead_code)] mod cmath { use libc::{c_double, c_int}; #[link_name = "m"] extern { pub fn acos(n: c_double) -> c_double; pub fn asin(n: c_double) -> c_double; pub fn atan(n: c_double) -> c_double; pub fn atan2(a: c_double, b: c_double) -> c_double; pub fn cbrt(n: c_double) -> c_double; pub fn cosh(n: c_double) -> c_double; pub fn erf(n: c_double) -> c_double; pub fn erfc(n: c_double) -> c_double; pub fn expm1(n: c_double) -> c_double; pub fn fdim(a: c_double, b: c_double) -> c_double; pub fn fmax(a: c_double, b: c_double) -> c_double; pub fn fmin(a: c_double, b: c_double) -> c_double; pub fn fmod(a: c_double, b: c_double) -> c_double; pub fn nextafter(x: c_double, y: c_double) -> c_double; pub fn frexp(n: c_double, value: &mut c_int) -> c_double; pub fn hypot(x: c_double, y: c_double) -> c_double; pub fn ldexp(x: c_double, n: c_int) -> c_double; pub fn logb(n: c_double) -> c_double; pub fn log1p(n: c_double) -> c_double; pub fn ilogb(n: c_double) -> c_int; pub fn modf(n: c_double, iptr: &mut c_double) -> c_double; pub fn sinh(n: c_double) -> c_double; pub fn tan(n: c_double) -> c_double; pub fn tanh(n: c_double) -> c_double; pub fn tgamma(n: c_double) -> c_double; // These are commonly only available for doubles pub fn j0(n: c_double) -> c_double; pub fn j1(n: c_double) -> c_double; pub fn jn(i: c_int, n: c_double) -> c_double; pub fn y0(n: c_double) -> c_double; pub fn y1(n: c_double) -> c_double; pub fn yn(i: c_int, n: c_double) -> c_double; #[cfg(unix)] pub fn lgamma_r(n: c_double, sign: &mut c_int) -> c_double; #[cfg(windows)] #[link_name="__lgamma_r"] pub fn lgamma_r(n: c_double, sign: &mut c_int) -> c_double; } } #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] impl Float for f64 { // inlined methods from `num::Float` #[inline] fn nan() -> f64 { num::Float::nan() } #[inline] fn infinity() -> f64 { num::Float::infinity() } #[inline] fn neg_infinity() -> f64 { num::Float::neg_infinity() } #[inline] fn zero() -> f64 { num::Float::zero() } #[inline] fn neg_zero() -> f64 { num::Float::neg_zero() } #[inline] fn one() -> f64 { num::Float::one() } #[allow(deprecated)] #[inline] fn mantissa_digits(unused_self: Option<f64>) -> usize { num::Float::mantissa_digits(unused_self) } #[allow(deprecated)] #[inline] fn digits(unused_self: Option<f64>) -> usize { num::Float::digits(unused_self) } 
#[allow(deprecated)] #[inline] fn epsilon() -> f64 { num::Float::epsilon() } #[allow(deprecated)] #[inline] fn min_exp(unused_self: Option<f64>) -> isize { num::Float::min_exp(unused_self) } #[allow(deprecated)] #[inline] fn max_exp(unused_self: Option<f64>) -> isize { num::Float::max_exp(unused_self) } #[allow(deprecated)] #[inline] fn min_10_exp(unused_self: Option<f64>) -> isize { num::Float::min_10_exp(unused_self) } #[allow(deprecated)] #[inline] fn max_10_exp(unused_self: Option<f64>) -> isize { num::Float::max_10_exp(unused_self) } #[allow(deprecated)] #[inline] fn min_value() -> f64 { num::Float::min_value() } #[allow(deprecated)] #[inline] fn min_pos_value(unused_self: Option<f64>) -> f64 { num::Float::min_pos_value(unused_self) } #[allow(deprecated)] #[inline] fn max_value() -> f64 { num::Float::max_value() } #[inline] fn is_nan(self) -> bool { num::Float::is_nan(self) } #[inline] fn is_infinite(self) -> bool { num::Float::is_infinite(self) } #[inline] fn is_finite(self) -> bool { num::Float::is_finite(self) } #[inline] fn is_normal(self) -> bool { num::Float::is_normal(self) } #[inline] fn classify(self) -> FpCategory { num::Float::classify(self) } #[inline] fn integer_decode(self) -> (u64, i16, i8) { num::Float::integer_decode(self) } #[inline] fn floor(self) -> f64 { num::Float::floor(self) } #[inline] fn ceil(self) -> f64 { num::Float::ceil(self) } #[inline] fn round(self) -> f64 { num::Float::round(self) } #[inline] fn trunc(self) -> f64 { num::Float::trunc(self) } #[inline] fn fract(self) -> f64 { num::Float::fract(self) } #[inline] fn abs(self) -> f64 { num::Float::abs(self) } #[inline] fn signum(self) -> f64 { num::Float::signum(self) } #[inline] fn is_positive(self) -> bool { num::Float::is_positive(self) } #[inline] fn is_negative(self) -> bool { num::Float::is_negative(self) } #[inline] fn mul_add(self, a: f64, b: f64) -> f64 { num::Float::mul_add(self, a, b) } #[inline] fn recip(self) -> f64 { num::Float::recip(self) } #[inline] fn powi(self, n: i32) -> f64 { num::Float::powi(self, n) } #[inline] fn powf(self, n: f64) -> f64 { num::Float::powf(self, n) } #[inline] fn sqrt(self) -> f64 { num::Float::sqrt(self) } #[inline] fn rsqrt(self) -> f64 { num::Float::rsqrt(self) } #[inline] fn exp(self) -> f64
#[inline] fn exp2(self) -> f64 { num::Float::exp2(self) } #[inline] fn ln(self) -> f64 { num::Float::ln(self) } #[inline] fn log(self, base: f64) -> f64 { num::Float::log(self, base) } #[inline] fn log2(self) -> f64 { num::Float::log2(self) } #[inline] fn log10(self) -> f64 { num::Float::log10(self) } #[inline] fn to_degrees(self) -> f64 { num::Float::to_degrees(self) } #[inline] fn to_radians(self) -> f64 { num::Float::to_radians(self) } #[inline] fn ldexp(self, exp: isize) -> f64 { unsafe { cmath::ldexp(self, exp as c_int) } } /// Breaks the number into a normalized fraction and a base-2 exponent, /// satisfying: /// /// - `self = x * pow(2, exp)` /// - `0.5 <= abs(x) < 1.0` #[inline] fn frexp(self) -> (f64, isize) { unsafe { let mut exp = 0; let x = cmath::frexp(self, &mut exp); (x, exp as isize) } } /// Returns the next representable floating-point value in the direction of /// `other`. #[inline] fn next_after(self, other: f64) -> f64 { unsafe { cmath::nextafter(self, other) } } #[inline] fn max(self, other: f64) -> f64 { unsafe { cmath::fmax(self, other) } } #[inline] fn min(self, other: f64) -> f64 { unsafe { cmath::fmin(self, other) } } #[inline] fn abs_sub(self, other: f64) -> f64 { unsafe { cmath::fdim(self, other) } } #[inline] fn cbrt(self) -> f64 { unsafe { cmath::cbrt(self) } } #[inline] fn hypot(self, other: f64) -> f64 { unsafe { cmath::hypot(self, other) } } #[inline] fn sin(self) -> f64 { unsafe { intrinsics::sinf64(self) } } #[inline] fn cos(self) -> f64 { unsafe { intrinsics::cosf64(self) } } #[inline] fn tan(self) -> f64 { unsafe { cmath::tan(self) } } #[inline] fn asin(self) -> f64 { unsafe { cmath::asin(self) } } #[inline] fn acos(self) -> f64 { unsafe { cmath::acos(self) } } #[inline] fn atan(self) -> f64 { unsafe { cmath::atan(self) } } #[inline] fn atan2(self, other: f64) -> f64 { unsafe { cmath::atan2(self, other) } } /// Simultaneously computes the sine and cosine of the number #[inline] fn sin_cos(self) -> (f64, f64) { (self.sin(), self.cos()) } /// Returns the exponential of the number, minus `1`, in a way that is /// accurate even if the number is close to zero #[inline] fn exp_m1(self) -> f64 { unsafe { cmath::expm1(self) } } /// Returns the natural logarithm of the number plus `1` (`ln(1+n)`) more /// accurately than if the operations were performed separately #[inline] fn ln_1p(self) -> f64 { unsafe { cmath::log1p(self) } } #[inline] fn sinh(self) -> f64 { unsafe { cmath::sinh(self) } } #[inline] fn cosh(self) -> f64 { unsafe { cmath::cosh(self) } } #[inline] fn tanh(self) -> f64 { unsafe { cmath::tanh(self) } } /// Inverse hyperbolic sine /// /// # Returns /// /// - on success, the inverse hyperbolic sine of `self` will be returned /// - `self` if `self` is `0.0`, `-0.0`, `INFINITY`, or `NEG_INFINITY` /// - `NAN` if `self` is `NAN` #[inline] fn asinh(self) -> f64 { match self { NEG_INFINITY => NEG_INFINITY, x => (x + ((x * x) + 1.0).sqrt()).ln(), } } /// Inverse hyperbolic cosine /// /// # Returns /// /// - on success, the inverse hyperbolic cosine of `self` will be returned /// - `INFINITY` if `self` is `INFINITY` /// - `NAN` if `self` is `NAN` or `self < 1.0` (including `NEG_INFINITY`) #[inline] fn acosh(self) -> f64 { match self { x if x < 1.0 => Float::nan(), x => (x + ((x * x) - 1.0).sqrt()).ln(), } } /// Inverse hyperbolic tangent /// /// # Returns /// /// - on success, the inverse hyperbolic tangent of `self` will be returned /// - `self` if `self` is `0.0` or `-0.0` /// - `INFINITY` if `self` is `1.0` /// - `NEG_INFINITY` if `self` is `-1.0` /// - 
`NAN` if the `self` is `NAN` or outside the domain of `-1.0 <= self <= 1.0` /// (including `INFINITY` and `NEG_INFINITY`) #[inline] fn atanh(self) -> f64 { 0.5 * ((2.0 * self) / (1.0 - self)).ln_1p() } } #[cfg(not(test))] #[lang = "f64"] #[stable(feature = "rust1", since = "1.0.0")] impl f64 { /// Returns `true` if this value is `NaN` and false otherwise. /// /// ``` /// use std::f64; /// /// let nan = f64::NAN; /// let f = 7.0_f64; /// /// assert!(nan.is_nan()); /// assert!(!f.is_nan()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_nan(self) -> bool { num::Float::is_nan(self) } /// Returns `true` if this value is positive infinity or negative infinity and /// false otherwise. /// /// ``` /// use std::f64; /// /// let f = 7.0f64; /// let inf = f64::INFINITY; /// let neg_inf = f64::NEG_INFINITY; /// let nan = f64::NAN; /// /// assert!(!f.is_infinite()); /// assert!(!nan.is_infinite()); /// /// assert!(inf.is_infinite()); /// assert!(neg_inf.is_infinite()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_infinite(self) -> bool { num::Float::is_infinite(self) } /// Returns `true` if this number is neither infinite nor `NaN`. /// /// ``` /// use std::f64; /// /// let f = 7.0f64; /// let inf: f64 = f64::INFINITY; /// let neg_inf: f64 = f64::NEG_INFINITY; /// let nan: f64 = f64::NAN; /// /// assert!(f.is_finite()); /// /// assert!(!nan.is_finite()); /// assert!(!inf.is_finite()); /// assert!(!neg_inf.is_finite()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_finite(self) -> bool { num::Float::is_finite(self) } /// Returns `true` if the number is neither zero, infinite, /// [subnormal][subnormal], or `NaN`. /// /// ``` /// use std::f32; /// /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f64 /// let max = f32::MAX; /// let lower_than_min = 1.0e-40_f32; /// let zero = 0.0f32; /// /// assert!(min.is_normal()); /// assert!(max.is_normal()); /// /// assert!(!zero.is_normal()); /// assert!(!f32::NAN.is_normal()); /// assert!(!f32::INFINITY.is_normal()); /// // Values between `0` and `min` are Subnormal. /// assert!(!lower_than_min.is_normal()); /// ``` /// [subnormal]: http://en.wikipedia.org/wiki/Denormal_number #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_normal(self) -> bool { num::Float::is_normal(self) } /// Returns the floating point category of the number. If only one property /// is going to be tested, it is generally faster to use the specific /// predicate instead. /// /// ``` /// use std::num::FpCategory; /// use std::f64; /// /// let num = 12.4_f64; /// let inf = f64::INFINITY; /// /// assert_eq!(num.classify(), FpCategory::Normal); /// assert_eq!(inf.classify(), FpCategory::Infinite); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn classify(self) -> FpCategory { num::Float::classify(self) } /// Returns the mantissa, base 2 exponent, and sign as integers, respectively. /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`. /// The floating point encoding is documented in the [Reference][floating-point]. 
/// /// ``` /// # #![feature(std_misc)] /// let num = 2.0f64; /// /// // (8388608, -22, 1) /// let (mantissa, exponent, sign) = num.integer_decode(); /// let sign_f = sign as f64; /// let mantissa_f = mantissa as f64; /// let exponent_f = num.powf(exponent as f64); /// /// // 1 * 8388608 * 2^(-22) == 2 /// let abs_difference = (sign_f * mantissa_f * exponent_f - num).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` /// [floating-point]: ../../../../../reference.html#machine-types #[unstable(feature = "std_misc", reason = "signature is undecided")] #[inline] pub fn integer_decode(self) -> (u64, i16, i8) { num::Float::integer_decode(self) } /// Returns the largest integer less than or equal to a number. /// /// ``` /// let f = 3.99_f64; /// let g = 3.0_f64; /// /// assert_eq!(f.floor(), 3.0); /// assert_eq!(g.floor(), 3.0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn floor(self) -> f64 { num::Float::floor(self) } /// Returns the smallest integer greater than or equal to a number. /// /// ``` /// let f = 3.01_f64; /// let g = 4.0_f64; /// /// assert_eq!(f.ceil(), 4.0); /// assert_eq!(g.ceil(), 4.0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn ceil(self) -> f64 { num::Float::ceil(self) } /// Returns the nearest integer to a number. Round half-way cases away from /// `0.0`. /// /// ``` /// let f = 3.3_f64; /// let g = -3.3_f64; /// /// assert_eq!(f.round(), 3.0); /// assert_eq!(g.round(), -3.0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn round(self) -> f64 { num::Float::round(self) } /// Return the integer part of a number. /// /// ``` /// let f = 3.3_f64; /// let g = -3.7_f64; /// /// assert_eq!(f.trunc(), 3.0); /// assert_eq!(g.trunc(), -3.0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn trunc(self) -> f64 { num::Float::trunc(self) } /// Returns the fractional part of a number. /// /// ``` /// let x = 3.5_f64; /// let y = -3.5_f64; /// let abs_difference_x = (x.fract() - 0.5).abs(); /// let abs_difference_y = (y.fract() - (-0.5)).abs(); /// /// assert!(abs_difference_x < 1e-10); /// assert!(abs_difference_y < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn fract(self) -> f64 { num::Float::fract(self) } /// Computes the absolute value of `self`. Returns `NAN` if the /// number is `NAN`. /// /// ``` /// use std::f64; /// /// let x = 3.5_f64; /// let y = -3.5_f64; /// /// let abs_difference_x = (x.abs() - x).abs(); /// let abs_difference_y = (y.abs() - (-y)).abs(); /// /// assert!(abs_difference_x < 1e-10); /// assert!(abs_difference_y < 1e-10); /// /// assert!(f64::NAN.abs().is_nan()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn abs(self) -> f64 { num::Float::abs(self) } /// Returns a number that represents the sign of `self`. /// /// - `1.0` if the number is positive, `+0.0` or `INFINITY` /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY` /// - `NAN` if the number is `NAN` /// /// ``` /// use std::f64; /// /// let f = 3.5_f64; /// /// assert_eq!(f.signum(), 1.0); /// assert_eq!(f64::NEG_INFINITY.signum(), -1.0); /// /// assert!(f64::NAN.signum().is_nan()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn signum(self) -> f64 { num::Float::signum(self) } /// Returns `true` if `self`'s sign bit is positive, including /// `+0.0` and `INFINITY`. 
/// /// ``` /// use std::f64; /// /// let nan: f64 = f64::NAN; /// /// let f = 7.0_f64; /// let g = -7.0_f64; /// /// assert!(f.is_sign_positive()); /// assert!(!g.is_sign_positive()); /// // Requires both tests to determine if is `NaN` /// assert!(!nan.is_sign_positive() && !nan.is_sign_negative()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_sign_positive(self) -> bool { num::Float::is_positive(self) } #[stable(feature = "rust1", since = "1.0.0")] #[deprecated(since = "1.0.0", reason = "renamed to is_sign_positive")] #[inline] pub fn is_positive(self) -> bool { num::Float::is_positive(self) } /// Returns `true` if `self`'s sign is negative, including `-0.0` /// and `NEG_INFINITY`. /// /// ``` /// use std::f64; /// /// let nan = f64::NAN; /// /// let f = 7.0_f64; /// let g = -7.0_f64; /// /// assert!(!f.is_sign_negative()); /// assert!(g.is_sign_negative()); /// // Requires both tests to determine if is `NaN`. /// assert!(!nan.is_sign_positive() && !nan.is_sign_negative()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_sign_negative(self) -> bool { num::Float::is_negative(self) } #[stable(feature = "rust1", since = "1.0.0")] #[deprecated(since = "1.0.0", reason = "renamed to is_sign_negative")] #[inline] pub fn is_negative(self) -> bool { num::Float::is_negative(self) } /// Fused multiply-add. Computes `(self * a) + b` with only one rounding /// error. This produces a more accurate result with better performance than /// a separate multiplication operation followed by an add. /// /// ``` /// let m = 10.0_f64; /// let x = 4.0_f64; /// let b = 60.0_f64; /// /// // 100.0 /// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn mul_add(self, a: f64, b: f64) -> f64 { num::Float::mul_add(self, a, b) } /// Take the reciprocal (inverse) of a number, `1/x`. /// /// ``` /// let x = 2.0_f64; /// let abs_difference = (x.recip() - (1.0/x)).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn recip(self) -> f64 { num::Float::recip(self) } /// Raise a number to an integer power. /// /// Using this function is generally faster than using `powf` /// /// ``` /// let x = 2.0_f64; /// let abs_difference = (x.powi(2) - x*x).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn powi(self, n: i32) -> f64 { num::Float::powi(self, n) } /// Raise a number to a floating point power. /// /// ``` /// let x = 2.0_f64; /// let abs_difference = (x.powf(2.0) - x*x).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn powf(self, n: f64) -> f64 { num::Float::powf(self, n) } /// Take the square root of a number. /// /// Returns NaN if `self` is a negative number. /// /// ``` /// let positive = 4.0_f64; /// let negative = -4.0_f64; /// /// let abs_difference = (positive.sqrt() - 2.0).abs(); /// /// assert!(abs_difference < 1e-10); /// assert!(negative.sqrt().is_nan()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn sqrt(self) -> f64 { num::Float::sqrt(self) } /// Take the reciprocal (inverse) square root of a number, `1/sqrt(x)`. 
/// /// ``` /// # #![feature(std_misc)] /// let f = 4.0_f64; /// /// let abs_difference = (f.rsqrt() - 0.5).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[unstable(feature = "std_misc", reason = "unsure about its place in the world")] #[deprecated(since = "1.0.0", reason = "use self.sqrt().recip() instead")] #[inline] pub fn rsqrt(self) -> f64 { num::Float::rsqrt(self) } /// Returns `e^(self)`, (the exponential function). /// /// ``` /// let one = 1.0_f64; /// // e^1 /// let e = one.exp(); /// /// // ln(e) - 1 == 0 /// let abs_difference = (e.ln() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn exp(self) -> f64 { num::Float::exp(self) } /// Returns `2^(self)`. /// /// ``` /// let f = 2.0_f64; /// /// // 2^2 - 4 == 0 /// let abs_difference = (f.exp2() - 4.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn exp2(self) -> f64 { num::Float::exp2(self) } /// Returns the natural logarithm of the number. /// /// ``` /// let one = 1.0_f64; /// // e^1 /// let e = one.exp(); /// /// // ln(e) - 1 == 0 /// let abs_difference = (e.ln() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn ln(self) -> f64 { num::Float::ln(self) } /// Returns the logarithm of the number with respect to an arbitrary base. /// /// ``` /// let ten = 10.0_f64; /// let two = 2.0_f64; /// /// // log10(10) - 1 == 0 /// let abs_difference_10 = (ten.log(10.0) - 1.0).abs(); /// /// // log2(2) - 1 == 0 /// let abs_difference_2 = (two.log(2.0) - 1.0).abs(); /// /// assert!(abs_difference_10 < 1e-10); /// assert!(abs_difference_2 < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn log(self, base: f64) -> f64 { num::Float::log(self, base) } /// Returns the base 2 logarithm of the number. /// /// ``` /// let two = 2.0_f64; /// /// // log2(2) - 1 == 0 /// let abs_difference = (two.log2() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn log2(self) -> f64 { num::Float::log2(self) } /// Returns the base 10 logarithm of the number. /// /// ``` /// let ten = 10.0_f64; /// /// // log10(10) - 1 == 0 /// let abs_difference = (ten.log10() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn log10(self) -> f64 { num::Float::log10(self) } /// Convert radians to degrees. /// /// ``` /// use std::f64::consts; /// /// let angle = consts::PI; /// /// let abs_difference = (angle.to_degrees() - 180.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_degrees(self) -> f64 { num::Float::to_degrees(self) } /// Convert degrees to radians. /// /// ``` /// use std::f64::consts; /// /// let angle = 180.0_f64; /// /// let abs_difference = (angle.to_radians() - consts::PI).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_radians(self) -> f64 { num::Float::to_radians(self) } /// Constructs a floating point number of `x*2^exp`. 
/// /// ``` /// # #![feature(std_misc)] /// // 3*2^2 - 12 == 0 /// let abs_difference = (f64::ldexp(3.0, 2) - 12.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[unstable(feature = "std_misc", reason = "pending integer conventions")] #[inline] pub fn ldexp(x: f64, exp: isize) -> f64 { unsafe { cmath::ldexp(x, exp as c_int) } } /// Breaks the number into a normalized fraction and a base-2 exponent, /// satisfying: /// /// * `self = x * 2^exp` /// * `0.5 <= abs(x) < 1.0` /// /// ``` /// # #![feature(std_misc)] /// let x = 4.0_f64; /// /// // (1/2)*2^3 -> 1 * 8/2 -> 4.0 /// let f = x.frexp(); /// let abs_difference_0 = (f.0 - 0.5).abs(); /// let abs_difference_1 = (f.1 as f64 - 3.0).abs(); /// /// assert!(abs_difference_0 < 1e-10); /// assert!(abs_difference_1 < 1e-10); /// ``` #[unstable(feature = "std_misc", reason = "pending integer conventions")] #[inline] pub fn frexp(self) -> (f64, isize) { unsafe { let mut exp = 0; let x = cmath::frexp(self, &mut exp); (x, exp as isize) } } /// Returns the next representable floating-point value in the direction of /// `other`. /// /// ``` /// # #![feature(std_misc)] /// /// let x = 1.0f32; /// /// let abs_diff = (x.next_after(2.0) - 1.00000011920928955078125_f32).abs(); /// /// assert!(abs_diff < 1e-10); /// ``` #[unstable(feature = "std_misc", reason = "unsure about its place in the world")] #[inline] pub fn next_after(self, other: f64) -> f64 { unsafe { cmath::nextafter(self, other) } } /// Returns the maximum of the two numbers. /// /// ``` /// let x = 1.0_f64; /// let y = 2.0_f64; /// /// assert_eq!(x.max(y), y); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn max(self, other: f64) -> f64 { unsafe { cmath::fmax(self, other) } } /// Returns the minimum of the two numbers. /// /// ``` /// let x = 1.0_f64; /// let y = 2.0_f64; /// /// assert_eq!(x.min(y), x); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn min(self, other: f64) -> f64 { unsafe { cmath::fmin(self, other) } } /// The positive difference of two numbers. /// /// * If `self <= other`: `0:0` /// * Else: `self - other` /// /// ``` /// let x = 3.0_f64; /// let y = -3.0_f64; /// /// let abs_difference_x = (x.abs_sub(1.0) - 2.0).abs(); /// let abs_difference_y = (y.abs_sub(1.0) - 0.0).abs(); /// /// assert!(abs_difference_x < 1e-10); /// assert!(abs_difference_y < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn abs_sub(self, other: f64) -> f64 { unsafe { cmath::fdim(self, other) } } /// Take the cubic root of a number. /// /// ``` /// let x = 8.0_f64; /// /// // x^(1/3) - 2 == 0 /// let abs_difference = (x.cbrt() - 2.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn cbrt(self) -> f64 { unsafe { cmath::cbrt(self) } } /// Calculate the length of the hypotenuse of a right-angle triangle given /// legs of length `x` and `y`. /// /// ``` /// let x = 2.0_f64; /// let y = 3.0_f64; /// /// // sqrt(x^2 + y^2) /// let abs_difference = (x.hypot(y) - (x.powi(2) + y.powi(2)).sqrt()).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn hypot(self, other: f64) -> f64 { unsafe { cmath::hypot(self, other) } } /// Computes the sine of a number (in radians). 
/// /// ``` /// use std::f64; /// /// let x = f64::consts::PI/2.0; /// /// let abs_difference = (x.sin() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn sin(self) -> f64 { unsafe { intrinsics::sinf64(self) } } /// Computes the cosine of a number (in radians). /// /// ``` /// use std::f64; /// /// let x = 2.0*f64::consts::PI; /// /// let abs_difference = (x.cos() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn cos(self) -> f64 { unsafe { intrinsics::cosf64(self) } } /// Computes the tangent of a number (in radians). /// /// ``` /// use std::f64; /// /// let x = f64::consts::PI/4.0; /// let abs_difference = (x.tan() - 1.0).abs(); /// /// assert!(abs_difference < 1e-14); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn tan(self) -> f64 { unsafe { cmath::tan(self) } } /// Computes the arcsine of a number. Return value is in radians in /// the range [-pi/2, pi/2] or NaN if the number is outside the range /// [-1, 1]. /// /// ``` /// use std::f64; /// /// let f = f64::consts::PI / 2.0; /// /// // asin(sin(pi/2)) /// let abs_difference = (f.sin().asin() - f64::consts::PI / 2.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn asin(self) -> f64 { unsafe { cmath::asin(self) } } /// Computes the arccosine of a number. Return value is in radians in /// the range [0, pi] or NaN if the number is outside the range /// [-1, 1]. /// /// ``` /// use std::f64; /// /// let f = f64::consts::PI / 4.0; /// /// // acos(cos(pi/4)) /// let abs_difference = (f.cos().acos() - f64::consts::PI / 4.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn acos(self) -> f64 { unsafe { cmath::acos(self) } } /// Computes the arctangent of a number. Return value is in radians in the /// range [-pi/2, pi/2]; /// /// ``` /// let f = 1.0_f64; /// /// // atan(tan(1)) /// let abs_difference = (f.tan().atan() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn atan(self) -> f64 { unsafe { cmath::atan(self) } } /// Computes the four quadrant arctangent of `self` (`y`) and `other` (`x`). /// /// * `x = 0`, `y = 0`: `0` /// * `x >= 0`: `arctan(y/x)` -> `[-pi/2, pi/2]` /// * `y >= 0`: `arctan(y/x) + pi` -> `(pi/2, pi]` /// * `y < 0`: `arctan(y/x) - pi` -> `(-pi, -pi/2)` /// /// ``` /// use std::f64; /// /// let pi = f64::consts::PI; /// // All angles from horizontal right (+x) /// // 45 deg counter-clockwise /// let x1 = 3.0_f64; /// let y1 = -3.0_f64; /// /// // 135 deg clockwise /// let x2 = -3.0_f64; /// let y2 = 3.0_f64; /// /// let abs_difference_1 = (y1.atan2(x1) - (-pi/4.0)).abs(); /// let abs_difference_2 = (y2.atan2(x2) - 3.0*pi/4.0).abs(); /// /// assert!(abs_difference_1 < 1e-10); /// assert!(abs_difference_2 < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn atan2(self, other: f64) -> f64 { unsafe { cmath::atan2(self, other) } } /// Simultaneously computes the sine and cosine of the number, `x`. Returns /// `(sin(x), cos(x))`. 
/// /// ``` /// use std::f64; /// /// let x = f64::consts::PI/4.0; /// let f = x.sin_cos(); /// /// let abs_difference_0 = (f.0 - x.sin()).abs(); /// let abs_difference_1 = (f.1 - x.cos()).abs(); /// /// assert!(abs_difference_0 < 1e-10); /// assert!(abs_difference_0 < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn sin_cos(self) -> (f64, f64) { (self.sin(), self.cos()) } /// Returns `e^(self) - 1` in a way that is accurate even if the /// number is close to zero. /// /// ``` /// let x = 7.0_f64; /// /// // e^(ln(7)) - 1 /// let abs_difference = (x.ln().exp_m1() - 6.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn exp_m1(self) -> f64 { unsafe { cmath::expm1(self) } } /// Returns `ln(1+n)` (natural logarithm) more accurately than if /// the operations were performed separately. /// /// ``` /// use std::f64; /// /// let x = f64::consts::E - 1.0; /// /// // ln(1 + (e - 1)) == ln(e) == 1 /// let abs_difference = (x.ln_1p() - 1.0).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn ln_1p(self) -> f64 { unsafe { cmath::log1p(self) } } /// Hyperbolic sine function. /// /// ``` /// use std::f64; /// /// let e = f64::consts::E; /// let x = 1.0_f64; /// /// let f = x.sinh(); /// // Solving sinh() at 1 gives `(e^2-1)/(2e)` /// let g = (e*e - 1.0)/(2.0*e); /// let abs_difference = (f - g).abs(); /// /// assert!(abs_difference < 1e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn sinh(self) -> f64 { unsafe { cmath::sinh(self) } } /// Hyperbolic cosine function. /// /// ``` /// use std::f64; /// /// let e = f64::consts::E; /// let x = 1.0_f64; /// let f = x.cosh(); /// // Solving cosh() at 1 gives this result /// let g = (e*e + 1.0)/(2.0*e); /// let abs_difference = (f - g).abs(); /// /// // Same result /// assert!(abs_difference < 1.0e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn cosh(self) -> f64 { unsafe { cmath::cosh(self) } } /// Hyperbolic tangent function. /// /// ``` /// use std::f64; /// /// let e = f64::consts::E; /// let x = 1.0_f64; /// /// let f = x.tanh(); /// // Solving tanh() at 1 gives `(1 - e^(-2))/(1 + e^(-2))` /// let g = (1.0 - e.powi(-2))/(1.0 + e.powi(-2)); /// let abs_difference = (f - g).abs(); /// /// assert!(abs_difference < 1.0e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn tanh(self) -> f64 { unsafe { cmath::tanh(self) } } /// Inverse hyperbolic sine function. /// /// ``` /// let x = 1.0_f64; /// let f = x.sinh().asinh(); /// /// let abs_difference = (f - x).abs(); /// /// assert!(abs_difference < 1.0e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn asinh(self) -> f64 { match self { NEG_INFINITY => NEG_INFINITY, x => (x + ((x * x) + 1.0).sqrt()).ln(), } } /// Inverse hyperbolic cosine function. /// /// ``` /// let x = 1.0_f64; /// let f = x.cosh().acosh(); /// /// let abs_difference = (f - x).abs(); /// /// assert!(abs_difference < 1.0e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn acosh(self) -> f64 { match self { x if x < 1.0 => Float::nan(), x => (x + ((x * x) - 1.0).sqrt()).ln(), } } /// Inverse hyperbolic tangent function. 
/// /// ``` /// use std::f64; /// /// let e = f64::consts::E; /// let f = e.tanh().atanh(); /// /// let abs_difference = (f - e).abs(); /// /// assert!(abs_difference < 1.0e-10); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn atanh(self) -> f64 { 0.5 * ((2.0 * self) / (1.0 - self)).ln_1p() } } // // Section: String Conversions // /// Converts a float to a string /// /// # Arguments /// /// * num - The float value #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] #[deprecated(since = "1.0.0", reason = "use the ToString trait instead")] pub fn to_string(num: f64) -> String { let (r, _) = strconv::float_to_str_common( num, 10, true, SignNeg, DigAll, ExpNone, false); r } /// Converts a float to a string in hexadecimal format /// /// # Arguments /// /// * num - The float value #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] #[deprecated(since = "1.0.0", reason = "use format! instead")] pub fn to_str_hex(num: f64) -> String { let (r, _) = strconv::float_to_str_common( num, 16, true, SignNeg, DigAll, ExpNone, false); r } /// Converts a float to a string in a given radix, and a flag indicating /// whether it's a special value /// /// # Arguments /// /// * num - The float value /// * radix - The base to use #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] #[deprecated(since = "1.0.0", reason = "use format! instead")] pub fn to_str_radix_special(num: f64, rdx: u32) -> (String, bool) { strconv::float_to_str_common(num, rdx, true, SignNeg, DigAll, ExpNone, false) } /// Converts a float to a string with exactly the number of /// provided significant digits /// /// # Arguments /// /// * num - The float value /// * digits - The number of significant digits #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] pub fn to_str_exact(num: f64, dig: usize) -> String { let (r, _) = strconv::float_to_str_common( num, 10, true, SignNeg, DigExact(dig), ExpNone, false); r } /// Converts a float to a string with a maximum number of /// significant digits /// /// # Arguments /// /// * num - The float value /// * digits - The number of significant digits #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] pub fn to_str_digits(num: f64, dig: usize) -> String { let (r, _) = strconv::float_to_str_common( num, 10, true, SignNeg, DigMax(dig), ExpNone, false); r } /// Converts a float to a string using the exponential notation with exactly the number of /// provided digits after the decimal point in the significand /// /// # Arguments /// /// * num - The float value /// * digits - The number of digits after the decimal point /// * upper - Use `E` instead of `e` for the exponent sign #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] pub fn to_str_exp_exact(num: f64, dig: usize, upper: bool) -> String { let (r, _) = strconv::float_to_str_common( num, 10, true, SignNeg, DigExact(dig), ExpDec, upper); r } /// Converts a float to a string using the exponential notation with the maximum number of /// digits after the decimal point in the significand /// /// # Arguments /// /// * num - The float value /// * digits - The number of digits after the decimal point /// * upper - Use `E` instead of `e` for the exponent sign #[inline] #[unstable(feature = "std_misc", reason = "may be removed or relocated")] pub fn to_str_exp_digits(num: f64, dig: usize, upper: bool) -> String { let (r, _) = 
strconv::float_to_str_common( num, 10, true, SignNeg, DigMax(dig), ExpDec, upper); r } #[cfg(test)] mod tests { use f64::*; use num::*; use num::FpCategory as Fp; #[test] fn test_num_f64() { test_num(10f64, 2f64); } #[test] fn test_min_nan() { assert_eq!(NAN.min(2.0), 2.0); assert_eq!(2.0f64.min(NAN), 2.0); } #[test] fn test_max_nan() { assert_eq!(NAN.max(2.0), 2.0); assert_eq!(2.0f64.max(NAN), 2.0); } #[test] fn test_nan() { let nan: f64 = Float::nan(); assert!(nan.is_nan()); assert!(!nan.is_infinite()); assert!(!nan.is_finite()); assert!(!nan.is_normal()); assert!(!nan.is_sign_positive()); assert!(!nan.is_sign_negative()); assert_eq!(Fp::Nan, nan.classify()); } #[test] fn test_infinity() { let inf: f64 = Float::infinity(); assert!(inf.is_infinite()); assert!(!inf.is_finite()); assert!(inf.is_sign_positive()); assert!(!inf.is_sign_negative()); assert!(!inf.is_nan()); assert!(!inf.is_normal()); assert_eq!(Fp::Infinite, inf.classify()); } #[test] fn test_neg_infinity() { let neg_inf: f64 = Float::neg_infinity(); assert!(neg_inf.is_infinite()); assert!(!neg_inf.is_finite()); assert!(!neg_inf.is_sign_positive()); assert!(neg_inf.is_sign_negative()); assert!(!neg_inf.is_nan()); assert!(!neg_inf.is_normal()); assert_eq!(Fp::Infinite, neg_inf.classify()); } #[test] fn test_zero() { let zero: f64 = Float::zero(); assert_eq!(0.0, zero); assert!(!zero.is_infinite()); assert!(zero.is_finite()); assert!(zero.is_sign_positive()); assert!(!zero.is_sign_negative()); assert!(!zero.is_nan()); assert!(!zero.is_normal()); assert_eq!(Fp::Zero, zero.classify()); } #[test] fn test_neg_zero() { let neg_zero: f64 = Float::neg_zero(); assert_eq!(0.0, neg_zero); assert!(!neg_zero.is_infinite()); assert!(neg_zero.is_finite()); assert!(!neg_zero.is_sign_positive()); assert!(neg_zero.is_sign_negative()); assert!(!neg_zero.is_nan()); assert!(!neg_zero.is_normal()); assert_eq!(Fp::Zero, neg_zero.classify()); } #[test] fn test_one() { let one: f64 = Float::one(); assert_eq!(1.0, one); assert!(!one.is_infinite()); assert!(one.is_finite()); assert!(one.is_sign_positive()); assert!(!one.is_sign_negative()); assert!(!one.is_nan()); assert!(one.is_normal()); assert_eq!(Fp::Normal, one.classify()); } #[test] fn test_is_nan() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert!(nan.is_nan()); assert!(!0.0f64.is_nan()); assert!(!5.3f64.is_nan()); assert!(!(-10.732f64).is_nan()); assert!(!inf.is_nan()); assert!(!neg_inf.is_nan()); } #[test] fn test_is_infinite() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert!(!nan.is_infinite()); assert!(inf.is_infinite()); assert!(neg_inf.is_infinite()); assert!(!0.0f64.is_infinite()); assert!(!42.8f64.is_infinite()); assert!(!(-109.2f64).is_infinite()); } #[test] fn test_is_finite() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert!(!nan.is_finite()); assert!(!inf.is_finite()); assert!(!neg_inf.is_finite()); assert!(0.0f64.is_finite()); assert!(42.8f64.is_finite()); assert!((-109.2f64).is_finite()); } #[test] fn test_is_normal() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let zero: f64 = Float::zero(); let neg_zero: f64 = Float::neg_zero(); assert!(!nan.is_normal()); assert!(!inf.is_normal()); assert!(!neg_inf.is_normal()); assert!(!zero.is_normal()); assert!(!neg_zero.is_normal()); assert!(1f64.is_normal()); assert!(1e-307f64.is_normal()); 
assert!(!1e-308f64.is_normal()); } #[test] fn test_classify() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let zero: f64 = Float::zero(); let neg_zero: f64 = Float::neg_zero(); assert_eq!(nan.classify(), Fp::Nan); assert_eq!(inf.classify(), Fp::Infinite); assert_eq!(neg_inf.classify(), Fp::Infinite); assert_eq!(zero.classify(), Fp::Zero); assert_eq!(neg_zero.classify(), Fp::Zero); assert_eq!(1e-307f64.classify(), Fp::Normal); assert_eq!(1e-308f64.classify(), Fp::Subnormal); } #[test] fn test_integer_decode() { assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1)); assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1)); assert_eq!(2f64.powf(100.0).integer_decode(), (4503599627370496, 48, 1)); assert_eq!(0f64.integer_decode(), (0, -1075, 1)); assert_eq!((-0f64).integer_decode(), (0, -1075, -1)); assert_eq!(INFINITY.integer_decode(), (4503599627370496, 972, 1)); assert_eq!(NEG_INFINITY.integer_decode(), (4503599627370496, 972, -1)); assert_eq!(NAN.integer_decode(), (6755399441055744, 972, 1)); } #[test] fn test_floor() { assert_approx_eq!(1.0f64.floor(), 1.0f64); assert_approx_eq!(1.3f64.floor(), 1.0f64); assert_approx_eq!(1.5f64.floor(), 1.0f64); assert_approx_eq!(1.7f64.floor(), 1.0f64); assert_approx_eq!(0.0f64.floor(), 0.0f64); assert_approx_eq!((-0.0f64).floor(), -0.0f64); assert_approx_eq!((-1.0f64).floor(), -1.0f64); assert_approx_eq!((-1.3f64).floor(), -2.0f64); assert_approx_eq!((-1.5f64).floor(), -2.0f64); assert_approx_eq!((-1.7f64).floor(), -2.0f64); } #[test] fn test_ceil() { assert_approx_eq!(1.0f64.ceil(), 1.0f64); assert_approx_eq!(1.3f64.ceil(), 2.0f64); assert_approx_eq!(1.5f64.ceil(), 2.0f64); assert_approx_eq!(1.7f64.ceil(), 2.0f64); assert_approx_eq!(0.0f64.ceil(), 0.0f64); assert_approx_eq!((-0.0f64).ceil(), -0.0f64); assert_approx_eq!((-1.0f64).ceil(), -1.0f64); assert_approx_eq!((-1.3f64).ceil(), -1.0f64); assert_approx_eq!((-1.5f64).ceil(), -1.0f64); assert_approx_eq!((-1.7f64).ceil(), -1.0f64); } #[test] fn test_round() { assert_approx_eq!(1.0f64.round(), 1.0f64); assert_approx_eq!(1.3f64.round(), 1.0f64); assert_approx_eq!(1.5f64.round(), 2.0f64); assert_approx_eq!(1.7f64.round(), 2.0f64); assert_approx_eq!(0.0f64.round(), 0.0f64); assert_approx_eq!((-0.0f64).round(), -0.0f64); assert_approx_eq!((-1.0f64).round(), -1.0f64); assert_approx_eq!((-1.3f64).round(), -1.0f64); assert_approx_eq!((-1.5f64).round(), -2.0f64); assert_approx_eq!((-1.7f64).round(), -2.0f64); } #[test] fn test_trunc() { assert_approx_eq!(1.0f64.trunc(), 1.0f64); assert_approx_eq!(1.3f64.trunc(), 1.0f64); assert_approx_eq!(1.5f64.trunc(), 1.0f64); assert_approx_eq!(1.7f64.trunc(), 1.0f64); assert_approx_eq!(0.0f64.trunc(), 0.0f64); assert_approx_eq!((-0.0f64).trunc(), -0.0f64); assert_approx_eq!((-1.0f64).trunc(), -1.0f64); assert_approx_eq!((-1.3f64).trunc(), -1.0f64); assert_approx_eq!((-1.5f64).trunc(), -1.0f64); assert_approx_eq!((-1.7f64).trunc(), -1.0f64); } #[test] fn test_fract() { assert_approx_eq!(1.0f64.fract(), 0.0f64); assert_approx_eq!(1.3f64.fract(), 0.3f64); assert_approx_eq!(1.5f64.fract(), 0.5f64); assert_approx_eq!(1.7f64.fract(), 0.7f64); assert_approx_eq!(0.0f64.fract(), 0.0f64); assert_approx_eq!((-0.0f64).fract(), -0.0f64); assert_approx_eq!((-1.0f64).fract(), -0.0f64); assert_approx_eq!((-1.3f64).fract(), -0.3f64); assert_approx_eq!((-1.5f64).fract(), -0.5f64); assert_approx_eq!((-1.7f64).fract(), -0.7f64); } #[test] fn test_abs() { assert_eq!(INFINITY.abs(), INFINITY); 
assert_eq!(1f64.abs(), 1f64); assert_eq!(0f64.abs(), 0f64); assert_eq!((-0f64).abs(), 0f64); assert_eq!((-1f64).abs(), 1f64); assert_eq!(NEG_INFINITY.abs(), INFINITY); assert_eq!((1f64/NEG_INFINITY).abs(), 0f64); assert!(NAN.abs().is_nan()); } #[test] fn test_signum() { assert_eq!(INFINITY.signum(), 1f64); assert_eq!(1f64.signum(), 1f64); assert_eq!(0f64.signum(), 1f64); assert_eq!((-0f64).signum(), -1f64); assert_eq!((-1f64).signum(), -1f64); assert_eq!(NEG_INFINITY.signum(), -1f64); assert_eq!((1f64/NEG_INFINITY).signum(), -1f64); assert!(NAN.signum().is_nan()); } #[test] fn test_is_sign_positive() { assert!(INFINITY.is_sign_positive()); assert!(1f64.is_sign_positive()); assert!(0f64.is_sign_positive()); assert!(!(-0f64).is_sign_positive()); assert!(!(-1f64).is_sign_positive()); assert!(!NEG_INFINITY.is_sign_positive()); assert!(!(1f64/NEG_INFINITY).is_sign_positive()); assert!(!NAN.is_sign_positive()); } #[test] fn test_is_sign_negative() { assert!(!INFINITY.is_sign_negative()); assert!(!1f64.is_sign_negative()); assert!(!0f64.is_sign_negative()); assert!((-0f64).is_sign_negative()); assert!((-1f64).is_sign_negative()); assert!(NEG_INFINITY.is_sign_negative()); assert!((1f64/NEG_INFINITY).is_sign_negative()); assert!(!NAN.is_sign_negative()); } #[test] fn test_mul_add() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_approx_eq!(12.3f64.mul_add(4.5, 6.7), 62.05); assert_approx_eq!((-12.3f64).mul_add(-4.5, -6.7), 48.65); assert_approx_eq!(0.0f64.mul_add(8.9, 1.2), 1.2); assert_approx_eq!(3.4f64.mul_add(-0.0, 5.6), 5.6); assert!(nan.mul_add(7.8, 9.0).is_nan()); assert_eq!(inf.mul_add(7.8, 9.0), inf); assert_eq!(neg_inf.mul_add(7.8, 9.0), neg_inf); assert_eq!(8.9f64.mul_add(inf, 3.2), inf); assert_eq!((-3.2f64).mul_add(2.4, neg_inf), neg_inf); } #[test] fn test_recip() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(1.0f64.recip(), 1.0); assert_eq!(2.0f64.recip(), 0.5); assert_eq!((-0.4f64).recip(), -2.5); assert_eq!(0.0f64.recip(), inf); assert!(nan.recip().is_nan()); assert_eq!(inf.recip(), 0.0); assert_eq!(neg_inf.recip(), 0.0); } #[test] fn test_powi() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(1.0f64.powi(1), 1.0); assert_approx_eq!((-3.1f64).powi(2), 9.61); assert_approx_eq!(5.9f64.powi(-2), 0.028727); assert_eq!(8.3f64.powi(0), 1.0); assert!(nan.powi(2).is_nan()); assert_eq!(inf.powi(3), inf); assert_eq!(neg_inf.powi(2), inf); } #[test] fn test_powf() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(1.0f64.powf(1.0), 1.0); assert_approx_eq!(3.4f64.powf(4.5), 246.408183); assert_approx_eq!(2.7f64.powf(-3.2), 0.041652); assert_approx_eq!((-3.1f64).powf(2.0), 9.61); assert_approx_eq!(5.9f64.powf(-2.0), 0.028727); assert_eq!(8.3f64.powf(0.0), 1.0); assert!(nan.powf(2.0).is_nan()); assert_eq!(inf.powf(2.0), inf); assert_eq!(neg_inf.powf(3.0), neg_inf); } #[test] fn test_sqrt_domain() { assert!(NAN.sqrt().is_nan()); assert!(NEG_INFINITY.sqrt().is_nan()); assert!((-1.0f64).sqrt().is_nan()); assert_eq!((-0.0f64).sqrt(), -0.0); assert_eq!(0.0f64.sqrt(), 0.0); assert_eq!(1.0f64.sqrt(), 1.0); assert_eq!(INFINITY.sqrt(), INFINITY); } #[test] fn test_rsqrt() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert!(nan.rsqrt().is_nan()); assert_eq!(inf.rsqrt(), 
0.0); assert!(neg_inf.rsqrt().is_nan()); assert!((-1.0f64).rsqrt().is_nan()); assert_eq!((-0.0f64).rsqrt(), neg_inf); assert_eq!(0.0f64.rsqrt(), inf); assert_eq!(1.0f64.rsqrt(), 1.0); assert_eq!(4.0f64.rsqrt(), 0.5); } #[test] fn test_exp() { assert_eq!(1.0, 0.0f64.exp()); assert_approx_eq!(2.718282, 1.0f64.exp()); assert_approx_eq!(148.413159, 5.0f64.exp()); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(inf, inf.exp()); assert_eq!(0.0, neg_inf.exp()); assert!(nan.exp().is_nan()); } #[test] fn test_exp2() { assert_eq!(32.0, 5.0f64.exp2()); assert_eq!(1.0, 0.0f64.exp2()); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(inf, inf.exp2()); assert_eq!(0.0, neg_inf.exp2()); assert!(nan.exp2().is_nan()); } #[test] fn test_ln() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_approx_eq!(1.0f64.exp().ln(), 1.0); assert!(nan.ln().is_nan()); assert_eq!(inf.ln(), inf); assert!(neg_inf.ln().is_nan()); assert!((-2.3f64).ln().is_nan()); assert_eq!((-0.0f64).ln(), neg_inf); assert_eq!(0.0f64.ln(), neg_inf); assert_approx_eq!(4.0f64.ln(), 1.386294); } #[test] fn test_log() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(10.0f64.log(10.0), 1.0); assert_approx_eq!(2.3f64.log(3.5), 0.664858); assert_eq!(1.0f64.exp().log(1.0.exp()), 1.0); assert!(1.0f64.log(1.0).is_nan()); assert!(1.0f64.log(-13.9).is_nan()); assert!(nan.log(2.3).is_nan()); assert_eq!(inf.log(10.0), inf); assert!(neg_inf.log(8.8).is_nan()); assert!((-2.3f64).log(0.1).is_nan()); assert_eq!((-0.0f64).log(2.0), neg_inf); assert_eq!(0.0f64.log(7.0), neg_inf); } #[test] fn test_log2() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_approx_eq!(10.0f64.log2(), 3.321928); assert_approx_eq!(2.3f64.log2(), 1.201634); assert_approx_eq!(1.0f64.exp().log2(), 1.442695); assert!(nan.log2().is_nan()); assert_eq!(inf.log2(), inf); assert!(neg_inf.log2().is_nan()); assert!((-2.3f64).log2().is_nan()); assert_eq!((-0.0f64).log2(), neg_inf); assert_eq!(0.0f64.log2(), neg_inf); } #[test] fn test_log10() { let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(10.0f64.log10(), 1.0); assert_approx_eq!(2.3f64.log10(), 0.361728); assert_approx_eq!(1.0f64.exp().log10(), 0.434294); assert_eq!(1.0f64.log10(), 0.0); assert!(nan.log10().is_nan()); assert_eq!(inf.log10(), inf); assert!(neg_inf.log10().is_nan()); assert!((-2.3f64).log10().is_nan()); assert_eq!((-0.0f64).log10(), neg_inf); assert_eq!(0.0f64.log10(), neg_inf); } #[test] fn test_to_degrees() { let pi: f64 = consts::PI; let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(0.0f64.to_degrees(), 0.0); assert_approx_eq!((-5.8f64).to_degrees(), -332.315521); assert_eq!(pi.to_degrees(), 180.0); assert!(nan.to_degrees().is_nan()); assert_eq!(inf.to_degrees(), inf); assert_eq!(neg_inf.to_degrees(), neg_inf); } #[test] fn test_to_radians() { let pi: f64 = consts::PI; let nan: f64 = Float::nan(); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); assert_eq!(0.0f64.to_radians(), 0.0); assert_approx_eq!(154.6f64.to_radians(), 2.698279); assert_approx_eq!((-332.31f64).to_radians(), -5.799903); assert_eq!(180.0f64.to_radians(), pi); 
assert!(nan.to_radians().is_nan()); assert_eq!(inf.to_radians(), inf); assert_eq!(neg_inf.to_radians(), neg_inf); } #[test] fn test_ldexp() { // We have to use from_str until base-2 exponents // are supported in floating-point literals let f1: f64 = FromStrRadix::from_str_radix("1p-123", 16).unwrap(); let f2: f64 = FromStrRadix::from_str_radix("1p-111", 16).unwrap(); let f3: f64 = FromStrRadix::from_str_radix("1.Cp-12", 16).unwrap(); assert_eq!(1f64.ldexp(-123), f1); assert_eq!(1f64.ldexp(-111), f2); assert_eq!(Float::ldexp(1.75f64, -12), f3); assert_eq!(Float::ldexp(0f64, -123), 0f64); assert_eq!(Float::ldexp(-0f64, -123), -0f64); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(Float::ldexp(inf, -123), inf); assert_eq!(Float::ldexp(neg_inf, -123), neg_inf); assert!(Float::ldexp(nan, -123).is_nan()); } #[test] fn test_frexp() { // We have to use from_str until base-2 exponents // are supported in floating-point literals let f1: f64 = FromStrRadix::from_str_radix("1p-123", 16).unwrap(); let f2: f64 = FromStrRadix::from_str_radix("1p-111", 16).unwrap(); let f3: f64 = FromStrRadix::from_str_radix("1.Cp-123", 16).unwrap(); let (x1, exp1) = f1.frexp(); let (x2, exp2) = f2.frexp(); let (x3, exp3) = f3.frexp(); assert_eq!((x1, exp1), (0.5f64, -122)); assert_eq!((x2, exp2), (0.5f64, -110)); assert_eq!((x3, exp3), (0.875f64, -122)); assert_eq!(Float::ldexp(x1, exp1), f1); assert_eq!(Float::ldexp(x2, exp2), f2); assert_eq!(Float::ldexp(x3, exp3), f3); assert_eq!(0f64.frexp(), (0f64, 0)); assert_eq!((-0f64).frexp(), (-0f64, 0)); } #[test] #[cfg_attr(windows, ignore)] // FIXME #8755 fn test_frexp_nowin() { let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(match inf.frexp() { (x, _) => x }, inf); assert_eq!(match neg_inf.frexp() { (x, _) => x }, neg_inf); assert!(match nan.frexp() { (x, _) => x.is_nan() }) } #[test] fn test_abs_sub() { assert_eq!((-1f64).abs_sub(1f64), 0f64); assert_eq!(1f64.abs_sub(1f64), 0f64); assert_eq!(1f64.abs_sub(0f64), 1f64); assert_eq!(1f64.abs_sub(-1f64), 2f64); assert_eq!(NEG_INFINITY.abs_sub(0f64), 0f64); assert_eq!(INFINITY.abs_sub(1f64), INFINITY); assert_eq!(0f64.abs_sub(NEG_INFINITY), INFINITY); assert_eq!(0f64.abs_sub(INFINITY), 0f64); } #[test] fn test_abs_sub_nowin() { assert!(NAN.abs_sub(-1f64).is_nan()); assert!(1f64.abs_sub(NAN).is_nan()); } #[test] fn test_asinh() { assert_eq!(0.0f64.asinh(), 0.0f64); assert_eq!((-0.0f64).asinh(), -0.0f64); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(inf.asinh(), inf); assert_eq!(neg_inf.asinh(), neg_inf); assert!(nan.asinh().is_nan()); assert_approx_eq!(2.0f64.asinh(), 1.443635475178810342493276740273105f64); assert_approx_eq!((-2.0f64).asinh(), -1.443635475178810342493276740273105f64); } #[test] fn test_acosh() { assert_eq!(1.0f64.acosh(), 0.0f64); assert!(0.999f64.acosh().is_nan()); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = Float::nan(); assert_eq!(inf.acosh(), inf); assert!(neg_inf.acosh().is_nan()); assert!(nan.acosh().is_nan()); assert_approx_eq!(2.0f64.acosh(), 1.31695789692481670862504634730796844f64); assert_approx_eq!(3.0f64.acosh(), 1.76274717403908605046521864995958461f64); } #[test] fn test_atanh() { assert_eq!(0.0f64.atanh(), 0.0f64); assert_eq!((-0.0f64).atanh(), -0.0f64); let inf: f64 = Float::infinity(); let neg_inf: f64 = Float::neg_infinity(); let nan: f64 = 
Float::nan(); assert_eq!(1.0f64.atanh(), inf); assert_eq!((-1.0f64).atanh(), neg_inf); assert!(2f64.atanh().atanh().is_nan()); assert!((-2f64).atanh().atanh().is_nan()); assert!(inf.atanh().is_nan()); assert!(neg_inf.atanh().is_nan()); assert!(nan.atanh().is_nan()); assert_approx_eq!(0.5f64.atanh(), 0.54930614433405484569762261846126285f64); assert_approx_eq!((-0.5f64).atanh(), -0.54930614433405484569762261846126285f64); } #[test] fn test_real_consts() { use super::consts; let pi: f64 = consts::PI; let two_pi: f64 = consts::PI_2; let frac_pi_2: f64 = consts::FRAC_PI_2; let frac_pi_3: f64 = consts::FRAC_PI_3; let frac_pi_4: f64 = consts::FRAC_PI_4; let frac_pi_6: f64 = consts::FRAC_PI_6; let frac_pi_8: f64 = consts::FRAC_PI_8; let frac_1_pi: f64 = consts::FRAC_1_PI; let frac_2_pi: f64 = consts::FRAC_2_PI; let frac_2_sqrtpi: f64 = consts::FRAC_2_SQRTPI; let sqrt2: f64 = consts::SQRT2; let frac_1_sqrt2: f64 = consts::FRAC_1_SQRT2; let e: f64 = consts::E; let log2_e: f64 = consts::LOG2_E; let log10_e: f64 = consts::LOG10_E; let ln_2: f64 = consts::LN_2; let ln_10: f64 = consts::LN_10; assert_approx_eq!(two_pi, 2.0 * pi); assert_approx_eq!(frac_pi_2, pi / 2f64); assert_approx_eq!(frac_pi_3, pi / 3f64); assert_approx_eq!(frac_pi_4, pi / 4f64); assert_approx_eq!(frac_pi_6, pi / 6f64); assert_approx_eq!(frac_pi_8, pi / 8f64); assert_approx_eq!(frac_1_pi, 1f64 / pi); assert_approx_eq!(frac_2_pi, 2f64 / pi); assert_approx_eq!(frac_2_sqrtpi, 2f64 / pi.sqrt()); assert_approx_eq!(sqrt2, 2f64.sqrt()); assert_approx_eq!(frac_1_sqrt2, 1f64 / 2f64.sqrt()); assert_approx_eq!(log2_e, e.log2()); assert_approx_eq!(log10_e, e.log10()); assert_approx_eq!(ln_2, 2f64.ln()); assert_approx_eq!(ln_10, 10f64.ln()); } }
{ num::Float::exp(self) }
setup.py
#!/usr/bin/env python # Copyright (c) 2017 The sqlalchemy-bigquery Authors # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import io import itertools import os import re from setuptools import setup # Package metadata. name = "sqlalchemy-bigquery" description = "SQLAlchemy dialect for BigQuery" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" package_root = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(package_root, "sqlalchemy_bigquery", "version.py")) as f: version = re.search('__version__ = "([^"]+)"', f.read()).group(1) def readme():
extras = dict( geography=["GeoAlchemy2", "shapely"], alembic=["alembic"], tests=["packaging", "pytz"], ) extras["all"] = set(itertools.chain.from_iterable(extras.values())) setup( name=name, version=version, description=description, long_description=readme(), long_description_content_type="text/x-rst", author="The Sqlalchemy-Bigquery Authors", author_email="[email protected]", packages=["sqlalchemy_bigquery"], url="https://github.com/googleapis/python-bigquery-sqlalchemy", keywords=["bigquery", "sqlalchemy"], classifiers=[ release_status, "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Operating System :: OS Independent", "Topic :: Database :: Front-Ends", ], platforms="Posix; MacOS X; Windows", install_requires=[ "google-api-core>=1.30.0", # Work-around bug in cloud core deps. # NOTE: Maintainers, please do not require google-auth>=2.x.x # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 "google-auth>=1.25.0,<3.0.0dev", # Work around pip wack. "google-cloud-bigquery>=2.24.1", "sqlalchemy>=1.2.0,<1.5.0dev", "future", ], extras_require=extras, python_requires=">=3.6, <3.10", tests_require=["packaging", "pytz"], entry_points={ "sqlalchemy.dialects": ["bigquery = sqlalchemy_bigquery:BigQueryDialect"] }, # Document that this replaces pybigquery, however, this isn't # enforced by pip, because doing so would allow rogue packages to # obsolete legitimate ones. obsoletes=["pybigquery"], )
with io.open("README.rst", "r", encoding="utf8") as f: return f.read()
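The `extras` mapping above defines optional dependency groups that pip can select at install time, with `all` unioning every group. A minimal usage sketch (the extra names come straight from the `extras` dict above):
pip install sqlalchemy-bigquery                # core dialect only
pip install 'sqlalchemy-bigquery[geography]'   # adds GeoAlchemy2 and shapely
pip install 'sqlalchemy-bigquery[all]'         # every optional group at once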
migrate.rs
//! migrates the AppConfig file 0L.toml /// /// tool to migrate config files /// should be run after version upgrade /// /// this version implements migration of 0L.toml from v4.2.8 to 4.3.0 /// /// /// code is not performance optimized /// reads and writes the config file for each attribute, but it's a one-time job... /// use libra_global_constants::{CONFIG_FILE, NODE_HOME}; use chrono::{DateTime, Utc}; use fs_extra::file::{copy, CopyOptions}; use regex::Regex; use std::fs::File; use std::io::{Read, Write}; use std::path::PathBuf; /// Migrate the toml file pub fn migrate(opt_home_path: Option<PathBuf>) { let home = opt_home_path.unwrap_or(dirs::home_dir().unwrap().join(NODE_HOME)); let config_file = home.join(CONFIG_FILE); migrate_toml(config_file, home); } fn migrate_toml(config_file: PathBuf, node_home: PathBuf) { if !config_file.exists() { println!("config file: {:?} does not exist - no migration possible", config_file); return; } // step 1: make a backup make_backup_file(&config_file); // step 2: update all attributes // ---------------------- update [workspace] config start ---------------------- let default_db_path = node_home.join("db").as_path().display().to_string(); let default_source_path = dirs::home_dir().unwrap().join("libra").as_path().display().to_string(); add_or_update_s(&config_file, "workspace", "db_path", default_db_path); add_or_update_s(&config_file, "workspace", "source_path", default_source_path); // ---------------------- update [workspace] config finished ---------------------- // ---------------------- update [chain_info] config start ---------------------- add_or_update_n(&config_file, "chain_info", "base_epoch", 0); // ---------------------- update [chain_info] config finished ---------------------- // ---------------------- update the tx costs config start ---------------------- rename_section(&config_file, "tx_configs.miner_txs", "tx_configs.baseline_cost"); // previous [tx_configs.management_txs] is renamed and value changed from 1000000 to 100000 rename_section(&config_file, "tx_configs.management_txs", "tx_configs.management_txs_cost"); add_or_update_n(&config_file, "tx_configs.management_txs_cost", "max_gas_unit_for_tx", 100000); // add [tx_configs.critical_txs_cost] if not there add_section(&config_file, "tx_configs.critical_txs_cost"); add_or_update_n(&config_file, "tx_configs.critical_txs_cost", "user_tx_timeout", 5000); add_or_update_n(&config_file, "tx_configs.critical_txs_cost", "coin_price_per_unit", 1); add_or_update_n(&config_file, "tx_configs.critical_txs_cost", "max_gas_unit_for_tx", 1000000); // add [tx_configs.miner_txs_cost] if not there add_section(&config_file, "tx_configs.miner_txs_cost"); add_or_update_n(&config_file, "tx_configs.miner_txs_cost", "user_tx_timeout", 5000); add_or_update_n(&config_file, "tx_configs.miner_txs_cost", "coin_price_per_unit", 1); add_or_update_n(&config_file, "tx_configs.miner_txs_cost", "max_gas_unit_for_tx", 10000); // add [tx_configs.cheap_txs_cost] if not there add_section(&config_file, "tx_configs.cheap_txs_cost"); add_or_update_n(&config_file, "tx_configs.cheap_txs_cost", "user_tx_timeout", 5000); add_or_update_n(&config_file, "tx_configs.cheap_txs_cost", "coin_price_per_unit", 1); add_or_update_n(&config_file, "tx_configs.cheap_txs_cost", "max_gas_unit_for_tx", 1000); // ---------------------- update the tx costs config finished ---------------------- } /// add a new section in case it does not exist already /// /// searches in the `filename` whether `section` already exists. /// If so, the file is not updated. 
/// Otherwise an empty `section` is inserted at the end of the file /// /// example call: /// add_section("/root/.0L/0L.toml", "new-section"); /// pub fn add_section(filename: &PathBuf, section: &str) { let mut section_exists = false; let my_section_re = Regex::new(&format!(r"^\[{}\]$", section).as_str()).unwrap(); // round 1: check if section already exists let file_content = read_file(&filename); for line in file_content.lines() { section_exists |= my_section_re.is_match(&line); } // round 2: update if necessary if section_exists { println!("{:?}: section [{}] already there (all fine)", &filename, &section); } else { let mut file = match File::create(&filename) { Err(why) => panic!("couldn't create {:?}: {}", filename, why), Ok(file) => file, }; for line in file_content.lines() { match file.write_fmt(format_args!("{}\n", &line)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } } match file.write_fmt(format_args!("\n[{}]\n", &section)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } println!("{:?}: added section [{}]", &filename, &section); } } /// rename a section /// /// searches in the `filename` whether `old_section_name` exists. /// If so, `old_section_name` is renamed to `new_section_name` /// /// example call: /// rename_section("/root/.0L/0L.toml", "old-section", "new-section"); /// pub fn rename_section(filename: &PathBuf, old_section_name: &str, new_section_name: &str) { let mut section_exists = false; let old_section_re = Regex::new(&format!(r"^\[{}\]$", old_section_name).as_str()).unwrap(); let new_section_re = Regex::new(&format!(r"^\[{}\]$", new_section_name).as_str()).unwrap(); // round 1: check if section already exists let file_content = read_file(&filename); for line in file_content.lines() { section_exists |= new_section_re.is_match(&line); } // round 2: update if necessary if section_exists { println!("{:?}: section [{}] already there (all fine)", &filename, &new_section_name); } else { let mut file = match File::create(&filename) { Err(why) => panic!("couldn't create {:?}: {}", filename, why), Ok(file) => file, }; for line in file_content.lines() { if old_section_re.is_match(&line) { match file.write_fmt(format_args!("[{}]\n", &new_section_name)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } } else { match file.write_fmt(format_args!("{}\n", &line)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } } } println!( "{:?}: renamed section [{}] to [{}]", &filename, &old_section_name, &new_section_name ); } } /// add an attribute whose value should be surrounded by quote signs to a section in case it does not exist already /// /// searches in the `filename` whether `attribute` /// exists in `section`. If so, the file is not updated. /// Otherwise the `attribute` is inserted with `value` /// /// example call: /// add_or_update("/root/.0L/0L.toml", "workspace", "db_path", "/root/.0L/db"); /// pub fn add_or_update_s(filename: &PathBuf, section: &str, attribute: &str, value: String) { add_or_update(filename, section, attribute, format!("\"{}\"", value)); } /// adds or updates a numeric attribute in a section (see `add_or_update`) pub fn add_or_update_n(filename: &PathBuf, section: &str, attribute: &str, value: i64) { add_or_update(filename, section, attribute, value.to_string()); } /// adds or updates an attribute in a section /// /// searches in the `filename` whether `attribute` /// exists in `section`. 
If so, the attribute is updated to `value` /// Otherwise the `attribute` is inserted with `value` /// /// example call: /// add_or_update("/root/.0L/0L.toml", "workspace", "db_path", "/root/.0L/db"); /// pub fn
(filename: &PathBuf, section: &str, attribute: &str, value: String) { let mut in_my_section = false; let mut attribute_exists = false; let any_section_start_re = Regex::new(r"^\[.*\]$").unwrap(); let my_section_re = Regex::new(&format!(r"^\[{}\]$", section).as_str()).unwrap(); let my_attribute_re = Regex::new(&format!(r"^{}[ \t]*=.*$", attribute).as_str()).unwrap(); // round 1: check if attribute already exists let file_content = read_file(&filename); for line in file_content.lines() { if any_section_start_re.is_match(&line) { in_my_section = my_section_re.is_match(&line); } else { attribute_exists |= in_my_section && my_attribute_re.is_match(&line); } } // round 2: add or update if necessary let mut file = match File::create(&filename) { Err(why) => panic!("couldn't create {:?}: {}", filename, why), Ok(file) => file, }; in_my_section = false; for line in file_content.lines() { if any_section_start_re.is_match(&line) { in_my_section = my_section_re.is_match(&line); match file.write_fmt(format_args!("{}\n", &line)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } if in_my_section && !attribute_exists { // add the new attribute and value to start of section match file.write_fmt(format_args!("{} = {}\n", attribute, value)) { Err(why) => println!("writing to file failed {:?}", why), _ => println!("{:?}: added property [{}]/{}", &filename, &section, &attribute), } } } else { if in_my_section && my_attribute_re.is_match(&line) { // update the value match file.write_fmt(format_args!("{} = {}\n", attribute, value)) { Err(why) => println!("writing to file failed {:?}", why), _ => println!("{:?}: updated property [{}]/{} to {}", &filename, &section, &attribute, &value), } } else { // otherwise just write the original attribute and value match file.write_fmt(format_args!("{}\n", &line)) { Err(why) => println!("writing to file failed {:?}", why), _ => (), } } } } } /// creates a backup of the file /// /// the filename of the backup file be like the original /// filename, with an appended ".bak" and timestamp /// /// example call /// make_backup_file("/root/.0L/0L.toml"); /// would create a file /// "/root/.0L/0L.toml.bak.20210428-200235" /// fn make_backup_file(filename: &PathBuf) { let now: DateTime<Utc> = Utc::now(); let backup_filename = &format!( "{}.bak.{}", filename.as_path().display().to_string(), now.format("%Y%m%d-%H%M%S") ); match copy(&filename, PathBuf::from(backup_filename), &CopyOptions::new()) { Err(why) => panic!("writing backup file failed {:?} - stopping", why), _ => println!("created backup file: {:?}", backup_filename), } } /// read a file into a String /// borrowed from: /// https://www.tutorialspoint.com/file-operations-in-rust-programming pub fn read_file(filename: &PathBuf) -> String { let mut file = match File::open(&filename) { Err(why) => panic!("unable to open {:?} - {}", filename, why), Ok(file) => file, }; let mut s = String::new(); match file.read_to_string(&mut s) { Err(why) => panic!("unable to read {:?} - {}", filename, why), Ok(_) => return s, } }
add_or_update
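A minimal sketch of how the `migrate` entry point above might be invoked from other code; the explicit home path is hypothetical, and passing `None` falls back to `~/.0L` via `NODE_HOME`, as the function shows:
// migrate the config under the default home directory
migrate(None);
// or under an explicit node home (hypothetical path)
migrate(Some(PathBuf::from("/root/.0L")));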
logging.py
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import http.client import locale import logging import os import sys from contextlib import contextmanager from io import BufferedReader, TextIOWrapper from logging import Formatter, LogRecord, StreamHandler from typing import Dict, Iterator import pants.util.logging as pants_logging from pants.base.deprecated import deprecated_conditional from pants.engine.internals import native_engine from pants.option.option_value_container import OptionValueContainer from pants.util.dirutil import safe_mkdir_for from pants.util.logging import LogLevel # Although logging supports the WARN level, it's not documented and could conceivably be yanked. # Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly # set up a 'WARN' logging level name that maps to 'WARNING'. logging.addLevelName(logging.WARNING, "WARN") logging.addLevelName(pants_logging.TRACE, "TRACE") class _NativeHandler(StreamHandler): """This class is installed as a Python logging module handler (using the logging.addHandler method) and proxies logs to the Rust logging infrastructure.""" def emit(self, record: LogRecord) -> None: native_engine.write_log(self.format(record), record.levelno, record.name) def flush(self) -> None: native_engine.flush_log() class _ExceptionFormatter(Formatter): """Uses the `--print-stacktrace` option to decide whether to render stacktraces.""" def __init__(self, print_stacktrace: bool): super().__init__(None) self.print_stacktrace = print_stacktrace def formatException(self, exc_info): if self.print_stacktrace: return super().formatException(exc_info) return "\n(Use --print-stacktrace to see more error details.)" @contextmanager def stdio_destination(stdin_fileno: int, stdout_fileno: int, stderr_fileno: int) -> Iterator[None]: """Sets a destination for both logging and stdio: must be called after `initialize_stdio`. After `initialize_stdio` and outside of this contextmanager, the default stdio destination is the pants.log. But inside of this block, all engine "tasks"/@rules that are spawned will have thread/task-local state that directs their IO to the given destination. When the contextmanager exits, all tasks will be restored to the default destination (regardless of whether they have completed). """ if not logging.getLogger(None).handlers: raise AssertionError("stdio_destination should only be called after initialize_stdio.") native_engine.stdio_thread_console_set(stdin_fileno, stdout_fileno, stderr_fileno) try: yield finally: native_engine.stdio_thread_console_clear() @contextmanager def _python_logging_setup(level: LogLevel, print_stacktrace: bool) -> Iterator[None]: """Installs a root Python logger that routes all logging through a Rust logger.""" def trace_fn(self, message, *args, **kwargs): if self.isEnabledFor(LogLevel.TRACE.level): self._log(LogLevel.TRACE.level, message, *args, **kwargs) logging.Logger.trace = trace_fn # type: ignore[attr-defined] logger = logging.getLogger(None) def clear_logging_handlers(): handlers = tuple(logger.handlers) for handler in handlers: logger.removeHandler(handler) return handlers def set_logging_handlers(handlers): for handler in handlers: logger.addHandler(handler) # Remove existing handlers, and restore them afterward. handlers = clear_logging_handlers() try: # This routes warnings through our loggers instead of straight to raw stderr. 
logging.captureWarnings(True) handler = _NativeHandler() handler.setFormatter(_ExceptionFormatter(print_stacktrace)) logger.addHandler(handler) level.set_level_for(logger) if logger.isEnabledFor(LogLevel.TRACE.level): http.client.HTTPConnection.debuglevel = 1 # type: ignore[attr-defined] requests_logger = logging.getLogger("requests.packages.urllib3") LogLevel.TRACE.set_level_for(requests_logger) requests_logger.propagate = True yield finally: clear_logging_handlers() set_logging_handlers(handlers) @contextmanager def initialize_stdio(global_bootstrap_options: OptionValueContainer) -> Iterator[None]: """Mutates sys.std* and logging to route stdio for a Pants process to thread local destinations. In this context, `sys.std*` and logging handlers will route through Rust code that uses thread-local information to decide whether to write to a file, or to stdio file handles. To control the stdio destination set by this method, use the `stdio_destination` context manager. This is called in two different processes: * PantsRunner, after it has determined that LocalPantsRunner will be running in process, and immediately before setting a `stdio_destination` for the remainder of the run. * PantsDaemon, immediately on startup. The process will then default to sending stdio to the log until client connections arrive, at which point `stdio_destination` is used per-connection. """ global_level = global_bootstrap_options.level log_show_rust_3rdparty = global_bootstrap_options.log_show_rust_3rdparty use_color = global_bootstrap_options.colors show_target = global_bootstrap_options.show_log_target log_levels_by_target = _get_log_levels_by_target(global_bootstrap_options) message_regex_filters = global_bootstrap_options.ignore_pants_warnings print_stacktrace = global_bootstrap_options.print_stacktrace # Set the pants log destination. deprecated_log_path = os.path.join( global_bootstrap_options.pants_workdir, "pantsd", "pantsd.log" ) log_path = os.path.join(global_bootstrap_options.pants_workdir, "pants.log") safe_mkdir_for(deprecated_log_path) safe_mkdir_for(log_path) # NB: We append to the deprecated log location with a deprecated conditional that never # triggers, because there is nothing that the user can do about the deprecation. deprecated_conditional( predicate=lambda: False, removal_version="2.5.0.dev0", entity_description=f"Logging to {deprecated_log_path}", hint_message=f"Refer to {log_path} instead.", ) with open(deprecated_log_path, "a") as a: a.write(f"This log location is deprecated: please refer to {log_path} instead.\n") # Initialize thread-local stdio, and replace sys.std* with proxies. original_stdin, original_stdout, original_stderr = sys.stdin, sys.stdout, sys.stderr try: raw_stdin, sys.stdout, sys.stderr = native_engine.stdio_initialize( global_level.level, log_show_rust_3rdparty, use_color, show_target, {k: v.level for k, v in log_levels_by_target.items()}, tuple(message_regex_filters), log_path, ) sys.stdin = TextIOWrapper( BufferedReader(raw_stdin), # NB: We set the default encoding explicitly to bypass logic in the TextIOWrapper # constructor that would poke the underlying file (which is not valid until a # `stdio_destination` is set). encoding=locale.getpreferredencoding(False), ) sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr # Install a Python logger that will route through the Rust logger.
sys.stdin, sys.stdout, sys.stderr = original_stdin, original_stdout, original_stderr sys.__stdin__, sys.__stdout__, sys.__stderr__ = sys.stdin, sys.stdout, sys.stderr def _get_log_levels_by_target( global_bootstrap_options: OptionValueContainer, ) -> Dict[str, LogLevel]: raw_levels = global_bootstrap_options.log_levels_by_target levels: Dict[str, LogLevel] = {} for key, value in raw_levels.items(): if not isinstance(key, str): raise ValueError( f"Keys for log_levels_by_target must be strings, but was given the key: {key} with type {type(key)}." ) if not isinstance(value, str): raise ValueError( f"Values for log_levels_by_target must be strings, but was given the value: {value} with type {type(value)}." ) log_level = LogLevel[value.upper()] levels[key] = log_level return levels
with _python_logging_setup(global_level, print_stacktrace): yield finally:
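A minimal sketch of the call pattern the docstrings above describe; the file descriptors here are hypothetical, and real callers would pass the descriptors of a client connection rather than the process defaults:
with initialize_stdio(global_bootstrap_options):
    # inside this block, engine tasks log to pants.log by default...
    with stdio_destination(stdin_fileno=0, stdout_fileno=1, stderr_fileno=2):
        ...  # ...and to the given descriptors inside this narrower scope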
issue-12729.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass // pretty-expanded FIXME #23616 pub struct Foo; mod bar { use Foo; impl Foo { fn baz(&self) {} } } fn
() {}
main
gameserver.go
package events import v1 "agones.dev/agones/pkg/apis/agones/v1" var ( GameServerEventAdded GameServerEventType = "gameserver.events.added" GameServerEventUpdated GameServerEventType = "gameserver.events.updated" GameServerEventDeleted GameServerEventType = "gameserver.events.deleted" ) type GameServerEventType string // GameServerEvent is the data structure for reconcile events associated with Agones GameServers // It holds the event source (OnAdd, OnUpdate, OnDelete) and the event type (Added, Updated, Deleted). type GameServerEvent struct { Source EventSource `json:"source"` Type GameServerEventType `json:"type"` Message `json:"message"` }
func init() { RegisterEventFactory(&v1.GameServer{}, GameServerAdded, GameServerUpdated, GameServerDeleted) } // GameServerAdded creates a reconcile event of type Add func GameServerAdded(message Message) Event { return &GameServerEvent{ Source: EventSourceOnAdd, Type: GameServerEventAdded, Message: message, } } // GameServerUpdated creates a reconcile event of type Update func GameServerUpdated(message Message) Event { return &GameServerEvent{ Source: EventSourceOnUpdate, Type: GameServerEventUpdated, Message: message, } } // GameServerDeleted creates a reconcile event of type Delete func GameServerDeleted(message Message) Event { return &GameServerEvent{ Source: EventSourceOnDelete, Type: GameServerEventDeleted, Message: message, } } // EventType returns the type of the reconcile event for a GameServer. // For example: Added, Updated, Deleted func (t *GameServerEvent) EventType() EventType { return EventType(t.Type) } // EventSource returns the event source that generated the event. // For example: OnAdd, OnUpdate, OnDelete func (t *GameServerEvent) EventSource() EventSource { return t.Source } // String is a helper method that returns the string version of a GameServerEventType func (t GameServerEventType) String() string { return string(t) }
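A minimal sketch of the factory functions above in use, assuming a value `msg` of this package's `Message` type (defined elsewhere in the package):
event := GameServerAdded(msg)
event.EventType()   // "gameserver.events.added"
event.EventSource() // EventSourceOnAdd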
base.go
package api import ( "gf-vue-admin/library/response" service "gf-vue-admin/app/service/system" "github.com/gogf/gf/net/ghttp" ) var Base = new(base) type base struct{} // @Tags SystemBase // @Summary Generate a captcha // @Security ApiKeyAuth // @accept application/json // @Produce application/json // @Success 200 {string} string "{"success":true,"data":{},"msg":"captcha generated successfully"}" // @Router /base/captcha [post] func (b *base) Captcha(r *ghttp.Request) *response.Response { if result, err := service.Base.Captcha(); err != nil { return &response.Res
se.Response{Data: result, MessageCode: response.SuccessCaptcha} } }
ponse{Data: result, MessageCode: response.ErrorCaptcha} } else { return &respon
sync_test.go
package revenue_test import ( "context" "testing" . "github.com/onsi/gomega" "github.com/asia-loop-gmbh/lambda-utils-go/v3/internal/pkg/test" "github.com/asia-loop-gmbh/lambda-utils-go/v3/pkg/logger" "github.com/asia-loop-gmbh/lambda-utils-go/v3/pkg/revenue" ) func TestCheckOrder(t *testing.T) { RegisterFailHandler(test.FailedHandler(t)) err := revenue.CheckOrder(logger.NewEmptyLogger(), context.TODO(), "dev", "259") Expect(err).To(BeNil()) err = revenue.CheckOrder(logger.NewEmptyLogger(), context.TODO(), "dev", "62235c6c8ed20c2774179f40") Expect(err).To(BeNil()) err = revenue.CheckOrder(logger.NewEmptyLogger(), context.TODO(), "dev", "62229b6c957a5b919f8360fb") Expect(err).To(BeNil()) } func TestCheckRefundWoo(t *testing.T)
func TestCheckRefundNonWoo(t *testing.T) { RegisterFailHandler(test.FailedHandler(t)) err := revenue.CheckRefund(logger.NewEmptyLogger(), context.TODO(), "dev", "62235c6c8ed20c2774179f40") Expect(err).To(BeNil()) }
{ RegisterFailHandler(test.FailedHandler(t)) err := revenue.CheckRefund(logger.NewEmptyLogger(), context.TODO(), "dev", "263") Expect(err).To(BeNil()) }
main.rs
use lambda_runtime::{error::HandlerError, lambda, Context}; use serde_json::Value; fn main() { lambda!(handler) } fn handler( event: Value, _: Context, ) -> Result<Value, HandlerError> { Ok(event) } #[cfg(test)] mod tests { use super::*; use serde_json::json; #[test] fn handler_handles() {
let event = json!({ "answer": 42 }); assert_eq!( handler(event.clone(), Context::default()).expect("expected Ok(_) value"), event ) } }
xflash-serial.py
#!/usr/bin/env python import serial import sys import struct import pprint import argparse import code pp = pprint.PrettyPrinter() class ConsoleUI: def opStart(self, name): sys.stdout.write(name.ljust(40)) def opProgress(self, progress, total=-1): if (total >= 0): prstr = "0x%04x / 0x%04x" % (progress, total) else: prstr = "0x%04x" % (progress) sys.stdout.write(prstr.ljust(20)) sys.stdout.write('\x08' * 20) sys.stdout.flush() def opEnd(self, result): sys.stdout.write(result.ljust(20)) sys.stdout.write("\n") class XFlash: def __init__(self, serialport): self.serial = serial.Serial(serialport, baudrate=115200) def __del__(self): try: self.serial.close() del self.serial except: pass def cmd(self, cmd, argA=0, argB=0): buffer = struct.pack("<LL", argA, argB) self.serial.write(bytes([cmd])) self.serial.write(buffer) self.serial.flush() def flashPowerOn(self): self.cmd(0x10) def flashShutdown(self): self.cmd(0x11) def update(self): try: self.cmd(0xF0) except: pass def flashInit(self): self.cmd(0x03) buffer = self.serial.read(4) return struct.unpack("<L", buffer)[0] def flashDeInit(self): self.cmd(0x04) def flashStatus(self): self.cmd(0x05) buffer = self.serial.read(2) return struct.unpack("<H", buffer)[0] def flashErase(self, block):
def flashReadBlock(self, block): self.cmd(0x01, block, 528 * 32) # for i in range(0, 32): buffer = self.serial.read(528 * 32) status = self.flashStatus() return (status, buffer) def flashWriteBlock(self, block, buffer): self.cmd(0x02, block, len(buffer)) self.serial.write(buffer) return self.flashStatus() # def calcecc(data): # assert len(data) == 0x210 # val = 0 # for i in range(0x1066): # if not i & 31: # v = ~struct.unpack("<L", data[i/8:i/8+4])[0] # val ^= v & 1 # v >>= 1 # if val & 1: # val ^= 0x6954559 # val >>= 1 # # val = ~val # return data[:-4] + struct.pack("<L", (val << 6) & 0xFFFFFFFF) # # def addecc(data, block = 0, off_8 = "\x00" * 4): # res = "" # while len(data): # d = (data[:0x200] + "\x00" * 0x200)[:0x200] # data = data[0x200:] # # d += struct.pack("<L4B4s4s", block / 32, 0, 0xFF, 0, 0, off_8, "\0\0\0\0") # d = calcecc(d) # block += 1 # res += d # return res def main(argv): parser = argparse.ArgumentParser(description='XBox 360 NAND Flasher') parser.add_argument('port', metavar='port', type=str, help='serial port for comms (e.g. COM5 or /dev/ttyUSB0)') subparsers = parser.add_subparsers(title='Operations', dest='action') parser_read = subparsers.add_parser('read', help='Dumps an image from the NAND') parser_read.add_argument('file', nargs=1, type=argparse.FileType('wb'), help='The file to dump the NAND to') parser_read.add_argument('start', nargs='?', metavar='start', action='store', type=int, default=0, help='The block to start the action from') parser_read.add_argument('end', nargs='?', metavar='end', action='store', type=int, default=0x400, help='The count of blocks to perform the action to') parser_write = subparsers.add_parser('write', help='Writes an image into the NAND') parser_write.add_argument('file', nargs=1, type=argparse.FileType('rb'), help='The image file to write to the NAND') parser_write.add_argument('start', nargs='?', metavar='start', action='store', type=int, default=0, help='The block to start the action from') parser_write.add_argument('end', nargs='?', metavar='end', action='store', type=int, default=0x400, help='The count of blocks to perform the action to') # parser_erase = subparsers.add_parser('erase', help='Erases blocks in the NAND') # parser_erase.add_argument('start', nargs='?', metavar='start', action='store', type=int, default=0, # help='The block to start the action from') # parser_erase.add_argument('end', nargs='?', metavar='end', action='store', type=int, default=0x400, # help='The count of blocks to perform the action to') # # parser_update = subparsers.add_parser('update', # help='Jumps into the bootloader of the NAND Flashing device for updating the firmware') # parser_shutdown = subparsers.add_parser('shutdown', help='Shuts down the attached XBox 360') # parser_poweron = subparsers.add_parser('powerup', help='Powers up the attached XBox 360') arguments = parser.parse_args(argv[1:]) ui = ConsoleUI() xf = XFlash(arguments.port) if arguments.action in ('erase', 'write', 'read'): try: flash_config = xf.flashInit() print("FlashConfig: 0x%08x" % (flash_config)) if flash_config <= 0: raise Exception("FlashConfig invalid!") except Exception as e: print("Error!", e) xf.flashDeInit() return 1 try: if arguments.action == 'erase': # start = 0 # end = (options.flashsize * 1024) / 16 start = arguments.start end = arguments.end ui.opStart('Erase') ui.opProgress(0, end) for b in range(start, end): status = xf.flashErase(b) ui.opProgress(b + 1, end) ui.opEnd('0x%04x blocks OK' % (end)) if arguments.action == 'read': # start = 0 # end = 
(options.flashsize * 1024) / 16 start = arguments.start end = arguments.end ui.opStart('Read') ui.opProgress(0, end) for b in range(start, end): (status, buffer) = xf.flashReadBlock(b) ui.opProgress(b + 1, end) arguments.file[0].write(buffer) if arguments.action == 'write': # start = 0 # end = (options.flashsize * 1024) / 16 start = arguments.start end = arguments.end blocksize = 528 * 32 ui.opStart('Write') ui.opProgress(0, end) for b in range(start, end): buffer = arguments.file[0].read(blocksize) if len(buffer) < blocksize: # pad short reads with 0xFF bytes (the file is opened in binary mode) buffer += (b'\xFF' * (blocksize - len(buffer))) status = xf.flashWriteBlock(b, buffer) ui.opProgress(b + 1, end) # # if arguments.action == 'update': # xf.update() # # if arguments.action == 'powerup': # xf.flashPowerOn() # # if arguments.action == 'shutdown': # xf.flashShutdown() except Exception as e: raise e finally: xf.flashDeInit() return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
self.cmd(0x06, block) # return self.flashStatus()
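A usage sketch of the CLI defined by the argparse setup above; the port name is hypothetical, and the `start`/`end` block arguments must be decimal because they are parsed with `type=int`:
python xflash-serial.py /dev/ttyUSB0 read dump.bin 0 1024
python xflash-serial.py /dev/ttyUSB0 write image.bin 0 1024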
conftest.py
import pytest from trydjango.users.models import User from trydjango.users.tests.factories import UserFactory @pytest.fixture(autouse=True) def media_storage(settings, tmpdir): settings.MEDIA_ROOT = tmpdir.strpath @pytest.fixture def user() -> User:
return UserFactory()
decode_gogo.go
// Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto import ( "reflect" ) // Decode a reference to a struct pointer. func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { raw, e := o.DecodeRawBytes(false) if e != nil { return e } // If the object can unmarshal itself, let it. if p.isUnmarshaler { panic("not supported, since this is a pointer receiver") } obuf := o.buf oi := o.index o.buf = raw o.index = 0 bas := structPointer_FieldPointer(base, p.field) err = o.unmarshalType(p.stype, p.sprop, false, bas) o.buf = obuf o.index = oi return err } // Decode a slice of references to struct pointers ([]struct). func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { newBas := appendStructPointer(base, p.field, p.sstype) if is_group { panic("not supported, maybe in future, if requested.") } raw, err := o.DecodeRawBytes(false) if err != nil { return err } // If the object can unmarshal itself, let it. if p.isUnmarshaler { panic("not supported, since this is not a pointer receiver.") } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) o.buf = obuf o.index = oi return err } // Decode a slice of references to struct pointers. func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { return o.dec_slice_ref_struct(p, false, base) } func setPtrCustomType(base structPointer, f field, v interface{})
func setCustomType(base structPointer, f field, value interface{}) { if value == nil { return } v := reflect.ValueOf(value).Elem() t := reflect.TypeOf(value).Elem() kind := t.Kind() switch kind { case reflect.Slice: slice := reflect.MakeSlice(t, v.Len(), v.Cap()) reflect.Copy(slice, v) oldHeader := structPointer_GetSliceHeader(base, f) oldHeader.Data = slice.Pointer() oldHeader.Len = v.Len() oldHeader.Cap = v.Cap() default: size := reflect.TypeOf(value).Elem().Size() structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) } } func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } i := reflect.New(p.ctype.Elem()).Interface() custom := (i).(Unmarshaler) if err := custom.Unmarshal(b); err != nil { return err } setPtrCustomType(base, p.field, custom) return nil } func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } i := reflect.New(p.ctype).Interface() custom := (i).(Unmarshaler) if err := custom.Unmarshal(b); err != nil { return err } if custom != nil { setCustomType(base, p.field, custom) } return nil } // Decode a slice of bytes ([]byte) into a slice of custom types. func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } i := reflect.New(p.ctype.Elem()).Interface() custom := (i).(Unmarshaler) if err := custom.Unmarshal(b); err != nil { return err } newBas := appendStructPointer(base, p.field, p.ctype) var zero field setCustomType(newBas, zero, custom) return nil }
{ if v == nil { return } structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) }
remove-element-object.ts
export const removeElementObject = ( originalObject: Record<string, any>, keyNames: string[], ) => { const elementsReduced: any = {}; const objectReduced: any = {};
elementsReduced[value] = originalObject[value]; } }); return { originalObject, objectReduced, elementsReduced }; };
Object.keys(originalObject).forEach(value => { if (keyNames.indexOf(value) === -1) { objectReduced[value] = originalObject[value]; } else {
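A usage sketch of `removeElementObject` with hypothetical input: keys listed in `keyNames` land in `elementsReduced`, everything else in `objectReduced`, and the input object is returned untouched:
const r = removeElementObject({ id: 1, name: 'a', tmp: true }, ['tmp']);
// r.objectReduced   -> { id: 1, name: 'a' }
// r.elementsReduced -> { tmp: true }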
codec.go
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package dubbo import ( "bufio" "bytes" e1 "errors" "fmt" "sync" ) import ( perrors "github.com/pkg/errors" ) import ( hessian "github.com/apache/dubbo-go-hessian2" ) // SerialID serial ID type SerialID byte const ( // S_Dubbo protocol serial id S_Dubbo SerialID = 2 ) // CallType call type type CallType int32 const ( // CT_UNKNOWN unknown call type CT_UNKNOWN CallType = 0 // CT_OneWay call one way CT_OneWay CallType = 1 // CT_TwoWay call in request/response CT_TwoWay CallType = 2 ) //////////////////////////////////////////// // protocol package //////////////////////////////////////////// // SequenceType sequence type type SequenceType int64 // nolint type DubboPackage struct { Header hessian.DubboHeader Service hessian.Service Body interface{} Err error } // Marshal encodes a hessian package. // DubboPackage -> byte func (p *DubboPackage) Marshal() (*bytes.Buffer, error) { codec := hessian.NewHessianCodec(nil) pkg, err := codec.Write(p.Service, p.Header, p.Body) if err != nil { return nil, perrors.WithStack(err) } return bytes.NewBuffer(pkg), nil } // Unmarshal decodes a hessian package. // byte -> DubboPackage func (p *DubboPackage) Unmarshal(buf *bytes.Buffer, pendingRsp *sync.Map) error { bufLen := buf.Len() if bufLen < hessian.HEADER_LENGTH { return perrors.WithStack(hessian.ErrHeaderNotEnough) } codec := hessian.NewHessianCodec(bufio.NewReaderSize(buf, bufLen)) // read header err := codec.ReadHeader(&p.Header) if err != nil { return perrors.WithStack(err) } if p.Header.Type&hessian.PackageRequest != 0x00 { p.Body = make([]interface{}, 7) } else { rspObj, ok := pendingRsp.Load(uint64(p.Header.ID)) if !ok { return e1.New(fmt.Sprintf("seq = %d not found", p.Header.ID)) } p.Body = &hessian.Response{RspObj: rspObj} } // read body
//////////////////////////////////////////// // Response //////////////////////////////////////////// // Response is the protocol response. type Response struct { Reply interface{} atta map[string]string } // NewResponse creates a new Response. func NewResponse(reply interface{}, atta map[string]string) *Response { return &Response{ Reply: reply, atta: atta, } }
err = codec.ReadBody(p.Body) return perrors.WithStack(err) }
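A minimal decode sketch for the `Unmarshal` path above; `raw` is a hypothetical byte slice holding a full Dubbo frame, and the `sync.Map` is only consulted for response packages, where it must already map the header sequence ID to the expected reply object:
var pending sync.Map
pkg := &DubboPackage{}
if err := pkg.Unmarshal(bytes.NewBuffer(raw), &pending); err != nil {
    // header not yet complete, or unknown sequence ID
}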
parallel_pipe_io_base.py
import abc
@abc.abstractmethod def set_pipe(self, pipe): pass @abc.abstractmethod def parallel_write_end_loop(self) -> None: pass @abc.abstractmethod def is_running(self): pass @abc.abstractmethod def is_stopped(self): pass @abc.abstractmethod def start_running(self): pass @abc.abstractmethod def __call__(self): pass class ReaderBase(abc.ABC): @abc.abstractmethod def set_pipe(self, pipe): pass @abc.abstractmethod def is_running(self): pass @abc.abstractmethod def is_stopped(self): pass @abc.abstractmethod def start_running(self): pass @abc.abstractmethod def __call__(self): pass
class WriterBase(abc.ABC):
st-encode-line.js
export default function encodeLine(str) { let res = ''; let counter = 1; for (let i = 0; i < str.length; i++) { if (str[i] === str[i + 1]) { counter++; } else if (counter === 1) { res += `${str[i]}`; } else { res += `${counter}${str[i]}`; counter = 1; } } return res; }
import { NotImplementedError } from '../extensions/index.js';
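A usage sketch of `encodeLine` as implemented above; note this variant emits the run count before the repeated character and omits the count for single occurrences:
encodeLine('aabccc'); // -> '2ab3c'
encodeLine('abc');    // -> 'abc'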
cookbook_versions_data.go
// Copyright (c) 2017-2021 Ingram Micro Inc. package testdata import ( "github.com/ingrammicro/cio/api/types" ) // GetCookbookVersionData loads test data func GetCookbookVersionData() []*types.CookbookVersion { return []*types.CookbookVersion{ { ID: "fakeID0", Name: "fakeName0", Description: "fakeDescription0", Version: "fakeVersion0", State: "fakeState0", RevisionID: "fakeRevisionID0", Recipes: []string{"fakeRecipe01", "fakeRecipe02"}, ResourceType: "fakeResourceType0", PubliclyAvailable: true, GlobalLegacy: true, UploadURL: "fakeUploadURL0", ErrorMessage: "",
Name: "fakeName1", Description: "fakeDescription1", Version: "fakeVersion1", State: "fakeState1", RevisionID: "fakeRevisionID1", Recipes: []string{"fakeRecipe11", "fakeRecipe12"}, ResourceType: "fakeResourceType1", PubliclyAvailable: true, GlobalLegacy: true, UploadURL: "fakeUploadURL1", ErrorMessage: "", }, } }
}, { ID: "fakeID1",
api.go
package clair import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "net/http" "strings" "time" "github.com/quay/clair/v3/api/v3/clairpb" "github.com/scopej/klar/docker" "github.com/scopej/klar/utils" "google.golang.org/grpc" ) type apiV1 struct { url string client http.Client } type apiV3 struct { url string client clairpb.AncestryServiceClient } func newAPI(url string, version int, timeout time.Duration) (API, error) { if version < 3 { return newAPIV1(url, timeout), nil } return newAPIV3(url) } func newAPIV1(url string, timeout time.Duration) *apiV1 { if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") { url = fmt.Sprintf("http://%s", url) } if strings.LastIndex(url, ":") < 6 { url = fmt.Sprintf("%s:6060", url) } return &apiV1{ url: url, client: http.Client{ Timeout: timeout, }, } } func newAPIV3(url string) (*apiV3, error) { if i := strings.Index(url, "://"); i != -1 { runes := []rune(url) url = string(runes[i+3:]) } if strings.Index(url, ":") == -1 { url = fmt.Sprintf("%s:6060", url) } conn, err := grpc.Dial(url, grpc.WithInsecure()) if err != nil { return nil, fmt.Errorf("did not connect to %s: %v", url, err) } return &apiV3{ url: url, client: clairpb.NewAncestryServiceClient(conn)}, nil } func (a *apiV1) Push(image *docker.Image) error { for i := 0; i < len(image.FsLayers); i++ { layer := newLayer(image, i) if err := a.pushLayer(layer); err != nil { return err } } return nil } func (a *apiV1) pushLayer(layer *layer) error { envelope := layerEnvelope{Layer: layer} reqBody, err := json.Marshal(envelope) if err != nil { return fmt.Errorf("can't serialize push request: %s", err) } url := fmt.Sprintf("%s/v1/layers", a.url) request, err := http.NewRequest("POST", url, bytes.NewBuffer(reqBody)) if err != nil { return fmt.Errorf("can't create a push request: %s", err) } request.Header.Set("Content-Type", "application/json") utils.DumpRequest(request) response, err := a.client.Do(request) if err != nil { return fmt.Errorf("can't push layer to Clair: %s", err) } utils.DumpResponse(response) defer response.Body.Close() body, err := ioutil.ReadAll(response.Body) if err != nil { return fmt.Errorf("can't read clair response: %s", err) } if response.StatusCode != http.StatusCreated { var lerr layerError err = json.Unmarshal(body, &lerr) if err != nil { return fmt.Errorf("can't even read an error message: %s", err) } return fmt.Errorf("push error %d: %s", response.StatusCode, string(body)) } return nil } func (a *apiV1) Analyze(image *docker.Image) ([]*Vulnerability, error) { url := fmt.Sprintf("%s/v1/layers/%s?vulnerabilities", a.url, image.AnalyzedLayerName()) request, err := http.NewRequest("GET", url, nil) if err != nil { return nil, fmt.Errorf("can't create an analyze request: %s", err) } utils.DumpRequest(request) response, err := a.client.Do(request) if err != nil { return nil, err } utils.DumpResponse(response) defer response.Body.Close() if response.StatusCode != http.StatusOK { body, _ := ioutil.ReadAll(response.Body) return nil, fmt.Errorf("analyze error %d: %s", response.StatusCode, string(body)) } var envelope layerEnvelope if err = json.NewDecoder(response.Body).Decode(&envelope); err != nil { return nil, err } var vs []*Vulnerability for _, f := range envelope.Layer.Features { for _, v := range f.Vulnerabilities {
v.FeatureName = f.Name v.FeatureVersion = f.Version //the for loop uses the same variable for "v", reloading with new values //since we are appending a pointer to the variable to the slice, we need to create a copy of the struct //otherwise the slice winds up with multiple pointers to the same struct vulnerability := v vs = append(vs, &vulnerability) } } return vs, nil } func (a *apiV3) Push(image *docker.Image) error { req := &clairpb.PostAncestryRequest{ Format: "Docker", AncestryName: image.Name, } ls := make([]*clairpb.PostAncestryRequest_PostLayer, len(image.FsLayers)) for i := 0; i < len(image.FsLayers); i++ { ls[i] = newLayerV3(image, i) } req.Layers = ls _, err := a.client.PostAncestry(context.Background(), req) return err } func newLayerV3(image *docker.Image, index int) *clairpb.PostAncestryRequest_PostLayer { return &clairpb.PostAncestryRequest_PostLayer{ Hash: image.LayerName(index), Path: strings.Join([]string{image.Registry, image.Name, "blobs", image.FsLayers[index].BlobSum}, "/"), Headers: map[string]string{"Authorization": image.Token}, } } func (a *apiV3) Analyze(image *docker.Image) ([]*Vulnerability, error) { req := &clairpb.GetAncestryRequest{ AncestryName: image.Name, } resp, err := a.client.GetAncestry(context.Background(), req) if err != nil { return nil, err } var vs []*Vulnerability for _, l := range resp.Ancestry.Layers { for _, f := range l.DetectedFeatures { for _, v := range f.Vulnerabilities { cv := convertVulnerability(v) cv.FeatureName = f.Name cv.FeatureVersion = f.Version //the for loop uses the same variable for "cv", reloading with new values //since we are appending a pointer to the variable to the slice, we need to create a copy of the struct //otherwise the slice winds up with multiple pointers to the same struct vulnerability := cv vs = append(vs, vulnerability) } } } return vs, nil } func convertVulnerability(cv *clairpb.Vulnerability) *Vulnerability { return &Vulnerability{ Name: cv.Name, NamespaceName: cv.NamespaceName, Description: cv.Description, Severity: cv.Severity, Link: cv.Link, FixedBy: cv.FixedBy, } }
updateBinData.py
'''This job updates the minute-by-minute trading data for the whole available futures universe. ''' ''' Copyright (c) 2017, WinQuant Information and Technology Co. Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' # built-in modules import datetime as dt import logging # third-party modules # customized modules import data.api.futures as futuresApi import data.config as config import data.driver.mongodb as mongodb import data.instrument.trading.futures as futuresTrading # customize logging config logging.basicConfig( format='[%(levelname)s] %(message)s', level=logging.INFO ) def
(): '''Entry point of the job. ''' # runtime asOfDate = dt.date.today() logging.info( 'Updating minute bin data for futures on date {d:s}...'.format( d=str( asOfDate ) ) ) # get all futures in the universe futuresInfo = futuresApi.getFuturesInformation( asOfDate ) universe = dict( zip( futuresInfo.ticker, futuresInfo.secID ) ) # initialize MongoDB connection username, password = config.MONGODB_CRED db = mongodb.getAuthenticatedConnection( config.MONGODB_URL, config.MONGODB_PORT, username, password, 'binData' ) nFutures = len( universe ) logging.info( 'Minute bin volume for {ns:d} futures in total to be updated...'.format( ns=nFutures ) ) # for bin data, futures are updated one-by-one for i, ids in enumerate( universe.items() ): futures, secId = ids futures = futures.upper() logging.info( 'Updating minute bin data for {s:s} ({idx:d}/{n:d})...'.format( s=secId, idx=i + 1, n=nFutures ) ) data = futuresTrading.getBinData( futures, dataDate=asOfDate ) if len( data ) > 0: mongoDate = dt.datetime.combine( asOfDate, dt.datetime.min.time() ) record = { 'SecID': secId, 'Date': mongoDate, 'Data': data.to_json(), 'Country': 'CN' } db.futures.update( { 'SecID': secId, 'Date': mongoDate, 'Country': 'CN' }, record, upsert=True ) else: logging.warning( 'Empty data for {secId:s}'.format( secId=secId ) ) logging.info( 'All futures updated.' ) if __name__ == '__main__': # let's kick off the job main()
main
lib.rs
#[cfg(feature = "rayon")] extern crate intervals; #[macro_use(concatenate)] extern crate ndarray; extern crate healpix; extern crate num; extern crate numpy; extern crate rayon; extern crate time; extern crate pyo3; #[macro_use] extern crate lazy_static; use ndarray::{Array, Array1, Array2, Axis}; use numpy::{IntoPyArray, PyArray1, PyArray2, PyReadonlyArray1, PyReadonlyArray2}; use pyo3::prelude::{pymodule, Py, PyModule, PyResult, Python}; use pyo3::types::{PyDict, PyList}; use pyo3::{PyObject, ToPyObject}; use intervals::nestedranges2d::NestedRanges2D; use std::collections::HashMap; use std::sync::Mutex; pub mod coverage; pub mod spatial_coverage; pub mod temporal_coverage; pub mod time_space_coverage; type Coverage2DHashMap = HashMap<usize, NestedRanges2D<u64, u64>>; lazy_static! { static ref COVERAGES_2D: Mutex<Coverage2DHashMap> = Mutex::new(HashMap::new()); static ref NUM_COVERAGES_2D: Mutex<usize> = Mutex::new(0); } /// Insert a Time-Space coverage in the Hash Map /// storing all the current 2D coverages /// /// # Arguments /// /// * `coverage` - The new Time-Space coverage to insert /// /// # Panics /// /// * This will panic if the `COVERAGES_2D` or `NUM_COVERAGES_2D` /// are already held by the current thread fn
(coverage: NestedRanges2D<u64, u64>) -> usize { let mut coverages = COVERAGES_2D.lock().unwrap(); let mut num_coverages = NUM_COVERAGES_2D.lock().unwrap(); let index = *num_coverages; if let Some(_v) = coverages.insert(index, coverage) { panic!("There is already a coverage at this index."); } *num_coverages += 1; index } /// Remove a Time-Space coverage from the Hash Map /// storing all the current 2D coverages /// /// # Arguments /// /// * `index` - The index of the coverage to remove /// /// # Panics /// /// * If `COVERAGES_2D` is already held by the current thread. fn remove_coverage(index: usize) { let mut coverages = COVERAGES_2D.lock().unwrap(); let _coverage = coverages .remove(&index) // `None` is mapped to `Err(&'static str)` // because we suppose there should be a coverage // stored in the hash map at the `index` key. .expect("There is no coverage to remove"); } /// Replace a Time-Space coverage at a specific index. /// /// # Arguments /// /// * `index` - The index of the Time-Space coverage to replace /// * `coverage` - The new coverage /// /// # Panics /// /// * If no Time-Space coverage has been found in the hash map /// for this specific `index`. /// * If `COVERAGES_2D` is already held by the current thread. fn update_coverage(index: usize, coverage: NestedRanges2D<u64, u64>) { let mut coverages = COVERAGES_2D.lock().unwrap(); coverages .insert(index, coverage) // `None` is mapped to `Err(&'static str)` // because we suppose there should be a coverage // stored in the hash map at the `index` key. .expect("There is no coverage present"); } #[pymodule] fn mocpy(_py: Python, m: &PyModule) -> PyResult<()> { /// Create a 1D spatial coverage from a list of /// longitudes and latitudes /// /// # Arguments /// /// * ``depth`` - The depth of the coverage between `[0, <u64>::MAXDEPTH] = [0, 29]` /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// /// # Precondition /// /// ``lon`` and ``lat`` must be expressed in radians and be valid. /// /// # Errors /// /// * ``lon`` and ``lat`` do not have the same length /// * ``depth`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` #[pyfn(m, "from_lonlat")] fn from_lonlat( py: Python, depth: i8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, ) -> PyResult<Py<PyArray2<u64>>> { let lon = lon.as_array().to_owned().into_raw_vec(); let lat = lat.as_array().to_owned().into_raw_vec(); let ranges = spatial_coverage::create_from_position(lon, lat, depth)?; let result: Array2<u64> = ranges.into(); Ok(result.into_pyarray(py).to_owned()) } /// Create a 1D spatial coverage from a list of uniq cells each associated with a value. /// /// The coverage computed contains the cells summing from ``cumul_from`` to ``cumul_to``. /// /// # Arguments /// /// * ``uniq`` - Uniq HEALPix indices /// * ``values`` - Array containing the values associated with each cell. /// Must be of the same size as ``uniq`` and must sum to one. 
/// * ``cumul_from`` - The cumulative value from which cells are put in the coverage /// * ``cumul_to`` - The cumulative value to which cells are put in the coverage /// * ``max_depth`` - the largest depth of the output MOC, which must be larger than or equal to the largest /// depth in the `uniq` values /// /// # Precondition /// /// * ``uniq`` and ``values`` must be of the same size /// * ``values`` must sum to one #[pyfn(m, "from_valued_hpx_cells")] fn from_valued_hpx_cells( py: Python, max_depth: u8, uniq: PyReadonlyArray1<u64>, values: PyReadonlyArray1<f64>, cumul_from: f64, cumul_to: f64, ) -> PyResult<Py<PyArray2<u64>>> { let uniq = uniq.as_array().to_owned(); let values = values.as_array().to_owned(); let ranges = spatial_coverage::from_valued_healpix_cells(max_depth as u32, uniq, values, cumul_from, cumul_to)?; let result: Array2<u64> = ranges.into(); Ok(result.into_pyarray(py).to_owned()) } /// Create a 2D Time-Space coverage from a list of /// (time, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times`` - The times at which the sky coordinates have been given. /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` #[pyfn(m, "from_time_lonlat")] fn from_time_lonlat( index: usize, times: PyReadonlyArray1<f64>, d1: i8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: i8, ) -> PyResult<()> { let times = times.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_times_positions(times, lon, lat, d1, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, longitude, latitude) tuples. /// /// # Arguments /// /// * ``times_min`` - The beginning time of observation. /// * ``times_max`` - The ending time of observation. /// * ``d1`` - The depth along the Time axis. /// * ``lon`` - The longitudes in radians /// * ``lat`` - The latitudes in radians /// * ``d2`` - The depth along the Space axis. /// /// # Precondition /// /// * ``lon`` and ``lat`` must be expressed in radians. /// * ``times`` must be expressed in jd. /// /// # Errors /// /// * ``lon``, ``lat`` and ``times`` do not have the same length. 
/// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``d2`` is not comprised in `[0, <S>::MAXDEPTH] = [0, 29]` /// #[pyfn(m, "from_time_ranges_lonlat")] fn from_time_ranges_lonlat( index: usize, times_min: PyReadonlyArray1<f64>, times_max: PyReadonlyArray1<f64>, d1: i8, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>, d2: i8, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let lon = lon.as_array() .to_owned() .into_raw_vec(); let lat = lat.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::create_from_time_ranges_positions(times_min, times_max, d1, lon, lat, d2)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } /// Create a 2D Time-Space coverage from a list of /// (time_range, spatial coverage) tuples. /// /// # Arguments /// /// * ``times_min`` - The beginning time of observation. /// * ``times_max`` - The ending time of observation. /// * ``d1`` - The depth along the Time axis. /// * ``spatial_coverages`` - The list of spatial coverages, one per time range. /// /// # Precondition /// /// * ``times_min`` and ``times_max`` must be expressed in jd. /// /// # Errors /// /// * ``times_min``, ``times_max`` and ``spatial_coverages`` do not have the same length. /// * ``d1`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` /// #[pyfn(m, "from_time_ranges_spatial_coverages")] fn from_time_ranges_spatial_coverages( py: Python, index: usize, times_min: PyReadonlyArray1<f64>, times_max: PyReadonlyArray1<f64>, d1: i8, spatial_coverages: &PyList, ) -> PyResult<()> { let times_min = times_min.as_array() .to_owned() .into_raw_vec(); let times_max = times_max.as_array() .to_owned() .into_raw_vec(); let coverage = time_space_coverage::from_time_ranges_spatial_coverages(py, times_min, times_max, d1, spatial_coverages)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage); Ok(()) } #[pyfn(m, "project_on_first_dim")] fn project_on_first_dim(py: Python, ranges: PyReadonlyArray2<u64>, index: usize) -> Py<PyArray2<u64>> { // Build the input ranges from a Array2 let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_nested_ranges_from_py(ranges); // Get the coverage and perform the projection let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::project_on_first_dim(&ranges, coverage) }; // Convert the result back to an ndarray::Array2 let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned() } /// Project the Time-Space coverage into its second dimension /// (i.e. the Space axis) /// /// # Arguments /// /// * ``ranges`` - The constrained time set of ranges. /// * ``index`` - The index of the Time-Space coverage. /// /// # Algorithm /// /// Returns the union of the spatial coverages whose /// time ranges are contained in ``ranges``. /// /// # Panics /// /// If ``ranges`` is not valid, i.e.: /// /// * Contains ranges whose inf bound is /// greater than their sup bound. 
/// /// This **should** not panic as this code is wrapped around MOCPy #[pyfn(m, "project_on_second_dim")] fn project_on_second_dim( py: Python, ranges: PyReadonlyArray2<u64>, index: usize, ) -> Py<PyArray2<u64>> { // Build the input ranges from a Array2 let ranges = ranges.as_array().to_owned(); let ranges = coverage::create_nested_ranges_from_py(ranges); // Get the coverage and perform the projection let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::project_on_second_dim(&ranges, coverage) }; // Convert the result back to an ndarray::Array2 let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned() } /// Serialize a Time-Space coverage into FITS /// /// # Context /// /// This is wrapped around the `serialize` method /// of MOCPy to serialize a Time-Space coverage into /// FITS. /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage /// to serialize. #[pyfn(m, "coverage_2d_to_fits")] fn coverage_2d_to_fits(py: Python, index: usize) -> Py<PyArray1<i64>> { // Get the coverage and flatten it // to a Array1 let result: Array1<i64> = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::to_fits(coverage) }; result.into_pyarray(py).to_owned() } /// Deserialize a Time-Space coverage from FITS /// /// # Context /// /// This is wrapped around the `from_fits` method /// of MOCPy to load a Time-Space coverage from a /// FITS file. /// /// # Arguments /// /// * ``data`` - A 1d array buffer containing the time and /// space axis ranges data. /// /// # Errors /// /// The `Array1` object stores the Time-Space coverage /// under the nested format. /// Its memory layout contains each time range followed by the /// list of space ranges referred to that time range. /// Time ranges are negatives so that one can distinguish them /// from space ranges. /// /// This method returns a `PyValueError` if the `Array1` is not /// defined as above. #[pyfn(m, "coverage_2d_from_fits")] fn coverage_2d_from_fits(index: usize, data: PyReadonlyArray1<i64>) -> PyResult<()> { let data = data.as_array().to_owned(); let coverage_from_fits = time_space_coverage::from_fits(data)?; // Update a coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, coverage_from_fits); Ok(()) } /// Create a new empty Time-Space coverage /// /// This method is called in the constructor of the /// `mocpy.STMOC` class /// /// # Returns /// /// The index of the newly created Time-Space coverage #[pyfn(m, "create_2d_coverage")] fn create_2d_coverage(_py: Python) -> usize { // Create new empty coverage let empty_coverage = time_space_coverage::new(); // Insert a new coverage in the COVERAGES_2D // hash map and return its index key to python let result = insert_new_coverage(empty_coverage); result } /// Drop the content of a Time-Space coverage /// /// This method is automatically called by the /// Python garbage collector. #[pyfn(m, "drop_2d_coverage")] fn drop_2d_coverage(_py: Python, index: usize) { remove_coverage(index); } /// Computes the depth of a Time-Space coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Infos /// /// If the Time-Space coverage is empty, the returned /// depth is `(0, 0)`. 
#[pyfn(m, "coverage_2d_depth")] fn coverage_2d_depth(_py: Python, index: usize) -> (i8, i8) { // Get the coverage and computes its depth // If the coverage is empty, the depth will be // (0, 0) let result = { let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::depth(coverage) }; result } /// Returns the minimum time value of the Time-Space /// coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. #[pyfn(m, "coverage_2d_min_time")] fn coverage_2d_min_time(_py: Python, index: usize) -> PyResult<f64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_min(coverage) } /// Returns the maximum time value of the Time-Space /// coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// /// # Errors /// /// * If the coverage is empty. #[pyfn(m, "coverage_2d_max_time")] fn coverage_2d_max_time(_py: Python, index: usize) -> PyResult<f64> { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); time_space_coverage::t_max(coverage) } /// Perform the union between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_union")] fn coverage_2d_union(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the union let result = time_space_coverage::union(coverage_left, coverage_right); result }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result); } /// Perform the intersection between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_intersection")] fn coverage_2d_intersection(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the intersection let result = time_space_coverage::intersection(coverage_left, coverage_right); result }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result); } /// Perform the difference between two Time-Space coverages. /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. 
#[pyfn(m, "coverage_2d_difference")] fn coverage_2d_difference(_py: Python, index: usize, id_left: usize, id_right: usize) { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let coverage_left = coverages.get(&id_left).unwrap(); let coverage_right = coverages.get(&id_right).unwrap(); // Perform the difference let result = time_space_coverage::difference(coverage_left, coverage_right); result }; // Update the coverage in the COVERAGES_2D // hash map and return its index key to python update_coverage(index, result) } /// Check the equality between two Time-Space coverages /// /// # Arguments /// /// * ``id_left`` - The index of the Time-Space coverage being /// in the left of the operation. /// * ``id_right`` - The index of the Time-Space coverage being /// in the right of the operation. #[pyfn(m, "coverage_2d_equality_check")] fn coverage_2d_equality_check(_py: Python, id_left: usize, id_right: usize) -> bool { let result = { let coverages = COVERAGES_2D.lock().unwrap(); // Get the left and right coverages let cov_left = coverages.get(&id_left).unwrap(); let cov_right = coverages.get(&id_right).unwrap(); // Check the equality cov_left == cov_right }; // Return the index of the newly created // coverage result } /// Checks whether a Time-Space coverage is empty. /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage to check /// the emptiness. #[pyfn(m, "coverage_2d_is_empty")] fn coverage_2d_is_empty(_py: Python, index: usize) -> bool { // Get the coverage let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); coverage.is_empty() } /// Check if (time, position) tuples are contained into a Time-Space coverage /// /// # Arguments /// /// * ``index`` - The index of the Time-Space coverage. /// * ``times`` - Times at which the positions have been observed. /// * ``lon`` - The longitudes. /// * ``lat`` - The latitudes. 
/// /// # Errors /// /// * If `lon`, `lat` and `times` do not have the same length #[pyfn(m, "coverage_2d_contains")] fn coverage_2d_contains( py: Python, index: usize, times: PyReadonlyArray1<f64>, lon: PyReadonlyArray1<f64>, lat: PyReadonlyArray1<f64>) -> PyResult<Py<PyArray1<bool>>> { let times = times.as_array().to_owned(); let lon = lon.as_array().to_owned(); let lat = lat.as_array().to_owned(); let res = COVERAGES_2D.lock().unwrap(); let coverage = res.get(&index).unwrap(); let mut result: Array1<bool> = Array::from_elem((lon.shape()[0],), false); time_space_coverage::contains(coverage, times, lon, lat, &mut result)?; Ok(result.into_pyarray(py).to_owned()) } /// Perform the union between two spatial coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_union")] fn coverage_union(py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_nested_ranges_from_py(ranges_a); let cov_b = coverage::create_nested_ranges_from_py(ranges_b); let result = spatial_coverage::union(&cov_a, &cov_b); let result: Array2<u64> = result.into(); result.to_owned().into_pyarray(py).to_owned() } /// Perform the difference between two spatial coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_difference")] fn coverage_difference(py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_nested_ranges_from_py(ranges_a); let cov_b = coverage::create_nested_ranges_from_py(ranges_b); let result = spatial_coverage::difference(&cov_a, &cov_b); let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned() } /// Perform the intersection between two spatial coverages /// /// # Arguments /// /// * ``a`` - The spatial coverage being the left operand /// * ``b`` - The spatial coverage being the right operand #[pyfn(m, "coverage_intersection")] fn coverage_intersection( py: Python, a: PyReadonlyArray2<u64>, b: PyReadonlyArray2<u64>, ) -> Py<PyArray2<u64>> { let ranges_a = a.as_array().to_owned(); let ranges_b = b.as_array().to_owned(); let cov_a = coverage::create_nested_ranges_from_py(ranges_a); let cov_b = coverage::create_nested_ranges_from_py(ranges_b); let result = spatial_coverage::intersection(&cov_a, &cov_b); let result: Array2<u64> = result.into(); result.into_pyarray(py).to_owned() } /// Computes the complement of the coverage /// /// # Arguments /// /// * ``ranges`` - The input spatial coverage #[pyfn(m, "coverage_complement")] fn coverage_complement(py: Python, ranges: PyReadonlyArray2<u64>) -> Py<PyArray2<u64>> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_nested_ranges_from_py(ranges); let result = spatial_coverage::complement(&coverage); let result = if !result.is_empty() { result.into() } else { // TODO: try without this condition Array::zeros((1, 0)) }; result.into_pyarray(py).to_owned() } /// Deserialize a spatial coverage from a json python dictionary /// /// JSON python dictionary stores (key, value) pair where: /// /// * the ``key`` is a ``char`` being a depth /// * the ``value`` is a list of HEALPix cell indices at the depth /// indicated by the ``key`` 
/// /// # Arguments /// /// * ``input`` - The input python dictionary /// /// # Errors /// /// * ``input`` dict must have string typed ``key``. /// * ``input`` dict values must be a list of unsigned integers encoded /// on 64 bits (i.e. an array of `u64`). #[pyfn(m, "coverage_from_json")] fn coverage_from_json(py: Python, input: &PyDict) -> PyResult<Py<PyArray2<u64>>> { let coverage = coverage::from_json(py, input)?; let result: Array2<u64> = coverage.into(); Ok(result.into_pyarray(py).to_owned()) } /// Serialize a spatial coverage to a JSON format /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to serialize. #[pyfn(m, "coverage_to_json")] fn coverage_to_json(py: Python, ranges: PyReadonlyArray2<u64>) -> PyResult<PyObject> { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_nested_ranges_from_py(ranges); let result = coverage::to_json(py, coverage)?; Ok(result.to_object(py)) } /// Degrade a spatial coverage to a specific depth. /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to degrade. /// * ``depth`` - The depth to degrade the spatial coverage to. /// /// # Errors /// /// * ``depth`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` #[pyfn(m, "coverage_degrade")] fn coverage_degrade( py: Python, ranges: PyReadonlyArray2<u64>, depth: i8, ) -> PyResult<Py<PyArray2<u64>>> { let ranges = ranges.as_array().to_owned(); let mut ranges = coverage::create_nested_ranges_from_py(ranges); coverage::degrade_nested_ranges(&mut ranges, depth)?; // Merge the overlapping intervals after degradation let ranges = ranges.make_consistent(); let result: Array2<u64> = ranges.into(); Ok(result.into_pyarray(py).to_owned()) } /// Make a spatial coverage consistent /// /// # Infos /// /// This is an internal method whose purpose is not to be called /// by a user. It is called inside the `mocpy.IntervalSet` class. /// /// # Arguments /// /// * ``ranges`` - The spatial coverage ranges to make consistent. /// * ``min_depth`` - A minimum depth. This argument is optional. /// A min depth means that we do not want any HEALPix cells to be /// of depth < ``min_depth``. This argument is used for example for /// plotting a spatial coverage. All HEALPix cells of depth < 2 are split /// into cells of depth 2. /// /// # Errors /// /// * ``min_depth`` is not comprised in `[0, <T>::MAXDEPTH] = [0, 29]` #[pyfn(m, "coverage_merge_nested_intervals")] fn coverage_merge_nested_intervals( py: Python, ranges: PyReadonlyArray2<u64>, min_depth: i8, ) -> PyResult<Py<PyArray2<u64>>> { let ranges = ranges.as_array().to_owned(); // Convert the Array2<u64> to a NestedRanges<u64> // and make it consistent let mut coverage = coverage::create_nested_ranges_from_py(ranges); coverage = coverage::merge(coverage, min_depth)?; let result: Array2<u64> = coverage.into(); Ok(result.into_pyarray(py).to_owned()) } /// Compute the depth of a spatial coverage /// /// # Arguments /// /// * ``ranges`` - The input coverage. #[pyfn(m, "coverage_depth")] fn coverage_depth(_py: Python, ranges: PyReadonlyArray2<u64>) -> i8 { let ranges = ranges.as_array().to_owned(); let coverage = coverage::create_nested_ranges_from_py(ranges); coverage::depth(&coverage) } /// Compute the sky fraction of a spatial coverage /// /// # Arguments /// /// * ``coverage`` - The spatial coverage /// * ``max_depth`` - The max depth of the spatial coverage. 
#[pyfn(m, "coverage_sky_fraction")] fn coverage_sky_fraction(_py: Python, ranges: PyReadonlyArray2<u64>) -> f32 { let ranges = ranges.as_array().to_owned(); coverage::sky_fraction(&ranges) } /// Convert HEALPix cell indices from the **uniq** to the **nested** format. /// /// # Arguments /// /// * ``ranges`` - The HEALPix cells defined in the **uniq** format. #[pyfn(m, "to_nested")] fn to_nested(py: Python, ranges: PyReadonlyArray1<u64>) -> Py<PyArray2<u64>> { let ranges = ranges.as_array().to_owned(); let result: Array2<u64> = if ranges.is_empty() { Array::zeros((1, 0)) } else { let shape = (ranges.shape()[0], 1); let start = ranges.into_shape(shape).unwrap(); let end = &start + &Array2::<u64>::ones(shape); let ranges = concatenate![Axis(1), start, end]; let uniq_coverage = coverage::create_uniq_ranges_from_py(ranges).make_consistent(); let nested_coverage = spatial_coverage::to_nested(uniq_coverage); nested_coverage.into() }; result.into_pyarray(py).to_owned() } /// Convert HEALPix cell indices from the **nested** to the **uniq** format. /// /// # Arguments /// /// * ``ranges`` - The HEALPix cells defined in the **nested** format. #[pyfn(m, "to_uniq")] fn to_uniq(py: Python, ranges: PyReadonlyArray2<u64>) -> Py<PyArray1<u64>> { let ranges = ranges.as_array().to_owned(); let result: Array1<u64> = if ranges.is_empty() { Array::zeros((0,)) } else { let nested_coverage = coverage::create_nested_ranges_from_py(ranges); let uniq_coverage = nested_coverage.to_uniq(); uniq_coverage.into() }; result.into_pyarray(py).to_owned() } /// Create a temporal coverage from a list of time ranges expressed in jd. /// /// # Arguments /// /// * ``min_times`` - The list of inf bounds of the time ranges expressed in **jd** /// * ``max_times`` - The list of sup bounds of the time ranges expressed in **jd** /// /// # Errors /// /// * If the number of ``min_times`` and ``max_times`` do not match. #[pyfn(m, "from_time_ranges")] fn from_time_ranges( py: Python, min_times: PyReadonlyArray1<f64>, max_times: PyReadonlyArray1<f64>, ) -> PyResult<Py<PyArray2<u64>>> { let min_times = min_times.as_array().to_owned(); let max_times = max_times.as_array().to_owned(); let coverage = temporal_coverage::from_time_ranges(min_times, max_times)?; let result: Array2<u64> = coverage.into(); Ok(result.into_pyarray(py).to_owned()) } /// Flatten HEALPix cells to a specific depth /// /// # Arguments /// /// * ``data`` - The spatial coverage /// * ``depth`` - The depth to flatten the coverage to. #[pyfn(m, "flatten_pixels")] fn flatten_pixels(py: Python, data: PyReadonlyArray2<u64>, depth: i8) -> Py<PyArray1<u64>> { let data = data.as_array().to_owned(); let result = coverage::flatten_pixels(data, depth); result.into_pyarray(py).to_owned() } /// Create a spatial coverage from a list of HEALPix cell indices. /// /// # Arguments /// /// * ``pixels`` - A set of HEALPix cell indices /// * ``depth`` - The depths of each HEALPix cell indices /// /// # Precondition /// /// ``pixels`` and ``depth`` must be valid. This means that: /// /// * ``depth`` contains values in the range `[0, <T>::MAXDEPTH] = [0, 29]` /// * ``pixels`` contains values in the range `[0, 12*4**(depth)]` /// /// # Errors /// /// * ``depth`` and ``pixels`` have not the same length. 
#[pyfn(m, "from_healpix_cells")] fn from_healpix_cells( py: Python, pixels: PyReadonlyArray1<u64>, depth: PyReadonlyArray1<i8>, ) -> PyResult<Py<PyArray2<u64>>> { let pixels = pixels.as_array().to_owned(); let depth = depth.as_array().to_owned(); let result = spatial_coverage::from_healpix_cells(pixels, depth)?; Ok(result.into_pyarray(py).to_owned()) } Ok(()) }
insert_new_coverage
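The insert_new_coverage / remove_coverage / update_coverage trio above is a registry pattern: Rust owns every NestedRanges2D, and Python only ever holds an opaque usize key. A minimal std-only sketch of the same idea (the Registry type and all names are illustrative, not part of mocpy; the real module keeps two lazy_static globals instead of one struct):

use std::collections::HashMap;
use std::sync::Mutex;

/// Illustrative stand-in for the two lazy_static globals above.
struct Registry<T> {
    // (storage, next free index), guarded together so they stay in sync
    inner: Mutex<(HashMap<usize, T>, usize)>,
}

impl<T> Registry<T> {
    fn new() -> Self {
        Registry { inner: Mutex::new((HashMap::new(), 0)) }
    }

    /// Mirrors `insert_new_coverage`: store a value, hand back its key.
    fn insert(&self, value: T) -> usize {
        let mut guard = self.inner.lock().unwrap();
        let index = guard.1;
        assert!(guard.0.insert(index, value).is_none(), "index already taken");
        guard.1 += 1;
        index
    }

    /// Mirrors `remove_coverage`: panics if the key was never allocated.
    fn remove(&self, index: usize) -> T {
        self.inner.lock().unwrap().0.remove(&index).expect("no value to remove")
    }
}

fn main() {
    let registry = Registry::new();
    let a = registry.insert("coverage A");
    let b = registry.insert("coverage B");
    registry.remove(a);
    println!("index {} held {:?}", b, registry.remove(b));
}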
test_gluoncv_object_detection.py
import pytest from autogluon.core.space import Categorical from autogluon.vision._gluoncv import ObjectDetection def get_dataset(path): return ObjectDetection.Dataset.from_voc(path) @pytest.mark.skip(reason="ObjectDetector is not stable to test, and fails due to transient errors occasionally.") def test_object_detection_estimator(): dataset = get_dataset('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip') train_data, val_data, test_data = dataset.random_split(val_size=0.3, test_size=0.2, random_state=0) task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'batch_size': 4}) detector = task.fit(train_data) assert task.fit_summary().get('valid_map', 0) > 0 test_result = detector.predict(test_data) @pytest.mark.skip(reason="ObjectDetector is not stable to test, and fails due to transient errors occasionally.") def test_object_detection_estimator_transfer():
dataset = get_dataset('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip') train_data, val_data, test_data = dataset.random_split(val_size=0.3, test_size=0.2, random_state=0) task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'transfer': Categorical('yolo3_darknet53_coco', 'ssd_512_resnet50_v1_voc'), 'estimator': 'ssd', 'batch_size': 4}) detector = task.fit(train_data) assert task.fit_summary().get('valid_map', 0) > 0 test_result = detector.predict(test_data)
memory.rs
// SPDX-License-Identifier: MIT //! Memory Management for the Virt Device use core::{cell::UnsafeCell, ops::RangeInclusive}; //------------------------------------------------------------------------------ //- Symbols //------------------------------------------------------------------------------ /// Memory Map of this board pub(super) mod map {
// Symbols from the linker script. extern "Rust" { static __bss_start: UnsafeCell<u64>; static __bss_end: UnsafeCell<u64>; } //------------------------------------------------------------------------------ //- Functions //------------------------------------------------------------------------------ /// Returns the range of the .bss section pub fn bss_range() -> RangeInclusive<*mut u64> { // Safety: The values are provided by the Linker Script unsafe { let start = __bss_start.get(); let end = __bss_end.get(); assert!(end > start); RangeInclusive::new(start, end.sub(1)) } }
pub const UART_MMIO: *mut u8 = 0x1000_0000 as *mut u8; }
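bss_range() exists so early boot code can wipe the .bss section before any Rust code relies on zeroed statics. A hedged sketch of that consumer, with a stack array standing in for the linker-provided section so it runs on a host; zero_region is a hypothetical helper, not part of this crate:

use core::ops::RangeInclusive;

/// Sketch of the typical consumer of a range like `bss_range()`: zero every
/// word of .bss before any static is read.
unsafe fn zero_region(range: RangeInclusive<*mut u64>) {
    let mut word = *range.start();
    while word <= *range.end() {
        core::ptr::write_volatile(word, 0);
        word = word.add(1);
    }
}

fn main() {
    // Stand-in for the linker-provided section, so this sketch runs on a host.
    let mut fake_bss = [0xdead_beef_u64; 4];
    let start = fake_bss.as_mut_ptr();
    unsafe {
        zero_region(RangeInclusive::new(start, start.add(3)));
    }
    assert!(fake_bss.iter().all(|&w| w == 0));
    println!("fake .bss cleared");
}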
conftest.py
import pytest import logging import time from tests.common.dualtor.dual_tor_utils import get_crm_nexthop_counter # lgtm[py/unused-import] from tests.common.helpers.assertions import pytest_assert as py_assert from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service CRM_POLL_INTERVAL = 1 CRM_DEFAULT_POLL_INTERVAL = 300 @pytest.fixture def set_crm_polling_interval(rand_selected_dut): """ A function level fixture to set crm polling interval to 1 second """ wait_time = 2 logging.info("Setting crm polling interval to {} seconds".format(CRM_POLL_INTERVAL)) rand_selected_dut.command("crm config polling interval {}".format(CRM_POLL_INTERVAL)) logging.info("Waiting {} sec for CRM counters to become updated".format(wait_time)) time.sleep(wait_time) yield
@pytest.fixture def verify_crm_nexthop_counter_not_increased(rand_selected_dut, set_crm_polling_interval): """ A function level fixture to verify crm nexthop counter not increased """ original_counter = get_crm_nexthop_counter(rand_selected_dut) logging.info("Before test: crm nexthop counter = {}".format(original_counter)) yield time.sleep(CRM_POLL_INTERVAL) diff = get_crm_nexthop_counter(rand_selected_dut) - original_counter logging.info("After test: crm nexthop counter = {}".format(original_counter + diff)) py_assert(diff <= 0, "crm nexthop counter is increased by {}.".format(diff)) def pytest_addoption(parser): """ Adds pytest options that are used by dual ToR tests """ dual_tor_group = parser.getgroup("Dual ToR test suite options") dual_tor_group.addoption( "--mux-stress-count", action="store", default=2, type=int, help="The number of iterations for mux stress test" ) @pytest.fixture(scope="module", autouse=True) def common_setup_teardown(request, tbinfo, vmhost): if 'dualtor' in tbinfo['topo']['name']: request.getfixturevalue('run_garp_service') vmhost.shell('systemctl restart mux-simulator')
logging.info("Setting crm polling interval to {} seconds".format(CRM_DEFAULT_POLL_INTERVAL)) rand_selected_dut.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
retry_filter.py
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Copyright (c) 2013-2017 Wind River Systems, Inc. # from oslo_log import log as logging from nova.i18n import _LI from nova.scheduler import filters LOG = logging.getLogger(__name__) class RetryFilter(filters.BaseHostFilter):
"""Filter out nodes that have already been attempted for scheduling purposes """ # NOTE(danms): This does not affect _where_ an instance lands, so not # related to rebuild. RUN_ON_REBUILD = False def host_passes(self, host_state, spec_obj): """Skip nodes that have already been attempted.""" retry = spec_obj.retry if not retry: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled") return True # TODO(sbauza): Once the HostState is actually a ComputeNode, we could # easily get this one... host = [host_state.host, host_state.nodename] # TODO(sbauza)... and we wouldn't need to primitive the hosts into # lists hosts = [[cn.host, cn.hypervisor_hostname] for cn in retry.hosts] passes = host not in hosts if not passes: LOG.info(_LI("Host %(host)s fails. Previously tried hosts: " "%(hosts)s"), {'host': host, 'hosts': hosts}) msg = ('Previously tried: %(hosts)s' % {'hosts': hosts}) self.filter_reject(host_state, spec_obj, msg, append=True) # Host passes if it's not in the list of previously attempted hosts: return passes
fq12.rs
use super::fq::FROBENIUS_COEFF_FQ12_C1; use super::fq2::Fq2; use super::fq6::Fq6; use ff::Field; use rand::{Rand, Rng}; /// An element of Fq12, represented by c0 + c1 * w. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct Fq12 { pub c0: Fq6, pub c1: Fq6, } impl ::std::fmt::Display for Fq12 { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Fq12({} + {} * w)", self.c0, self.c1) } } impl Rand for Fq12 { fn rand<R: Rng>(rng: &mut R) -> Self { Fq12 { c0: rng.gen(), c1: rng.gen(), } } } // BN256 and BLS12 implementations should be the same // Defined over w^2 - v = 0 impl Fq12 { pub fn conjugate(&mut self) { self.c1.negate(); } pub fn mul_by_014(&mut self, c0: &Fq2, c1: &Fq2, c4: &Fq2) { let mut aa = self.c0; aa.mul_by_01(c0, c1); let mut bb = self.c1; bb.mul_by_1(c4); let mut o = *c1; o.add_assign(c4); self.c1.add_assign(&self.c0); self.c1.mul_by_01(c0, &o); self.c1.sub_assign(&aa); self.c1.sub_assign(&bb); self.c0 = bb; self.c0.mul_by_nonresidue(); self.c0.add_assign(&aa); } // TODO make it hand optimized // // multiply by (c0, c1, c2) + (c3, c4, c5)*w where only c0, c3 and c4 are non-zero pub fn mul_by_034(&mut self, c0: &Fq2, c3: &Fq2, c4: &Fq2) { self.mul_assign(&Fq12 { c0: Fq6 { c0: *c0, c1: Fq2::zero(), c2: Fq2::zero(), }, c1: Fq6 { c0: *c3, c1: *c4, c2: Fq2::zero(), }, }); } } impl Field for Fq12 { fn zero() -> Self { Fq12 { c0: Fq6::zero(), c1: Fq6::zero(), } } fn one() -> Self { Fq12 { c0: Fq6::one(), c1: Fq6::zero(), } } fn is_zero(&self) -> bool { self.c0.is_zero() && self.c1.is_zero() } fn double(&mut self) { self.c0.double(); self.c1.double(); } fn negate(&mut self) { self.c0.negate(); self.c1.negate(); } fn add_assign(&mut self, other: &Self) { self.c0.add_assign(&other.c0); self.c1.add_assign(&other.c1); } fn sub_assign(&mut self, other: &Self) { self.c0.sub_assign(&other.c0); self.c1.sub_assign(&other.c1); } fn frobenius_map(&mut self, power: usize) { self.c0.frobenius_map(power); self.c1.frobenius_map(power); self.c1.c0.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); self.c1.c1.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); self.c1.c2.mul_assign(&FROBENIUS_COEFF_FQ12_C1[power % 12]); } fn square(&mut self) { let mut ab = self.c0; ab.mul_assign(&self.c1); let mut c0c1 = self.c0; c0c1.add_assign(&self.c1); let mut c0 = self.c1; c0.mul_by_nonresidue(); c0.add_assign(&self.c0); c0.mul_assign(&c0c1); c0.sub_assign(&ab); self.c1 = ab; self.c1.add_assign(&ab); ab.mul_by_nonresidue(); c0.sub_assign(&ab); self.c0 = c0; } fn mul_assign(&mut self, other: &Self) { let mut aa = self.c0; aa.mul_assign(&other.c0); let mut bb = self.c1; bb.mul_assign(&other.c1); let mut o = other.c0; o.add_assign(&other.c1); self.c1.add_assign(&self.c0); self.c1.mul_assign(&o); self.c1.sub_assign(&aa); self.c1.sub_assign(&bb); self.c0 = bb; self.c0.mul_by_nonresidue(); self.c0.add_assign(&aa); } fn inverse(&self) -> Option<Self> { let mut c0s = self.c0; c0s.square(); let mut c1s = self.c1; c1s.square(); c1s.mul_by_nonresidue(); c0s.sub_assign(&c1s); c0s.inverse().map(|t| { let mut tmp = Fq12 { c0: t, c1: t }; tmp.c0.mul_assign(&self.c0); tmp.c1.mul_assign(&self.c1); tmp.c1.negate(); tmp }) } } #[cfg(test)] use rand::{SeedableRng, XorShiftRng}; #[test] fn test_fq12_mul_by_014() { let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); for _ in 0..1000 { let c0 = Fq2::rand(&mut rng); let c1 = Fq2::rand(&mut rng); let c5 = Fq2::rand(&mut rng); let mut a = Fq12::rand(&mut rng); let mut b = a; a.mul_by_014(&c0, &c1, &c5); b.mul_assign(&Fq12 
{ c0: Fq6 { c0: c0, c1: c1, c2: Fq2::zero(), }, c1: Fq6 { c0: Fq2::zero(), c1: c5, c2: Fq2::zero(), }, }); assert_eq!(a, b); } } #[test] fn test_squaring() { let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); for _ in 0..1000 { let mut a = Fq12::rand(&mut rng); let mut b = a; b.mul_assign(&a); a.square(); assert_eq!(a, b); } }
use ff::PrimeField; crate::tests::field::random_field_tests::<Fq12>(); crate::tests::field::random_frobenius_tests::<Fq12, _>(super::fq::Fq::char(), 13); }
#[test] fn fq12_field_tests() {
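mul_assign above is a Karatsuba-style product in a quadratic extension (the comment `w^2 - v = 0` is the key): three base-field multiplications instead of four, with the w^2 term folded back into c0 through the non-residue. A toy Rust sketch over F_97 with v = 5, values chosen for hand-checking and unrelated to the real Fq6 tower:

const P: u64 = 97; // toy prime, small enough to verify by hand
const V: u64 = 5;  // a quadratic non-residue mod 97, playing the role of v

/// One product (a0 + a1*w) * (b0 + b1*w) with w^2 = v.
fn mul_ext(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
    let aa = a.0 * b.0 % P; // a0*b0 (the `aa` of mul_assign)
    let bb = a.1 * b.1 % P; // a1*b1 (the `bb` of mul_assign)
    // (a0+a1)(b0+b1) - aa - bb == a0*b1 + a1*b0; `+ 2*P` avoids underflow
    let c1 = ((a.0 + a.1) * (b.0 + b.1) % P + 2 * P - aa - bb) % P;
    // the w^2 term folds back into c0 via the non-residue: c0 = aa + v*bb
    let c0 = (aa + V * bb % P) % P;
    (c0, c1)
}

fn main() {
    // Schoolbook check: (2 + 3w)(4 + 5w) = 8 + 22w + 15w^2 = (8 + 15v) + 22w
    assert_eq!(mul_ext((2, 3), (4, 5)), ((8 + 15 * V) % P, 22));
    println!("ok");
}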
avatar.module.ts
import { NgModule } from '@angular/core'; import { CommonModule } from '@angular/common'; import { AvatarComponent, UxgImageAvatar } from './avatar.component'; import { AvatarListComponent } from './avatar-list.component'; import { InitialsPipe } from './initials.pipe'; @NgModule({ imports: [CommonModule], declarations: [AvatarComponent, AvatarListComponent, UxgImageAvatar, InitialsPipe], exports: [AvatarComponent, AvatarListComponent, UxgImageAvatar, InitialsPipe] }) export class
{}
AvatarModule
setup.py
import subprocess import sys import setup_util from os.path import expanduser home = expanduser("~") ############## # start(args) ############## def start(args, logfile, errfile): setup_util.replace_text("treefrog/config/database.ini", "HostName=.*", "HostName=" + args.database_host) setup_util.replace_text("treefrog/config/application.ini", "MultiProcessingModule=.*", "MultiProcessingModule=hybrid") # 1. Generate Makefile # 2. Compile applicaton # 3. Clean log files # 4. Start TreeFrog
subprocess.check_call("rm -f log/*.log", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile) subprocess.check_call("treefrog -d " + home + "/FrameworkBenchmarks/treefrog", shell=True, stderr=errfile, stdout=logfile) return 0 except subprocess.CalledProcessError: return 1 ############## # stop() ############## def stop(logfile, errfile): try: subprocess.call("treefrog -k abort " + home + "/FrameworkBenchmarks/treefrog", shell=True, stderr=errfile, stdout=logfile) return 0 except subprocess.CalledProcessError: return 1
try: subprocess.check_call("qmake -r CONFIG+=release", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile) subprocess.check_call("make clean", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile) subprocess.check_call("make -j8", shell=True, cwd="treefrog", stderr=errfile, stdout=logfile)
btsServer.ts
import axios from "axios"; const testAPI = "https://localhost:44351/api/"; const official = "https://bst-clinic:2020/api/"; export default axios.create({
baseURL: testAPI, // headers: { "Access-Control-Allow-Origin": "*" }, });
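A sketch of how the exported instance is typically consumed; the `patients` endpoint and Patient shape are hypothetical, and only the baseURL behaviour comes from the record above:

import btsServer from "./btsServer"; // path assumed from the file name above

interface Patient {
  id: number;
  name: string;
}

// Relative URLs resolve against baseURL, i.e. https://localhost:44351/api/patients
async function loadPatients(): Promise<Patient[]> {
  const response = await btsServer.get<Patient[]>("patients");
  return response.data;
}

loadPatients().then((patients) => console.log(patients.length));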
index.tsx
import React from 'react'; import { Recipe } from '~constants/interfaces/recipe'; import useShadow from '~hooks/useShadow'; interface Props { className?: string; recipe: Recipe; } function ShadowRecipe({ className = '', recipe }: Props) { // TODO: Update to the new files structure const shadowElem = useShadow<HTMLDivElement>({ html: recipe.html.content, css: recipe.css.content }); return <div ref={shadowElem} className={`full-width row middle center ${className}`} />;
export default ShadowRecipe;
}
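A sketch of rendering ShadowRecipe; the component import path is hypothetical, and the object literal is cast because Recipe presumably carries more fields than the two the component reads (html.content and css.content):

import React from 'react';

import { Recipe } from '~constants/interfaces/recipe';
import ShadowRecipe from '~components/ShadowRecipe'; // hypothetical location

const recipe = {
  html: { content: '<button class="cta">Buy</button>' },
  css: { content: '.cta { color: tomato; }' }
} as Recipe;

function Preview() {
  // The markup and styles render inside a shadow root, so the recipe's CSS
  // cannot leak into, or be overridden by, the host page.
  return <ShadowRecipe className="preview" recipe={recipe} />;
}

export default Preview;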
trace_test.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the MIT License. // This product includes software developed at Guance Cloud (https://www.guance.com/). // Copyright 2021-present Guance, Inc. package collector import ( "context" "reflect" "testing" "time" DKtrace "gitlab.jiagouyun.com/cloudcare-tools/datakit/io/trace" "gitlab.jiagouyun.com/cloudcare-tools/datakit/plugins/inputs/opentelemetry/mock" commonpb "go.opentelemetry.io/proto/otlp/common/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) var testKV = []*commonpb.KeyValue{ { Key: "service.name", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "service", }, }, }, { Key: "http.method", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "POST", }, }, }, { Key: "http.status_code", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 200, }, }, }, { Key: "container.name", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "hostName", }, }, }, { Key: "process.pid", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 2222, }, }, }, } var allTag = map[string]string{ "service.name": "service", "http.method": "POST", "http.status_code": "200", "container.name": "hostName", "process.pid": "2222", } func Test_mkDKTrace(t *testing.T) { /* mock server and mock client: the client sends readOnlySpans, and the ResourceSpans are fetched from the exporter */ trace := &mock.MockTrace{} endpoint := "localhost:20010" m := mock.MockOtlpGrpcCollector{Trace: trace} go m.StartServer(t, endpoint) <-time.After(5 * time.Millisecond) t.Log("start server") ctx := context.Background() exp := mock.NewGRPCExporter(t, ctx, endpoint) roSpans, want := mock.MockRoSpans(t) if err := exp.ExportSpans(ctx, roSpans); err != nil { t.Errorf("err=%v", err) return } time.Sleep(time.Millisecond * 40) // wait MockTrace rss := trace.GetResourceSpans() type args struct { rss []*tracepb.ResourceSpans } tests := []struct { name string args args want []DKtrace.DatakitTrace }{ {name: "case1", args: args{rss: rss}, want: want}, } storage := &SpansStorage{RegexpString: "", GlobalTags: map[string]string{}} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := storage.mkDKTrace(tt.args.rss) if !reflect.DeepEqual(got[0][0], tt.want[0][0]) { t.Errorf("mkDKTrace() = %+v,\n want %+v", got[0][0], tt.want[0][0]) } }) } } func Test_byteToString(t *testing.T) { type args struct { bts []byte } tests := []struct { name string args args want string }{ {name: "nil", args: args{bts: []byte{}}, want: "0"}, {name: "100", args: args{bts: []byte{1, 0, 0}}, want: "010000"}, {name: "a1", args: args{bts: []byte{0xa1}}, want: "a1"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := byteToString(tt.args.bts); got != tt.want { t.Errorf("byteToString() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_addGlobalTags(t *testing.T) { type fields struct { tags map[string]string globalTags map[string]string } tests := []struct { name string fields fields want *dkTags }{ { name: "add a:b", fields: fields{tags: map[string]string{}, globalTags: map[string]string{"globalTag_a": "b"}}, want: &dkTags{replaceTags: map[string]string{"globalTag_a": "b"}, globalTags: map[string]string{"globalTag_a": "b"}, tags: map[string]string{}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ tags: tt.fields.tags, globalTags: tt.fields.globalTags, replaceTags: map[string]string{}, } if got := dt.addGlobalTags(); !reflect.DeepEqual(got, 
tt.want) { t.Errorf("addGlobalTags() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_addOtherTags(t *testing.T) { type fields struct { tags map[string]string } type args struct { span *tracepb.Span } tests := []struct { name string fields fields args args want *dkTags }{ { name: "case", fields: fields{ tags: map[string]string{}, }, args: args{ span: &tracepb.Span{ DroppedEventsCount: 1, // drop event count = 1 Events: []*tracepb.Span_Event{{Name: "1"}, {Name: "1"}}, // events = 2 Links: []*tracepb.Span_Link{{TraceState: "1"}}, // links = 1 }, }, want: &dkTags{ tags: map[string]string{"links_count": "1", "events_count": "2", "dropped_events_count": "1"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ tags: tt.fields.tags, } if got := dt.addOtherTags(tt.args.span); !reflect.DeepEqual(got, tt.want) { t.Errorf("addOtherTags() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_checkAllTagsKey(t *testing.T) { type fields struct { tags map[string]string } tests := []struct { name string fields fields want *dkTags }{ { name: "case", fields: fields{tags: map[string]string{"a.b": "c"}}, want: &dkTags{ tags: map[string]string{"a.b": "c"}, replaceTags: map[string]string{"a_b": "c"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ tags: tt.fields.tags, replaceTags: map[string]string{}, } if got := dt.checkAllTagsKey(); !reflect.DeepEqual(got, tt.want) { t.Errorf("checkAllTagsKey() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_resource(t *testing.T) { type fields struct { replaceTags map[string]string } tests := []struct { name string fields fields want map[string]string }{ { name: "get resource", fields: fields{replaceTags: map[string]string{"a": "b"}}, want: map[string]string{"a": "b"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ replaceTags: tt.fields.replaceTags, } if got := dt.resource(); !reflect.DeepEqual(got, tt.want) { t.Errorf("resource() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_setAttributesToTags(t *testing.T) { type fields struct { tags map[string]string } type args struct { attr []*commonpb.KeyValue } tests := []struct { name string fields fields args args want *dkTags }{ { name: "case1", fields: fields{tags: map[string]string{}}, args: args{attr: testKV}, want: &dkTags{tags: allTag}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ tags: tt.fields.tags, } if got := dt.setAttributesToTags(tt.args.attr); !reflect.DeepEqual(got, tt.want) { t.Errorf("setAttributesToTags() = %v, want %v", got, tt.want) } }) } } func Test_getDKSpanStatus(t *testing.T) { type args struct { code *tracepb.Status
want string }{ { name: "case1", args: args{code: &tracepb.Status{Code: tracepb.Status_STATUS_CODE_UNSET}}, want: "ok", }, { name: "case2", args: args{code: &tracepb.Status{Code: tracepb.Status_STATUS_CODE_OK}}, want: "ok", }, { name: "case3", args: args{code: &tracepb.Status{Code: tracepb.Status_STATUS_CODE_ERROR}}, want: "error", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := getDKSpanStatus(tt.args.code); got != tt.want { t.Errorf("getDKSpanStatus() = %v, want %v", got, tt.want) } }) } } func Test_newEmptyTags(t *testing.T) { type args struct { regexp string globalTags map[string]string } tests := []struct { name string args args want *dkTags }{ { name: "case", args: args{regexp: "", globalTags: map[string]string{}}, want: &dkTags{ regexpString: "", globalTags: map[string]string{}, tags: make(map[string]string), replaceTags: make(map[string]string), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := newEmptyTags(tt.args.regexp, tt.args.globalTags); !reflect.DeepEqual(got, tt.want) { t.Errorf("newEmptyTags() = %v, want %v", got, tt.want) } }) } } func Test_replace(t *testing.T) { type args struct { key string } tests := []struct { name string args args want string }{ {name: "case", args: args{key: "mysql.select"}, want: "mysql_select"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := replace(tt.args.key); got != tt.want { t.Errorf("replace() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_getAttributeVal(t *testing.T) { type fields struct { tags map[string]string replaceTags map[string]string } type args struct { keyName string } tests := []struct { name string fields fields args args want string }{ { name: "case1", fields: fields{tags: allTag}, args: args{keyName: otelResourceServiceKey}, want: "service", }, { name: "case2", fields: fields{tags: allTag}, args: args{keyName: otelResourceHTTPMethodKey}, want: "POST", }, { name: "case3", fields: fields{tags: allTag}, args: args{keyName: otelResourceContainerNameKey}, want: "hostName", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ tags: tt.fields.tags, replaceTags: tt.fields.replaceTags, } if got := dt.getAttributeVal(tt.args.keyName); got != tt.want { t.Errorf("getAttributeVal() = %v, want %v", got, tt.want) } }) } } func Test_dkTags_checkCustomTags(t *testing.T) { type fields struct { regexpStr string tags map[string]string replaceTags map[string]string } tests := []struct { name string fields fields want *dkTags }{ { name: "regexp-1", fields: fields{ tags: map[string]string{}, replaceTags: map[string]string{"os_name": "linux", "other_key": "other_value"}, regexpStr: "os_*|process_*", }, want: &dkTags{ regexpString: "os_*|process_*", tags: map[string]string{}, replaceTags: map[string]string{"other_key": "other_value"}, }, }, { name: "regexp-2", fields: fields{ regexpStr: "os_*|process_*", tags: map[string]string{}, replaceTags: map[string]string{"os_name": "linux", "process_id": "123", "other_key": "other_value"}, }, want: &dkTags{ regexpString: "os_*|process_*", tags: map[string]string{}, replaceTags: map[string]string{"other_key": "other_value"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { dt := &dkTags{ regexpString: tt.fields.regexpStr, tags: tt.fields.tags, replaceTags: tt.fields.replaceTags, } if got := dt.checkCustomTags(); !reflect.DeepEqual(got, tt.want) { t.Errorf("checkCustomTags() = %v, want %v", got, tt.want) } }) } }
} tests := []struct { name string args args
microphone.py
import time import numpy as np import pyaudio import config import sys def start_stream(callback):
p = pyaudio.PyAudio() frames_per_buffer = int(config.MIC_RATE / config.FPS) stream = p.open(format=pyaudio.paInt16, channels=1, rate=config.MIC_RATE, input=True, frames_per_buffer=frames_per_buffer) overflows = 0 prev_ovf_time = time.time() while True: try: # np.fromstring is deprecated for binary data; np.frombuffer is the supported equivalent y = np.frombuffer(stream.read(frames_per_buffer), dtype=np.int16) y = y.astype(np.float32) callback(y) except IOError: overflows += 1 if time.time() > prev_ovf_time + 1: prev_ovf_time = time.time() print('Audio buffer has overflowed {} times'.format(overflows)) if overflows > 10000: sys.exit() stream.stop_stream() stream.close() p.terminate()
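A sketch of driving start_stream with a callback; config.MIC_RATE and config.FPS come from the project's config module, and the RMS handler below is illustrative. Note that the call blocks for as long as the stream runs:

import numpy as np

import microphone  # the module above; name assumed from the file name

def print_volume(samples):
    # samples is one float32 frame of roughly MIC_RATE / FPS values
    rms = np.sqrt(np.mean(samples ** 2))
    print('RMS volume: {:.1f}'.format(rms))

if __name__ == '__main__':
    microphone.start_stream(print_volume)  # blocks; Ctrl+C to stop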
interface.rs
/// Represents a network interface for mDNS services #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum
{ /// No interface specified, bind to all available interfaces Unspec, /// An interface at a specified index AtIndex(u32), }
NetworkInterface
SegmentTagParams.spec.js
/** * freee API * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) * * OpenAPI spec version: v1.0 * * NOTE: This class is auto generated by the swagger code generator program. * https://github.com/swagger-api/swagger-codegen.git * * Swagger Codegen version: 2.4.0-SNAPSHOT * * Do not edit the class manually. * */ (function(root, factory) { if (typeof define === 'function' && define.amd) { // AMD. define(['expect.js', '../../src/index'], factory); } else if (typeof module === 'object' && module.exports) { // CommonJS-like environments that support module.exports, like Node. factory(require('expect.js'), require('../../src/index')); } else { // Browser globals (root is window) factory(root.expect, root.FreeeAccountingClient); } }(this, function(expect, FreeeAccountingClient) { 'use strict'; var instance; beforeEach(function() { instance = new FreeeAccountingClient.SegmentTagParams(); }); var getProperty = function(object, getter, property) { // Use getter method if present; otherwise, get the property directly. if (typeof object[getter] === 'function') return object[getter](); else return object[property]; } var setProperty = function(object, setter, property, value) { // Use setter method if present; otherwise, set the property directly. if (typeof object[setter] === 'function') object[setter](value); else object[property] = value; } describe('SegmentTagParams', function() { it('should create an instance of SegmentTagParams', function() { // uncomment below and update the code to test SegmentTagParams //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be.a(FreeeAccountingClient.SegmentTagParams); }); it('should have the property companyId (base name: "company_id")', function() { // uncomment below and update the code to test the property companyId //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be(); }); it('should have the property name (base name: "name")', function() { // uncomment below and update the code to test the property name //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be(); });
// uncomment below and update the code to test the property description //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be(); }); it('should have the property shortcut1 (base name: "shortcut1")', function() { // uncomment below and update the code to test the property shortcut1 //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be(); }); it('should have the property shortcut2 (base name: "shortcut2")', function() { // uncomment below and update the code to test the property shortcut2 //var instance = new FreeeAccountingClient.SegmentTagParams(); //expect(instance).to.be(); }); }); }));
it('should have the property description (base name: "description")', function() {
statsd.go
package statsd import ( "bufio" "bytes" "errors" "fmt" "net" "sort" "strconv" "strings" "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" ) const ( // UDP_MAX_PACKET_SIZE is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure UDP_MAX_PACKET_SIZE int = 64 * 1024 defaultFieldName = "value" defaultProtocol = "udp" defaultSeparator = "_" defaultAllowPendingMessage = 10000 MaxTCPConnections = 250 parserGoRoutines = 5 ) // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp Protocol string `toml:"protocol"` // Address & Port to serve from ServiceAddress string // Number of messages allowed to queue up in between calls to Gather. If this // fills up, packets will get dropped until the next Gather interval is run. AllowedPendingMessages int // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. Percentiles []internal.Number PercentileLimit int DeleteGauges bool DeleteCounters bool DeleteSets bool DeleteTimings bool ConvertNames bool // MetricSeparator is the separator between parts of the metric name. MetricSeparator string // This flag enables parsing of tags in the dogstatsd extension to the // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) ParseDataDogTags bool // deprecated in 1.10; use datadog_extensions // Parses extensions to statsd in the datadog statsd format // currently supports metrics and datadog tags. // http://docs.datadoghq.com/guides/dogstatsd/ DataDogExtensions bool `toml:"datadog_extensions"` // UDPPacketSize is deprecated, it's only here for legacy support // we now always create 1 max size buffer and then copy only what we need // into the in channel // see https://github.com/influxdata/telegraf/pull/992 UDPPacketSize int `toml:"udp_packet_size"` ReadBufferSize int `toml:"read_buffer_size"` sync.Mutex // Lock for preventing a data race during resource cleanup cleanup sync.Mutex wg sync.WaitGroup // accept channel tracks how many active connections there are, if there // is an available bool in accept, then we are below the maximum and can // accept the connection accept chan bool // drops tracks the number of dropped metrics. drops int // malformed tracks the number of malformed packets malformed int // Channel for all incoming statsd packets in chan input done chan struct{} // Cache gauges, counters & sets so they can be aggregated as they arrive // gauges and counters map measurement/tags hash -> field name -> metrics // sets and timings map measurement/tags hash -> metrics gauges map[string]cachedgauge counters map[string]cachedcounter sets map[string]cachedset timings map[string]cachedtimings // bucket -> influx templates Templates []string // Protocol listeners UDPlistener *net.UDPConn TCPlistener *net.TCPListener // track current connections so we can close them in Stop() conns map[string]*net.TCPConn MaxTCPConnections int `toml:"max_tcp_connections"` TCPKeepAlive bool `toml:"tcp_keep_alive"` TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"` // Max duration for each metric to stay cached without being updated. 
	MaxTTL config.Duration `toml:"max_ttl"`

	graphiteParser *graphite.GraphiteParser

	acc telegraf.Accumulator

	MaxConnections     selfstat.Stat
	CurrentConnections selfstat.Stat
	TotalConnections   selfstat.Stat
	TCPPacketsRecv     selfstat.Stat
	TCPBytesRecv       selfstat.Stat
	UDPPacketsRecv     selfstat.Stat
	UDPPacketsDrop     selfstat.Stat
	UDPBytesRecv       selfstat.Stat
	ParseTimeNS        selfstat.Stat

	Log telegraf.Logger `toml:"-"`

	// A pool of byte slices to handle parsing
	bufPool sync.Pool
}

type input struct {
	*bytes.Buffer
	time.Time
	Addr string
}

// One statsd metric, form is <bucket>:<value>|<mtype>|@<samplerate>
type metric struct {
	name       string
	field      string
	bucket     string
	hash       string
	intvalue   int64
	floatvalue float64
	strvalue   string
	mtype      string
	additive   bool
	samplerate float64
	tags       map[string]string
}

type cachedset struct {
	name      string
	fields    map[string]map[string]bool
	tags      map[string]string
	expiresAt time.Time
}

type cachedgauge struct {
	name      string
	fields    map[string]interface{}
	tags      map[string]string
	expiresAt time.Time
}

type cachedcounter struct {
	name      string
	fields    map[string]interface{}
	tags      map[string]string
	expiresAt time.Time
}

type cachedtimings struct {
	name      string
	fields    map[string]RunningStats
	tags      map[string]string
	expiresAt time.Time
}

func (_ *Statsd) Description() string {
	return "Statsd UDP/TCP Server"
}

const sampleConfig = `
  ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
  protocol = "udp"

  ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
  max_tcp_connections = 250

  ## Enable TCP keep alive probes (default=false)
  tcp_keep_alive = false

  ## Specifies the keep-alive period for an active network connection.
  ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
  ## Defaults to the OS configuration.
  # tcp_keep_alive_period = "2h"

  ## Address and port to host UDP listener on
  service_address = ":8125"

  ## The following configuration options control when telegraf clears its cache
  ## of previous values. If set to false, then telegraf will only clear its
  ## cache when the daemon is restarted.
  ## Reset gauges every interval (default=true)
  delete_gauges = true
  ## Reset counters every interval (default=true)
  delete_counters = true
  ## Reset sets every interval (default=true)
  delete_sets = true
  ## Reset timings & histograms every interval (default=true)
  delete_timings = true

  ## Percentiles to calculate for timing & histogram stats
  percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]

  ## Separator to use between elements of a statsd metric
  metric_separator = "_"

  ## Parses tags in the datadog statsd format
  ## http://docs.datadoghq.com/guides/dogstatsd/
  parse_data_dog_tags = false

  ## Parses datadog extensions to the statsd format
  datadog_extensions = false

  ## Statsd data translation templates, more info can be read here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
  # templates = [
  #     "cpu.* measurement*"
  # ]

  ## Number of UDP messages allowed to queue up, once filled,
  ## the statsd server will start dropping packets
  allowed_pending_messages = 10000

  ## Number of timing/histogram values to track per-measurement in the
  ## calculation of percentiles. Raising this limit increases the accuracy
  ## of percentiles but also increases the memory usage and cpu time.
  percentile_limit = 1000

  ## Max duration (TTL) for each metric to stay cached/reported without being updated.
#max_ttl = "1000h" ` func (_ *Statsd) SampleConfig() string { return sampleConfig } func (s *Statsd) Gather(acc telegraf.Accumulator) error { s.Lock() defer s.Unlock() now := time.Now() for _, m := range s.timings { // Defining a template to parse field names for timers allows us to split // out multiple fields per timer. In this case we prefix each stat with the // field name and store these all in a single measurement. fields := make(map[string]interface{}) for fieldName, stats := range m.fields { var prefix string if fieldName != defaultFieldName { prefix = fieldName + "_" } fields[prefix+"mean"] = stats.Mean() fields[prefix+"stddev"] = stats.Stddev() fields[prefix+"sum"] = stats.Sum() fields[prefix+"upper"] = stats.Upper() fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) fields[name] = stats.Percentile(percentile.Value) } } acc.AddFields(m.name, fields, m.tags, now) } if s.DeleteTimings { s.timings = make(map[string]cachedtimings) } for _, m := range s.gauges { acc.AddGauge(m.name, m.fields, m.tags, now) } if s.DeleteGauges { s.gauges = make(map[string]cachedgauge) } for _, m := range s.counters { acc.AddCounter(m.name, m.fields, m.tags, now) } if s.DeleteCounters { s.counters = make(map[string]cachedcounter) } for _, m := range s.sets { fields := make(map[string]interface{}) for field, set := range m.fields { fields[field] = int64(len(set)) } acc.AddFields(m.name, fields, m.tags, now) } if s.DeleteSets { s.sets = make(map[string]cachedset) } s.expireCachedMetrics() return nil } func (s *Statsd) Start(ac telegraf.Accumulator) error { if s.ParseDataDogTags { s.DataDogExtensions = true s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead") } s.acc = ac // Make data structures s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) s.Lock() defer s.Unlock() // tags := map[string]string{ "address": s.ServiceAddress, } s.MaxConnections = selfstat.Register("statsd", "tcp_max_connections", tags) s.MaxConnections.Set(int64(s.MaxTCPConnections)) s.CurrentConnections = selfstat.Register("statsd", "tcp_current_connections", tags) s.TotalConnections = selfstat.Register("statsd", "tcp_total_connections", tags) s.TCPPacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) s.TCPBytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) s.UDPPacketsRecv = selfstat.Register("statsd", "udp_packets_received", tags) s.UDPPacketsDrop = selfstat.Register("statsd", "udp_packets_dropped", tags) s.UDPBytesRecv = selfstat.Register("statsd", "udp_bytes_received", tags) s.ParseTimeNS = selfstat.Register("statsd", "parse_time_ns", tags) s.in = make(chan input, s.AllowedPendingMessages) s.done = make(chan struct{}) s.accept = make(chan bool, s.MaxTCPConnections) s.conns = make(map[string]*net.TCPConn) s.bufPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } for i := 0; i < s.MaxTCPConnections; i++ { s.accept <- true } if s.ConvertNames { s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead") } if s.MetricSeparator == "" { s.MetricSeparator = defaultSeparator } if s.isUDP() { address, err := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) if err != nil { return err } conn, err := net.ListenUDP(s.Protocol, address) if err != nil { return err 
		}
		s.Log.Infof("UDP listening on %q", conn.LocalAddr().String())
		s.UDPlistener = conn

		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			s.udpListen(conn)
		}()
	} else {
		address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress)
		if err != nil {
			return err
		}

		listener, err := net.ListenTCP("tcp", address)
		if err != nil {
			return err
		}

		s.Log.Infof("TCP listening on %q", listener.Addr().String())
		s.TCPlistener = listener

		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			s.tcpListen(listener)
		}()
	}

	for i := 1; i <= parserGoRoutines; i++ {
		// Start the line parser
		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			s.parser()
		}()
	}
	s.Log.Infof("Started the statsd service on %q", s.ServiceAddress)
	return nil
}

// tcpListen() starts listening for TCP connections on the configured port.
func (s *Statsd) tcpListen(listener *net.TCPListener) error {
	for {
		select {
		case <-s.done:
			return nil
		default:
			// Accept connection:
			conn, err := listener.AcceptTCP()
			if err != nil {
				return err
			}

			if s.TCPKeepAlive {
				if err = conn.SetKeepAlive(true); err != nil {
					return err
				}

				if s.TCPKeepAlivePeriod != nil {
					if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil {
						return err
					}
				}
			}

			select {
			case <-s.accept:
				// not over connection limit, handle the connection properly.
				s.wg.Add(1)
				// generate a random id for this TCPConn
				id := internal.RandomString(6)
				s.remember(id, conn)
				go s.handler(conn, id)
			default:
				// We are over the connection limit, refuse & close.
				s.refuser(conn)
			}
		}
	}
}

// udpListen starts listening for UDP packets on the configured port.
func (s *Statsd) udpListen(conn *net.UDPConn) error {
	if s.ReadBufferSize > 0 {
		s.UDPlistener.SetReadBuffer(s.ReadBufferSize)
	}

	buf := make([]byte, UDP_MAX_PACKET_SIZE)
	for {
		select {
		case <-s.done:
			return nil
		default:
			n, addr, err := conn.ReadFromUDP(buf)
			if err != nil {
				if !strings.Contains(err.Error(), "closed network") {
					s.Log.Errorf("Error reading: %s", err.Error())
					continue
				}
				return err
			}
			s.UDPPacketsRecv.Incr(1)
			s.UDPBytesRecv.Incr(int64(n))
			b := s.bufPool.Get().(*bytes.Buffer)
			b.Reset()
			b.Write(buf[:n])
			select {
			case s.in <- input{
				Buffer: b,
				Time:   time.Now(),
				Addr:   addr.IP.String()}:
			default:
				s.UDPPacketsDrop.Incr(1)
				s.drops++
				if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 {
					s.Log.Errorf("Statsd message queue full. "+
						"We have dropped %d messages so far. "+
						"You may want to increase allowed_pending_messages in the config", s.drops)
				}
			}
		}
	}
}

// parser monitors the s.in channel, if there is a packet ready, it parses the
// packet into statsd strings and then calls parseStatsdLine, which parses a
// single statsd metric into a struct.
func (s *Statsd) parser() error {
	for {
		select {
		case <-s.done:
			return nil
		case in := <-s.in:
			start := time.Now()
			lines := strings.Split(in.Buffer.String(), "\n")
			s.bufPool.Put(in.Buffer)
			for _, line := range lines {
				line = strings.TrimSpace(line)
				switch {
				case line == "":
				case s.DataDogExtensions && strings.HasPrefix(line, "_e"):
					s.parseEventMessage(in.Time, line, in.Addr)
				default:
					s.parseStatsdLine(line)
				}
			}
			elapsed := time.Since(start)
			s.ParseTimeNS.Set(elapsed.Nanoseconds())
		}
	}
}

// parseStatsdLine will parse the given statsd line, validating it as it goes.
// If the line is valid, it will be cached for the next call to Gather()
func (s *Statsd) parseStatsdLine(line string) error {
	lineTags := make(map[string]string)
	if s.DataDogExtensions {
		recombinedSegments := make([]string, 0)
		// datadog tags look like this:
		// users.online:1|c|@0.5|#country:china,environment:production
		// users.online:1|c|#sometagwithnovalue
		// we will split on the pipe and remove any elements that are datadog
		// tags, parse them, and rebuild the line sans the datadog tags
		pipesplit := strings.Split(line, "|")
		for _, segment := range pipesplit {
			if len(segment) > 0 && segment[0] == '#' {
				// we have ourselves a tag; they are comma separated
				parseDataDogTags(lineTags, segment[1:])
			} else {
				recombinedSegments = append(recombinedSegments, segment)
			}
		}
		line = strings.Join(recombinedSegments, "|")
	}

	// Validate splitting the line on ":"
	bits := strings.Split(line, ":")
	if len(bits) < 2 {
		s.Log.Errorf("Splitting ':', unable to parse metric: %s", line)
		return errors.New("error parsing statsd line")
	}

	// Extract bucket name from individual metric bits
	bucketName, bits := bits[0], bits[1:]

	// Add a metric for each bit available
	for _, bit := range bits {
		m := metric{}

		m.bucket = bucketName

		// Validate splitting the bit on "|"
		pipesplit := strings.Split(bit, "|")
		if len(pipesplit) < 2 {
			s.Log.Errorf("Splitting '|', unable to parse metric: %s", line)
			return errors.New("error parsing statsd line")
		} else if len(pipesplit) > 2 {
			sr := pipesplit[2]

			if strings.Contains(sr, "@") && len(sr) > 1 {
				samplerate, err := strconv.ParseFloat(sr[1:], 64)
				if err != nil {
					s.Log.Errorf("Parsing sample rate: %s", err.Error())
				} else {
					// sample rate successfully parsed
					m.samplerate = samplerate
				}
			} else {
				s.Log.Debugf("Sample rate must be in format like: "+
					"@0.1, @0.5, etc.
Ignoring sample rate for line: %s", line) } } // Validate metric type switch pipesplit[1] { case "g", "c", "s", "ms", "h": m.mtype = pipesplit[1] default: s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) return errors.New("error parsing statsd line") } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) return errors.New("error parsing statsd line") } m.additive = true } switch m.mtype { case "g", "ms", "h": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) return errors.New("error parsing statsd line") } m.floatvalue = v case "c": var v int64 v, err := strconv.ParseInt(pipesplit[0], 10, 64) if err != nil { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) return errors.New("error parsing statsd line") } v = int64(v2) } // If a sample rate is given with a counter, divide value by the rate if m.samplerate != 0 && m.mtype == "c" { v = int64(float64(v) / m.samplerate) } m.intvalue = v case "s": m.strvalue = pipesplit[0] } // Parse the name & tags from bucket m.name, m.field, m.tags = s.parseName(m.bucket) switch m.mtype { case "c": m.tags["metric_type"] = "counter" case "g": m.tags["metric_type"] = "gauge" case "s": m.tags["metric_type"] = "set" case "ms": m.tags["metric_type"] = "timing" case "h": m.tags["metric_type"] = "histogram" } if len(lineTags) > 0 { for k, v := range lineTags { m.tags[k] = v } } // Make a unique key for the measurement name/tags var tg []string for k, v := range m.tags { tg = append(tg, k+"="+v) } sort.Strings(tg) tg = append(tg, m.name) m.hash = strings.Join(tg, "") s.aggregate(m) } return nil } // parseName parses the given bucket name with the list of bucket maps in the // config file. If there is a match, it will parse the name of the metric and // map of tags. // Return values are (<name>, <field>, <tags>) func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { tags := make(map[string]string) bucketparts := strings.Split(bucket, ",") // Parse out any tags in the bucket if len(bucketparts) > 1 { for _, btag := range bucketparts[1:] { k, v := parseKeyValue(btag) if k != "" { tags[k] = v } } } var field string name := bucketparts[0] p := s.graphiteParser var err error if p == nil || s.graphiteParser.Separator != s.MetricSeparator { p, err = graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil) s.graphiteParser = p } if err == nil { p.DefaultTags = tags name, tags, field, _ = p.ApplyTemplate(name) } if s.ConvertNames { name = strings.Replace(name, ".", "_", -1) name = strings.Replace(name, "-", "__", -1) } if field == "" { field = defaultFieldName } return name, field, tags } // Parse the key,value out of a string that looks like "key=value" func parseKeyValue(keyvalue string) (string, string) { var key, val string split := strings.Split(keyvalue, "=") // Must be exactly 2 to get anything meaningful out of them if len(split) == 2 { key = split[0] val = split[1] } else if len(split) == 1 { val = split[0] } return key, val } // aggregate takes in a metric. It then // aggregates and caches the current value(s). It does not deal with the // Delete* options, because those are dealt with in the Gather function. 
func (s *Statsd) aggregate(m metric) { s.Lock() defer s.Unlock() switch m.mtype { case "ms", "h": // Check if the measurement exists cached, ok := s.timings[m.hash] if !ok { cached = cachedtimings{ name: m.name, fields: make(map[string]RunningStats), tags: m.tags, } } // Check if the field exists. If we've not enabled multiple fields per timer // this will be the default field name, eg. "value" field, ok := cached.fields[m.field] if !ok { field = RunningStats{ PercLimit: s.PercentileLimit, } } if m.samplerate > 0 { for i := 0; i < int(1.0/m.samplerate); i++ { field.AddValue(m.floatvalue) } } else { field.AddValue(m.floatvalue) } cached.fields[m.field] = field cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.timings[m.hash] = cached case "c": // check if the measurement exists cached, ok := s.counters[m.hash] if !ok { cached = cachedcounter{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists _, ok = cached.fields[m.field] if !ok { cached.fields[m.field] = int64(0) } cached.fields[m.field] = cached.fields[m.field].(int64) + m.intvalue cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.counters[m.hash] = cached case "g": // check if the measurement exists cached, ok := s.gauges[m.hash] if !ok { cached = cachedgauge{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists _, ok = cached.fields[m.field] if !ok { cached.fields[m.field] = float64(0) } if m.additive { cached.fields[m.field] = cached.fields[m.field].(float64) + m.floatvalue } else { cached.fields[m.field] = m.floatvalue } cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.gauges[m.hash] = cached case "s": // check if the measurement exists cached, ok := s.sets[m.hash] if !ok { cached = cachedset{ name: m.name, fields: make(map[string]map[string]bool), tags: m.tags, } } // check if the field exists _, ok = cached.fields[m.field] if !ok { cached.fields[m.field] = make(map[string]bool) } cached.fields[m.field][m.strvalue] = true cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.sets[m.hash] = cached } } // handler handles a single TCP Connection func (s *Statsd) handler(conn *net.TCPConn, id string) { s.CurrentConnections.Incr(1) s.TotalConnections.Incr(1) // connection cleanup function defer func() { s.wg.Done() conn.Close() // Add one connection potential back to channel when this one closes s.accept <- true s.forget(id) s.CurrentConnections.Incr(-1) }() var remoteIP string if addr, ok := conn.RemoteAddr().(*net.TCPAddr); ok { remoteIP = addr.IP.String() } var n int scanner := bufio.NewScanner(conn) for { select { case <-s.done: return default: if !scanner.Scan() { return } n = len(scanner.Bytes()) if n == 0 { continue } s.TCPBytesRecv.Incr(int64(n)) s.TCPPacketsRecv.Incr(1) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() b.Write(scanner.Bytes()) b.WriteByte('\n') select { case s.in <- input{Buffer: b, Time: time.Now(), Addr: remoteIP}: default: s.drops++ if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 { s.Log.Errorf("Statsd message queue full. "+ "We have dropped %d messages so far. 
"+ "You may want to increase allowed_pending_messages in the config", s.drops) } } } } } // refuser refuses a TCP connection func (s *Statsd) refuser(conn *net.TCPConn) { conn.Close() s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // forget a TCP connection func (s *Statsd) forget(id string) { s.cleanup.Lock() defer s.cleanup.Unlock() delete(s.conns, id) } // remember a TCP connection func (s *Statsd) remember(id string, conn *net.TCPConn) { s.cleanup.Lock() defer s.cleanup.Unlock() s.conns[id] = conn } func (s *Statsd) Stop() { s.Lock() s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { s.UDPlistener.Close() } else { s.TCPlistener.Close() // Close all open TCP connections // - get all conns from the s.conns map and put into slice // - this is so the forget() function doesnt conflict with looping // over the s.conns map var conns []*net.TCPConn s.cleanup.Lock() for _, conn := range s.conns { conns = append(conns, conn) } s.cleanup.Unlock() for _, conn := range conns { conn.Close() } } s.Unlock() s.wg.Wait() s.Lock() close(s.in) s.Log.Infof("Stopped listener service on %q", s.ServiceAddress) s.Unlock() } // IsUDP returns true if the protocol is UDP, false otherwise. func (s *Statsd) isUDP() bool { return strings.HasPrefix(s.Protocol, "udp") } func (s *Statsd) expireCachedMetrics() { // If Max TTL wasn't configured, skip expiration. if s.MaxTTL == 0 { return } now := time.Now() for key, cached := range s.gauges { if now.After(cached.expiresAt) { delete(s.gauges, key) } } for key, cached := range s.sets { if now.After(cached.expiresAt) { delete(s.sets, key) } } for key, cached := range s.timings { if now.After(cached.expiresAt) { delete(s.timings, key) } } for key, cached := range s.counters { if now.After(cached.expiresAt) { delete(s.counters, key) } } } func
() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ Protocol: defaultProtocol, ServiceAddress: ":8125", MaxTCPConnections: 250, TCPKeepAlive: false, MetricSeparator: "_", AllowedPendingMessages: defaultAllowPendingMessage, DeleteCounters: true, DeleteGauges: true, DeleteSets: true, DeleteTimings: true, } }) }
init
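A throwaway sender sketch for exercising the listener above; the address matches the sample config's default, and the bucket names and values are invented for the example:

package main

import (
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// one metric per line: <bucket>:<value>|<mtype>|@<samplerate>
	fmt.Fprintf(conn, "users.online:1|c|@0.5\n")
	fmt.Fprintf(conn, "request.time:42|ms\n")
}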
get.go
// Copyright (c) 2018, Google, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pipeline_template import ( "errors" "fmt" "net/http" "github.com/spf13/cobra" "github.com/estebangarcia/spin/gateclient" "github.com/estebangarcia/spin/util" ) type GetOptions struct { *pipelineTemplateOptions id string tag string } var ( getPipelineTemplateShort = "Get the pipeline template with the provided id" getPipelineTemplateLong = "Get the specified pipeline template" ) func NewGetCmd(pipelineTemplateOptions pipelineTemplateOptions) *cobra.Command { options := GetOptions{ pipelineTemplateOptions: &pipelineTemplateOptions, } cmd := &cobra.Command{ Use: "get", Short: getPipelineTemplateShort, Long: getPipelineTemplateLong, RunE: func(cmd *cobra.Command, args []string) error { return getPipelineTemplate(cmd, options, args) }, } cmd.PersistentFlags().StringVar(&options.id, "id", "", "id of the pipeline template") cmd.PersistentFlags().StringVar(&options.tag, "tag", "", "(optional) specific tag to query") return cmd } func getPipelineTemplate(cmd *cobra.Command, options GetOptions, args []string) error { gateClient, err := gateclient.NewGateClient() if err != nil { return err } id := options.id if id == "" { id, err = util.ReadArgsOrStdin(args) if err != nil { return err } if id == "" { return errors.New("no pipeline template id supplied, exiting") } } queryParams := map[string]interface{}{} if options.tag != "" { queryParams["tag"] = options.tag } successPayload, resp, err := gateClient.V2PipelineTemplatesControllerApi.GetUsingGET2(gateClient.Context, id, queryParams) if err != nil {
		return fmt.Errorf("encountered an error getting pipeline template with id %s, status code: %d", id, resp.StatusCode)
	}

	util.UI.JsonOutput(successPayload, util.UI.OutputFormat)
	return nil
}
return err } if resp.StatusCode != http.StatusOK {
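For reference, hypothetical invocations of the subcommand wired up above; the template id and tag values are invented, and the second form relies on the util.ReadArgsOrStdin fallback in the code:

  spin pipeline-template get --id myPipelineTemplate --tag stable
  echo myPipelineTemplate | spin pipeline-template get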
EditOutlined.tsx
/* eslint-disable */
import React from 'react'; import createIcon from './utils/createIcon'; export default createIcon(<svg viewBox="0 0 1024 1024"><path d="M257.7 752c2 0 4-.2 6-.5L431.9 722c2-.4 3.9-1.3 5.3-2.8l423.9-423.9a9.96 9.96 0 0 0 0-14.1L694.9 114.9c-1.9-1.9-4.4-2.9-7.1-2.9s-5.2 1-7.1 2.9L256.8 538.8c-1.5 1.5-2.4 3.3-2.8 5.3l-29.5 168.2a33.5 33.5 0 0 0 9.4 29.8c6.6 6.4 14.9 9.9 23.8 9.9zm67.4-174.4L687.8 215l73.3 73.3-362.7 362.6-88.9 15.7 15.6-89zM880 836H144c-17.7 0-32 14.3-32 32v36c0 4.4 3.6 8 8 8h784c4.4 0 8-3.6 8-8v-36c0-17.7-14.3-32-32-32z" /></svg>, 'EditOutlined');
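A hypothetical usage sketch of the generated icon component; whether extra props are forwarded depends on what createIcon produces, so treat this as illustrative only:

  import React from 'react';
  import EditOutlined from './EditOutlined';

  const EditButton = () => (
    <button aria-label="edit">
      <EditOutlined />
    </button>
  );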
namespace.go
package namespace import ( "github.com/Qihoo360/wayne/src/backend/client" "github.com/Qihoo360/wayne/src/backend/client/api" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "github.com/Qihoo360/wayne/src/backend/resources/common" "github.com/Qihoo360/wayne/src/backend/util" ) func CreateNotExitNamespace(cli *kubernetes.Clientset, ns *v1.Namespace) (*v1.Namespace, error) {
	_, err := cli.CoreV1().Namespaces().Get(ns.Name, metaV1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			return cli.CoreV1().Namespaces().Create(ns)
		}
		return nil, err
	}
	return nil, nil
}

// ResourcesUsageByNamespace counts resource usage for a namespace
func ResourcesUsageByNamespace(cli client.ResourceHandler, namespace, selector string) (*common.ResourceList, error) {
	objs, err := cli.List(api.ResourceNamePod, namespace, selector)
	if err != nil {
		return nil, err
	}
	var cpuUsage, memoryUsage int64
	for _, obj := range objs {
		pod := obj.(*v1.Pod)
		// skip pods that have already finished
		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
			continue
		}
		resourceList := common.ContainersResourceList(pod.Spec.Containers)
		cpuUsage += resourceList.Cpu
		memoryUsage += resourceList.Memory
	}

	return &common.ResourceList{
		Cpu:    cpuUsage,
		Memory: memoryUsage,
	}, nil
}

// ResourcesOfAppByNamespace counts resource usage per app in a namespace,
// keyed by each pod's app label
func ResourcesOfAppByNamespace(cli client.ResourceHandler, namespace, selector string) (map[string]*common.ResourceApp, error) {
	objs, err := cli.List(api.ResourceNamePod, namespace, selector)
	if err != nil {
		return nil, err
	}
	result := make(map[string]*common.ResourceApp)
	for _, obj := range objs {
		pod := obj.(*v1.Pod)
		if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
			continue
		}
		resourceList := common.ContainersResourceList(pod.Spec.Containers)
		if result[pod.Labels[util.AppLabelKey]] == nil {
			result[pod.Labels[util.AppLabelKey]] = &common.ResourceApp{
				// convert to cores and gigabytes (Cpu assumed in millicores, Memory in bytes)
				Cpu:    resourceList.Cpu / 1000,
				Memory: resourceList.Memory / (1024 * 1024 * 1024),
				PodNum: 1,
			}
		} else {
			result[pod.Labels[util.AppLabelKey]].Cpu += resourceList.Cpu / 1000
			result[pod.Labels[util.AppLabelKey]].Memory += resourceList.Memory / (1024 * 1024 * 1024)
			result[pod.Labels[util.AppLabelKey]].PodNum += 1
		}
	}
	return result, nil
}
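A minimal caller sketch for CreateNotExitNamespace above; the clientset variable, namespace name, and error handling are assumptions, not part of the package:

	// ensure the "demo" namespace exists before deploying into it (hypothetical caller)
	ns := &v1.Namespace{ObjectMeta: metaV1.ObjectMeta{Name: "demo"}}
	if _, err := CreateNotExitNamespace(cli, ns); err != nil {
		log.Fatalf("ensuring namespace: %v", err)
	}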
tools.py
from datetime import datetime, timedelta, time import numpy as np from collections import MutableMapping import pandas.lib as lib import pandas.tslib as tslib from pandas.types.common import (_ensure_object, is_datetime64_ns_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_integer_dtype, is_list_like) from pandas.types.generic import (ABCIndexClass, ABCSeries, ABCDataFrame) from pandas.types.missing import notnull import pandas.compat as compat _DATEUTIL_LEXER_SPLIT = None try: # Since these are private methods from dateutil, it is safely imported # here so in case this interface changes, pandas will just fallback # to not using the functionality from dateutil.parser import _timelex if hasattr(_timelex, 'split'): def _lexer_split_from_str(dt_str): # The StringIO(str(_)) is for dateutil 2.2 compatibility return _timelex.split(compat.StringIO(str(dt_str))) _DATEUTIL_LEXER_SPLIT = _lexer_split_from_str except (ImportError, AttributeError): pass def _infer_tzinfo(start, end): def _infer(a, b): tz = a.tzinfo if b and b.tzinfo: if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)): raise AssertionError('Inputs must both have the same timezone,' ' {0} != {1}'.format(tz, b.tzinfo)) return tz tz = None if start is not None: tz = _infer(start, end) elif end is not None: tz = _infer(end, start) return tz def _guess_datetime_format(dt_str, dayfirst=False, dt_str_parse=compat.parse_date, dt_str_split=_DATEUTIL_LEXER_SPLIT):
def _guess_datetime_format_for_array(arr, **kwargs):
    # Try to guess the format based on the first non-NaN element
    non_nan_elements = notnull(arr).nonzero()[0]
    if len(non_nan_elements):
        return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)


def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, box=True, format=None, exact=True,
                unit=None, infer_datetime_format=False):
    """
    Convert argument to datetime.

    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series

        .. versionadded: 0.18.1

           or DataFrame/dict-like

    errors : {'ignore', 'raise', 'coerce'}, default 'raise'

        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.

        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst takes precedence
          (same as dateutil).

        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).

        .. versionadded: 0.16.1

    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True

        - If True returns a DatetimeIndex
        - If False returns ndarray of values.
    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
    exact : boolean, True by default

        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.

    unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns) denotes the unit in epoch
        (e.g. a unix timestamp), which is an integer/float number.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.

    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp

        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).

    Examples
    --------

    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same

    >>> df = pd.DataFrame({'year': [2015, 2016],
                           'month': [2, 3],
                           'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]

    If a date does not meet the `timestamp limitations
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.

    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often-times speedup a parsing if its not an ISO8601 format exactly, but in a regular format. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s,infer_datetime_format=True) 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s,infer_datetime_format=False) 1 loop, best of 3: 471 ms per loop """ from pandas.tseries.index import DatetimeIndex tz = 'utc' if utc else None def _convert_listlike(arg, box, format, name=None, tz=tz): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') # these are shortcutable if is_datetime64tz_dtype(arg): if not isinstance(arg, DatetimeIndex): return DatetimeIndex(arg, tz=tz, name=name) if utc: arg = arg.tz_convert(None).tz_localize('UTC') return arg elif is_datetime64_ns_dtype(arg): if box and not isinstance(arg, DatetimeIndex): try: return DatetimeIndex(arg, tz=tz, name=name) except ValueError: pass return arg elif unit is not None: if format is not None: raise ValueError("cannot specify both format and unit") arg = getattr(arg, 'values', arg) result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors) if box: if errors == 'ignore': from pandas import Index return Index(result) return DatetimeIndex(result, tz=tz, name=name) return result elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') arg = _ensure_object(arg) require_iso8601 = False if infer_datetime_format and format is None: format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst) if format is not None: # There is a special fast-path for iso8601 formatted # datetime strings, so in those cases don't use the inferred # format because this path makes process slower in this # special case format_is_iso8601 = _format_is_iso(format) if format_is_iso8601: require_iso8601 = not infer_datetime_format format = None try: result = None if format is not None: # shortcut formatting here if format == '%Y%m%d': try: result = _attempt_YYYYMMDD(arg, errors=errors) except: raise ValueError("cannot convert the input to " "'%Y%m%d' date format") # fallback if result is None: try: result = tslib.array_strptime(arg, format, exact=exact, errors=errors) except tslib.OutOfBoundsDatetime: if errors == 'raise': raise result = arg except ValueError: # if format was inferred, try falling back # to array_to_datetime - terminate here # for specified formats if not infer_datetime_format: if errors == 'raise': raise result = arg if result is None and (format is None or infer_datetime_format): result = tslib.array_to_datetime( arg, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) if is_datetime64_dtype(result) and box: result = DatetimeIndex(result, tz=tz, name=name) return result except ValueError as e: try: values, tz = tslib.datetime_to_datetime64(arg) return DatetimeIndex._simple_new(values, name=name, tz=tz) except (ValueError, TypeError): raise e if arg is None: return arg elif isinstance(arg, tslib.Timestamp): return arg elif isinstance(arg, ABCSeries): from pandas import Series values = _convert_listlike(arg._values, False, format) return Series(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, MutableMapping)): 
        return _assemble_from_unit_mappings(arg, errors=errors)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, box, format, name=arg.name)
    elif is_list_like(arg):
        return _convert_listlike(arg, box, format)

    return _convert_listlike(np.array([arg]), box, format)[0]


# mappings for assembling units
_unit_map = {'year': 'year',
             'years': 'year',
             'month': 'month',
             'months': 'month',
             'day': 'day',
             'days': 'day',
             'hour': 'h',
             'hours': 'h',
             'minute': 'm',
             'minutes': 'm',
             'second': 's',
             'seconds': 's',
             'ms': 'ms',
             'millisecond': 'ms',
             'milliseconds': 'ms',
             'us': 'us',
             'microsecond': 'us',
             'microseconds': 'us',
             'ns': 'ns',
             'nanosecond': 'ns',
             'nanoseconds': 'ns'
             }


def _assemble_from_unit_mappings(arg, errors):
    """
    assemble the unit specified fields from the arg (DataFrame)

    Return a Series for actual parsing

    Parameters
    ----------
    arg : DataFrame
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'

        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input

    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    # replace passed unit with _unit_map
    def f(value):
        if value in _unit_map:
            return _unit_map[value]

        # m is case significant
        if value.lower() in _unit_map:
            return _unit_map[value.lower()]

        return value

    unit = {k: f(k) for k in arg.keys()}
    unit_rev = {v: k for k, v in unit.items()}

    # we require at least Ymd
    required = ['year', 'month', 'day']
    req = sorted(list(set(required) - set(unit_rev.keys())))
    if len(req):
        raise ValueError("to assemble mappings requires at "
                         "least that [year, month, day] be specified: "
                         "[{0}] is missing".format(','.join(req)))

    # keys we don't recognize
    excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(excess):
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{0}]".format(','.join(excess)))

    def coerce(values):
        # we allow coercion if errors allows
        values = to_numeric(values, errors=errors)

        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        return values

    values = (coerce(arg[unit_rev['year']]) * 10000 +
              coerce(arg[unit_rev['month']]) * 100 +
              coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {0}".format(e))

    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        value = unit_rev.get(u)
        if value is not None and value in arg:
            try:
                values += to_timedelta(coerce(arg[value]),
                                       unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes "
                                 "[{0}]: {1}".format(value, e))

    return values


def _attempt_YYYYMMDD(arg, errors):
    """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
        arg is passed in as an object dtype, but could really be ints/strings
        with nan-like/or floats (e.g.
with nan) Parameters ---------- arg : passed value errors : 'raise','ignore','coerce' """ def calc(carg): # calculate the actual result carg = carg.astype(object) parsed = lib.try_parse_year_month_day(carg / 10000, carg / 100 % 100, carg % 100) return tslib.array_to_datetime(parsed, errors=errors) def calc_with_mask(carg, mask): result = np.empty(carg.shape, dtype='M8[ns]') iresult = result.view('i8') iresult[~mask] = tslib.iNaT result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \ astype('M8[ns]') return result # try intlike / strings that are ints try: return calc(arg.astype(np.int64)) except: pass # a float with actual np.nan try: carg = arg.astype(np.float64) return calc_with_mask(carg, notnull(carg)) except: pass # string with NaN-like try: mask = ~lib.ismember(arg, tslib._nat_strings) return calc_with_mask(arg, mask) except: pass return None def _format_is_iso(f): """ Does format match the iso8601 set that can be handled by the C parser? Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different but must be consistent. Leading 0s in dates and times are optional. """ iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format excluded_formats = ['%Y%m%d', '%Y%m', '%Y'] for date_sep in [' ', '/', '\\', '-', '.', '']: for time_sep in [' ', 'T']: if (iso_template(date_sep=date_sep, time_sep=time_sep ).startswith(f) and f not in excluded_formats): return True return False def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): """ Try hard to parse datetime string, leveraging dateutil plus some extra goodies like quarter recognition. Parameters ---------- arg : compat.string_types freq : str or DateOffset, default None Helps with interpreting time string if supplied dayfirst : bool, default None If None uses default from print_config yearfirst : bool, default None If None uses default from print_config Returns ------- datetime, datetime/dateutil.parser._result, str """ from pandas.core.config import get_option if not isinstance(arg, compat.string_types): return arg from pandas.tseries.offsets import DateOffset if isinstance(freq, DateOffset): freq = freq.rule_code if dayfirst is None: dayfirst = get_option("display.date_dayfirst") if yearfirst is None: yearfirst = get_option("display.date_yearfirst") return tslib.parse_datetime_string_with_reso(arg, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst) DateParseError = tslib.DateParseError normalize_date = tslib.normalize_date # Fixed time formats for time parsing _time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"] def _guess_time_format_for_array(arr): # Try to guess the format based on the first non-NaN element non_nan_elements = notnull(arr).nonzero()[0] if len(non_nan_elements): element = arr[non_nan_elements[0]] for time_format in _time_formats: try: datetime.strptime(element, time_format) return time_format except ValueError: pass return None def to_time(arg, format=None, infer_time_format=False, errors='raise'): """ Parse time strings to time objects using fixed strptime formats ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p") Use infer_time_format if all the strings are in the same format to speed up conversion. Parameters ---------- arg : string in time format, datetime.time, list, tuple, 1-d array, Series format : str, default None Format used to convert arg into a time object. If None, fixed formats are used. 
infer_time_format: bool, default False Infer the time format based on the first non-NaN element. If all strings are in the same format, this will speed up conversion. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as None - If 'ignore', then invalid parsing will return the input Returns ------- datetime.time """ from pandas.core.series import Series def _convert_listlike(arg, format): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') arg = _ensure_object(arg) if infer_time_format and format is None: format = _guess_time_format_for_array(arg) times = [] if format is not None: for element in arg: try: times.append(datetime.strptime(element, format).time()) except (ValueError, TypeError): if errors == 'raise': raise ValueError("Cannot convert %s to a time with " "given format %s" % (element, format)) elif errors == 'ignore': return arg else: times.append(None) else: formats = _time_formats[:] format_found = False for element in arg: time_object = None for time_format in formats: try: time_object = datetime.strptime(element, time_format).time() if not format_found: # Put the found format in front fmt = formats.pop(formats.index(time_format)) formats.insert(0, fmt) format_found = True break except (ValueError, TypeError): continue if time_object is not None: times.append(time_object) elif errors == 'raise': raise ValueError("Cannot convert arg {arg} to " "a time".format(arg=arg)) elif errors == 'ignore': return arg else: times.append(None) return times if arg is None: return arg elif isinstance(arg, time): return arg elif isinstance(arg, Series): values = _convert_listlike(arg._values, format) return Series(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, format) elif is_list_like(arg): return _convert_listlike(arg, format) return _convert_listlike(np.array([arg]), format)[0] def format(dt): """Returns date in YYYYMMDD format.""" return dt.strftime('%Y%m%d') OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0) def ole2datetime(oledt): """function for converting excel date to normal date format""" val = float(oledt) # Excel has a bug where it thinks the date 2/29/1900 exists # we just reject any date before 3/1/1900. if val < 61: raise ValueError("Value is outside of acceptable range: %s " % val) return OLE_TIME_ZERO + timedelta(days=val)
""" Guess the datetime format of a given datetime string. Parameters ---------- dt_str : string, datetime string to guess the format of dayfirst : boolean, default False If True parses dates with the day first, eg 20/01/2005 Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug). dt_str_parse : function, defaults to `compat.parse_date` (dateutil) This function should take in a datetime string and return a `datetime.datetime` guess that the datetime string represents dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil) This function should take in a datetime string and return a list of strings, the guess of the various specific parts e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30'] Returns ------- ret : datetime format string (for `strftime` or `strptime`) """ if dt_str_parse is None or dt_str_split is None: return None if not isinstance(dt_str, compat.string_types): return None day_attribute_and_format = (('day',), '%d', 2) # attr name, format, padding (if any) datetime_attrs_to_format = [ (('year', 'month', 'day'), '%Y%m%d', 0), (('year',), '%Y', 0), (('month',), '%B', 0), (('month',), '%b', 0), (('month',), '%m', 2), day_attribute_and_format, (('hour',), '%H', 2), (('minute',), '%M', 2), (('second',), '%S', 2), (('microsecond',), '%f', 6), (('second', 'microsecond'), '%S.%f', 0), ] if dayfirst: datetime_attrs_to_format.remove(day_attribute_and_format) datetime_attrs_to_format.insert(0, day_attribute_and_format) try: parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst) except: # In case the datetime can't be parsed, its format cannot be guessed return None if parsed_datetime is None: return None try: tokens = dt_str_split(dt_str) except: # In case the datetime string can't be split, its format cannot # be guessed return None format_guess = [None] * len(tokens) found_attrs = set() for attrs, attr_format, padding in datetime_attrs_to_format: # If a given attribute has been placed in the format string, skip # over other formats for that same underlying attribute (IE, month # can be represented in multiple different ways) if set(attrs) & found_attrs: continue if all(getattr(parsed_datetime, attr) is not None for attr in attrs): for i, token_format in enumerate(format_guess): token_filled = tokens[i].zfill(padding) if (token_format is None and token_filled == parsed_datetime.strftime(attr_format)): format_guess[i] = attr_format tokens[i] = token_filled found_attrs.update(attrs) break # Only consider it a valid guess if we have a year, month and day if len(set(['year', 'month', 'day']) & found_attrs) != 3: return None output_format = [] for i, guess in enumerate(format_guess): if guess is not None: # Either fill in the format placeholder (like %Y) output_format.append(guess) else: # Or just the token separate (IE, the dashes in "01-01-2013") try: # If the token is numeric, then we likely didn't parse it # properly, so our guess is wrong float(tokens[i]) return None except ValueError: pass output_format.append(tokens[i]) guessed_format = ''.join(output_format) # rebuild string, capturing any inferred padding dt_str = ''.join(tokens) if parsed_datetime.strftime(guessed_format) == dt_str: return guessed_format
AcceptedConnectionEnum.rs
// This file is part of file-descriptors. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. No part of file-descriptors, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2018-2019 The developers of file-descriptors. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT.


/// An accepted connection's address family; one of three possible types.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum A
{ /// An Internet Protocol (IP) version 4 accepted connection. InternetProtocolVersion4(AcceptedConnection<sockaddr_in>), /// An Internet Protocol (IP) version 6 accepted connection. InternetProtocolVersion6(AcceptedConnection<sockaddr_in6>), /// An Unix Domain connection. UnixDomain(AcceptedConnection<sockaddr_un>), }
cceptedConnectionEnum
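A minimal match sketch over the enum above; the handler functions are hypothetical placeholders:

	match accepted
	{
		AcceptedConnectionEnum::InternetProtocolVersion4(connection) => handle_v4(connection),
		AcceptedConnectionEnum::InternetProtocolVersion6(connection) => handle_v6(connection),
		AcceptedConnectionEnum::UnixDomain(connection) => handle_unix(connection),
	}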
convert.rs
use std::io::Error; use std::num::ParseIntError; use std::str; use neo_crypto::hex; use neo_crypto::hex::ToHex; use crate::fixed8::Fixed8; /** * @param buf ArrayBuffer * @returns ASCII string */ pub fn ab2str(buf: &[u8]) -> String { str::from_utf8(buf).unwrap().to_string() } /** * @param str ASCII string * @returns */ pub fn str2ab(s: &str) -> &[u8] { s.as_bytes() } /** * @param str HEX string * @returns */ pub fn hexstring2ab(s: &str) -> Result<Box<[u8]>, Error> { Ok(hex::decode(s).unwrap().into_boxed_slice()) } /** * @param arr * @returns HEX string */ pub fn ab2hexstring(arr: &[u8]) -> String { // arr.to_hex() hex::encode(arr) } /** * @param str ASCII string * @returns HEX string */ pub fn str2hex(s: &str) -> String { s.encode_hex() // ab2hexstring(str2ab(s)) } /** * @param hexstring HEX string * @returns ASCII string */ pub fn hex2str(hex_str: &str) -> String { let h = hex::decode(hex_str).unwrap(); let v = str::from_utf8(h.as_slice()).unwrap(); v.to_string() } /** * convert an integer to big endian hex and add leading zeros * @param num Integer. */ pub fn int2hex(num: i32) -> String { // num.to_string() format!("{:02X}", num) } /** * Converts a Fixed8 hex string to its original number * @param fixed8hex number in Fixed8 representation */ pub fn hex2int(hex: &str) -> Result<i64, ParseIntError> { i64::from_str_radix(hex, 16) } /** * Converts a number to a big endian hexstring of a suitable size, optionally little endian * @param num A positive integer. * @param size The required size in bytes, eg 1 for Uint8, 2 for Uint16. Defaults to 1. * @param littleEndian Encode the hex in little endian form */ pub fn num2hexstring(num: i64, size: usize) -> String { format!("{:01$x}", num, size) } /** * Converts a number to a Fixed8 format hex string * @param num * @param size output size in bytes * @return number in Fixed8 representation. */ pub fn num2fixed8(num: i64) -> Fixed8 { Fixed8(num) } /** * Converts a number to a variable length Int. Used for array length header * @param num * @returns hexstring of int. */ pub fn num2var_int(num: i64) -> String { match num { d if d < 0xfd => num2hexstring(num, 1*2), d if d <= 0xffff => format!("fd{}", num2hexstring(num, 2*2)), d if d <= 0xffffffff => format!("fe{}", num2hexstring(num, 4*2)), _ => format!("ff{}", num2hexstring(num, 8*2)), } } #[cfg(test)] mod tests { use crate::convert::{ab2str, hex2int, int2hex, num2hexstring, str2ab}; #[test] pub fn test_ab2str() { let v: Vec<u8> = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64]; //helloworld let s = ab2str(&v); assert_eq!(s, "hello world"); } #[test] pub fn test_str2ab() { let s = "hello world"; let v = str2ab(s); assert_eq!(v, [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64]); } #[test] pub fn test_num2hexstring() { let i = 92233720; let h = num2hexstring(i, 2); assert_ne!(h, "F8".to_lowercase()); let h = num2hexstring(i, 4); assert_ne!(h, "5FF8".to_lowercase()); let h = num2hexstring(i, 8); assert_eq!(h, "057F5FF8".to_lowercase()); let h = num2hexstring(i, 10); assert_eq!(h, "00057F5FF8".to_lowercase()); let h = num2hexstring(i, 12); assert_eq!(h, "0000057F5FF8".to_lowercase()) } #[test] pub fn test_int2hex() { let i = 92233720; let h = int2hex(i).to_lowercase(); assert_eq!(h, "57F5FF8".to_lowercase()) } #[test] pub fn test_hex2int() { let h = "57F5FF8"; let i = hex2int(h).unwrap(); assert_eq!(i, 92233720) }
}
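Spot-checks of num2var_int's length-prefix thresholds as the code is written (note the payload comes out big-endian here, via num2hexstring):

    assert_eq!(num2var_int(0xfc), "fc");
    assert_eq!(num2var_int(0xfd), "fd00fd");
    assert_eq!(num2var_int(0x1_0000), "fe00010000");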
data_aggregator.rs
#![allow(dead_code)] use crate::Scalar; use std::{fs::File, io::Write, path::Path}; pub trait DataAggregatorRenderer { fn render_columns(&self, columns: &mut Vec<String>); } impl DataAggregatorRenderer for Scalar { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.to_string()); } } impl<T> DataAggregatorRenderer for Vec<T> where T: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { for item in self { columns.push(item.to_string()); } } } impl<T> DataAggregatorRenderer for [T] where T: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { for item in self { columns.push(item.to_string()); } } } impl<T0, T1> DataAggregatorRenderer for (T0, T1) where T0: ToString, T1: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); } } impl<T0, T1, T2> DataAggregatorRenderer for (T0, T1, T2) where T0: ToString, T1: ToString, T2: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); } } impl<T0, T1, T2, T3> DataAggregatorRenderer for (T0, T1, T2, T3) where T0: ToString, T1: ToString, T2: ToString, T3: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); columns.push(self.3.to_string()); } } impl<T0, T1, T2, T3, T4> DataAggregatorRenderer for (T0, T1, T2, T3, T4) where T0: ToString, T1: ToString, T2: ToString, T3: ToString, T4: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); columns.push(self.3.to_string()); columns.push(self.4.to_string()); } } impl<T0, T1, T2, T3, T4, T5> DataAggregatorRenderer for (T0, T1, T2, T3, T4, T5) where T0: ToString, T1: ToString, T2: ToString, T3: ToString, T4: ToString, T5: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); columns.push(self.3.to_string()); columns.push(self.4.to_string()); columns.push(self.5.to_string()); } } impl<T0, T1, T2, T3, T4, T5, T6> DataAggregatorRenderer for (T0, T1, T2, T3, T4, T5, T6) where T0: ToString, T1: ToString, T2: ToString, T3: ToString, T4: ToString, T5: ToString, T6: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); columns.push(self.3.to_string()); columns.push(self.4.to_string()); columns.push(self.5.to_string()); columns.push(self.6.to_string()); } } impl<T0, T1, T2, T3, T4, T5, T6, T7> DataAggregatorRenderer for (T0, T1, T2, T3, T4, T5, T6, T7) where T0: ToString, T1: ToString, T2: ToString, T3: ToString, T4: ToString, T5: ToString, T6: ToString, T7: ToString, { fn render_columns(&self, columns: &mut Vec<String>) { columns.push(self.0.to_string()); columns.push(self.1.to_string()); columns.push(self.2.to_string()); columns.push(self.3.to_string()); columns.push(self.4.to_string()); columns.push(self.5.to_string()); columns.push(self.6.to_string()); columns.push(self.7.to_string()); } } pub struct DataAggregator<T> where T: DataAggregatorRenderer, { result: File, rows: Vec<T>, auto_flush: Option<usize>, } impl<T> DataAggregator<T> where T: DataAggregatorRenderer, { pub fn new<P>(result_path: P) -> Self where P: AsRef<Path>, { Self { 
result: File::create(result_path).unwrap(), rows: vec![], auto_flush: None, } } pub fn auto_flush(&self) -> Option<usize> { self.auto_flush } pub fn set_auto_flush(&mut self, rows_count: Option<usize>) { self.auto_flush = rows_count; } pub fn push(&mut self, item: T) { self.rows.push(item); if let Some(count) = self.auto_flush { if self.rows.len() >= count { self.flush(); } } } pub fn flush(&mut self) { if self.rows.is_empty() { return; } let mut columns = vec![]; for row in &self.rows { columns.clear(); row.render_columns(&mut columns); self.result .write_fmt(format_args!("{}\r\n", columns.join("\t"))) .unwrap(); } self.rows.clear(); self.result.flush().unwrap(); } } impl<T> Drop for DataAggregator<T> where T: DataAggregatorRenderer, { fn drop(&mut self) { self.flush();
}
}
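A hypothetical usage sketch of the aggregator above, assuming Scalar is a floating-point alias; the output path and flush threshold are invented:

    let mut agg: DataAggregator<(usize, Scalar)> = DataAggregator::new("run_stats.tsv");
    agg.set_auto_flush(Some(1_000));
    for step in 0..10_000 {
        agg.push((step, step as Scalar * 0.5));
    }
    // any rows still buffered are written when `agg` is dropped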
__init__.py
"""Interact with Taskwarrior.""" import datetime import os import re import threading import traceback from pathlib import Path from shutil import which from subprocess import PIPE, Popen from typing import List, Optional, Tuple, Union import albert as v0 # type: ignore import dateutil import gi import taskw from fuzzywuzzy import process from overrides import overrides from taskw_gcal_sync import TaskWarriorSide gi.require_version("Notify", "0.7") # isort:skip gi.require_version("GdkPixbuf", "2.0") # isort:skip from gi.repository import GdkPixbuf, Notify # isort:skip # type: ignore # metadata ------------------------------------------------------------------------------------ __title__ = "Taskwarrior interaction" __version__ = "0.4.0" __triggers__ = "t " __authors__ = "Nikos Koukis" __homepage__ = "https://github.com/bergercookie/awesome-albert-plugins" __simplename__ = "taskwarrior" # initial checks ------------------------------------------------------------------------------ # icon ---------------------------------------------------------------------------------------- icon_path = os.path.join(os.path.dirname(__file__), "taskwarrior.svg") icon_path_b = os.path.join(os.path.dirname(__file__), "taskwarrior_blue.svg") icon_path_r = os.path.join(os.path.dirname(__file__), "taskwarrior_red.svg") icon_path_y = os.path.join(os.path.dirname(__file__), "taskwarrior_yellow.svg") icon_path_c = os.path.join(os.path.dirname(__file__), "taskwarrior_cyan.svg") icon_path_g = os.path.join(os.path.dirname(__file__), "taskwarrior_green.svg") # initial configuration ----------------------------------------------------------------------- # should the plugin show relevant some info without the trigger? show_items_wo_trigger = True failure_tag = "fail" cache_path = Path(v0.cacheLocation()) / __simplename__ config_path = Path(v0.configLocation()) / __simplename__ data_path = Path(v0.dataLocation()) / __simplename__ reminders_tag_path = config_path / "reminders_tag" reminders_tag = "remindme" class FileBackedVar: def __init__(self, varname, convert_fn=str, init_val=None): self._fpath = config_path / varname self._convert_fn = convert_fn if init_val: with open(self._fpath, "w") as f: f.write(str(init_val)) else: self._fpath.touch() def get(self): with open(self._fpath, "r") as f: return self._convert_fn(f.read().strip()) def set(self, val): with open(self._fpath, "w") as f: return f.write(str(val)) class TaskWarriorSideWLock: """Multithreading-safe version of TaskWarriorSide.""" def __init__(self): self.tw = TaskWarriorSide(enable_caching=True) self.tw_lock = threading.Lock() def start(self, *args, **kargs): with self.tw_lock: return self.tw.start(*args, **kargs) def get_all_items(self, *args, **kargs): with self.tw_lock: return self.tw.get_all_items(*args, **kargs) def get_task_id(self, *args, **kargs): with self.tw_lock: return self.tw.get_task_id(*args, **kargs) @property def reload_items(self): return self.tw.reload_items @reload_items.setter def reload_items(self, val: bool): self.tw.reload_items = val def update_item(self, *args, **kargs): self.tw.update_item(*args, **kargs) tw_side = TaskWarriorSideWLock() last_used_date = FileBackedVar( "last_date_used", convert_fn=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d").date(), init_val=datetime.datetime.today().date(), ) dev_mode = True # regular expression to match URLs # https://gist.github.com/gruber/8891611 url_re = re.compile( 
r"""(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))""" ) # plugin main functions ----------------------------------------------------------------------- def do_notify(msg: str, image=None): app_name =
e_only_tzlocal(dt: datetime.datetime):
    return dt.astimezone(dateutil.tz.tzlocal()).date()  # type: ignore


def get_tasks_of_date(date: datetime.date):
    tasks = tw_side.get_all_items(skip_completed=True)

    # You have to do the comparison in tzlocal. TaskWarrior stores the tasks in UTC and thus
    # the effective date*time* may not match the given date parameter because of the time
    # difference
    tasks = [t for t in tasks if "due" in t.keys() and date_only_tzlocal(t["due"]) == date]

    return tasks


def initialize():
    # Called when the extension is loaded (ticked in the settings) - blocking

    # create config location
    config_path.mkdir(parents=False, exist_ok=True)


def finalize():
    pass


def handleQuery(query):
    results = []

    # we're into a new day - create and assign a fresh instance
    last_used = last_used_date.get()
    current_date = datetime.datetime.today().date()
    global tw_side, subcommands
    if last_used < current_date:
        tw_side = TaskWarriorSideWLock()
        subcommands = create_subcommands()
        last_used_date.set(current_date)
    elif last_used > current_date:
        # maybe due to NTP?
        v0.critical(
            f"Current date {current_date} < last_used date {last_used} ?! Overriding current date, please report this if it persists"
        )
        tw_side = TaskWarriorSideWLock()
        subcommands = create_subcommands()
        last_used_date.set(current_date)

    if not query.isTriggered:
        if show_items_wo_trigger and len(query.string) < 2:
            results = [
                ActiveTasks().get_as_albert_item(),
                TodayTasks().get_as_albert_item(),
                *results,
            ]
    else:
        # join any previously launched threads; pop from the end so the list
        # shrinks safely while we iterate
        while workers:
            workers.pop().join(2)

        try:
            query.disableSort()
            results_setup = setup(query)
            if results_setup:
                return results_setup

            tasks = tw_side.get_all_items(skip_completed=True)
            query_str = query.string

            if len(query_str) < 2:
                results.extend([s.get_as_albert_item() for s in subcommands])
                results.append(
                    get_as_item(
                        text="Reload list of tasks",
                        actions=[v0.FuncAction("Reload", async_reload_items)],
                    )
                )
                tasks.sort(key=lambda t: t["urgency"], reverse=True)
                results.extend([get_tw_item(task) for task in tasks])
            else:
                subcommand_query = get_subcommand_query(query_str)
                if subcommand_query:
                    results.extend(
                        subcommand_query.command.get_as_albert_items_full(
                            subcommand_query.query
                        )
                    )
                    if not results:
                        results.append(get_as_item(text="No results"))
                else:
                    # find relevant results
                    desc_to_task = {task["description"]: task for task in tasks}
                    matched = process.extract(query_str, list(desc_to_task.keys()), limit=30)
                    for m in [elem[0] for elem in matched]:
                        task = desc_to_task[m]
                        results.append(get_tw_item(task))

        except Exception:  # user to report error
            if dev_mode:
                v0.critical(traceback.format_exc())
                raise

            results.insert(
                0,
                v0.Item(
                    id=__title__,
                    icon=icon_path,
                    text="Something went wrong! 
Press [ENTER] to copy error and report it", actions=[ v0.ClipAction( f"Copy error - report it to {__homepage__[8:]}", f"{traceback.format_exc()}", ) ], ), ) return results def get_as_item(**kargs) -> v0.Item: if "icon" in kargs: icon = kargs.pop("icon") else: icon = icon_path return v0.Item(id=__title__, icon=icon, **kargs) # supplementary functions --------------------------------------------------------------------- workers: List[threading.Thread] = [] def async_reload_items(): def do_reload(): v0.info("TaskWarrior: Updating list of tasks...") tw_side.reload_items = True tw_side.get_all_items(skip_completed=True) t = threading.Thread(target=do_reload) t.start() workers.append(t) def setup(query): # type: ignore results = [] if not which("task"): results.append( v0.Item( id=__title__, icon=icon_path, text=f'"taskwarrior" is not installed.', subtext='Please install and configure "taskwarrior" accordingly.', actions=[ v0.UrlAction( 'Open "taskwarrior" website', "https://taskwarrior.org/download/" ) ], ) ) return results return results def save_data(data: str, data_name: str): """Save a piece of data in the configuration directory.""" with open(config_path / data_name, "w") as f: f.write(data) def load_data(data_name) -> str: """Load a piece of data from the configuration directory.""" with open(config_path / data_name, "r") as f: data = f.readline().strip().split()[0] return data def get_as_subtext_field(field, field_title=None): s = "" if field: s = f"{field} | " else: return "" if field_title: s = f"{field_title}:" + s return s def urgency_to_visuals(prio: Union[float, None]) -> Tuple[Union[str, None], Path]: if prio is None: return None, Path(icon_path) elif prio < 4: return "↓", Path(icon_path_b) elif prio < 8: return "↘", Path(icon_path_c) elif prio < 11: return "-", Path(icon_path_g) elif prio < 15: return "↗", Path(icon_path_y) else: return "↑", Path(icon_path_r) def fail_task(task_id: list): run_tw_action(args_list=[task_id, "modify", "+fail"]) run_tw_action(args_list=[task_id, "done"]) def run_tw_action(args_list: list, need_pty=False): args_list = ["task", "rc.recurrence.confirmation=no", "rc.confirmation=off", *args_list] if need_pty: args_list.insert(0, "x-terminal-emulator") args_list.insert(1, "-e") proc = Popen(args_list, stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() if proc.returncode != 0: image = icon_path_r msg = f'stdout: {stdout.decode("utf-8")} | stderr: {stderr.decode("utf-8")}' else: image = icon_path msg = stdout.decode("utf-8") do_notify(msg=msg, image=image) async_reload_items() def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore """Get a single TW task as an Albert Item.""" field = get_as_subtext_field task_id = tw_side.get_task_id(task) actions = [ v0.FuncAction( "Complete task", lambda args_list=["done", task_id]: run_tw_action(args_list), ), v0.FuncAction( "Delete task", lambda args_list=["delete", task_id]: run_tw_action(args_list), ), v0.FuncAction( "Start task", lambda args_list=["start", task_id]: run_tw_action(args_list), ), v0.FuncAction( "Stop task", lambda args_list=["stop", task_id]: run_tw_action(args_list), ), v0.FuncAction( "Edit task interactively", lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True), ), v0.FuncAction( "Fail task", lambda task_id=task_id: fail_task(task_id=task_id), ), v0.ClipAction("Copy task UUID", f"{task_id}"), ] found_urls = url_re.findall(task["description"]) if "annotations" in task.keys(): found_urls.extend(url_re.findall(" ".join(task["annotations"]))) for url in 
found_urls[-1::-1]:
        actions.insert(0, v0.UrlAction(f"Open {url}", url))

    if reminders_tag_path.is_file():
        global reminders_tag
        reminders_tag = load_data(reminders_tag_path)
    else:
        save_data("remindme", str(reminders_tag_path))

    actions.append(
        v0.FuncAction(
            f"Add to Reminders (+{reminders_tag})",
            lambda args_list=[
                "modify",
                task_id,
                f"+{reminders_tag}",
            ]: run_tw_action(args_list),
        )
    )

    urgency_str, icon = urgency_to_visuals(task.get("urgency"))
    text = f'{task["description"]}'
    if "start" in task:
        text = f'<p style="color:orange;">{text}</p>'

    due = None
    if "due" in task:
        due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S")  # type: ignore

    return get_as_item(
        text=text,
        subtext="{}{}{}{}{}".format(
            field(urgency_str),
            "ID: {}... | ".format(tw_side.get_task_id(task)[:8]),
            field(task["status"]),
            field(task.get("tags"), "tags"),
            field(due, "due"),
        )[:-2],
        icon=str(icon),
        completion=f'{__triggers__}{task["description"]}',
        actions=actions,
    )


# subcommands ---------------------------------------------------------------------------------
class Subcommand:
    def __init__(self, *, name, desc):
        self.name = name
        self.desc = desc
        self.subcommand_prefix = f"{__triggers__}{self.name}"

    def get_as_albert_item(self):
        return get_as_item(text=self.desc, completion=f"{self.subcommand_prefix} ")

    def get_as_albert_items_full(self, query_str):
        return [self.get_as_albert_item()]

    def __str__(self) -> str:
        return f"Name: {self.name} | Description: {self.desc}"


class AddSubcommand(Subcommand):
    def __init__(self):
        super(AddSubcommand, self).__init__(name="add", desc="Add a new task")

    @overrides
    def get_as_albert_items_full(self, query_str):
        items = []

        add_item = self.get_as_albert_item()
        add_item.subtext = query_str
        add_item.completion = f"{self.subcommand_prefix} {query_str}"
        add_item.addAction(
            v0.FuncAction(
                "Add task",
                lambda args_list=["add", *query_str.split()]: run_tw_action(args_list),
            )
        )
        items.append(add_item)

        to_reminders = v0.Item(
            id=__title__,
            text=f"Add +{reminders_tag} tag",
            subtext=f"Add +{reminders_tag} on [TAB]",
            icon=icon_path_y,
            completion=f"{self.subcommand_prefix} {query_str} +{reminders_tag}",
        )
        items.append(to_reminders)

        def item_at_date(date: datetime.date, time_24h: int):
            dt_str = f'{date.strftime("%Y%m%d")}T{time_24h}0000'
            return v0.Item(
                id=__title__,
                text=f"Due {date}, at {time_24h}:00",
                subtext=f"Add due:{dt_str} on [TAB]",
                icon=icon_path_c,
                completion=f"{self.subcommand_prefix} {query_str} due:{dt_str}",
            )

        items.append(item_at_date(datetime.date.today(), time_24h=15))
        items.append(item_at_date(datetime.date.today(), time_24h=19))
        items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1), time_24h=10))
        items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1), time_24h=15))
        items.append(item_at_date(datetime.date.today() + datetime.timedelta(days=1), time_24h=19))

        return items


class LogSubcommand(Subcommand):
    def __init__(self):
        super(LogSubcommand, self).__init__(name="log", desc="Log an already done task")

    @overrides
    def get_as_albert_items_full(self, query_str):
        item = self.get_as_albert_item()
        item.subtext = query_str
        item.addAction(
            v0.FuncAction(
                "Log task",
                lambda args_list=["log", *query_str.split()]: run_tw_action(args_list),
            )
        )
        return [item]


class ActiveTasks(Subcommand):
    def __init__(self):
        super(ActiveTasks, self).__init__(name="active", desc="Active tasks")

    @overrides
    def get_as_albert_items_full(self, query_str):
        return [
            get_tw_item(t)
            for t in tw_side.get_all_items(skip_completed=True)
            if
"start" in t ] def move_tasks_of_date_to_next_day(date: datetime.date): for t in get_tasks_of_date(date): tw_side.update_item(item_id=str(t["uuid"]), due=t["due"] + datetime.timedelta(days=1)) class DateTasks(Subcommand): """ Common parent to classes like TodayTasks, and YesterdayTasks so as to not repeat ourselves. """ def __init__(self, date: datetime.date, *args, **kargs): super(DateTasks, self).__init__(*args, **kargs) self.date = date @overrides def get_as_albert_item(self): item = super().get_as_albert_item() item.addAction( v0.FuncAction( "Move tasks to the day after", lambda date=self.date: move_tasks_of_date_to_next_day(date), ) ) return item @overrides def get_as_albert_items_full(self, query_str): return [get_tw_item(t) for t in get_tasks_of_date(self.date)] class TodayTasks(DateTasks): def __init__(self): super(TodayTasks, self).__init__( date=datetime.date.today(), name="today", desc="Today's tasks" ) class YesterdayTasks(DateTasks): def __init__(self): super(YesterdayTasks, self).__init__( date=datetime.date.today() - datetime.timedelta(days=1), name="yesterday", desc="Yesterday's tasks", ) class TomorrowTasks(DateTasks): def __init__(self): super(TomorrowTasks, self).__init__( date=datetime.date.today() + datetime.timedelta(days=1), name="tomorrow", desc="Tomorrow's tasks", ) class SubcommandQuery: def __init__(self, subcommand: Subcommand, query: str): """ Query for a specific subcommand. :query: Query text - doesn't include the subcommand itself """ self.command = subcommand self.query = query def __str__(self) -> str: return f"Command: {self.command}\nQuery Text: {self.query}" def create_subcommands(): return [ AddSubcommand(), LogSubcommand(), ActiveTasks(), TodayTasks(), YesterdayTasks(), TomorrowTasks(), ] subcommands = create_subcommands() def get_subcommand_for_name(name: str) -> Optional[Subcommand]: """Get a subcommand with the indicated name.""" matching = [s for s in subcommands if s.name.lower() == name.lower()] if matching: return matching[0] def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]: """ Determine whether current query is of a subcommand. If so first returned the corresponding SubcommandQeury object. """ if not query_str: return None # spilt: # "subcommand_name rest of query" -> ["subcommand_name", "rest of query""] query_parts = query_str.strip().split(None, maxsplit=1) if len(query_parts) < 2: query_str = "" else: query_str = query_parts[1] subcommand = get_subcommand_for_name(query_parts[0]) if subcommand: return SubcommandQuery(subcommand=subcommand, query=query_str)
"Taskwarrior" Notify.init(app_name) image = image n = Notify.Notification.new(app_name, msg, image) n.show() def dat
digitize-data.py
from pynput import mouse class MyException(Exception):pass X = [] Y = [] NumberOfMouseClicks = 0 print('Click Origin') def on_click(x, y, button, pressed): button = str(button) global NumberOfMouseClicks NumberOfMouseClicks = NumberOfMouseClicks + 1 if NumberOfMouseClicks==1: print('Click Top Right') if NumberOfMouseClicks==3:
X.append(x) Y.append(y) if button!='Button.left': raise MyException(button) def plot_data(X, Y, Xmin, Xmax, Ymin, Ymax): import matplotlib.pyplot as plt plt.plot(X,Y,'b-') plt.xlim((Xmin, Xmax)) plt.ylim((Ymin, Ymax)) plt.show() def main(X,Y): with mouse.Listener(on_click=on_click) as listener: try: listener.join() except MyException as e: pass # drop duplicates X = X[::2] Y = Y[::2] # input boundaries Xmin = float(input('Input X-min: ')) Xmax = float(input('Input X-max: ')) Ymin = float(input('Input Y-min: ')) Ymax = float(input('Input Y-max: ')) # define scales from data origin = [X[0],Y[0]] topRight = [X[1],Y[1]] XminScale = origin[0] XmaxScale = topRight[0] YminScale = origin[1] YmaxScale = topRight[1] # drop extras X = X[2:-1] Y = Y[2:-1] # scale ## (old_value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min Xplot = [(i - XminScale) / (XmaxScale - XminScale) * (Xmax - Xmin) + Xmin for i in X] Yplot = [(i - YminScale) / (YmaxScale - YminScale) * (Ymax - Ymin) + Ymin for i in Y] # print outputs print('Origin: {}'.format([round(i, 2) for i in origin])) print('Top Right: {}'.format([round(i, 2) for i in topRight])) print('X: {}'.format([round(i, 2) for i in Xplot])) print('Y: {}'.format([round(i, 2) for i in Yplot])) # plot plot_data(Xplot, Yplot, Xmin, Xmax, Ymin, Ymax) if __name__ == '__main__': main(X,Y)
print('Click data points. Right-click to end.')
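# ---------------------------------------------------------------------------
# Minimal sketch of the linear rescaling used above, on made-up numbers:
# a click at pixel 150 on an axis calibrated from pixel 100 (= 0.0) to
# pixel 300 (= 10.0) maps to the data value 2.5.
def rescale(value, old_min, old_max, new_min, new_max):
    # (old_value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min
    return (value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min

assert rescale(150, 100, 300, 0.0, 10.0) == 2.5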
marker.rs
//! Primitive traits and types representing basic properties of types. //! //! Rust types can be classified in various useful ways according to //! their intrinsic properties. These classifications are represented //! as traits. #![stable(feature = "rust1", since = "1.0.0")] use crate::cell::UnsafeCell; use crate::cmp; use crate::hash::Hash; use crate::hash::Hasher; /// Types that can be transferred across thread boundaries. /// /// This trait is automatically implemented when the compiler determines it's /// appropriate. /// /// An example of a non-`Send` type is the reference-counting pointer /// [`rc::Rc`][`Rc`]. If two threads attempt to clone [`Rc`]s that point to the same /// reference-counted value, they might try to update the reference count at the /// same time, which is [undefined behavior][ub] because [`Rc`] doesn't use atomic /// operations. Its cousin [`sync::Arc`][arc] does use atomic operations (incurring /// some overhead) and thus is `Send`. /// /// See [the Nomicon](../../nomicon/send-and-sync.html) for more details. /// /// [`Rc`]: ../../std/rc/struct.Rc.html /// [arc]: ../../std/sync/struct.Arc.html /// [ub]: ../../reference/behavior-considered-undefined.html #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( message="`{Self}` cannot be sent between threads safely", label="`{Self}` cannot be sent between threads safely" )] pub unsafe auto trait Send { // empty. } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Send for *const T { } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Send for *mut T { } /// Types with a constant size known at compile time. /// /// All type parameters have an implicit bound of `Sized`. The special syntax /// `?Sized` can be used to remove this bound if it's not appropriate. /// /// ``` /// # #![allow(dead_code)] /// struct Foo<T>(T); /// struct Bar<T: ?Sized>(T); /// /// // struct FooUse(Foo<[i32]>); // error: Sized is not implemented for [i32] /// struct BarUse(Bar<[i32]>); // OK /// ``` /// /// The one exception is the implicit `Self` type of a trait. A trait does not /// have an implicit `Sized` bound as this is incompatible with [trait object]s /// where, by definition, the trait needs to work with all possible implementors, /// and thus could be any size. /// /// Although Rust will let you bind `Sized` to a trait, you won't /// be able to use it to form a trait object later: /// /// ``` /// # #![allow(unused_variables)] /// trait Foo { } /// trait Bar: Sized { } /// /// struct Impl; /// impl Foo for Impl { } /// impl Bar for Impl { } /// /// let x: &dyn Foo = &Impl; // OK /// // let y: &dyn Bar = &Impl; // error: the trait `Bar` cannot /// // be made into an object /// ``` /// /// [trait object]: ../../book/ch17-02-trait-objects.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented( on(parent_trait="std::path::Path", label="borrow the `Path` instead"), message="the size for values of type `{Self}` cannot be known at compilation time", label="doesn't have a size known at compile-time", note="to learn more, visit <https://doc.rust-lang.org/book/\ ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait>", )] #[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable pub trait Sized { // Empty. } /// Types that can be "unsized" to a dynamically-sized type. /// /// For example, the sized array type `[i8; 2]` implements `Unsize<[i8]>` and /// `Unsize<fmt::Debug>`. 
/// /// All implementations of `Unsize` are provided automatically by the compiler. /// /// `Unsize` is implemented for: /// /// - `[T; N]` is `Unsize<[T]>` /// - `T` is `Unsize<dyn Trait>` when `T: Trait` /// - `Foo<..., T, ...>` is `Unsize<Foo<..., U, ...>>` if: /// - `T: Unsize<U>` /// - Foo is a struct /// - Only the last field of `Foo` has a type involving `T` /// - `T` is not part of the type of any other fields /// - `Bar<T>: Unsize<Bar<U>>`, if the last field of `Foo` has type `Bar<T>` /// /// `Unsize` is used along with [`ops::CoerceUnsized`][coerceunsized] to allow /// "user-defined" containers such as [`rc::Rc`][rc] to contain dynamically-sized /// types. See the [DST coercion RFC][RFC982] and [the nomicon entry on coercion][nomicon-coerce] /// for more details. /// /// [coerceunsized]: ../ops/trait.CoerceUnsized.html /// [rc]: ../../std/rc/struct.Rc.html /// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md /// [nomicon-coerce]: ../../nomicon/coercions.html #[unstable(feature = "unsize", issue = "27732")] #[lang = "unsize"] pub trait Unsize<T: ?Sized> { // Empty. } /// Types whose values can be duplicated simply by copying bits. /// /// By default, variable bindings have 'move semantics.' In other /// words: /// /// ``` /// #[derive(Debug)] /// struct Foo; /// /// let x = Foo; /// /// let y = x; /// /// // `x` has moved into `y`, and so cannot be used /// /// // println!("{:?}", x); // error: use of moved value /// ``` /// /// However, if a type implements `Copy`, it instead has 'copy semantics': /// /// ``` /// // We can derive a `Copy` implementation. `Clone` is also required, as it's /// // a supertrait of `Copy`. /// #[derive(Debug, Copy, Clone)] /// struct Foo; /// /// let x = Foo; /// /// let y = x; /// /// // `y` is a copy of `x` /// /// println!("{:?}", x); // A-OK! /// ``` /// /// It's important to note that in these two examples, the only difference is whether you /// are allowed to access `x` after the assignment. Under the hood, both a copy and a move /// can result in bits being copied in memory, although this is sometimes optimized away. /// /// ## How can I implement `Copy`? /// /// There are two ways to implement `Copy` on your type. The simplest is to use `derive`: /// /// ``` /// #[derive(Copy, Clone)] /// struct MyStruct; /// ``` /// /// You can also implement `Copy` and `Clone` manually: /// /// ``` /// struct MyStruct; /// /// impl Copy for MyStruct { } /// /// impl Clone for MyStruct { /// fn clone(&self) -> MyStruct { /// *self /// } /// } /// ``` /// /// There is a small difference between the two: the `derive` strategy will also place a `Copy` /// bound on type parameters, which isn't always desired. /// /// ## What's the difference between `Copy` and `Clone`? /// /// Copies happen implicitly, for example as part of an assignment `y = x`. The behavior of /// `Copy` is not overloadable; it is always a simple bit-wise copy. /// /// Cloning is an explicit action, `x.clone()`. The implementation of [`Clone`] can /// provide any type-specific behavior necessary to duplicate values safely. For example, /// the implementation of [`Clone`] for [`String`] needs to copy the pointed-to string /// buffer in the heap. A simple bitwise copy of [`String`] values would merely copy the /// pointer, leading to a double free down the line. For this reason, [`String`] is [`Clone`] /// but not `Copy`. /// /// [`Clone`] is a supertrait of `Copy`, so everything which is `Copy` must also implement /// [`Clone`]. 
If a type is `Copy` then its [`Clone`] implementation only needs to return `*self` /// (see the example above). /// /// ## When can my type be `Copy`? /// /// A type can implement `Copy` if all of its components implement `Copy`. For example, this /// struct can be `Copy`: /// /// ``` /// # #[allow(dead_code)] /// struct Point { /// x: i32, /// y: i32, /// } /// ``` /// /// A struct can be `Copy`, and [`i32`] is `Copy`, therefore `Point` is eligible to be `Copy`. /// By contrast, consider /// /// ``` /// # #![allow(dead_code)] /// # struct Point; /// struct PointList { /// points: Vec<Point>, /// } /// ``` /// /// The struct `PointList` cannot implement `Copy`, because [`Vec<T>`] is not `Copy`. If we /// attempt to derive a `Copy` implementation, we'll get an error: /// /// ```text /// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy` /// ``` /// /// ## When *can't* my type be `Copy`? /// /// Some types can't be copied safely. For example, copying `&mut T` would create an aliased /// mutable reference. Copying [`String`] would duplicate responsibility for managing the /// [`String`]'s buffer, leading to a double free. /// /// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's /// managing some resource besides its own [`size_of::<T>`] bytes. /// /// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get /// the error [E0204]. /// /// [E0204]: ../../error-index.html#E0204 /// /// ## When *should* my type be `Copy`? /// /// Generally speaking, if your type _can_ implement `Copy`, it should. Keep in mind, though, /// that implementing `Copy` is part of the public API of your type. If the type might become /// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to /// avoid a breaking API change. /// /// ## Additional implementors /// /// In addition to the [implementors listed below][impls], /// the following types also implement `Copy`: /// /// * Function item types (i.e., the distinct types defined for each function) /// * Function pointer types (e.g., `fn() -> i32`) /// * Array types, for all sizes, if the item type also implements `Copy` (e.g., `[i32; 123456]`) /// * Tuple types, if each component also implements `Copy` (e.g., `()`, `(i32, bool)`) /// * Closure types, if they capture no value from the environment /// or if all such captured values implement `Copy` themselves. /// Note that variables captured by shared reference always implement `Copy` /// (even if the referent doesn't), /// while variables captured by mutable reference never implement `Copy`. /// /// [`Vec<T>`]: ../../std/vec/struct.Vec.html /// [`String`]: ../../std/string/struct.String.html /// [`Drop`]: ../../std/ops/trait.Drop.html /// [`size_of::<T>`]: ../../std/mem/fn.size_of.html /// [`Clone`]: ../clone/trait.Clone.html /// [`String`]: ../../std/string/struct.String.html /// [`i32`]: ../../std/primitive.i32.html /// [impls]: #implementors #[stable(feature = "rust1", since = "1.0.0")] #[lang = "copy"] pub trait Copy : Clone { // Empty. } /// Types for which it is safe to share references between threads. /// /// This trait is automatically implemented when the compiler determines /// it's appropriate. /// /// The precise definition is: a type `T` is `Sync` if and only if `&T` is /// [`Send`][send]. In other words, if there is no possibility of /// [undefined behavior][ub] (including data races) when passing /// `&T` references between threads. 
///
/// As one would expect, primitive types like [`u8`][u8] and [`f64`][f64]
/// are all `Sync`, and so are simple aggregate types containing them,
/// like tuples, structs and enums. More examples of basic `Sync`
/// types include "immutable" types like `&T`, and those with simple
/// inherited mutability, such as [`Box<T>`][box], [`Vec<T>`][vec] and
/// most other collection types. (Generic parameters need to be `Sync`
/// for their container to be `Sync`.)
///
/// A somewhat surprising consequence of the definition is that `&mut T`
/// is `Sync` (if `T` is `Sync`) even though it seems like that might
/// provide unsynchronized mutation. The trick is that a mutable
/// reference behind a shared reference (that is, `& &mut T`)
/// becomes read-only, as if it were a `& &T`. Hence there is no risk
/// of a data race.
///
/// Types that are not `Sync` are those that have "interior
/// mutability" in a non-thread-safe form, such as [`cell::Cell`][cell]
/// and [`cell::RefCell`][refcell]. These types allow for mutation of
/// their contents even through an immutable, shared reference. For
/// example the `set` method on [`Cell<T>`][cell] takes `&self`, so it requires
/// only a shared reference [`&Cell<T>`][cell]. The method performs no
/// synchronization, thus [`Cell`][cell] cannot be `Sync`.
///
/// Another example of a non-`Sync` type is the reference-counting
/// pointer [`rc::Rc`][rc]. Given any reference [`&Rc<T>`][rc], you can clone
/// a new [`Rc<T>`][rc], modifying the reference counts in a non-atomic way.
///
/// For cases when one does need thread-safe interior mutability,
/// Rust provides [atomic data types], as well as explicit locking via
/// [`sync::Mutex`][mutex] and [`sync::RwLock`][rwlock]. These types
/// ensure that any mutation cannot cause data races, hence the types
/// are `Sync`. Likewise, [`sync::Arc`][arc] provides a thread-safe
/// analogue of [`Rc`][rc].
///
/// Any types with interior mutability must also use the
/// [`cell::UnsafeCell`][unsafecell] wrapper around the value(s) which
/// can be mutated through a shared reference. Failing to do this is
/// [undefined behavior][ub]. For example, [`transmute`][transmute]-ing
/// from `&T` to `&mut T` is invalid.
///
/// See [the Nomicon](../../nomicon/send-and-sync.html) for more
/// details about `Sync`.
/// /// [send]: trait.Send.html /// [u8]: ../../std/primitive.u8.html /// [f64]: ../../std/primitive.f64.html /// [box]: ../../std/boxed/struct.Box.html /// [vec]: ../../std/vec/struct.Vec.html /// [cell]: ../cell/struct.Cell.html /// [refcell]: ../cell/struct.RefCell.html /// [rc]: ../../std/rc/struct.Rc.html /// [arc]: ../../std/sync/struct.Arc.html /// [atomic data types]: ../sync/atomic/index.html /// [mutex]: ../../std/sync/struct.Mutex.html /// [rwlock]: ../../std/sync/struct.RwLock.html /// [unsafecell]: ../cell/struct.UnsafeCell.html /// [ub]: ../../reference/behavior-considered-undefined.html /// [transmute]: ../../std/mem/fn.transmute.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sync"] #[rustc_on_unimplemented( message="`{Self}` cannot be shared between threads safely", label="`{Self}` cannot be shared between threads safely" )] pub unsafe auto trait Sync { // FIXME(estebank): once support to add notes in `rustc_on_unimplemented` // lands in beta, and it has been extended to check whether a closure is // anywhere in the requirement chain, extend it as such (#48534): // ``` // on( // closure, // note="`{Self}` cannot be shared safely, consider marking the closure `move`" // ), // ``` // Empty } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Sync for *const T { } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> !Sync for *mut T { } macro_rules! impls{ ($t: ident) => ( #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> Hash for $t<T> { #[inline] fn hash<H: Hasher>(&self, _: &mut H) { } } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> cmp::PartialEq for $t<T> { fn eq(&self, _other: &$t<T>) -> bool { true } } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> cmp::Eq for $t<T> { } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> cmp::PartialOrd for $t<T> { fn partial_cmp(&self, _other: &$t<T>) -> Option<cmp::Ordering> { Option::Some(cmp::Ordering::Equal) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> cmp::Ord for $t<T> { fn cmp(&self, _other: &$t<T>) -> cmp::Ordering { cmp::Ordering::Equal } } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> Copy for $t<T> { } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> Clone for $t<T> { fn clone(&self) -> $t<T> { $t } } #[stable(feature = "rust1", since = "1.0.0")] impl<T:?Sized> Default for $t<T> { fn default() -> $t<T> { $t } } ) } /// Zero-sized type used to mark things that "act like" they own a `T`. /// /// Adding a `PhantomData<T>` field to your type tells the compiler that your /// type acts as though it stores a value of type `T`, even though it doesn't /// really. This information is used when computing certain safety properties. /// /// For a more in-depth explanation of how to use `PhantomData<T>`, please see /// [the Nomicon](../../nomicon/phantom-data.html). /// /// # A ghastly note 👻👻👻 /// /// Though they both have scary names, `PhantomData` and 'phantom types' are /// related, but not identical. A phantom type parameter is simply a type /// parameter which is never used. In Rust, this often causes the compiler to /// complain, and the solution is to add a "dummy" use by way of `PhantomData`. /// /// # Examples /// /// ## Unused lifetime parameters /// /// Perhaps the most common use case for `PhantomData` is a struct that has an /// unused lifetime parameter, typically as part of some unsafe code. 
For /// example, here is a struct `Slice` that has two pointers of type `*const T`, /// presumably pointing into an array somewhere: /// /// ```compile_fail,E0392 /// struct Slice<'a, T> { /// start: *const T, /// end: *const T, /// } /// ``` /// /// The intention is that the underlying data is only valid for the /// lifetime `'a`, so `Slice` should not outlive `'a`. However, this /// intent is not expressed in the code, since there are no uses of /// the lifetime `'a` and hence it is not clear what data it applies /// to. We can correct this by telling the compiler to act *as if* the /// `Slice` struct contained a reference `&'a T`: /// /// ``` /// use std::marker::PhantomData; /// /// # #[allow(dead_code)] /// struct Slice<'a, T: 'a> { /// start: *const T, /// end: *const T, /// phantom: PhantomData<&'a T>, /// } /// ``` /// /// This also in turn requires the annotation `T: 'a`, indicating /// that any references in `T` are valid over the lifetime `'a`. /// /// When initializing a `Slice` you simply provide the value /// `PhantomData` for the field `phantom`: /// /// ``` /// # #![allow(dead_code)] /// # use std::marker::PhantomData; /// # struct Slice<'a, T: 'a> { /// # start: *const T, /// # end: *const T, /// # phantom: PhantomData<&'a T>, /// # } /// fn borrow_vec<T>(vec: &Vec<T>) -> Slice<'_, T> { /// let ptr = vec.as_ptr(); /// Slice { /// start: ptr, /// end: unsafe { ptr.add(vec.len()) }, /// phantom: PhantomData, /// } /// } /// ``` /// /// ## Unused type parameters /// /// It sometimes happens that you have unused type parameters which /// indicate what type of data a struct is "tied" to, even though that /// data is not actually found in the struct itself. Here is an /// example where this arises with [FFI]. The foreign interface uses /// handles of type `*mut ()` to refer to Rust values of different /// types. We track the Rust type using a phantom type parameter on /// the struct `ExternalResource` which wraps a handle. /// /// [FFI]: ../../book/ch19-01-unsafe-rust.html#using-extern-functions-to-call-external-code /// /// ``` /// # #![allow(dead_code)] /// # trait ResType { } /// # struct ParamType; /// # mod foreign_lib { /// # pub fn new(_: usize) -> *mut () { 42 as *mut () } /// # pub fn do_stuff(_: *mut (), _: usize) {} /// # } /// # fn convert_params(_: ParamType) -> usize { 42 } /// use std::marker::PhantomData; /// use std::mem; /// /// struct ExternalResource<R> { /// resource_handle: *mut (), /// resource_type: PhantomData<R>, /// } /// /// impl<R: ResType> ExternalResource<R> { /// fn new() -> ExternalResource<R> { /// let size_of_res = mem::size_of::<R>(); /// ExternalResource { /// resource_handle: foreign_lib::new(size_of_res), /// resource_type: PhantomData, /// } /// } /// /// fn do_stuff(&self, param: ParamType) { /// let foreign_params = convert_params(param); /// foreign_lib::do_stuff(self.resource_handle, foreign_params); /// } /// } /// ``` /// /// ## Ownership and the drop check /// /// Adding a field of type `PhantomData<T>` indicates that your /// type owns data of type `T`. This in turn implies that when your /// type is dropped, it may drop one or more instances of the type /// `T`. This has bearing on the Rust compiler's [drop check] /// analysis. /// /// If your struct does not in fact *own* the data of type `T`, it is /// better to use a reference type, like `PhantomData<&'a T>` /// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so /// as not to indicate ownership. 
/// /// [drop check]: ../../nomicon/dropck.html #[lang = "phantom_data"] #[structural_match] #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomDa
>; impls! { PhantomData } mod impls { #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: Sync + ?Sized> Send for &T {} #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T: Send + ?Sized> Send for &mut T {} } /// Compiler-internal trait used to determine whether a type contains /// any `UnsafeCell` internally, but not through an indirection. /// This affects, for example, whether a `static` of that type is /// placed in read-only static memory or writable static memory. #[lang = "freeze"] pub(crate) unsafe auto trait Freeze {} impl<T: ?Sized> !Freeze for UnsafeCell<T> {} unsafe impl<T: ?Sized> Freeze for PhantomData<T> {} unsafe impl<T: ?Sized> Freeze for *const T {} unsafe impl<T: ?Sized> Freeze for *mut T {} unsafe impl<T: ?Sized> Freeze for &T {} unsafe impl<T: ?Sized> Freeze for &mut T {} /// Types which can be safely moved after being pinned. /// /// Since Rust itself has no notion of immovable types, and considers moves /// (e.g. through assignment or [`mem::replace`]) to always be safe, /// this trait cannot prevent types from moving by itself. /// /// Instead it is used to prevent moves through the type system, /// by controlling the behavior of pointers `P` wrapped in the [`Pin<P>`] wrapper, /// which "pin" the type in place by not allowing it to be moved out of them. /// See the [`pin module`] documentation for more information on pinning. /// /// Implementing this trait lifts the restrictions of pinning off a type, /// which then allows it to move out with functions such as [`mem::replace`]. /// /// `Unpin` has no consequence at all for non-pinned data. In particular, /// [`mem::replace`] happily moves `!Unpin` data (it works for any `&mut T`, not /// just when `T: Unpin`). However, you cannot use /// [`mem::replace`] on data wrapped inside a [`Pin<P>`] because you cannot get the /// `&mut T` you need for that, and *that* is what makes this system work. /// /// So this, for example, can only be done on types implementing `Unpin`: /// /// ```rust /// use std::mem; /// use std::pin::Pin; /// /// let mut string = "this".to_string(); /// let mut pinned_string = Pin::new(&mut string); /// /// // We need a mutable reference to call `mem::replace`. /// // We can obtain such a reference by (implicitly) invoking `Pin::deref_mut`, /// // but that is only possible because `String` implements `Unpin`. /// mem::replace(&mut *pinned_string, "other".to_string()); /// ``` /// /// This trait is automatically implemented for almost every type. /// /// [`mem::replace`]: ../../std/mem/fn.replace.html /// [`Pin<P>`]: ../pin/struct.Pin.html /// [`pin module`]: ../../std/pin/index.html #[stable(feature = "pin", since = "1.33.0")] #[lang = "unpin"] pub auto trait Unpin {} /// A marker type which does not implement `Unpin`. /// /// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default. #[stable(feature = "pin", since = "1.33.0")] #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct PhantomPinned; #[stable(feature = "pin", since = "1.33.0")] impl !Unpin for PhantomPinned {} #[stable(feature = "pin", since = "1.33.0")] impl<'a, T: ?Sized + 'a> Unpin for &'a T {} #[stable(feature = "pin", since = "1.33.0")] impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {} #[stable(feature = "pin_raw", since = "1.38.0")] impl<T: ?Sized> Unpin for *const T {} #[stable(feature = "pin_raw", since = "1.38.0")] impl<T: ?Sized> Unpin for *mut T {} /// Implementations of `Copy` for primitive types. 
/// /// Implementations that cannot be described in Rust /// are implemented in `SelectionContext::copy_clone_conditions()` in librustc. mod copy_impls { use super::Copy; macro_rules! impl_copy { ($($t:ty)*) => { $( #[stable(feature = "rust1", since = "1.0.0")] impl Copy for $t {} )* } } impl_copy! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 bool char } #[unstable(feature = "never_type", issue = "35121")] impl Copy for ! {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Copy for *const T {} #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Copy for *mut T {} // Shared references can be copied, but mutable references *cannot*! #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Copy for &T {} }
ta<T:?Sized
log.go
package log

import (
	stdlog "log"
	"os"
)

// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
	Print(v ...interface{})
	Printf(format string, v ...interface{})
}

var Logger StdLogger

func init() {
	// default Logger
	SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}

func SetLogger(customLogger StdLogger) {
	Logger = customLogger
}

func Print(v ...interface{}) {
	Logger.Print(v...)
func Printf(format string, v ...interface{}) { Logger.Printf(format, v...) }
}
print.go
// Copyright 2018 The CUE Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package parser import ( "fmt" "strconv" "strings" "cuelang.org/go/cue/ast" "cuelang.org/go/cue/token" "cuelang.org/go/internal" ) func init() { internal.DebugStr = debugStr } func
(x interface{}) (out string) { if n, ok := x.(ast.Node); ok { comments := "" for _, g := range n.Comments() { comments += debugStr(g) } if comments != "" { defer func() { out = "<" + comments + out + ">" }() } } switch v := x.(type) { case *ast.File: out := "" out += debugStr(v.Decls) return out case *ast.Package: out := "package " out += debugStr(v.Name) return out case *ast.Alias: out := debugStr(v.Ident) out += " = " out += debugStr(v.Expr) return out case *ast.BottomLit: return "_|_" case *ast.BasicLit: return v.Value case *ast.Interpolation: for _, e := range v.Elts { out += debugStr(e) } return out case *ast.EmbedDecl: out += debugStr(v.Expr) return out case *ast.ImportDecl: out := "import " if v.Lparen != token.NoPos { out += "( " out += debugStr(v.Specs) out += " )" } else { out += debugStr(v.Specs) } return out case *ast.Comprehension: out := debugStr(v.Clauses) out += debugStr(v.Value) return out case *ast.StructLit: out := "{" out += debugStr(v.Elts) out += "}" return out case *ast.ListLit: out := "[" out += debugStr(v.Elts) out += "]" return out case *ast.Ellipsis: out := "..." if v.Type != nil { out += debugStr(v.Type) } return out case *ast.ListComprehension: out := "[" out += debugStr(v.Expr) out += " " out += debugStr(v.Clauses) out += "]" return out case *ast.ForClause: out := "for " if v.Key != nil { out += debugStr(v.Key) out += ": " } out += debugStr(v.Value) out += " in " out += debugStr(v.Source) return out case *ast.IfClause: out := "if " out += debugStr(v.Condition) return out case *ast.Field: out := debugStr(v.Label) if v.Optional != token.NoPos { out += "?" } if v.Value != nil { switch v.Token { case token.ILLEGAL, token.COLON: out += ": " default: out += fmt.Sprintf(" %s ", v.Token) } out += debugStr(v.Value) for _, a := range v.Attrs { out += " " out += debugStr(a) } } return out case *ast.Attribute: return v.Text case *ast.Ident: return v.Name case *ast.TemplateLabel: out := "<" out += debugStr(v.Ident) out += ">" return out case *ast.SelectorExpr: return debugStr(v.X) + "." 
+ debugStr(v.Sel) case *ast.CallExpr: out := debugStr(v.Fun) out += "(" out += debugStr(v.Args) out += ")" return out case *ast.ParenExpr: out := "(" out += debugStr(v.X) out += ")" return out case *ast.UnaryExpr: return v.Op.String() + debugStr(v.X) case *ast.BinaryExpr: out := debugStr(v.X) op := v.Op.String() if 'a' <= op[0] && op[0] <= 'z' { op = fmt.Sprintf(" %s ", op) } out += op out += debugStr(v.Y) return out case []*ast.CommentGroup: var a []string for _, c := range v { a = append(a, debugStr(c)) } return strings.Join(a, "\n") case *ast.CommentGroup: str := "[" if v.Doc { str += "d" } if v.Line { str += "l" } str += strconv.Itoa(int(v.Position)) var a = []string{} for _, c := range v.List { a = append(a, c.Text) } return str + strings.Join(a, " ") + "] " case *ast.IndexExpr: out := debugStr(v.X) out += "[" out += debugStr(v.Index) out += "]" return out case *ast.SliceExpr: out := debugStr(v.X) out += "[" out += debugStr(v.Low) out += ":" out += debugStr(v.High) out += "]" return out case *ast.ImportSpec: out := "" if v.Name != nil { out += debugStr(v.Name) out += " " } out += debugStr(v.Path) return out case []ast.Decl: if len(v) == 0 { return "" } out := "" for _, d := range v { out += debugStr(d) out += sep } return out[:len(out)-len(sep)] case []ast.Clause: if len(v) == 0 { return "" } out := "" for _, c := range v { out += debugStr(c) out += " " } return out case []ast.Expr: if len(v) == 0 { return "" } out := "" for _, d := range v { out += debugStr(d) out += sep } return out[:len(out)-len(sep)] case []*ast.ImportSpec: if len(v) == 0 { return "" } out := "" for _, d := range v { out += debugStr(d) out += sep } return out[:len(out)-len(sep)] default: if v == nil { return "" } return fmt.Sprintf("<%T>", x) } } const sep = ", "
debugStr
airline_seat_flat.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; /* airline_seat_flat icon*/ var _react = require("react"); var _react2 = _interopRequireDefault(_react); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } const Icon = (props = {}) => _react2.default.createElement(
"svg", _extends({ viewBox: "0 0 48 48" }, props), _react2.default.createElement("path", { d: "M44 22v4H18V14h18c4.42 0 8 3.58 8 8zM4 28v4h12v4h16v-4h12v-4H4zm10.29-3.8c2.32-2.37 2.28-6.17-.09-8.48-2.37-2.32-6.17-2.28-8.48.09-2.32 2.37-2.28 6.17.09 8.48 2.36 2.32 6.16 2.28 8.48-.09z" }) ); Icon.displayName = "AirlineSeatFlatIcon"; Icon.isMaterialIcon = true; exports.default = Icon;
apps.py
from django.apps import AppConfig class ControllerConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField' name = 'controller'
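# ---------------------------------------------------------------------------
# Hedged sketch (assumed project settings, not part of this app): the config
# above takes effect once the app is listed in INSTALLED_APPS; Django then
# discovers controller.apps.ControllerConfig automatically.
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "controller",
]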
exceptions.py
"""Defines exceptions that can occur when interacting with interface data values""" from util.exceptions import ValidationException class InvalidData(ValidationException): """Exception indicating that the data is invalid""" def __init__(self, name, description): """Constructor :param name: The name of the validation error :type name: string :param description: The description of the validation error :type description: string """ super(InvalidData, self).__init__(name, description)
class InvalidDataFilter(ValidationException): """Exception indicating that the data filter is invalid""" def __init__(self, name, description): """Constructor :param name: The name of the validation error :type name: string :param description: The description of the validation error :type description: string """ super(InvalidDataFilter, self).__init__(name, description)
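# ---------------------------------------------------------------------------
# Minimal usage sketch (hypothetical caller, not part of this module):
# validation code raises InvalidData with a machine-readable name and a
# human-readable description, and callers catch it like any other
# ValidationException.
def check_required(data, key):
    """Raise InvalidData if `key` is missing from `data` (illustrative only)."""
    if key not in data:
        raise InvalidData('MISSING_VALUE', 'Required value %r was not provided' % key)

try:
    check_required({}, 'input_a')
except InvalidData as ex:
    print(ex)  # available attributes depend on util.exceptions.ValidationException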
initca_test.go
package initca import ( "bytes" "encoding/json" "net/http" "net/http/httptest" "testing" "github.com/joostschriek/cfssl/csr" ) func csrData(t *testing.T) *bytes.Reader { req := &csr.CertificateRequest{ Names: []csr.Name{ { C: "US", ST: "California", L: "San Francisco", O: "CloudFlare", OU: "Systems Engineering", }, }, CN: "cloudflare.com", Hosts: []string{"cloudflare.com"}, KeyRequest: csr.NewKeyRequest(), } csrBytes, err := json.Marshal(req) if err != nil { t.Fatal(err) } return bytes.NewReader(csrBytes) } func TestInitCARESTfulVerbs(t *testing.T) { ts := httptest.NewServer(NewHandler()) data := csrData(t) // POST should work. req, _ := http.NewRequest("POST", ts.URL, data) resp, _ := http.DefaultClient.Do(req) if resp.StatusCode != http.StatusOK { t.Fatal(resp.Status) } // Test GET, PUT, DELETE and whatever, expect 400 errors. req, _ = http.NewRequest("GET", ts.URL, data) resp, _ = http.DefaultClient.Do(req) if resp.StatusCode != http.StatusMethodNotAllowed { t.Fatal(resp.Status) } req, _ = http.NewRequest("PUT", ts.URL, data) resp, _ = http.DefaultClient.Do(req) if resp.StatusCode != http.StatusMethodNotAllowed { t.Fatal(resp.Status) } req, _ = http.NewRequest("DELETE", ts.URL, data) resp, _ = http.DefaultClient.Do(req) if resp.StatusCode != http.StatusMethodNotAllowed { t.Fatal(resp.Status) } req, _ = http.NewRequest("WHATEVER", ts.URL, data) resp, _ = http.DefaultClient.Do(req) if resp.StatusCode != http.StatusMethodNotAllowed { t.Fatal(resp.Status) } } func TestBadRequestBody(t *testing.T) { ts := httptest.NewServer(NewHandler()) req, _ := http.NewRequest("POST", ts.URL, nil) resp, _ := http.DefaultClient.Do(req) if resp.StatusCode == http.StatusOK { t.Fatal(resp.Status) } } func TestBadRequestBody_2(t *testing.T) { ts := httptest.NewServer(NewHandler())
csrBytes, err := json.Marshal(r) if err != nil { t.Fatal(err) } data := bytes.NewReader(csrBytes) req, _ := http.NewRequest("POST", ts.URL, data) resp, _ := http.DefaultClient.Do(req) if resp.StatusCode == http.StatusOK { t.Fatal(resp.Status) } }
r := &csr.CertificateRequest{}
contrast.py
# -*- coding: utf-8 -*- """ (c) 2015 @author: Janto Oellrich email: [email protected] CONTENT Function for contrast driver sampling
""" Given the featmatrix samples n_ref contrast trips. """ print 'Sampling contrast trips...' # random sampling of trips from different drivers ref_trips = np.random.choice(trips.shape[0],size=(n_ref,1),replace=False) ref = trips[ref_trips[:,0],:] print '\t\t{0} contrast trips, {1} features'.format(ref.shape[0],ref.shape[1]) return ref
""" from modules import * def sampleContrast(trips,n_ref=1000):
SettingPixels.py
import numpy as np import cv2 import os window_title = "The Input Image" input_image = "input.jpg" output_image = os.path.basename(__file__)[:-len(".py")] + ".jpg" HORIZONTAL = 0 VERTICAL = 1 def read_image(file_name = input_image): img = cv2.imread(file_name) return img def
(img,window_title = window_title): cv2.namedWindow(window_title, cv2.WINDOW_NORMAL) cv2.imshow(window_title,img) cv2.waitKey(0) cv2.destroyAllWindows() return def grayscale(img): grayscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #=6, BGR and not RGB because of how cv2 returns images return grayscale def save_to_disk(img,filename=output_image): cv2.imwrite(filename,img) def get_dimensions_hw(img): return img.shape[0:2] def get_middle_pixels_hw(img, new_height, new_width): input_img_h,input_img_w = get_dimensions_hw(img) if new_height > input_img_h: raise ValueError("Requested new height (" + str(new_height) + ") is greater than image height (" + str(input_img_h) + ").") if new_width > input_img_w: raise ValueError("Requested new width (" + str(new_width) + ") is greater than image width (" + str(input_img_w) + ").") middle_h = round(input_img_h/2) half_new_height = round(new_height/2) middle_w = round(input_img_w/2) half_new_width = round(new_width/2) middle_pixels = img[middle_h-half_new_height:middle_h+half_new_height,middle_w-half_new_width:middle_w+half_new_width] return middle_pixels def set_periodic_pixel(img, frequency, direction, new_pixel): h,w = get_dimensions_hw(img) img = np.array(img,copy=True) if direction == HORIZONTAL: for i in range(0,h): for j in range(0,w,frequency): img[i][j] = new_pixel elif direction == VERTICAL: for i in range(0,h,frequency): for j in range(0,w): img[i][j] = new_pixel return img if __name__ == "__main__": img = read_image() revised = set_periodic_pixel(img,10,HORIZONTAL,0) revised = set_periodic_pixel(revised, 20, VERTICAL, 0) save_to_disk(revised) display_image(revised) #Note: Owing to the large input image used for this example, the program will not show all #lines unless you zoom in on the saved file (unless your monitor happens to have enough #resolution...)
display_image
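# ---------------------------------------------------------------------------
# A hedged alternative sketch: the same periodic-pixel effect expressed with
# numpy slicing instead of the explicit loops above (equivalent output for
# the HORIZONTAL/VERTICAL cases used in this script; `np`, HORIZONTAL and
# VERTICAL are as defined in the file above).
def set_periodic_pixel_fast(img, frequency, direction, new_pixel):
    out = np.array(img, copy=True)
    if direction == HORIZONTAL:
        out[:, ::frequency] = new_pixel  # every `frequency`-th column, all rows
    elif direction == VERTICAL:
        out[::frequency, :] = new_pixel  # every `frequency`-th row, all columns
    return out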
ListContainer.tsx
import { connect } from 'react-redux'; import { RootState } from 'redux/interfaces'; import {
selectAll, getAreConsumerGroupsPagedFulfilled, } from 'redux/reducers/consumerGroups/consumerGroupsSlice'; import List from 'components/ConsumerGroups/List/List'; const mapStateToProps = (state: RootState) => ({ consumerGroups: selectAll(state), orderBy: getConsumerGroupsOrderBy(state), sortOrder: getConsumerGroupsSortOrder(state), totalPages: getConsumerGroupsTotalPages(state), isFetched: getAreConsumerGroupsPagedFulfilled(state), }); const mapDispatchToProps = { setConsumerGroupsSortOrderBy: sortBy, }; export default connect(mapStateToProps, mapDispatchToProps)(List);
getConsumerGroupsOrderBy, getConsumerGroupsSortOrder, getConsumerGroupsTotalPages, sortBy,
tests.py
import unittest
import re
import json
import collections
from collections import namedtuple

import client,api,entities

# TODO:
# Test multiple get_entities calls
# so that the second one uses the cached value
# Really - the class factory needs a delegate to call in order to get
# the metadata. THE CLIENT SHOULDN'T NEED TO TEST FOR CLASS EXISTENCE

# MOCKS
class MockCallable(object):
    fcall = namedtuple('fcall',['args','kwargs'])
    def __init__(self,response=None):
        self.last_call = None
        self.response = response
    def __call__(self,*args,**kwargs):
        self.last_call = self.fcall(args,kwargs)
        return self.response(*args,**kwargs) if callable(self.response) else self.response

class MockObject(object):
    def __init__(self,**kwargs):
        self.__dict__.update(kwargs)

# UNIT TESTS

# == client.py Tests == #
class HTTPRequestDispatcherTests(unittest.TestCase):
    def setUp(self):
        self.test_instance = client.HTTPRequestDispatcher()

    def test_encode_params_list(self):
        # The only time I can think this is called
        # is when using ids=123,1234 ("I think this is only called when using 'ids'" - maybe?)
        n = self.test_instance.encode_params({'test':[1,2,3]})
        self.assertEqual(n,"test=1,2,3")

    def test_encode_params_str(self):
        n = self.test_instance.encode_params({'test':"foobar"})
        self.assertEqual(n,"test=foobar")

    def test_encode_params_unicode(self):
        n = self.test_instance.encode_params({u'test':u"foobar"})
        self.assertEqual(n,"test=foobar")

    def test_encode_params_int(self):
        n = self.test_instance.encode_params({'test':123})
        self.assertEqual(n,"test=123")

class TPBasicClientTests(unittest.TestCase):
    """
    The client is an adapter of the more basic functionality of the
    HTTPRequestDispatcher, hence to test the base client we need to prove
TEST_BASE_URL = 'testurl'

    def setUp(self):
        "Setup client with mock requester so we can feed in request responses"
        self.request_response = [[1,2,3]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
            post_request = MockCallable(
                response = lambda url,params,msg,response_format:self.request_response
            ),
        )
        self.test_client = client.BasicClient(
            self.TEST_BASE_URL,self.mock_dispatcher
        )

    # Method call tests
    def test_get_entities_http_request(self):
        "Get entities should send a paginated get request"
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,[1,2,3])

    def test_create_entity_http_request(self):
        "create entity should send post request and return response"
        self.request_response = "client just returns response"
        test_inst = self.test_client.create_entity('test_entity',{})
        self.assertEqual(test_inst,self.request_response)

    # Client functionality
    def test_get_entities_chains_multi_iterable(self):
        """
        Get entities should present a list of lists as a single iterable;
        this way we simplify paginated requests for the caller
        """
        self.request_response = [[0,1,2,3],[4,5,6],[7,8,9]]
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,range(10))

    def test_request_call_includes_baseurl(self):
        """General condition for interaction with client and requester

        The client will always make sure to pass full urls to the requester
        """
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(
            self.mock_dispatcher.paginated_get_request.last_call.args[0],
            "/".join([self.TEST_BASE_URL,"test_entity"])
        )

class TPClientEntityLimitTests(unittest.TestCase):
    """The client is also able to limit the number of entities it returns

    This is really a safety check to make sure we don't inadvertently
    send too many requests (each request = 25 items)
    """
    def setUp(self):
        "Setup client with mock requester so we can feed in request responses"
        self.request_response = [[1,2,3,4,5]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
        )
        self.test_client = client.BasicClient(
            "test",self.mock_dispatcher
        )

    def test_limit_more_than_response_length(self):
        # default limit = 50
        test_collection = [i for i in self.test_client.get_entities('test_entity')]
        self.assertTrue(len(test_collection)==5)

    def test_limit_less_than_response_length(self):
        test_collection = [i for i in self.test_client.get_entities('test_entity',return_limit=3)]
        self.assertTrue(len(test_collection)==3)

    def test_limit_spans_multiple_requests(self):
        self.request_response = [range(10),range(10,20)]
        test_collection = [i for i in self.test_client.get_entities('test_entity',return_limit=15)]
        self.assertEqual(test_collection,range(15))

    def test_limit_is_unsupported(self):
        "We don't support floats, non-numbers or negative ints; these should raise errors"
        "Also it seems 0 returns nothing, so we also guard against that"
        # all error cases raise Assertion errors
        with self.assertRaises(AssertionError):
            test_collection = [
                i for i in
                self.test_client.get_entities('test_entity',return_limit=-1)
            ]
        with self.assertRaises(AssertionError):
            test_collection = [
                i for i in
                self.test_client.get_entities('test_entity',return_limit=0.1)
            ]
        with self.assertRaises(AssertionError):
            test_collection = [
                i for i in
                self.test_client.get_entities('test_entity',return_limit="s")
            ]
        with self.assertRaises(AssertionError):
            test_collection = [
                i for i in
self.test_client.get_entities('test_entity',return_limit=0)
            ]

class ObjectMappingClientTests(unittest.TestCase):
    """
    The conversion of entity data to entity instances is done in a specific
    subclass. These tests confirm the right instance is created for a given
    entity endpoint as data retrieval is already covered.
    """
    def setUp(self):
        "Set up the client with a mock requester so we can feed in request responses"
        # Setup mock client
        self.request_response = [[1,2,3,4,5]]
        self.mock_dispatcher = MockObject(
            paginated_get_request = MockCallable(
                response = lambda url,params:self.request_response
            ),
            post_request = MockCallable(
                response = lambda url,params,data,response_format:self.request_response
            )
        )
        # setup mock class factory
        class MockEntity(object):
            def __init__(self,data):
                self.d = data
            @classmethod
            def create_from_data(cls,d):
                return cls(d)
            def toDict(self):
                return self.d
        # The mock factory will return a new subclass of the mock
        self.mock_factory = MockObject(
            get = MockCallable(
                response = lambda entity,immutable: type('MockEntitySubclass',(MockEntity,),{
                    'name':entity,'immutable':immutable
                })
            )
        )
        self.test_client = client.ObjectMappingClient(
            "test",self.mock_dispatcher,MockCallable(response=self.mock_factory)
        )

    def test_get_entities_return_class(self):
        "Entity data is instantiated by entity classes based on entity_endpoint"
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        # Test the mock 'get' method of the factory was passed the entity
        # endpoint; also test the response data was passed to init
        for i in test_inst:
            self.assertEqual(i.name,'test_entity')
            self.assertIn(i.d,range(1,6))

    def test_create_entity_return_class(self):
        "Test we return an immutable entity and passed the post data to init"
        self.request_response = {'foo':'bar'}
        test_inst = self.test_client.create_entity('test_entity',{'foo':'bar'})
        self.assertTrue(test_inst.immutable)
        self.assertEqual(test_inst.d['foo'],'bar')
        self.assertEqual(test_inst.name,'test_entity')

    def test_get_entities_empty_response(self):
        """
        If the query result has no items, get_entities shouldn't fail,
        i.e. instantiate stuff without data
        """
        self.request_response = [[]]
        test_inst = [i for i in self.test_client.get_entities('test_entity')]
        self.assertEqual(test_inst,[])

# == Api.py Tests == #
class QueryTests(unittest.TestCase):
    """
    Queries form the basis of the public api. They mainly wrap the client
    but add some new functionality in how they accept and transform
    input and output args.
""" def setUp(self): self.mock_client = MockObject( get_entities=MockCallable( response=lambda entity_endpoint,params,return_limit:(entity_endpoint,params) ) ) # Default args def test_default_args(self): "We can pass key val pairs at init time that will always be apart of params" test_query = api.Query(self.mock_client,acid='helloWorld') test_inst = test_query.get('Bugs') self.assertEqual(test_inst[1].get('acid'),'helloWorld') def test_default_args(self): "We can pass multi default kwargs for incusion into params" test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar") test_inst = test_query.get('Bugs') self.assertEqual(test_inst[1].get('acid'),'helloWorld') self.assertEqual(test_inst[1].get('foo'),'bar') def test_get_id_return(self): "When specifying an Entity Id, we expect a single entity to be returned" # redefine mock client to return iter self.mock_client = MockObject( get_entities=MockCallable( response=lambda entity_endpoint,params,return_limit:iter([entity_endpoint,1]) ) ) test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar") test_inst = test_query.get('Bugs',Id=1) # Test that we didn't get back a list, instead 1st elem self.assertTrue(isinstance(test_inst,str)) self.assertEqual(test_inst,'Bugs/1') def test_check_endpoint_exists(self): "We guard against non existant endpoints to save on the network request" with self.assertRaises(AssertionError): test_query = api.Query(self.mock_client,acid='helloWorld',foo="bar") test_inst = test_query.get('foobar') # == entities.py Tests == # class EntityBaseTests(unittest.TestCase): class mock_object(object): def __init__(self,**kwargs): self.__dict__.update(kwargs) # Data Access Tests def test_getattr_Tpdata(self): 'I can retrieve value from TP data cache via attribute lookup' i = entities.EntityBase(data={ 'data1':'a', 'data2':1, 'data3':[1,2] }) self.assertEqual(i.data1,'a') self.assertEqual(i.data2,1) self.assertEqual(i.data3,[1,2]) def test_setattr_Tpdata(self): "I cannot edit tpdata cache ref aka entity instance is immutable" i = entities.EntityBase(data={'data1':'a'}) with self.assertRaises(AssertionError): i.data1 = 'b' def testEntitySubclass_setattr(self): "Entity subclasses are still immutable" class test(entities.EntityBase): pass i = test(data={}) with self.assertRaises(AssertionError): i.data1 = 'arbitrary string' # Comparison Tests def test_entityComparisonTrue(self): "Entities with same id should be equal" i = entities.EntityBase(data={'Id':1}) j = entities.EntityBase(data={'Id':1,'onlyIdsMatter':2}) self.assertEqual(i,j) def test_entityComparisonFalse(self): "Entites with different Ids should not be equal" i = entities.EntityBase(data={'Id':100}) j = entities.EntityBase(data={'Id':1,'onlyIdsMatter':100}) self.assertNotEqual(i,j) def test_entityComparisonNoId(self): "An entity without id can never be equal" i = entities.EntityBase(data={'noId':1}) self.assertNotEqual(i,i) # Hashable Tests def test_entityHashingTrue(self): i = entities.EntityBase(data={'Id':100}) try: d = {i:"isHashable"} except: raise Exception("Entity isn't hashable") def test_entityHashingNoId(self): i = entities.EntityBase(data={'Id':100}) self.assertRaises({i:"isn't Hashable"}) class MutableEntityTests(unittest.TestCase): def test_setProperty(self): "on a mutable entity, setattr will forward to property objects setter" pass class EntityFactoryTests(unittest.TestCase): """ Make sure EntityClassFactory can parse a metadata reponse into a suitable class. 
""" _TESTDATA = './testdata.json' def setUp(self): with open(self._TESTDATA) as f: self.test_data = json.load(f) self.test_client = MockObject( raw_request = MockCallable( response = lambda url:self.test_data ) ) self.test_class_factory = entities.EntityClassFactory( self.test_client ) def test_metadataFailsToParse(self): "If error occurs reading metadata we should get a Generic Entity" self.test_data = {} test_instance = self.test_class_factory.get('Bugs')({}) self.assertIsInstance(test_instance,entities.GenericEntity) def test_classCreation_value_attribute(self): "Parse meta data and assign value properties" test_instance = self.test_class_factory.get('Bugs')({}) self.assertIn("Name",test_instance.__class__.__dict__) self.assertIsInstance( test_instance.__class__.__dict__['Name'], entities.ValueAttribute ) def test_classCreation_resource_attribute(self): "Parse meta data and assign resource properties" test_instance = self.test_class_factory.get('Bugs')({}) self.assertIn("Release",test_instance.__class__.__dict__) self.assertIsInstance( test_instance.__class__.__dict__['Release'], entities.ResourceAttribute ) def test_classCreation_collection_attribute(self): "Parse meta data and assign Collection properties" test_instance = self.test_class_factory.get("Bugs")({}) self.assertIn("Comments",test_instance.__class__.__dict__) self.assertIsInstance( test_instance.__class__.__dict__["Comments"], entities.CollectionAttribute ) def test_get_mutable_entity_class(self): "Factory should be able to supply a mutable version of a entity" test_cls = self.test_class_factory.get('Bugs',immutable=False) self.assertTrue(issubclass(test_cls,entities.MutableEntity)) def test_get_all_property_info(self): "User should be able to reflect over all class properties" test_instance = self.test_class_factory.get('Bugs')({}) # Assert all types of properties are present in dict self.assertIn('Comments',test_instance.entity_properties) self.assertIn('Release',test_instance.entity_properties) self.assertIn('Name',test_instance.entity_properties) # Entity Property Tests # class BasePropertyTests(unittest.TestCase): """ The base property class mainly supports reflection of initial metadata used at init time, the rest is left up to subclasses """ def setUp(self): self.test_property = entities.EntityProperty('name','uri/meta',{'meta1':'foo'}) def test_get_meta_return(self): "A Property can return a copy of the meta data it was init from" self.assertEqual(self.test_property.get_meta()['meta1'],'foo') def test_meta_contains_relURI(self): "A propery meta data contains an 'entity endppoint' reference for inspection" self.assertEqual(self.test_property.get_meta()['RelUri'],'uri') def test_meta_data_is_copy(self): "User can't change/edit a metadata as you're only returned a copy" m = self.test_property.get_meta() m['new_attr'] = 1 self.assertTrue('new_attr' not in self.test_property.get_meta()) class ValuePropertiesTests(unittest.TestCase): def setUp(self): class test_class(object): test_property = entities.ValueAttribute( name = 'test_property', uri = "" ) test_error_property = entities.ValueAttribute( name = 'not there', uri = "" ) def __init__(self,test_variable): self._tpdata = {'test_property':test_variable} self.test_class = test_class def test_valueDescriptorGet(self): "Descriptor should return value in _tpdata field" test_instance = self.test_class(99) self.assertEqual(test_instance.test_property,99) def test_valueDescriptorSet(self): "Setting the property should update the value in _tpdata" test_instance = 
self.test_class(99)
        test_instance.test_property = 1
        self.assertEqual(test_instance._tpdata['test_property'],1)

    def test_valueDescriptorSet_missing_attr(self):
        "if the property value isn't found in _tpdata, just set it, don't error"
        test_instance = self.test_class(99)
        test_instance.test_error_property = 1
        self.assertEqual(test_instance._tpdata['not there'],1)

    def test_valueDescriptorGetNoValue(self):
        "The descriptor should return None if value = None"
        test_instance = self.test_class(None)
        self.assertEqual(test_instance.test_property,None)

    def test_valueDescriptorGetDataNotPresent(self):
        "The descriptor should return None if the value wasn't in the initial tp data"
        test_instance = self.test_class(None)
        self.assertEqual(test_instance.test_error_property,None)

class ResourcePropertiesTests(unittest.TestCase):

    def setUp(self):
        self.test_client = MockObject(
            get_entities = MockCallable(response = iter([{"Name":"helloWorld"}]))
        )
        test_client = self.test_client
        class test_class(object):
            TP = test_client
            test_property = entities.ResourceAttribute(
                name = 'test_property',
                uri = 'spam/meta',
                metadata = {}
            )
            test_error_property = entities.ResourceAttribute(
                name = 'not there',
                uri = ""
            )
            def __init__(self,test_variable):
                self._tpdata = {
                    'test_property':test_variable
                }
        self.test_class = test_class

    def test_ResourcePropertyWithoutAnyData(self):
        "if no data is there, return None, i.e. no resource assigned"
        test_instance = self.test_class(None)
        self.assertEqual(test_instance.test_property,None)

    def test_ResourcePropertyCallsClientCorrectly(self):
        "Resources are sparse and only hold an Id in _tpdata; the property has to fetch the data"
        test_instance = self.test_class({'Name':'foobar',"ResourceType":'chips','Id':1})
        self.assertEqual(test_instance.test_property['Name'],'helloWorld')
        # Make sure the url is correct.
        # Interesting: it seems we ignore the resource type in the initial
        # data and prefer the uri. Good or bad?
        self.assertEqual(self.test_client.get_entities.last_call.args[0],
            'spam/1')

    def test_ResourcePropertyCanSetToOtherEntity(self):
        "When the user sets the property, update the value to a dict with id == new entity"
        test_instance = self.test_class(None)
        test_instance.test_property = MockObject(Id=999)
        self.assertEqual(test_instance._tpdata['test_property'],{'Id':999})

class CollectionPropertiesTests(unittest.TestCase):
    """
    Collection properties are somewhat easier than resources.
    Most of the time they will be blank, in which case the property
    fetches them from the client
    """
    def setUp(self):
        self.test_client = MockObject(
            get_entities = MockCallable(
                response = iter([{"Name":"helloWorld"},{"Name":"Goodbye"}])
            )
        )
        test_client = self.test_client
        class test_class(object):
            TP = test_client
            _api_endpoint = "foo"
            test_property = entities.CollectionAttribute(
                name = 'test_property',
                uri = 'spam/meta'
            )
            def __init__(self,test_variable):
                self._tpdata = {
                    'test_property':test_variable,
                    'Id':1,
                }
            def __getattr__(self,name):
                # Mimic the GenericEntity lookup
                return self._tpdata[name]
        self.test_class = test_class

    def test_trivialCollectionInData(self):
        """
        If the collection attr has any data in the initial response,
        just return it
        """
        test_instance = self.test_class([
            {'Name':'foobar'},
            {'Name':'HelloWorld'},
        ])
        self.assertEqual(len(test_instance.test_property),2)
        self.assertEqual(test_instance.test_property[0].Name,'foobar')
        self.assertIsInstance(
            test_instance.test_property[0],entities.GenericEntity
        )

    def test_CollectionCallsClientCorrectly(self):
        "if no data is present, the property makes a call to the client"
        test_instance = self.test_class(None)
        self.assertNotEqual(test_instance.test_property,None)
        # Make sure the url is correct, i.e.
        # <current entity endpoint>/<current entity id>/<collection endpoint>
        self.assertEqual(
            self.test_client.get_entities.last_call.args[0],
            'foo/1/spam'
        )

# Integration Tests
class IntegrationTests(unittest.TestCase):
    """
    Here we set up a full object graph and see if a request from the
    api layer can make its way all the way through and back again,
    returning entity instances.
    We mock out the very lowest level, the requests module handle in
    HTTPRequestDispatcher, and supply our own data to the requests
    """
    def setUp(self):
        self.TESTACID='TESTACIDSTR'
        # Mock responses need to be ordered from root to specific
        # in order to be matched correctly,
        # e.g. { "test":1, "test/123/":2, }
        self.mock_responses = {
            r"Test/Context/\?ids=111":{
                'Items':[{'Acid':'foo'}]
            },
            r"Test/Context/meta": {
                'This will error to a generic Entity':1
            },
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1'},{'Id':2,'Name':'Item2'}
                ]
            },
            "Test/Bugs/meta":{
                'Name':"Bug",
                'ResourceMetadataPropertiesDescription':{
                    "ResourceMetadataProperties"\
                    "ResourceValuesDescription":{"Items":[
                        {"Name":"Id"},{"Name":"ValueAttrExample"}]},
                    "ResourceMetadataProperties"\
                    "ResourceReferencesDescription":{"Items":[{"Name":"ResourceAttrExample"}]},
                },
            },
        }

        def mock_request(method,url,auth,**kwargs):
            try:
                return MockObject(
                    json = MockCallable(response = [
                        v for k,v in self.mock_responses.iteritems()
                        if re.match(r"^("+ k +r")/?\??(&?format=json)?(?!.)",url)][0]
                    ),
                    raise_for_status = MockCallable(response=None)
                )
            except IndexError:
                raise Exception("Mock Request couldn't match {}".format(url or "None"))

        # Mock out requests.py for the test client
        self.test_requester = client.HTTPRequestDispatcher()
        self.test_requester._requests = MockObject(
            request = mock_request
        )
        self.test_client = client.TPEntityClient(
            url = 'Test',
            requester = self.test_requester,
        )
        self.test_project = api.ProjectProxy(self.test_client,MockObject(Id=111))

    def test_simple_query_request(self):
        "Project attributes should return an iter of GenericEntities"
        # Bad meta should fail and return generic entities
        self.mock_responses.update({
            r"Test/Bugs/meta":{
                'ResourceMetadataPropertiesDescription':{
                },
            },
        })
        items = [x for x in self.test_project.get("Bugs")]
        # We should get back 2 generic entities
        self.assertTrue(len(items) == 2 )
        self.assertTrue(items[0].Name == 'Item1')
        self.assertTrue(items[0].Id == 1)
        self.assertIsInstance(items[0],entities.GenericEntity)

    def test_EntityClass_from_request(self):
        "This tests to make sure the class factory instantiates dynamic classes"
        self.mock_responses.update({
            r"Test/Bugs/\?acid=foo":{
                'Items':[
                    {'Id':1,'Name':'Item1','ValueAttrExample':1},
                    {'Id':2,'Name':'Item2','ValueAttrExample':2},
                ]
            },
        })
        items = [ x for x in self.test_project.get('Bugs')]
        self.assertTrue(len(items) == 2 )
        self.assertNotIsInstance(items[0],entities.GenericEntity)
        self.assertEqual(items[0].ValueAttrExample, 1)

    def test_queryEntityWithoutID(self):
        "I can create a query for entities (like Contexts) that don't have an ID"
        self.mock_responses.update({
            r"Test/Context/\?acid=foo":{
                "Items":[{'ItemWithoutId':1,'Name':'Context'}]
            }
        })
        # Get the context from the project
        items = [x for x in self.test_project.get('Context')]
        # Make sure the returned entity is correct and has no Id
        self.assertEqual(len(items),1)
        self.assertEqual(items[0].Name,'Context')
        self.assertIsInstance(items[0],entities.GenericEntity)
        with self.assertRaises(AttributeError) as e:
            items[0].Id

    def test_createEntity(self):
        "I can create a query to create an entity within a TP Project"
        # Try creating a test bug with value- and resource-based attrs
        bug_data = {
            'Id': 0,
            'ValueAttrExample':'NewBug',
            'ResourceAttrExample':MockObject(Id=1)
        }
        returned_bug_data = bug_data.copy()
        returned_bug_data['Id']=123
        self.mock_responses.update({
            r"Test/Bugs":returned_bug_data
        })
        # Assert the returned bug has the same data as the input data,
        # plus now has an Id
        new_bug = self.test_project.create('Bugs',bug_data)
        self.assertEqual(new_bug.ValueAttrExample,'NewBug')
        self.assertEqual(new_bug.Id,123)

if __name__ == "__main__":
    unittest.main()
proper delegation for each action method. """
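A quick illustration (not part of the original suite) of how the MockCallable helper above behaves: it records its last invocation and serves either a fixed or a computed response.

# Usage sketch for MockCallable; the values here are arbitrary examples.
mock = MockCallable(response=lambda x: x * 2)
assert mock(21) == 42                            # computed response
assert mock.last_call.args == (21,)              # the call was recorded

fixed = MockCallable(response="canned")
assert fixed(1, key="val") == "canned"           # fixed response
assert fixed.last_call.kwargs == {"key": "val"}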
main_test.go
// Copyright (c) 2018 Benjamin Borbe All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main_test import ( "testing" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" ) var _ = Describe("Maxscale CDC Connector", func() { It("Compiles", func() { var err error _, err = gexec.Build("github.com/bborbe/kafka-maxscale-cdc-connector") Expect(err).NotTo(HaveOccurred()) }) }) func
(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Maxscale CDC Connector Suite") }
TestMaxscaleCDCConnector
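The suite builds a binary but never removes it; a common companion, assuming the standard gomega/gexec API, is an AfterSuite cleanup:

var _ = AfterSuite(func() {
	// Remove the binary produced by gexec.Build once the suite finishes.
	gexec.CleanupBuildArtifacts()
})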
mod.rs
macro_rules! wrap_joint { { $wrapped:ty => $wrap:ident ($joint_type:path) < $as_base:path > $base_as:path } => { wrap! { ffi::Joint: $wrapped => pub $wrap < $as_base > $base_as } impl Joint for $wrap { fn assumed_type() -> JointType { $joint_type } } }; } pub mod distance; pub mod friction; pub mod gear; pub mod motor; pub mod mouse; pub mod prismatic; pub mod pulley; pub mod revolute; pub mod rope; pub mod weld; pub mod wheel; pub use self::distance::{DistanceJoint, DistanceJointDef}; pub use self::friction::{FrictionJoint, FrictionJointDef}; pub use self::gear::{GearJoint, GearJointDef}; pub use self::motor::{MotorJoint, MotorJointDef}; pub use self::mouse::{MouseJoint, MouseJointDef}; pub use self::prismatic::{PrismaticJoint, PrismaticJointDef}; pub use self::pulley::{PulleyJoint, PulleyJointDef}; pub use self::revolute::{RevoluteJoint, RevoluteJointDef}; pub use self::rope::{RopeJoint, RopeJointDef}; pub use self::weld::{WeldJoint, WeldJointDef}; pub use self::wheel::{WheelJoint, WheelJointDef}; use std::ops::{Deref, DerefMut}; use wrap::*; use common::math::Vec2; use dynamics::world::{World, BodyHandle, JointHandle}; use user_data::{UserDataTypes, UserData, RawUserData, RawUserDataMut, InternalUserData}; #[repr(C)] #[derive(Copy, Clone, PartialEq, Debug)] pub enum JointType { Unknown, Revolute, Prismatic, Distance, Pulley, Mouse, Gear, Wheel, Weld, Friction, Rope, Motor, } #[repr(C)] #[derive(Copy, Clone, PartialEq, Debug)] pub enum LimitState { Inactive, Lower, Upper, Equal, } pub trait JointDef { fn joint_type() -> JointType where Self: Sized; #[doc(hidden)] unsafe fn create<U: UserDataTypes>(&self, world: &mut World<U>) -> *mut ffi::Joint; } pub struct MetaJoint<U: UserDataTypes> { joint: UnknownJoint, user_data: Box<InternalUserData<Joint, U::JointData>>, } impl<U: UserDataTypes> MetaJoint<U> { #[doc(hidden)] pub unsafe fn new(ptr: *mut ffi::Joint, handle: JointHandle, custom: U::JointData) -> Self { let mut j = MetaJoint { joint: UnknownJoint::from_ffi(ptr), user_data: Box::new(InternalUserData { handle: handle, custom: custom, }), }; j.mut_base_ptr().set_internal_user_data(&mut *j.user_data); j } } impl<U: UserDataTypes> UserData<U::JointData> for MetaJoint<U> { fn user_data(&self) -> &U::JointData { &self.user_data.custom } fn user_data_mut(&mut self) -> &mut U::JointData { &mut self.user_data.custom } } impl<U: UserDataTypes> Deref for MetaJoint<U> { type Target = UnknownJoint; fn deref(&self) -> &UnknownJoint
} impl<U: UserDataTypes> DerefMut for MetaJoint<U> { fn deref_mut(&mut self) -> &mut UnknownJoint { &mut self.joint } } pub trait Joint: WrappedBase<ffi::Joint> + FromFFI<ffi::Joint> { fn handle(&self) -> JointHandle { unsafe { self.base_ptr().handle() } } fn assumed_type() -> JointType where Self: Sized; fn get_type(&self) -> JointType { unsafe { ffi::Joint_get_type(self.base_ptr()) } } fn body_a(&self) -> BodyHandle { // we don't need &mut self because nothing is actually mutated here unsafe { ffi::Joint_get_body_a(self.base_ptr() as *mut _).handle() } } fn body_b(&self) -> BodyHandle { // we don't need &mut self because nothing is actually mutated here unsafe { ffi::Joint_get_body_b(self.base_ptr() as *mut _).handle() } } fn anchor_a(&self) -> Vec2 { unsafe { ffi::Joint_get_anchor_a_virtual(self.base_ptr()) } } fn anchor_b(&self) -> Vec2 { unsafe { ffi::Joint_get_anchor_b_virtual(self.base_ptr()) } } fn reaction_force(&self) -> Vec2 { unsafe { ffi::Joint_get_reaction_force_virtual(self.base_ptr()) } } fn reaction_torque(&self) -> f32 { unsafe { ffi::Joint_get_reaction_torque_virtual(self.base_ptr()) } } fn is_active(&self) -> bool { unsafe { ffi::Joint_is_active(self.base_ptr()) } } fn is_collide_connected(&self) -> bool { unsafe { ffi::Joint_get_collide_connected(self.base_ptr()) } } fn dump(&mut self) { unsafe { ffi::Joint_dump_virtual(self.mut_base_ptr()) } } fn shift_origin(&mut self, origin: &Vec2) { unsafe { ffi::Joint_shift_origin_virtual(self.mut_base_ptr(), origin) } } } #[repr(C)] #[doc(hidden)] pub struct JointEdge { pub other: *mut ffi::Body, pub joint: *mut ffi::Joint, pub prev: *mut JointEdge, pub next: *mut JointEdge, } pub enum UnknownJoint { Unknown, Revolute(RevoluteJoint), Prismatic(PrismaticJoint), Distance(DistanceJoint), Pulley(PulleyJoint), Mouse(MouseJoint), Gear(GearJoint), Wheel(WheelJoint), Weld(WeldJoint), Friction(FrictionJoint), Rope(RopeJoint), Motor(MotorJoint), } impl WrappedBase<ffi::Joint> for UnknownJoint { unsafe fn base_ptr(&self) -> *const ffi::Joint { use self::UnknownJoint::*; match self { &Distance(ref x) => x.base_ptr(), &Friction(ref x) => x.base_ptr(), &Gear(ref x) => x.base_ptr(), &Motor(ref x) => x.base_ptr(), &Mouse(ref x) => x.base_ptr(), &Prismatic(ref x) => x.base_ptr(), &Pulley(ref x) => x.base_ptr(), &Revolute(ref x) => x.base_ptr(), &Rope(ref x) => x.base_ptr(), &Weld(ref x) => x.base_ptr(), &Wheel(ref x) => x.base_ptr(), _ => panic!("Truly unknown joint"), } } unsafe fn mut_base_ptr(&mut self) -> *mut ffi::Joint { use self::UnknownJoint::*; match self { &mut Distance(ref mut x) => x.mut_base_ptr(), &mut Friction(ref mut x) => x.mut_base_ptr(), &mut Gear(ref mut x) => x.mut_base_ptr(), &mut Motor(ref mut x) => x.mut_base_ptr(), &mut Mouse(ref mut x) => x.mut_base_ptr(), &mut Prismatic(ref mut x) => x.mut_base_ptr(), &mut Pulley(ref mut x) => x.mut_base_ptr(), &mut Revolute(ref mut x) => x.mut_base_ptr(), &mut Rope(ref mut x) => x.mut_base_ptr(), &mut Weld(ref mut x) => x.mut_base_ptr(), &mut Wheel(ref mut x) => x.mut_base_ptr(), _ => panic!("Truly unknown joint"), } } } impl FromFFI<ffi::Joint> for UnknownJoint { unsafe fn from_ffi(ptr: *mut ffi::Joint) -> UnknownJoint { use self::UnknownJoint::*; assert!(!ptr.is_null()); let joint_type = ffi::Joint_get_type(ptr as *const ffi::Joint); match joint_type { JointType::Revolute => Revolute(RevoluteJoint::from_ffi(ptr)), JointType::Prismatic => Prismatic(PrismaticJoint::from_ffi(ptr)), JointType::Distance => Distance(DistanceJoint::from_ffi(ptr)), JointType::Pulley => 
Pulley(PulleyJoint::from_ffi(ptr)), JointType::Mouse => Mouse(MouseJoint::from_ffi(ptr)), JointType::Gear => Gear(GearJoint::from_ffi(ptr)), JointType::Wheel => Wheel(WheelJoint::from_ffi(ptr)), JointType::Weld => Weld(WeldJoint::from_ffi(ptr)), JointType::Friction => Friction(FrictionJoint::from_ffi(ptr)), JointType::Rope => Rope(RopeJoint::from_ffi(ptr)), JointType::Motor => Motor(MotorJoint::from_ffi(ptr)), _ => Unknown, } } } impl Joint for UnknownJoint { fn assumed_type() -> JointType { JointType::Unknown } } #[doc(hidden)] pub mod ffi { pub use ffi::Any; pub use dynamics::body::ffi::Body; use common::math::Vec2; use super::JointType; pub enum Joint {} extern "C" { pub fn Joint_get_type(slf: *const Joint) -> JointType; pub fn Joint_get_body_a(slf: *mut Joint) -> *mut Body; pub fn Joint_get_body_b(slf: *mut Joint) -> *mut Body; pub fn Joint_get_anchor_a_virtual(slf: *const Joint) -> Vec2; pub fn Joint_get_anchor_b_virtual(slf: *const Joint) -> Vec2; pub fn Joint_get_reaction_force_virtual(slf: *const Joint) -> Vec2; pub fn Joint_get_reaction_torque_virtual(slf: *const Joint) -> f32; // pub fn Joint_get_next(slf: *mut Joint) -> *mut Joint; // pub fn Joint_get_next_const(slf: *const Joint) -> *const Joint; pub fn Joint_is_active(slf: *const Joint) -> bool; pub fn Joint_get_collide_connected(slf: *const Joint) -> bool; pub fn Joint_dump_virtual(slf: *mut Joint); pub fn Joint_shift_origin_virtual(slf: *mut Joint, origin: *const Vec2); } }
{ &self.joint }
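For illustration, a joint submodule is expected to invoke `wrap_joint!` roughly as below; the ffi accessor names here are hypothetical placeholders, not taken from the crate.

// Hypothetical expansion site (names are placeholders):
// wrap_joint! {
//     ffi::RevoluteJoint => RevoluteJoint (JointType::Revolute)
//     < ffi::RevoluteJoint_as_joint
//     > ffi::Joint_as_revolute_joint
// }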
astar.rs
use std::collections::{
    HashMap,
    BinaryHeap,
};
use std::collections::hash_map::Entry::{
    Occupied,
    Vacant,
};
use std::hash::Hash;

use scored::MinScored;
use super::visit::{
    EdgeRef,
    GraphBase,
    IntoEdges,
    VisitMap,
    Visitable,
};

use algo::Measure;

/// [Generic] A* shortest path algorithm.
///
/// Computes the shortest path from `start` to `finish`, including the total path cost.
///
/// `finish` is implicitly given via the `is_goal` callback, which should return `true` if the
/// given node is the finish node.
///
/// The function `edge_cost` should return the cost for a particular edge. Edge costs must be
/// non-negative.
///
/// The function `estimate_cost` should return the estimated cost to the finish for a particular
/// node. For the algorithm to find the actual shortest path, it should be admissible, meaning that
/// it should never overestimate the actual cost to get to the nearest goal node. Estimate costs
/// must also be non-negative.
///
/// The graph should be `Visitable` and implement `IntoEdges`.
///
/// ```
/// use petgraph::Graph;
/// use petgraph::algo::astar;
///
/// let mut g = Graph::new();
/// let a = g.add_node((0., 0.));
/// let b = g.add_node((2., 0.));
/// let c = g.add_node((1., 1.));
/// let d = g.add_node((0., 2.));
/// let e = g.add_node((3., 3.));
/// let f = g.add_node((4., 2.));
/// g.extend_with_edges(&[
///     (a, b, 2),
///     (a, d, 4),
///     (b, c, 1),
///     (b, f, 7),
///     (c, e, 5),
///     (e, f, 1),
///     (d, e, 1),
/// ]);
///
/// let path = astar(&g, a, |finish| finish == f, |e| *e.weight(), |_| 0);
/// assert_eq!(path, Some((6, vec![a, d, e, f])));
/// ```
///
/// Returns the total cost plus the path of subsequent `NodeId`s from start to finish, if one was
/// found.
pub fn astar<G, F, H, K, IsGoal>(graph: G, start: G::NodeId, mut is_goal: IsGoal,
                                 mut edge_cost: F, mut estimate_cost: H)
    -> Option<(K, Vec<G::NodeId>)>
    where G: IntoEdges + Visitable,
          IsGoal: FnMut(G::NodeId) -> bool,
          G::NodeId: Eq + Hash,
          F: FnMut(G::EdgeRef) -> K,
          H: FnMut(G::NodeId) -> K,
          K: Measure + Copy,
{
    let mut visited = graph.visit_map();
    let mut visit_next = BinaryHeap::new();
    let mut scores = HashMap::new();
    let mut path_tracker = PathTracker::<G>::new();

    let zero_score = K::default();
    scores.insert(start, zero_score);
    visit_next.push(MinScored(estimate_cost(start), start));

    while let Some(MinScored(_, node)) = visit_next.pop() {
        if is_goal(node) {
            let path = path_tracker.reconstruct_path_to(node);
            let cost = scores[&node];
            return Some((cost, path));
        }

        // Don't visit the same node several times: the first visit already
        // used the shortest available path.
        if !visited.visit(node) {
            continue
        }

        // This lookup can be unwrapped without fear of panic since the node was necessarily scored
        // before adding it to `visit_next`.
let node_score = scores[&node]; for edge in graph.edges(node) { let next = edge.target(); if visited.is_visited(&next) { continue } let mut next_score = node_score + edge_cost(edge); match scores.entry(next) { Occupied(ent) => { let old_score = *ent.get(); if next_score < old_score { *ent.into_mut() = next_score; path_tracker.set_predecessor(next, node); } else { next_score = old_score; } }, Vacant(ent) => { ent.insert(next_score); path_tracker.set_predecessor(next, node); } } let next_estimate_score = next_score + estimate_cost(next); visit_next.push(MinScored(next_estimate_score, next)); } } None } struct PathTracker<G> where G: GraphBase, G::NodeId: Eq + Hash, { came_from: HashMap<G::NodeId, G::NodeId>, } impl<G> PathTracker<G> where G: GraphBase, G::NodeId: Eq + Hash, { fn new() -> PathTracker<G> { PathTracker { came_from: HashMap::new(), } } fn set_predecessor(&mut self, node: G::NodeId, previous: G::NodeId) { self.came_from.insert(node, previous); } fn reconstruct_path_to(&self, last: G::NodeId) -> Vec<G::NodeId> { let mut path = vec![last]; let mut current = last; while let Some(&previous) = self.came_from.get(&current) { path.push(previous); current = previous; } path.reverse(); path
} }
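A note on `MinScored`, sketched under the usual assumption: Rust's `BinaryHeap` is a max-heap, so the wrapper flips the comparison to make `pop` yield the node with the smallest estimated cost first.

// Sketch only; the real `scored::MinScored` also handles incomparable values.
// struct MinScored<K, T>(K, T);
// impl<K: PartialOrd, T> Ord for MinScored<K, T> {
//     fn cmp(&self, other: &Self) -> Ordering {
//         // Reversed: a smaller score compares as Greater, so the heap pops it first.
//         other.0.partial_cmp(&self.0).unwrap_or(Ordering::Equal)
//     }
// }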
import_certificate.go
package kms //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // ImportCertificate invokes the kms.ImportCertificate API synchronously func (client *Client) ImportCertificate(request *ImportCertificateRequest) (response *ImportCertificateResponse, err error) { response = CreateImportCertificateResponse() err = client.DoAction(request, response) return } // ImportCertificateWithChan invokes the kms.ImportCertificate API asynchronously func (client *Client) ImportCertificateWithChan(request *ImportCertificateRequest) (<-chan *ImportCertificateResponse, <-chan error) { responseChan := make(chan *ImportCertificateResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.ImportCertificate(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // ImportCertificateWithCallback invokes the kms.ImportCertificate API asynchronously func (client *Client) ImportCertificateWithCallback(request *ImportCertificateRequest, callback func(response *ImportCertificateResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *ImportCertificateResponse var err error defer close(result) response, err = client.ImportCertificate(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // ImportCertificateRequest is the request struct for api ImportCertificate type ImportCertificateRequest struct { *requests.RpcRequest PKCS12Blob string `position:"Query" name:"PKCS12Blob"` Passphrase string `position:"Query" name:"Passphrase"` } // ImportCertificateResponse is the response struct for api ImportCertificate type ImportCertificateResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` CertificateId string `json:"CertificateId" xml:"CertificateId"` Arn string `json:"Arn" xml:"Arn"` } // CreateImportCertificateRequest creates a request to invoke ImportCertificate API func CreateImportCertificateRequest() (request *ImportCertificateRequest) { request = &ImportCertificateRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Kms", "2016-01-20", "ImportCertificate", "kms-service", "openAPI") request.Method = requests.POST return } // CreateImportCertificateResponse creates a response to parse from ImportCertificate response func
() (response *ImportCertificateResponse) { response = &ImportCertificateResponse{ BaseResponse: &responses.BaseResponse{}, } return }
CreateImportCertificateResponse
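A usage sketch for the generated client. The region and credentials are placeholders, and NewClientWithAccessKey is the usual constructor in these generated packages; verify against the SDK version in use.

// Illustrative only, within package kms:
func exampleImportCertificate() {
	client, err := NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	request := CreateImportCertificateRequest()
	request.PKCS12Blob = "<base64-encoded PKCS12 bundle>"
	request.Passphrase = "<passphrase>"
	response, err := client.ImportCertificate(request)
	if err != nil {
		panic(err)
	}
	println(response.CertificateId)
}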
SLOCorrectionCategory.ts
/** * Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. * This product includes software developed at Datadog (https://www.datadoghq.com/). * Copyright 2020-Present Datadog, Inc. * * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). * https://openapi-generator.tech * Do not edit the class manually. */ export type SLOCorrectionCategory = | typeof SCHEDULED_MAINTENANCE | typeof OUTSIDE_BUSINESS_HOURS | typeof DEPLOYMENT | typeof OTHER; export const SCHEDULED_MAINTENANCE = "Scheduled Maintenance"; export const OUTSIDE_BUSINESS_HOURS = "Outside Business Hours"; export const DEPLOYMENT = "Deployment";
export const OTHER = "Other";
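Since each constant is inferred as a string-literal type, the union only admits the four published values; a minimal illustration:

// const ok: SLOCorrectionCategory = DEPLOYMENT;      // compiles
// const bad: SLOCorrectionCategory = "Maintenance";  // type error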
bounding_sphere_cone.rs
use na::{Translate}; use na;
use math::{Scalar, Point}; #[old_impl_check] impl<N, P, V, M> HasBoundingSphere<N, P, M> for Cone<N> where N: Scalar, P: Point<N, V>, M: Translate<P> { #[inline] fn bounding_sphere(&self, m: &M) -> BoundingSphere<N, P> { let center = m.translate(&na::orig()); let radius = (self.radius() * self.radius() + self.half_height() * self.half_height()).sqrt(); BoundingSphere::new(center, radius) } }
use bounding_volume::{HasBoundingSphere, BoundingSphere}; use shape::Cone;
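A geometric note on the formula above: with the cone centred at the local origin, the points farthest from the centre lie on the base rim, at height half_height and lateral distance radius, so the bounding radius is the hypotenuse.

// farthest point: (radius, half_height)  =>  |p| = sqrt(radius^2 + half_height^2)
// (the apex, at distance half_height, is always closer, so the rim dominates)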
autotest_plan.go
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bundle

import (
	"fmt"
	"strconv"

	"github.com/erda-project/erda/apistructs"
	"github.com/erda-project/erda/bundle/apierrors"
	"github.com/erda-project/erda/pkg/http/httputil"
)

// CreateTestPlansV2Step creates a test plan step
func (b *Bundle) CreateTestPlansV2Step(req apistructs.TestPlanV2StepAddRequest) (uint64, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return 0, err
	}
	hc := b.hc

	var rsp apistructs.TestPlanV2StepAddResp
	httpResp, err := hc.Post(host).Path(fmt.Sprintf("/api/autotests/testplans/"+strconv.FormatInt(int64(req.TestPlanID), 10)+"/actions/add-step")).
		Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).
		Do().JSON(&rsp)
	if err != nil {
		return 0, apierrors.ErrInvoke.InternalError(err)
	}
	if !httpResp.IsOK() || !rsp.Success {
		return 0, toAPIError(httpResp.StatusCode(), rsp.Error)
	}
	return rsp.Data, nil
}

// DeleteTestPlansV2Step deletes a test plan step
func (b *Bundle) DeleteTestPlansV2Step(req apistructs.TestPlanV2StepDeleteRequest) error {
	host, err := b.urls.DOP()
	if err != nil {
		return err
	}
	hc := b.hc

	var rsp apistructs.TestPlanV2StepMoveResp
	httpResp, err := hc.Delete(host).Path(fmt.Sprintf("/api/autotests/testplans/"+strconv.FormatInt(int64(req.TestPlanID), 10)+"/actions/delete-step")).
		Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).
		Do().JSON(&rsp)
	if err != nil {
		return apierrors.ErrInvoke.I
ccess {
		return toAPIError(httpResp.StatusCode(), rsp.Error)
	}
	return nil
}

// MoveTestPlansV2Step moves a test plan step
func (b *Bundle) MoveTestPlansV2Step(req apistructs.TestPlanV2StepMoveRequest) error {
	host, err := b.urls.DOP()
	if err != nil {
		return err
	}
	hc := b.hc

	var rsp apistructs.TestPlanV2StepMoveResp
	httpResp, err := hc.Put(host).Path(fmt.Sprintf("/api/autotests/testplans/"+strconv.FormatInt(int64(req.TestPlanID), 10)+"/actions/move-step")).
		Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).
		Do().JSON(&rsp)
	if err != nil {
		return apierrors.ErrInvoke.InternalError(err)
	}
	if !httpResp.IsOK() || !rsp.Success {
		return toAPIError(httpResp.StatusCode(), rsp.Error)
	}
	return nil
}

// PagingTestPlansV2 pages through the list of test plans
func (b *Bundle) PagingTestPlansV2(req apistructs.TestPlanV2PagingRequest) (*apistructs.TestPlanV2PagingResponseData, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var pageResp apistructs.TestPlanV2PagingResponse
	resp, err := hc.Get(host).Path("/api/autotests/testplans").
		Header(httputil.UserHeader, req.UserID).
		Params(req.UrlQueryString()).
		Do().JSON(&pageResp)
	if err != nil {
		return nil, apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !pageResp.Success {
		return nil, toAPIError(resp.StatusCode(), pageResp.Error)
	}

	return &pageResp.Data, nil
}

// CreateTestPlanV2 creates a test plan
func (b *Bundle) CreateTestPlanV2(req apistructs.TestPlanV2CreateRequest) error {
	host, err := b.urls.DOP()
	if err != nil {
		return err
	}
	hc := b.hc

	var createResp apistructs.TestPlanV2UpdateResponse
	resp, err := hc.Post(host).Path("/api/autotests/testplans").Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).Do().JSON(&createResp)
	if err != nil {
		return apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !createResp.Success {
		return toAPIError(resp.StatusCode(), createResp.Error)
	}

	return nil
}

// UpdateTestPlanV2 updates a test plan
func (b *Bundle) UpdateTestPlanV2(req apistructs.TestPlanV2UpdateRequest) error {
	host, err := b.urls.DOP()
	if err != nil {
		return err
	}
	hc := b.hc

	var updateResp apistructs.TestPlanV2UpdateResponse
	resp, err := hc.Put(host).Path(fmt.Sprintf("/api/autotests/testplans/%d", req.TestPlanID)).
		Header(httputil.UserHeader, req.UserID).JSONBody(&req).Do().JSON(&updateResp)
	if err != nil {
		return apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !updateResp.Success {
		return toAPIError(resp.StatusCode(), updateResp.Error)
	}

	return nil
}

// GetTestPlanV2 gets the details of a test plan
func (b *Bundle) GetTestPlanV2(testPlanID uint64) (*apistructs.TestPlanV2GetResponse, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var getResp apistructs.TestPlanV2GetResponse
	resp, err := hc.Get(host).Path(fmt.Sprintf("/api/autotests/testplans/%d", testPlanID)).
		Header(httputil.InternalHeader, "bundle").Do().JSON(&getResp)
	if err != nil {
		return nil, apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !getResp.Success {
		return nil, toAPIError(resp.StatusCode(), getResp.Error)
	}

	return &getResp, nil
}

// GetTestPlanV2Step gets a test plan step
func (b *Bundle) GetTestPlanV2Step(stepID uint64) (*apistructs.TestPlanV2Step, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var getResp apistructs.TestPlanV2StepGetResponse
	resp, err := hc.Get(host).Path(fmt.Sprintf("/api/autotests/testplans-step/%d", stepID)).
		Header(httputil.InternalHeader, "bundle").Do().JSON(&getResp)
	if err != nil {
		return nil, apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !getResp.Success {
		return nil, toAPIError(resp.StatusCode(), getResp.Error)
	}

	return &getResp.Data, nil
}

// ListTestPlanV2Step lists the steps of a test plan
func (b *Bundle) ListTestPlanV2Step(testPlanID, groupID uint64) ([]*apistructs.TestPlanV2Step, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var getResp apistructs.TestPlanV2StepListResponse
	resp, err := hc.Get(host).Path(fmt.Sprintf("/api/autotests/testplans/%d/steps/actions/list-by-group-id", testPlanID)).
		Param("groupID", strconv.FormatUint(groupID, 10)).
		Header(httputil.InternalHeader, "bundle").Do().JSON(&getResp)
	if err != nil {
		return nil, apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !getResp.Success {
		return nil, toAPIError(resp.StatusCode(), getResp.Error)
	}

	return getResp.Data, nil
}

// UpdateTestPlanV2Step updates a test plan step
func (b *Bundle) UpdateTestPlanV2Step(req apistructs.TestPlanV2StepUpdateRequest) error {
	host, err := b.urls.DOP()
	if err != nil {
		return err
	}
	hc := b.hc

	var updateResp apistructs.TestPlanV2StepUpdateResp
	resp, err := hc.Put(host).Path(fmt.Sprintf("/api/autotests/testplans-step/%d", req.StepID)).
		Header(httputil.UserHeader, req.UserID).JSONBody(&req).Do().JSON(&updateResp)
	if err != nil {
		return apierrors.ErrInvoke.InternalError(err)
	}
	if !resp.IsOK() || !updateResp.Success {
		return toAPIError(resp.StatusCode(), updateResp.Error)
	}

	return nil
}

func (b *Bundle) ExecuteDiceAutotestTestPlan(req apistructs.AutotestExecuteTestPlansRequest) (*apistructs.AutotestExecuteTestPlansResponse, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var rsp apistructs.AutotestExecuteTestPlansResponse
	httpResp, err := hc.Post(host).Path(fmt.Sprintf("/api/autotests/testplans/%v/actions/execute", req.TestPlan.ID)).
		Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).
		Do().JSON(&rsp)
	if err != nil {
		return nil, apierrors.ErrInvoke.InternalError(err)
	}
	if !httpResp.IsOK() || !rsp.Success {
		return nil, toAPIError(httpResp.StatusCode(), rsp.Error)
	}

	return &rsp, nil
}

func (b *Bundle) CancelDiceAutotestTestPlan(req apistructs.AutotestCancelTestPlansRequest) (string, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return "", err
	}
	hc := b.hc

	var rsp apistructs.AutotestCancelTestPlansResponse
	httpResp, err := hc.Post(host).Path(fmt.Sprintf("/api/autotests/testplans/%v/actions/cancel", req.TestPlan.ID)).
		Header(httputil.UserHeader, req.UserID).
		JSONBody(&req).
		Do().JSON(&rsp)
	if err != nil {
		return "", apierrors.ErrInvoke.InternalError(err)
	}
	if !httpResp.IsOK() || !rsp.Success {
		return "", toAPIError(httpResp.StatusCode(), rsp.Error)
	}

	return rsp.Data, nil
}

// ListAutoTestGlobalConfig lists the autotest global configs
func (b *Bundle) ListAutoTestGlobalConfig(req apistructs.AutoTestGlobalConfigListRequest) ([]apistructs.AutoTestGlobalConfig, error) {
	host, err := b.urls.DOP()
	if err != nil {
		return nil, err
	}
	hc := b.hc

	var cfgResp apistructs.AutoTestGlobalConfigListResponse
	resp, err := hc.Get(host).Path("/api/autotests/global-configs").Header(httputil.UserHeader, req.IdentityInfo.UserID).
Param("scopeID", req.ScopeID).Param("scope", req.Scope).Do().JSON(&cfgResp) if err != nil { return nil, apierrors.ErrInvoke.InternalError(err) } if !resp.IsOK() || !cfgResp.Success { return nil, toAPIError(resp.StatusCode(), cfgResp.Error) } return cfgResp.Data, nil } func (b *Bundle) GetAutoTestExecHistory(pipelineID uint64) (*apistructs.AutoTestExecHistoryDto, error) { host, err := b.urls.DOP() if err != nil { return nil, err } hc := b.hc var getResp apistructs.AutoTestExecHistoryResp resp, err := hc.Get(host).Path(fmt.Sprintf("/api/autotests/testplans/history/actions/by-pipeline-id?pipelineID=%d", pipelineID)). Header(httputil.InternalHeader, "bundle").Do().JSON(&getResp) if err != nil { return nil, apierrors.ErrInvoke.InternalError(err) } if !resp.IsOK() || !getResp.Success { return nil, toAPIError(resp.StatusCode(), getResp.Error) } return &getResp.Data, nil }
nternalError(err) } if !httpResp.IsOK() || !rsp.Su
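Every method in this bundle repeats the same shape: resolve the DOP host, issue the request, then check both the HTTP status and the body's Success flag before unwrapping the data. A hypothetical refactor (not in the original package; the names and types are illustrative) could factor out that tail check:

// func checkResp(httpOK bool, status int, bodyOK bool, apiErr apistructs.ErrorResponse) error {
// 	if !httpOK || !bodyOK {
// 		return toAPIError(status, apiErr)
// 	}
// 	return nil
// }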
Secret.js
var redis = require("ioredis"); var config = require("config"); var util = require("util"); //var resource = config.Host.resource; //change and modify the secret var redisip = config.Security.ip; var redisport = config.Security.port; var redispass = config.Security.password; var redismode = config.Security.mode; var redisdb = config.Security.db; var redisSetting = { port: redisport, host: redisip, family: 4, db: redisdb, password: redispass, retryStrategy: function (times) { var delay = Math.min(times * 50, 2000); return delay; }, reconnectOnError: function (err) { return true; }, }; if (redismode == "sentinel") { if ( (config.Security.sentinels && config.Security.sentinels.hosts && config.Security.sentinels.port, config.Security.sentinels.name) ) { var sentinelHosts = config.Security.sentinels.hosts.split(","); if (Array.isArray(sentinelHosts) && sentinelHosts.length > 2) { var sentinelConnections = []; sentinelHosts.forEach(function (item) { sentinelConnections.push({ host: item, port: config.Security.sentinels.port, }); }); redisSetting = { sentinels: sentinelConnections, name: config.Security.sentinels.name, password: redispass, }; } else { console.log("No enough sentinel servers found ........."); } } } var redisClient = undefined; if (redismode != "cluster") { redisClient = new redis(redisSetting); } else { var redisHosts = redisip.split(","); if (Array.isArray(redisHosts)) { redisSetting = []; redisHosts.forEach(function (item) { redisSetting.push({ host: item, port: redisport,
        family: 4,
        password: redispass,
      });
    });
    // redisSetting is already an array of node objects, so pass it directly;
    // wrapping it in another array would hand ioredis a nested array.
    redisClient = new redis.Cluster(redisSetting);
  } else {
    redisClient = new redis(redisSetting);
  }
}

redisClient.on("error", function (err) {
  console.log("Error " + err);
});

var Secret = function (req, payload, done) {
  if (payload && payload.iss && payload.jti) {
    var issuer = payload.iss;
    var jti = payload.jti;

    ////////////////this is just for testing///////////////////
    //req.user = payload;

    redisClient
      .multi()
      .get(`token:iss:${issuer}:${jti}`)
      .get(`claims:iss:${issuer}:${jti}`)
      .exec(function (err, results) {
        if (err) {
          return done(err);
        }

        if (results && Array.isArray(results) && results.length > 1) {
          if (results[1][0] == null) {
            try {
              req.scope = JSON.parse(results[1][1]);
            } catch (ex) {
              return done(new Error("scope_error"));
            }
          }
          if (results[0][0] == null) {
            return done(null, results[0][1]);
          } else {
            return done(new Error("missing_secret"));
          }
        }
        return done(new Error("missing_secret"));
      });
  } else {
    done(new Error("wrong token format"));
  }
};

var CompanyChatSecret = function (req, payload, done) {
  if (
    payload &&
    payload.iss &&
    payload.jti &&
    payload.company &&
    payload.tenant
  ) {
    var issuer = payload.iss;
    var jti = payload.jti;
    var chatKey = util.format(
      "%d:%d:keys:chat:public",
      payload.tenant,
      payload.company
    );
    redisClient.get(chatKey, function (err, key) {
      if (err) {
        return done(err);
      }
      if (!key) {
        return done(new Error("missing_secret"));
      }
      return done(null, key);
    });
  } else {
    done(new Error("wrong token format"));
  }
};

module.exports.Secret = Secret;
module.exports.CompanyChatSecret = CompanyChatSecret;
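The (req, payload, done) signature of Secret matches the secret-callback shape used by JWT middleware such as express-jwt; a wiring sketch (assuming express-jwt is in use):

// var jwt = require("express-jwt");
// app.use(jwt({ secret: Secret, algorithms: ["HS256"] }));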
cats.service.ts
import { Injectable } from '@nestjs/common'; import { Cat } from './interfaces/cat.interface'; @Injectable() export class
{ private readonly cats: Cat[] = []; create(cat: Cat) { this.cats.push(cat); } findAll(): Cat[] { return this.cats; } }
CatsService
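A typical consumer of the service above, following the usual NestJS controller pattern (the route name is illustrative):

import { Body, Controller, Get, Post } from '@nestjs/common';
import { CatsService } from './cats.service';
import { Cat } from './interfaces/cat.interface';

@Controller('cats')
export class CatsController {
  constructor(private readonly catsService: CatsService) {}

  @Post()
  create(@Body() cat: Cat) {
    this.catsService.create(cat);
  }

  @Get()
  findAll(): Cat[] {
    return this.catsService.findAll();
  }
}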
train_pybullet_cartpole.py
# Add the parent dir so the package can be found. Only needed for a source
# code build; a pip install doesn't need it.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)

import gym
from pybullet_envs.bullet.cartpole_bullet import CartPoleBulletEnv

from baselines import deepq


def callback(lcl, glb):
    # stop training once the mean reward over the last 100 episodes reaches 199
    is_solved = lcl['t'] > 100 and sum(lcl['episode_rewards'][-101:-1]) / 100 >= 199
    return is_solved


def
(): env = CartPoleBulletEnv(renders=False) model = deepq.models.mlp([64]) act = deepq.learn( env, q_func=model, lr=1e-3, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, print_freq=10, callback=callback ) print("Saving model to cartpole_model.pkl") act.save("cartpole_model.pkl") if __name__ == '__main__': main()
main
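To replay the policy that main() saves, the matching enjoy-style script looks roughly like this (a sketch assuming the same baselines.deepq API used above; "cartpole_model.pkl" is the file saved by main()):

from baselines import deepq
from pybullet_envs.bullet.cartpole_bullet import CartPoleBulletEnv

act = deepq.load("cartpole_model.pkl")
env = CartPoleBulletEnv(renders=True)
obs, done = env.reset(), False
while not done:
    # act expects a batch of observations, hence obs[None]
    obs, rew, done, _ = env.step(act(obs[None])[0])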