Dataset schema (column: type and observed range):

- repo: string, lengths 7 to 60
- instance_id: string, lengths 11 to 64
- base_commit: string, lengths 40 to 40
- patch: string, lengths 83 to 793k
- test_patch: string, 1 distinct value
- problem_statement: string, lengths 22 to 112k
- hints_text: string, lengths 0 to 189k
- created_at: timestamp[ns], 2015-02-23 20:51:45 to 2024-12-13 21:31:14
- environment_setup_commit: string, 1 distinct value
- version: string, 1 distinct value
- FAIL_TO_PASS: sequence, lengths 0 to 0
- PASS_TO_PASS: sequence, lengths 0 to 0
repo: intelligent-machine-learning/dlrover
instance_id: intelligent-machine-learning__dlrover-266
base_commit: 524430bec9d4f22c1ff89d274a304fbe9c77f8bd
patch:
diff --git a/dlrover/go/brain/cmd/brain/main.go b/dlrover/go/brain/cmd/brain/main.go index 45a1ee153..b06ed21b5 100644 --- a/dlrover/go/brain/cmd/brain/main.go +++ b/dlrover/go/brain/cmd/brain/main.go @@ -75,7 +75,7 @@ func main() { err = brainServer.Run(ctx, errHandler) if err != nil { - log.Fatalf("Fail to run EasyDL server: %v", err) + log.Fatalf("Fail to run Brain server: %v", err) } pb.RegisterBrainServer(s, brainServer) diff --git a/dlrover/go/brain/manifests/k8s/brain-optimizer-configmap.yaml b/dlrover/go/brain/manifests/k8s/brain-optimizer-configmap.yaml new file mode 100644 index 000000000..a3099e51e --- /dev/null +++ b/dlrover/go/brain/manifests/k8s/brain-optimizer-configmap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: dlrover-brain-optimizers + namespace: dlrover +data: + optimizers: | + job_ps_create_resource_optimizer: + data-store.name: "base_datastore" + job_ps_init_adjust_resource_optimizer: + data-store.name: "base_datastore" + optimizer.ps.initial.target-worker-count: 32 + job_ps_running_resource_optimizer: + data-store.name: "base_datastore" + optimizer.ps.cpu.hot-adjust: 8 + optimizer.ps.cpu.hot-threshold: 0.8 + optimizer.ps.cpu.hot-target-worker-count: 32 + job_worker_create_resource_optimizer: + data-store.name: "base_datastore" + job_worker_resource_optimizer: + data-store.name: "base_datastore" + optimizer.worker.memory.margin-percent: 0.6 + optimizer.worker.cpu.margin-core: 0.0 + optimizer.step.count.threshold: 5 + optimizer.training-speed.less-percent: 0.2 + optimizer.worker.max.count-per-step: 8 + optimizer.ps.cpu.overload: 0.85 + optimizer.ps.cpu.exhausted-threshold: 0.99 + optimizer.worker.replica.decrease-count: 1 + optimizer.worker.cpu-util.comp-count: 3 + optimizer.worker.cpu.min: 2 diff --git a/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml b/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml index d75864249..6d4ee9357 100644 --- a/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml +++ b/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml @@ -2,10 +2,12 @@ apiVersion: v1 kind: ConfigMap metadata: - name: dlrover-bain-service + name: dlrover-brain-service namespace: dlrover data: config: | namespace: "dlrover" data-store.config-map.name: "dlrover-data-store" data-store.config-map.key: "datastore" + optimizer.config-map.name: "dlrover-brain-optimizers" + optimizer.config-map.key: "optimizers" diff --git a/dlrover/go/brain/manifests/k8s/brain.yaml b/dlrover/go/brain/manifests/k8s/brain.yaml new file mode 100644 index 000000000..f53b51cd0 --- /dev/null +++ b/dlrover/go/brain/manifests/k8s/brain.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: brain + namespace: dlrover +spec: + type: NodePort + ports: + - port: 50001 + protocol: TCP + targetPort: 50001 + selector: + app: dlrover-brain + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: dlrover-brain + name: dlrover-brain + namespace: dlrover +spec: + replicas: 1 + selector: + matchLabels: + app: dlrover-brain + template: + metadata: + labels: + app: dlrover-brain + name: dlrover-brain + namespace: dlrover + spec: + serviceAccountName: dlrover-controller-manager + containers: + - command: + - /bin/bash + - -c + - (/root/brain -alsologtostderr --namespace dlrover + --serviceConfigMapName dlrover-brain-service + --serviceConfigMapKey config 2>&1) | + tee /root/logs/brain.log; exit ${PIPESTATUS[0]} + image: registry.cn-hangzhou.aliyuncs.com/dlrover/brain:test + imagePullPolicy: Always + name: 
dlrover-brain + ports: + - containerPort: 50001 + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 500m + memory: 128Mi diff --git a/dlrover/go/brain/manifests/k8s/data-store-configmap.yaml b/dlrover/go/brain/manifests/k8s/data-store-configmap.yaml index c3b49bead..1cbb540ee 100644 --- a/dlrover/go/brain/manifests/k8s/data-store-configmap.yaml +++ b/dlrover/go/brain/manifests/k8s/data-store-configmap.yaml @@ -16,5 +16,5 @@ data: db.user: "root" db.password: "root" db.engine.type: "mysql" - db.url: "tcp(mysql:3306)/dlrover?parseTime=true&interpolateParams=true&loc=Local" + db.url: "tcp(mysql.dlrover.svc.cluster.local:3306)/dlrover?parseTime=true&interpolateParams=true&loc=Local" # yamllint enable \ No newline at end of file diff --git a/dlrover/go/brain/pkg/config/manager.go b/dlrover/go/brain/pkg/config/manager.go index 801fa4a4c..4bfb81af5 100644 --- a/dlrover/go/brain/pkg/config/manager.go +++ b/dlrover/go/brain/pkg/config/manager.go @@ -51,6 +51,7 @@ type Manager struct { // NewManager create an instance of config map manager by the specified opts of config manager. func NewManager(namespace string, configMapName string, configMapKey string, kubeClientSet kubernetes.Interface) *Manager { + log.Infof("Create config manager with namespace: %s, configMapName: %s, configMapKey: %s", namespace, configMapName, configMapKey) return &Manager{ controller: NewController(namespace, configMapName, kubeClientSet), configmapKey: configMapKey, diff --git a/dlrover/go/brain/pkg/optimizer/manager.go b/dlrover/go/brain/pkg/optimizer/manager.go index 2c6e6dca3..97963508f 100644 --- a/dlrover/go/brain/pkg/optimizer/manager.go +++ b/dlrover/go/brain/pkg/optimizer/manager.go @@ -29,12 +29,16 @@ import ( // Manager is the struct to manage optimizers type Manager struct { - optimizers map[string]optapi.Optimizer - dataStoreManager *datastore.Manager - confRetrieverManager *ConfigRetrieverManager - optimizerConfig *config.Config - configManager *config.Manager - locker *sync.RWMutex + conf *config.Config + + optimizerConfig *config.Config + optimizerConfigManager *config.Manager + optimizers map[string]optapi.Optimizer + + dataStoreManager *datastore.Manager + configRetrieverManager *ConfigRetrieverManager + + locker *sync.RWMutex } // NewManager creates a new OptimizerManager @@ -47,28 +51,29 @@ func NewManager(conf *config.Config) *Manager { configManager := config.NewManager(namespace, configMapName, configMapKey, kubeClientSet) return &Manager{ - configManager: configManager, - dataStoreManager: datastore.NewManager(conf), - confRetrieverManager: NewConfigRetrieverManager(conf), - locker: &sync.RWMutex{}, + conf: conf, + optimizerConfigManager: configManager, + configRetrieverManager: NewConfigRetrieverManager(conf), + dataStoreManager: datastore.NewManager(conf), + locker: &sync.RWMutex{}, } } // Run starts the manager func (m *Manager) Run(ctx context.Context, errReporter common.ErrorReporter) error { - if err := m.configManager.Run(ctx, errReporter); err != nil { + if err := m.optimizerConfigManager.Run(ctx, errReporter); err != nil { log.Errorf("[Optimizer Manager] fail to run optimizer config manager: %v", err) return err } - optimizerConfig, err := m.configManager.GetConfig() + optimizerConfig, err := m.optimizerConfigManager.GetConfig() if err != nil { log.Errorf("[Optimizer Manager] fail to get optimizer config: %v", err) return err } m.optimizerConfig = optimizerConfig - m.configManager.RegisterConfigObserver("easydl-optimizers", m.OptimizersConfigUpdateNotify) + 
m.optimizerConfigManager.RegisterConfigObserver("dlrover-brain-optimizers", m.OptimizersConfigUpdateNotify) err = m.dataStoreManager.Run(ctx, errReporter) if err != nil { @@ -94,7 +99,7 @@ func (m *Manager) Optimize(request *pb.OptimizeRequest) ([]*pb.JobOptimizePlan, return nil, err } - optimizerConfig, err := m.confRetrieverManager.RetrieveOptimizerConfig(request.Config) + optimizerConfig, err := m.configRetrieverManager.RetrieveOptimizerConfig(request.Config) if err != nil { log.Errorf("Fail to retrieve optimizer config from %v: %v", request, err) return nil, err diff --git a/dlrover/go/brain/pkg/server/server.go b/dlrover/go/brain/pkg/server/server.go index 41aab45a3..f53b22e84 100644 --- a/dlrover/go/brain/pkg/server/server.go +++ b/dlrover/go/brain/pkg/server/server.go @@ -25,6 +25,7 @@ import ( datastoreapi "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/api" dsimpl "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/implementation" "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/recorder/mysql" + "github.com/intelligent-machine-learning/easydl/brain/pkg/optimizer" pb "github.com/intelligent-machine-learning/easydl/brain/pkg/proto" "k8s.io/client-go/kubernetes" ) @@ -43,7 +44,8 @@ type BrainServer struct { conf *config.Config configManager *config.Manager - dsManager *datastore.Manager + dsManager *datastore.Manager + optimizerManager *optimizer.Manager } // NewBrainServer creates an EasyDLServer instance @@ -72,21 +74,28 @@ func (s *BrainServer) Run(ctx context.Context, errReporter common.ErrorReporter) log.Errorf("[%s] fail to get brain server config: %v", logName, err) return err } + s.conf.Set(config.KubeClientInterface, s.kubeClientSet) log.Infof("[%s] brain server config: %v", logName, s.conf) - dsConf := config.NewEmptyConfig() - dsConf.Set(config.KubeClientInterface, s.kubeClientSet) - dsConf.Set(config.DataStoreConfigMapName, s.conf.GetString(config.DataStoreConfigMapName)) - dsConf.Set(config.DataStoreConfigMapKey, s.conf.GetString(config.DataStoreConfigMapKey)) - dsConf.Set(config.Namespace, s.conf.GetString(config.Namespace)) - - s.dsManager = datastore.NewManager(dsConf) + s.dsManager = datastore.NewManager(s.conf) err = s.dsManager.Run(ctx, errReporter) if err != nil { log.Errorf("[%s] fail to run the data store manager: %v", logName, err) return err } - return s.dsManager.Run(ctx, errReporter) + err = s.dsManager.Run(ctx, errReporter) + if err != nil { + log.Errorf("[%s] fail to run data store manager: %v", logName, err) + return err + } + + s.optimizerManager = optimizer.NewManager(s.conf) + err = s.optimizerManager.Run(ctx, errReporter) + if err != nil { + log.Errorf("[%s] fail to run the optimizer manager: %v", logName, err) + return err + } + return nil } // PersistMetrics persists job metrics to data store @@ -104,6 +113,22 @@ func (s *BrainServer) PersistMetrics(ctx context.Context, in *pb.JobMetrics) (*e // Optimize returns the initial resource of a job. 
func (s *BrainServer) Optimize(ctx context.Context, in *pb.OptimizeRequest) (*pb.OptimizeResponse, error) { + log.Infof("Receive optimize request: %v", in) + plans, err := s.optimizerManager.Optimize(in) + if err != nil { + errReason := fmt.Sprintf("Fail to optimize request %v: %v", in, err) + log.Errorf(errReason) + return &pb.OptimizeResponse{ + Response: &pb.Response{Success: false, Reason: errReason}, + }, err + } + + return &pb.OptimizeResponse{ + Response: &pb.Response{ + Success: true, + }, + JobOptimizePlans: plans, + }, nil return nil, nil } diff --git a/dlrover/go/brain/pkg/utils/grpc.go b/dlrover/go/brain/pkg/utils/grpc.go new file mode 100644 index 000000000..ee708d4ad --- /dev/null +++ b/dlrover/go/brain/pkg/utils/grpc.go @@ -0,0 +1,33 @@ +// Copyright 2023 The DLRover Authors. All rights reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "context" + log "github.com/golang/glog" + "google.golang.org/grpc" +) + +// NewRPCConnection returns a new grpc client +func NewRPCConnection(ctx context.Context, target string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { + conn, err = grpc.DialContext(ctx, target, opts...) + if err != nil { + if conn != nil { + conn.Close() + } + log.Errorf("Failed to connect server %s with err: %v ", target, err) + return nil, err + } + return conn, err +} diff --git a/dlrover/python/master/resource/local_optimizer.py b/dlrover/python/master/resource/local_optimizer.py index 83fe481de..2f6514913 100644 --- a/dlrover/python/master/resource/local_optimizer.py +++ b/dlrover/python/master/resource/local_optimizer.py @@ -101,7 +101,7 @@ def _generate_job_create_resource(self): self._resource_limits.cpu < 16 and self._resource_limits.memory < 32 * 1024 ): - # Set a little resource to test an elastic job on minikube. + # Set a little resource to test an elastic job on k8s. node_cpu = _MINIKUBE_INITIAL_NODE_CPU node_memory = _MINIKUBE_INITIAL_NODE_MEMORY
problem_statement:
Directory design of EasyDL

1. How to organize directories of EasyDL?

```
|-brain          # Automatically generates the resource plan of the job.
|-operator
|-controllers
|-elastic-job    # Creates a k8s Job
|-resource-scale # Scales the job resource out or in according to the Custom Resource (CR)
|-elasticdl      # Dispatches data shards to workers and monitors training nodes.
|-easydl         # APIs for the training loop of TensorFlow/PyTorch to use elastic training.
```

2. Which framework do we use to implement a trainer to support elastic training?

We need a trainer to catch the exception and rebuild the session if parameter servers change. Currently, a trainer is implemented with the `tf.estimator` framework at AntGroup. However, Keras is more common than `tf.estimator`, and TF 2.x supports training a Keras model using [ParameterServerStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/ParameterServerStrategy). Alternatively, we can implement a trainer based on `tf.estimator` and convert a Keras model to an estimator model in TensorFlow.
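The conversion mentioned in the second option has a standard entry point in TensorFlow; a minimal illustrative sketch (not part of the issue, and assuming a TF version that still ships the estimator API):

```python
import tensorflow as tf

# Build and compile an ordinary Keras model...
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

# ...then convert it, so an estimator-based elastic trainer can drive it.
estimator = tf.keras.estimator.model_to_estimator(keras_model=model)
```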
created_at: 2023-03-02T03:34:39
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: intelligent-machine-learning/dlrover
instance_id: intelligent-machine-learning__dlrover-241
base_commit: c6ba4f81c8a948facc6c3fa8990b4e1fcd1b9263
patch:
diff --git a/dlrover/examples/iris_job.yaml b/dlrover/examples/iris_job.yaml index a2ced95d7..450e9bf8d 100644 --- a/dlrover/examples/iris_job.yaml +++ b/dlrover/examples/iris_job.yaml @@ -2,6 +2,7 @@ apiVersion: elastic.iml.github.io/v1alpha1 kind: ElasticJob metadata: name: elasticjob-sample + namespace: dlrover spec: distributionStrategy: ParameterServerStrategy optimizeMode: cluster @@ -18,10 +19,12 @@ spec: - name: main image: easydl/tf-estimator:iris_dnn_v0 imagePullPolicy: IfNotPresent + # yamllint disable command: - /bin/bash - -c - "python -m model_zoo.tf_estimator.iris_dnn_elastic" + # yamllint enable worker: replicas: 0 restartCount: 3 diff --git a/dlrover/go/brain/cmd/brain/main.go b/dlrover/go/brain/cmd/brain/main.go index 93ced81de..45a1ee153 100644 --- a/dlrover/go/brain/cmd/brain/main.go +++ b/dlrover/go/brain/cmd/brain/main.go @@ -13,66 +13,74 @@ package main +import ( + "context" + "flag" + log "github.com/golang/glog" + "github.com/intelligent-machine-learning/easydl/brain/pkg/common" + "github.com/intelligent-machine-learning/easydl/brain/pkg/config" + pb "github.com/intelligent-machine-learning/easydl/brain/pkg/proto" + "github.com/intelligent-machine-learning/easydl/brain/pkg/server" + "google.golang.org/grpc" + kubeclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "net" +) + func main() { - //log.Info("Start DLRover Brain") - //flag.Parse() - //mConfig := config.CommandConfig - // - //ctx, cancel := context.WithCancel(context.Background()) - //defer cancel() - // - //cfg, err := clientcmd.BuildConfigFromFlags(mConfig.APIServer, mConfig.KubeConfig) - //if err != nil { - // log.Fatalf("Error building kube config: %v", err) - //} + log.Info("Start DLRover Brain") + flag.Parse() + mConfig := config.CommandConfig + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - //kubeClient, err := kubeclientset.NewForConfig(cfg) - //if err != nil { - // log.Fatalf("Error building kube client: %v", err.Error()) - //} + cfg, err := clientcmd.BuildConfigFromFlags(mConfig.APIServer, mConfig.KubeConfig) + if err != nil { + log.Fatalf("Error building kube config: %v", err) + } + + kubeClient, err := kubeclientset.NewForConfig(cfg) + if err != nil { + log.Fatalf("Error building kube client: %v", err.Error()) + } // Start error handler - //log.Info("Start error handler") - //errHandler, err := common.NewStopAllErrorHandler(cancel) - //if err != nil { - // log.Fatalf("Create ErrorHandler error: %v", err) - //} else { - // go errHandler.HandleError(ctx) - //} - // - //log.Infof("serviceConfigMapName=%s, serviceConfigMapKey=%s", mConfig.ServiceConfigMapName, mConfig.ServiceConfigMapKey) - //configManager := config.NewManager(mConfig.Namespace, mConfig.ServiceConfigMapName, mConfig.ServiceConfigMapKey, kubeClient) - //err = configManager.Run(ctx, errHandler) - //if err != nil { - // log.Fatalf("Fail to run the config manager: %v", err) - //} - // - //conf, err := configManager.GetConfig() - //if err != nil { - // log.Fatalf("Fail to get config: %v", err) - //} - //conf.Set(config.KubeClientInterface, kubeClient) - //conf.Set(config.Namespace, mConfig.Namespace) - // - //lis, err := net.Listen("tcp", mConfig.Port) - //if err != nil { - // log.Fatalf("failed to listen: %v", err) - //} - //s := grpc.NewServer() - // - //brainServer, err := server.NewBrainServer(conf) - //if err != nil { - // log.Fatalf("fail to create Brain server: %v", err) - //} - // - //err = brainServer.Run(ctx, errHandler) - //if err != nil { - // log.Fatalf("Fail to run 
EasyDL server: %v", err) - //} - // - //pb.RegisterBrainServer(s, brainServer) - //log.Infof("server listening at %v", lis.Addr()) - //if err = s.Serve(lis); err != nil { - // log.Fatalf("failed to serve: %v", err) - //} + log.Info("Start error handler") + errHandler, err := common.NewStopAllErrorHandler(cancel) + if err != nil { + log.Fatalf("Create ErrorHandler error: %v", err) + } else { + go errHandler.HandleError(ctx) + } + + log.Infof("namespace=%s, serviceConfigMapName=%s, serviceConfigMapKey=%s", mConfig.Namespace, + mConfig.ServiceConfigMapName, mConfig.ServiceConfigMapKey) + conf := config.NewEmptyConfig() + conf.Set(config.KubeClientInterface, kubeClient) + conf.Set(config.BrainServerConfigMapName, mConfig.ServiceConfigMapName) + conf.Set(config.BrainServerConfigMapKey, mConfig.ServiceConfigMapKey) + conf.Set(config.Namespace, mConfig.Namespace) + + lis, err := net.Listen("tcp", mConfig.Port) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + + brainServer, err := server.NewBrainServer(conf) + if err != nil { + log.Fatalf("fail to create Brain server: %v", err) + } + + err = brainServer.Run(ctx, errHandler) + if err != nil { + log.Fatalf("Fail to run EasyDL server: %v", err) + } + + pb.RegisterBrainServer(s, brainServer) + log.Infof("server listening at %v", lis.Addr()) + if err = s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } } diff --git a/dlrover/go/brain/cmd/k8smonitor/main.go b/dlrover/go/brain/cmd/k8smonitor/main.go index 5396b0d76..92a43a828 100644 --- a/dlrover/go/brain/cmd/k8smonitor/main.go +++ b/dlrover/go/brain/cmd/k8smonitor/main.go @@ -53,16 +53,6 @@ func main() { log.Infof("namespace=%s, serviceConfigMapName=%s, serviceConfigMapKey=%s", mConfig.Namespace, mConfig.ServiceConfigMapName, mConfig.ServiceConfigMapKey) - //configManager := config.NewManager(mConfig.Namespace, mConfig.ServiceConfigMapName, mConfig.ServiceConfigMapKey, kubeClient) - //err = configManager.Run(ctx, errHandler) - //if err != nil { - // log.Fatalf("Fail to run the config manager: %v", err) - //} - // - //conf, err := configManager.GetConfig() - //if err != nil { - // log.Fatalf("Fail to get config: %v", err) - //} conf := config.NewEmptyConfig() conf.Set(config.KubeClientInterface, kubeClient) diff --git a/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml b/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml new file mode 100644 index 000000000..d75864249 --- /dev/null +++ b/dlrover/go/brain/manifests/k8s/brain-service-configmap.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dlrover-bain-service + namespace: dlrover +data: + config: | + namespace: "dlrover" + data-store.config-map.name: "dlrover-data-store" + data-store.config-map.key: "datastore" diff --git a/dlrover/go/brain/pkg/config/command_config.go b/dlrover/go/brain/pkg/config/command_config.go index 8ac864d85..87ac3b25d 100644 --- a/dlrover/go/brain/pkg/config/command_config.go +++ b/dlrover/go/brain/pkg/config/command_config.go @@ -26,6 +26,8 @@ const ( SpecServiceConfigMapKey = "serviceConfigMapKey" // SpecNamespaceName is the spec name of namespace SpecNamespaceName = "namespace" + // SpecServerPort is the port of the EasyDL server gRPC service + SpecServerPort = "port" ) // Spec is the struct of configure specifications @@ -40,6 +42,8 @@ type Spec struct { ServiceConfigMapKey string // Namespace is the namespace Namespace string + // Port is the port of the EasyDL server gRPC service + Port string } // CommandConfig 
is the variable of type ConfigSpec @@ -66,4 +70,9 @@ func init() { SpecNamespaceName, "", "The namespace name used for k8s ConfigMap & LeaderElection") + flag.StringVar( + &CommandConfig.Port, + SpecServerPort, + ":50001", + "Port of the EasyDL server gRPC service") } diff --git a/dlrover/go/brain/pkg/config/common.go b/dlrover/go/brain/pkg/config/common.go index 39300df2d..975f295ca 100644 --- a/dlrover/go/brain/pkg/config/common.go +++ b/dlrover/go/brain/pkg/config/common.go @@ -55,6 +55,11 @@ const ( // KubeWatcherHandlerConfigMapKey is the key of kube watcher handler config map KubeWatcherHandlerConfigMapKey = "kube-watcher.handler.config-map.key" + // BrainServerConfigMapName is the name of brain server config map + BrainServerConfigMapName = "brain.server.config-map.name" + // BrainServerConfigMapKey is the name of brain server config key + BrainServerConfigMapKey = "brain.server.config-map.key" + // QueryBackwardTimePeriodInHour is the config key for query backward time period in hour QueryBackwardTimePeriodInHour = "query.backward.time-period.hour" diff --git a/dlrover/go/brain/pkg/datastore/dbbase/recorder.go b/dlrover/go/brain/pkg/datastore/dbbase/recorder.go index 1d42c672b..fe988687f 100644 --- a/dlrover/go/brain/pkg/datastore/dbbase/recorder.go +++ b/dlrover/go/brain/pkg/datastore/dbbase/recorder.go @@ -83,7 +83,7 @@ func (r *DatabaseRecorder) Get(row interface{}, condition Condition) error { } session := r.Table(r.TableName) session = condition.Apply(session) - found, err := session.Desc("id").Get(row) + found, err := session.Get(row) if err != nil { log.Errorf("Failed to get %v of %+v: %v", r.TableName, condition, err) return err diff --git a/dlrover/go/brain/pkg/datastore/implementation/elasticjob_datastore.go b/dlrover/go/brain/pkg/datastore/implementation/elasticjob_datastore.go index d27696f18..23f1723d3 100644 --- a/dlrover/go/brain/pkg/datastore/implementation/elasticjob_datastore.go +++ b/dlrover/go/brain/pkg/datastore/implementation/elasticjob_datastore.go @@ -18,6 +18,7 @@ import ( "github.com/intelligent-machine-learning/easydl/brain/pkg/common" "github.com/intelligent-machine-learning/easydl/brain/pkg/config" datastoreapi "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/api" + dsimplutils "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/implementation/utils" "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/recorder/mysql" ) @@ -65,5 +66,5 @@ func (store *ElasticJobDataStore) PersistData(condition *datastoreapi.Condition, // GetData returns data for a given condition func (store *ElasticJobDataStore) GetData(condition *datastoreapi.Condition, data interface{}) error { - return nil + return dsimplutils.GetData(store.client, condition, data) } diff --git a/dlrover/go/brain/pkg/datastore/manager.go b/dlrover/go/brain/pkg/datastore/manager.go index e5154a936..747504a84 100644 --- a/dlrover/go/brain/pkg/datastore/manager.go +++ b/dlrover/go/brain/pkg/datastore/manager.go @@ -21,6 +21,7 @@ import ( "github.com/intelligent-machine-learning/easydl/brain/pkg/config" datastoreapi "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/api" dsimpl "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/implementation" + "sync" ) // Manager is the struct of data store manager @@ -28,6 +29,7 @@ type Manager struct { conf *config.Config configManager *config.Manager dataStores map[string]datastoreapi.DataStore + mutex *sync.RWMutex } // NewManager creates a data store manager @@ -41,6 +43,8 @@ func 
NewManager(conf *config.Config) *Manager { return &Manager{ configManager: configManager, + dataStores: make(map[string]datastoreapi.DataStore), + mutex: &sync.RWMutex{}, } } @@ -63,11 +67,24 @@ func (m *Manager) Run(ctx context.Context, errReporter common.ErrorReporter) err // CreateDataStore creates a data store for a given name func (m *Manager) CreateDataStore(name string) (datastoreapi.DataStore, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if dataStore, found := m.dataStores[name]; found { + return dataStore, nil + } + conf := m.conf.GetConfig(name) if conf == nil { err := fmt.Errorf("There is no config for data store %s", name) return nil, err } - return dsimpl.CreateDataStore(name, conf) + dataStore, err := dsimpl.CreateDataStore(name, conf) + if err != nil { + log.Errorf("fail to create data store %s: %v", name, err) + return nil, err + } + m.dataStores[name] = dataStore + return dataStore, nil } diff --git a/dlrover/go/brain/pkg/platform/k8s/common/elastic_job.go b/dlrover/go/brain/pkg/platform/k8s/common/elastic_job.go new file mode 100644 index 000000000..912d407b3 --- /dev/null +++ b/dlrover/go/brain/pkg/platform/k8s/common/elastic_job.go @@ -0,0 +1,23 @@ +// Copyright 2023 The DLRover Authors. All rights reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package common + +const ( + // JobPhaseRunning indicates job is running + JobPhaseRunning = "Running" + // JobPhaseError indicates job runs into errors + JobPhaseError = "Error" + // JobPhaseSucceeded indicates job completed successfully + JobPhaseSucceeded = "Succeeded" +) diff --git a/dlrover/go/brain/pkg/platform/k8s/implementation/watchhandler/elasticjob_handler.go b/dlrover/go/brain/pkg/platform/k8s/implementation/watchhandler/elasticjob_handler.go index 0fb477bb1..4992334c1 100644 --- a/dlrover/go/brain/pkg/platform/k8s/implementation/watchhandler/elasticjob_handler.go +++ b/dlrover/go/brain/pkg/platform/k8s/implementation/watchhandler/elasticjob_handler.go @@ -19,6 +19,7 @@ import ( "github.com/intelligent-machine-learning/easydl/brain/pkg/config" datastoreapi "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/api" "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/recorder/mysql" + k8scommon "github.com/intelligent-machine-learning/easydl/brain/pkg/platform/k8s/common" handlerutils "github.com/intelligent-machine-learning/easydl/brain/pkg/platform/k8s/implementation/watchhandler/utils" watchercommon "github.com/intelligent-machine-learning/easydl/brain/pkg/platform/k8s/watcher/common" elasticv1alpha1 "github.com/intelligent-machine-learning/easydl/dlrover/go/operator/api/v1alpha1" @@ -85,7 +86,43 @@ func (handler *ElasticJobHandler) HandleCreateEvent(object runtime.Object, event // HandleUpdateEvent handles update events func (handler *ElasticJobHandler) HandleUpdateEvent(object runtime.Object, oldObject runtime.Object, event watchercommon.Event) error { - return nil + newJob := &elasticv1alpha1.ElasticJob{} + oldJob := &elasticv1alpha1.ElasticJob{} + + unstructNewObj := object.(*unstructured.Unstructured) + unstructOldObj := oldObject.(*unstructured.Unstructured) + + runtime.DefaultUnstructuredConverter.FromUnstructured(unstructOldObj.Object, oldJob) + runtime.DefaultUnstructuredConverter.FromUnstructured(unstructNewObj.Object, newJob) + + jobUID := string(newJob.UID) + + cond := &datastoreapi.Condition{ + Type: common.TypeGetDataGetJob, + Extra: &mysql.JobCondition{ + UID: jobUID, + }, + } + record := &mysql.Job{} + + err := handler.dataStore.GetData(cond, record) + if err != nil { + log.Errorf("fail to get job record for %s: %v", jobUID, err) + return err + } + + if newJob.Status.Phase == k8scommon.JobPhaseRunning && record.StartedAt.IsZero() { + record.StartedAt = newJob.Status.StartTime.Time + } + + if newJob.Status.Phase == k8scommon.JobPhaseSucceeded && record.FinishedAt.IsZero() { + record.FinishedAt = newJob.Status.CompletionTime.Time + } + + upsertCond := &datastoreapi.Condition{ + Type: common.TypeUpsertJob, + } + return handler.dataStore.PersistData(upsertCond, record, nil) } // HandleDeleteEvent handles delete events diff --git a/dlrover/go/brain/pkg/server/server.go b/dlrover/go/brain/pkg/server/server.go index 7f8044d02..41aab45a3 100644 --- a/dlrover/go/brain/pkg/server/server.go +++ b/dlrover/go/brain/pkg/server/server.go @@ -16,6 +16,7 @@ package server import ( "context" "encoding/json" + "fmt" log "github.com/golang/glog" "github.com/golang/protobuf/ptypes/empty" "github.com/intelligent-machine-learning/easydl/brain/pkg/common" @@ -25,36 +26,72 @@ import ( dsimpl "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/implementation" "github.com/intelligent-machine-learning/easydl/brain/pkg/datastore/recorder/mysql" pb "github.com/intelligent-machine-learning/easydl/brain/pkg/proto" + "k8s.io/client-go/kubernetes" 
) const ( defaultDataStoreName = dsimpl.BaseDataStoreName + logName = "brain-server" ) // BrainServer is the interface of DLRover Brain type BrainServer struct { pb.UnimplementedBrainServer - dsManager *datastore.Manager - dataStores map[string]datastoreapi.DataStore + + kubeClientSet kubernetes.Interface + + conf *config.Config + configManager *config.Manager + + dsManager *datastore.Manager } // NewBrainServer creates an EasyDLServer instance func NewBrainServer(conf *config.Config) (*BrainServer, error) { - dsManager := datastore.NewManager(conf) + namespace := conf.GetString(config.Namespace) + configMapName := conf.GetString(config.BrainServerConfigMapName) + configMapKey := conf.GetString(config.BrainServerConfigMapKey) + kubeClientSet := conf.GetKubeClientInterface() + return &BrainServer{ - dsManager: dsManager, - dataStores: make(map[string]datastoreapi.DataStore), + configManager: config.NewManager(namespace, configMapName, configMapKey, kubeClientSet), + kubeClientSet: kubeClientSet, }, nil } // Run starts the server func (s *BrainServer) Run(ctx context.Context, errReporter common.ErrorReporter) error { + err := s.configManager.Run(ctx, errReporter) + if err != nil { + err = fmt.Errorf("[%s] failed to initialize config manager: %v", logName, err) + log.Error(err) + return err + } + s.conf, err = s.configManager.GetConfig() + if err != nil { + log.Errorf("[%s] fail to get brain server config: %v", logName, err) + return err + } + log.Infof("[%s] brain server config: %v", logName, s.conf) + + dsConf := config.NewEmptyConfig() + dsConf.Set(config.KubeClientInterface, s.kubeClientSet) + dsConf.Set(config.DataStoreConfigMapName, s.conf.GetString(config.DataStoreConfigMapName)) + dsConf.Set(config.DataStoreConfigMapKey, s.conf.GetString(config.DataStoreConfigMapKey)) + dsConf.Set(config.Namespace, s.conf.GetString(config.Namespace)) + + s.dsManager = datastore.NewManager(dsConf) + err = s.dsManager.Run(ctx, errReporter) + if err != nil { + log.Errorf("[%s] fail to run the data store manager: %v", logName, err) + return err + } return s.dsManager.Run(ctx, errReporter) } // PersistMetrics persists job metrics to data store func (s *BrainServer) PersistMetrics(ctx context.Context, in *pb.JobMetrics) (*empty.Empty, error) { - dataStore, err := s.getDataStore(in.DataStore) + dataStore, err := s.dsManager.CreateDataStore(in.DataStore) if err != nil { return nil, err } @@ -65,18 +102,6 @@ func (s *BrainServer) PersistMetrics(ctx context.Context, in *pb.JobMetrics) (*e return &empty.Empty{}, nil } -func (s *BrainServer) getDataStore(name string) (datastoreapi.DataStore, error) { - _, found := s.dataStores[name] - if !found { - dataStore, err := s.dsManager.CreateDataStore(name) - if err != nil { - return nil, err - } - s.dataStores[name] = dataStore - } - return s.dataStores[name], nil -} - // Optimize returns the initial resource of a job. func (s *BrainServer) Optimize(ctx context.Context, in *pb.OptimizeRequest) (*pb.OptimizeResponse, error) { return nil, nil @@ -84,7 +109,7 @@ func (s *BrainServer) Optimize(ctx context.Context, in *pb.OptimizeRequest) (*pb // GetJobMetrics returns a job metrics func (s *BrainServer) GetJobMetrics(ctx context.Context, in *pb.JobMetricsRequest) (*pb.JobMetricsResponse, error) { - dataStore, err := s.getDataStore(defaultDataStoreName) + dataStore, err := s.dsManager.CreateDataStore(defaultDataStoreName) if err != nil { log.Errorf("fail to get data store %s", defaultDataStoreName) return &pb.JobMetricsResponse{
problem_statement:
Directory design of EasyDL

1. How to organize directories of EasyDL?

```
|-brain          # Automatically generates the resource plan of the job.
|-operator
|-controllers
|-elastic-job    # Creates a k8s Job
|-resource-scale # Scales the job resource out or in according to the Custom Resource (CR)
|-elasticdl      # Dispatches data shards to workers and monitors training nodes.
|-easydl         # APIs for the training loop of TensorFlow/PyTorch to use elastic training.
```

2. Which framework do we use to implement a trainer to support elastic training?

We need a trainer to catch the exception and rebuild the session if parameter servers change. Currently, a trainer is implemented with the `tf.estimator` framework at AntGroup. However, Keras is more common than `tf.estimator`, and TF 2.x supports training a Keras model using [ParameterServerStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/ParameterServerStrategy). Alternatively, we can implement a trainer based on `tf.estimator` and convert a Keras model to an estimator model in TensorFlow.
created_at: 2023-02-21T09:38:35
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: intelligent-machine-learning/dlrover
instance_id: intelligent-machine-learning__dlrover-195
base_commit: 90bf39a0cc52cac8b35b95b1c7368b55c72112a3
patch:
diff --git a/dlrover/go/operator/config/default/manager_auth_proxy_patch.yaml b/dlrover/go/operator/config/default/manager_auth_proxy_patch.yaml index dc4081d60..7204d829c 100644 --- a/dlrover/go/operator/config/default/manager_auth_proxy_patch.yaml +++ b/dlrover/go/operator/config/default/manager_auth_proxy_patch.yaml @@ -37,3 +37,4 @@ spec: - "--health-probe-bind-address=:8081" - "--metrics-bind-address=127.0.0.1:8080" - "--leader-elect" + - "--master-image=registry.cn-hangzhou.aliyuncs.com/intell-ai/dlrover:test" diff --git a/dlrover/go/operator/main.go b/dlrover/go/operator/main.go index 6ed0aab9d..f34a96d9e 100644 --- a/dlrover/go/operator/main.go +++ b/dlrover/go/operator/main.go @@ -53,11 +53,14 @@ func main() { var metricsAddr string var enableLeaderElection bool var probeAddr string + var masterImage string flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + flag.StringVar(&masterImage, "master-image", "registry.cn-hangzhou.aliyuncs.com/intell-ai/dlrover:latest", + "The image to launch a dlrover master Pod of an ElasticJob.") opts := zap.Options{ Development: true, } @@ -90,7 +93,7 @@ func main() { os.Exit(1) } - if err = controllers.NewElasticJobReconciler(mgr).SetupWithManager(mgr); err != nil { + if err = controllers.NewElasticJobReconciler(mgr, masterImage).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ElasticJob") os.Exit(1) } diff --git a/dlrover/go/operator/pkg/controllers/elasticjob_controller.go b/dlrover/go/operator/pkg/controllers/elasticjob_controller.go index 88482a949..5acd3b183 100644 --- a/dlrover/go/operator/pkg/controllers/elasticjob_controller.go +++ b/dlrover/go/operator/pkg/controllers/elasticjob_controller.go @@ -46,18 +46,20 @@ const ( // ElasticJobReconciler reconciles a ElasticJob object type ElasticJobReconciler struct { client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - Log logr.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + Log logr.Logger + masterImage string } // NewElasticJobReconciler creates a JobReconciler -func NewElasticJobReconciler(mgr ctrl.Manager) *ElasticJobReconciler { +func NewElasticJobReconciler(mgr ctrl.Manager, masterImage string) *ElasticJobReconciler { r := &ElasticJobReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("elasticjob-controller"), - Log: ctrl.Log.WithName("controllers").WithName("ElasticJob"), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("elasticjob-controller"), + Log: ctrl.Log.WithName("controllers").WithName("ElasticJob"), + masterImage: masterImage, } return r } @@ -191,6 +193,7 @@ func (r *ElasticJobReconciler) stopRunningPods(job *elasticv1alpha1.ElasticJob) } func (r *ElasticJobReconciler) createEasydlMaster(job *elasticv1alpha1.ElasticJob) error { + master.NewMasterTemplateToJob(job, r.masterImage) masterManager := common.ReplicaManagers[master.ReplicaTypeTrainerMaster] err := masterManager.ReconcilePods(r.Client, job, nil) if err != nil { diff --git a/dlrover/go/operator/pkg/controllers/master/master.go b/dlrover/go/operator/pkg/controllers/master/master.go index 
78377358c..1b4582d4e 100644 --- a/dlrover/go/operator/pkg/controllers/master/master.go +++ b/dlrover/go/operator/pkg/controllers/master/master.go @@ -33,11 +33,10 @@ const ( initMasterContainerMemory = "2Gi" initMasterContainerStorage = "2Gi" masterCommand = "python -m dlrover.python.master.main" - masterImage = "registry.cn-hangzhou.aliyuncs.com/intell-ai/dlrover:test" masterServicePort = 50001 initMasterIndex = 0 - - envMasterAddrKey = "MASTER_ADDR" + defaultImagePullPolicy = "IfNotPresent" + envMasterAddrKey = "MASTER_ADDR" // ReplicaTypeTrainerMaster is the type for DLRover Master replica. ReplicaTypeTrainerMaster commonv1.ReplicaType = "dlrover-master" @@ -47,46 +46,16 @@ const ( type Manager struct{} func init() { - common.ReplicaManagers[ReplicaTypeTrainerMaster] = newManager() -} - -func newManager() *Manager { - return &Manager{} + common.ReplicaManagers[ReplicaTypeTrainerMaster] = &Manager{} } func (m *Manager) newJobMaster( job *elasticv1alpha1.ElasticJob, replicaIndex int, ) *corev1.Pod { - command := masterCommand + fmt.Sprintf( - " --namespace %s --job_name %s --port %d", - job.Namespace, job.Name, masterServicePort, - ) - container := corev1.Container{ - Name: "main", - Image: masterImage, - ImagePullPolicy: "IfNotPresent", - Command: []string{"/bin/bash", "-c", command}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(initMasterContainerCPU), - corev1.ResourceMemory: resource.MustParse(initMasterContainerMemory), - corev1.ResourceEphemeralStorage: resource.MustParse(initMasterContainerStorage), - }, - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(initMasterContainerCPU), - corev1.ResourceMemory: resource.MustParse(initMasterContainerMemory), - corev1.ResourceEphemeralStorage: resource.MustParse(initMasterContainerStorage), - }, - }, - } - podTemplate := &corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{container}, - RestartPolicy: corev1.RestartPolicyNever, - }, - } masterName := newJobMasterName(job.Name) - pod := common.NewPod(job, podTemplate, masterName) + pod := common.NewPod( + job, &job.Spec.ReplicaSpecs[ReplicaTypeTrainerMaster].Template, masterName, + ) pod.Labels[common.LabelReplicaTypeKey] = string(ReplicaTypeTrainerMaster) pod.Labels[common.LabelReplicaIndexKey] = fmt.Sprintf("%d", replicaIndex) return pod @@ -240,3 +209,49 @@ func (m *Manager) StopRunningPods( } return nil } + +// NewMasterTemplateToJob sets configurations to the master template of a job. 
+func NewMasterTemplateToJob(job *elasticv1alpha1.ElasticJob, masterImage string) { + command := masterCommand + fmt.Sprintf( + " --namespace %s --job_name %s --port %d", + job.Namespace, job.Name, masterServicePort, + ) + container := corev1.Container{ + Name: "main", + Image: masterImage, + ImagePullPolicy: defaultImagePullPolicy, + Command: []string{"/bin/bash", "-c", command}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(initMasterContainerCPU), + corev1.ResourceMemory: resource.MustParse(initMasterContainerMemory), + corev1.ResourceEphemeralStorage: resource.MustParse(initMasterContainerStorage), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(initMasterContainerCPU), + corev1.ResourceMemory: resource.MustParse(initMasterContainerMemory), + corev1.ResourceEphemeralStorage: resource.MustParse(initMasterContainerStorage), + }, + }, + } + podTemplate := &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + RestartPolicy: corev1.RestartPolicyNever, + }, + } + if _, ok := job.Spec.ReplicaSpecs[ReplicaTypeTrainerMaster]; ok { + mainContainer := job.Spec.ReplicaSpecs[ReplicaTypeTrainerMaster].ReplicaSpec.Template.Spec.Containers[0] + if mainContainer.Image != "" { + podTemplate.Spec.Containers[0].Image = mainContainer.Image + } + if mainContainer.ImagePullPolicy != "" { + podTemplate.Spec.Containers[0].ImagePullPolicy = mainContainer.ImagePullPolicy + } + } + job.Spec.ReplicaSpecs[ReplicaTypeTrainerMaster] = &elasticv1alpha1.ReplicaSpec{ + ReplicaSpec: commonv1.ReplicaSpec{ + Template: *podTemplate, + }, + } +} diff --git a/scripts/build_wheel.sh b/scripts/build_wheel.sh index 59908bdef..11dde03ce 100644 --- a/scripts/build_wheel.sh +++ b/scripts/build_wheel.sh @@ -22,4 +22,4 @@ make -f dlrover/Makefile # Create dlrover package echo "Building the wheel for dlrover." rm -rf ./build/lib -python setup.py --quiet bdist_wheel --dist-dir ./build +python setup.py --quiet bdist_wheel
problem_statement:
Support setting the image for the dlrover master in a job YAML file.
created_at: 2023-01-28T08:31:06
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: Backblaze/B2_Command_Line_Tool
instance_id: Backblaze__B2_Command_Line_Tool-954
base_commit: 1967d20f3f62e74a885be1edf6ad83c26e17871c
patch:
diff --git a/b2/console_tool.py b/b2/console_tool.py index f802ca29a..8fe8b4bc7 100644 --- a/b2/console_tool.py +++ b/b2/console_tool.py @@ -1044,7 +1044,7 @@ def _setup_parser(cls, parser): def _run(self, args): # Handle internal options for testing inside Backblaze. # These are not documented in the usage string. - realm = self._get_realm(args) + realm = self._get_user_requested_realm(args) if args.applicationKeyId is None: args.applicationKeyId = ( @@ -1060,17 +1060,21 @@ def _run(self, args): return self.authorize(args.applicationKeyId, args.applicationKey, realm) - def authorize(self, application_key_id, application_key, realm): + def authorize(self, application_key_id, application_key, realm: str | None): """ Perform the authorization and capability checks, report errors. :param application_key_id: application key ID used to authenticate :param application_key: application key - :param realm: authorization realm + :param realm: authorization realm; if None, production is used :return: exit status """ + verbose_realm = bool(realm) + realm = realm or 'production' url = REALM_URLS.get(realm, realm) - self._print_stderr(f"Using {url}") + logger.info(f"Using {url}") + if verbose_realm: + self._print_stderr(f'Using {url}') try: self.api.authorize_account(realm, application_key_id, application_key) @@ -1099,7 +1103,10 @@ def authorize(self, application_key_id, application_key, realm): return 1 @classmethod - def _get_realm(cls, args): + def _get_user_requested_realm(cls, args) -> str | None: + """ + Determine the realm to use for authorization. + """ if args.dev: return 'dev' if args.staging: @@ -1107,7 +1114,7 @@ def _get_realm(cls, args): if args.environment: return args.environment - return os.environ.get(B2_ENVIRONMENT_ENV_VAR, 'production') + return os.environ.get(B2_ENVIRONMENT_ENV_VAR) @B2.register_subcommand @@ -4032,9 +4039,9 @@ def authorize_from_env(self, command_class): f'Please provide both "{B2_APPLICATION_KEY_ENV_VAR}" and "{B2_APPLICATION_KEY_ID_ENV_VAR}" environment variables or none of them' ) return 1 - realm = os.environ.get(B2_ENVIRONMENT_ENV_VAR, 'production') + realm = os.environ.get(B2_ENVIRONMENT_ENV_VAR) - if self.api.account_info.is_same_key(key_id, realm): + if self.api.account_info.is_same_key(key_id, realm or 'production'): return 0 logger.info('authorize-account is being run from env variables') diff --git a/changelog.d/949.fixed.md b/changelog.d/949.fixed.md new file mode 100644 index 000000000..72b54cf28 --- /dev/null +++ b/changelog.d/949.fixed.md @@ -0,0 +1,1 @@ +Don't print `Using https://REALM" in stderr unless explicitly set by user.
problem_statement:
`Using https://api.backblazeb2.com` output to stderr causes problems for automation that only expects errors and warnings on stderr

#941 made the very reasonable change of redirecting the `Using https://api.backblazeb2.com` string from stdout to stderr so that it won't interfere with JSON output. However, the fallout of this change is that automation which only expects errors and warnings on stderr now reports this message; in my case, this generates unnecessary reports of warnings. Reporting on stderr has been valuable in the past to ensure deprecation warnings are exposed, as well as any other non-fatal issues. If you were using the `logging` framework, I'd propose changing the `Using https://api.backblazeb2.com` message to `DEBUG` level; however, as you only have the two options, and given that it doesn't belong in either location, perhaps the message should be dropped?
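The patch above takes a middle path: the URL is always recorded through the `logging` framework, and is echoed to stderr only when the user explicitly requested a realm. A minimal sketch of that pattern (the helper name here is illustrative, not the actual b2 code):

```python
import logging
import sys

logger = logging.getLogger(__name__)

def report_realm_url(realm, url):
    # Always keep a record in the log, where automation won't trip over it.
    logger.info('Using %s', url)
    # Surface it on stderr only when the user explicitly chose a realm,
    # i.e. it was not silently defaulted to production.
    if realm is not None:
        print(f'Using {url}', file=sys.stderr)
```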
created_at: 2023-11-21T12:39:48
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: pytroll/pygac
instance_id: pytroll__pygac-98
base_commit: 48a6567408179f3059862ed574f66b5df2c09d26
patch:
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index d73a100..f4fa96e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -39,8 +39,7 @@ jobs: - name: Run unit tests shell: bash -l {0} run: | - coverage run --source=pygac setup.py test - coverage xml + pytest --cov=pygac pygac/tests --cov-report=xml - name: Upload unittest coverage uses: codecov/codecov-action@v1
problem_statement:
Unit test discovery

Unit tests starting with `test__` are not discovered by `python setup.py test`:

```
...pygac/pygac/tests> grep test__ *.py
test_klm.py:    def test__validate_header(self):
test_pod.py:    def test__validate_header(self):
test_pod.py:    def test__get_calibrated_channels_uniform_shape(self, get_channels):
test_pod.py:    def test__adjust_clock_drift(self, avhrr_gac, get_tle_lines,
test_reader.py:    def test__read_scanlines(self):
test_reader.py:    def test__validate_header(self):
test_reader.py:    def test__correct_data_set_name(self):
test_reader.py:    def test__get_calibrated_channels_uniform_shape(self, get_channels):
```

Some of them are failing.
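The patch above swaps the runner to pytest. One plausible explanation (an assumption, not stated in the issue) is that several of these tests are plain functions taking pytest fixtures such as `get_channels`, which the unittest-based `setup.py test` runner cannot collect or call; pytest handles them natively:

```python
# test_example.py -- a pytest-style test with a fixture argument; the
# unittest runner cannot inject `get_channels`, while pytest resolves it
# automatically from the registered fixture.
import pytest

@pytest.fixture
def get_channels():
    return [1, 2, 3]

def test__get_calibrated_channels_uniform_shape(get_channels):
    assert len(get_channels) == 3
```

With this style, the CI command from the patch, `pytest --cov=pygac pygac/tests --cov-report=xml`, both discovers and runs the `test__*` functions.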
created_at: 2021-11-29T09:34:22
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: pytroll/pygac
instance_id: pytroll__pygac-95
base_commit: 5423880afd30b504fd74d4bc56be1789a526ed9d
patch:
diff --git a/pygac/pod_reader.py b/pygac/pod_reader.py index be7e6cf..5b28471 100644 --- a/pygac/pod_reader.py +++ b/pygac/pod_reader.py @@ -49,7 +49,7 @@ from pygac.clock_offsets_converter import get_offsets from pygac.correct_tsm_issue import TSM_AFFECTED_INTERVALS_POD, get_tsm_idx -from pygac.reader import Reader, ReaderError +from pygac.reader import Reader, ReaderError, NoTLEData from pygac.slerp import slerp from pygac.utils import file_opener @@ -484,7 +484,11 @@ def _adjust_clock_drift(self): missed_utcs = ((missed_lines - scan_lines[0])*np.timedelta64(scan_rate, "ms") + self.utcs[0]) # calculate the missing geo locations - missed_lons, missed_lats = self._compute_missing_lonlat(missed_utcs) + try: + missed_lons, missed_lats = self._compute_missing_lonlat(missed_utcs) + except NoTLEData as err: + LOG.warning('Cannot perform clock drift correction: %s', str(err)) + return # create arrays of lons and lats for interpolation. The locations # correspond to not yet corrected utcs, i.e. the time difference from diff --git a/pygac/reader.py b/pygac/reader.py index 5e56ac4..c72c57d 100644 --- a/pygac/reader.py +++ b/pygac/reader.py @@ -80,6 +80,10 @@ class ReaderError(ValueError): pass +class NoTLEData(IndexError): + """Raised if no TLE data available within time range.""" + + class Reader(six.with_metaclass(ABCMeta)): """Reader for Gac and Lac, POD and KLM data.""" @@ -690,7 +694,7 @@ def get_tle_lines(self): # Make sure the TLE we found is within the threshold delta_days = abs(sdate - dates[iindex]) / np.timedelta64(1, 'D') if delta_days > self.tle_thresh: - raise IndexError( + raise NoTLEData( "Can't find tle data for %s within +/- %d days around %s" % (self.spacecraft_name, self.tle_thresh, sdate)) @@ -707,7 +711,30 @@ def get_tle_lines(self): self.tle_lines = tle1, tle2 return tle1, tle2 - def get_sat_angles_without_tle(self): + def get_sat_angles(self): + """Get satellite angles. + + Returns: + Azimuth, elevation (degrees) + """ + try: + return self._get_sat_angles_with_tle() + except NoTLEData: + LOG.warning( + 'No TLE data available. Falling back to approximate ' + 'calculation of satellite angles.' + ) + return self._get_sat_angles_without_tle() + + def _get_sat_angles_with_tle(self): + tle1, tle2 = self.get_tle_lines() + orb = Orbital(self.spacecrafts_orbital[self.spacecraft_id], + line1=tle1, line2=tle2) + sat_azi, sat_elev = orb.get_observer_look(self.times[:, np.newaxis], + self.lons, self.lats, 0) + return sat_azi, sat_elev + + def _get_sat_angles_without_tle(self): """Get satellite angles using lat/lon from data to approximate satellite postition instead of TLE.""" from pyorbital.orbital import get_observer_look as get_observer_look_no_tle LOG.warning('Approximating satellite height to 850km (TIROS-N OSCAR)!') @@ -746,14 +773,7 @@ def get_angles(self): self.get_times() self.get_lonlat() times = self.times - try: - tle1, tle2 = self.get_tle_lines() - orb = Orbital(self.spacecrafts_orbital[self.spacecraft_id], - line1=tle1, line2=tle2) - sat_azi, sat_elev = orb.get_observer_look(times[:, np.newaxis], - self.lons, self.lats, 0) - except IndexError: - sat_azi, sat_elev = self.get_sat_angles_without_tle() + sat_azi, sat_elev = self.get_sat_angles() sat_zenith = 90 - sat_elev sun_zenith = astronomy.sun_zenith_angle(times[:, np.newaxis],
problem_statement:
Method `get_tle_lines()` raises IndexError when TLE lines are empty strings

In `pod_reader.py._compute_missing_lonlat()`: the function `get_tle_lines()` will raise an `IndexError` if the next available TLE line is not within a user-defined threshold of n days. Pygac should be able to handle this issue, as TLE data are gappy, and a fix has been introduced to deal with such cases. In fact, before a recent commit (3f0430687), this error was caught and the TLE lines were set to empty strings. One could undo that commit, i.e. put that function back into a try clause like so:

```
try:
    tle1, tle2 = self.get_tle_lines()
except IndexError:
    tle1 = tle2 = ''
```

This would at least allow pygac to continue. However, this causes new problems in `compute_pixels()`, which calls pyorbital, thus initializing the class `Tle`. `Tle` will be unhappy about TLE lines being empty strings and thus raise `IndexError`s. As I do not know the code structure well enough: does it make sense to just quit `compute_missing_lonlat` when there are no TLE data within the threshold? Or apply the TLE fix here?
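The patch above answers this with a dedicated exception type instead of empty strings: `get_tle_lines()` raises `NoTLEData`, a subclass of `IndexError`, and callers catch exactly that and fall back. A condensed sketch of the pattern from the patch:

```python
import logging

LOG = logging.getLogger(__name__)

class NoTLEData(IndexError):
    """Raised if no TLE data is available within the time range."""

def get_sat_angles(reader):
    # Prefer the TLE-based computation, but degrade gracefully on the
    # specific "no TLE data" condition; unrelated IndexErrors still
    # propagate, unlike a blanket `except IndexError`.
    try:
        return reader._get_sat_angles_with_tle()
    except NoTLEData:
        LOG.warning('No TLE data available. Falling back to approximate '
                    'calculation of satellite angles.')
        return reader._get_sat_angles_without_tle()
```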
created_at: 2021-09-09T15:32:17
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: maroba/multipoles
instance_id: maroba__multipoles-13
base_commit: 6f687c075b49486af8fda1157719074df3efcf29
patch:
diff --git a/multipoles/expansion.py b/multipoles/expansion.py index 1afc832..e1e609a 100644 --- a/multipoles/expansion.py +++ b/multipoles/expansion.py @@ -1,4 +1,5 @@ import numpy as np +import numbers from scipy.special import sph_harm from scipy.integrate import simpson @@ -262,8 +263,11 @@ def eval(self, xyz, l_max=None): Parameters ---------- - xyz: 3-tuple of floats + xyz: 3-tuple of (floats or arrays) The x,y,z coordinates of the points where to evaluate the expansion. + If three floats are given, then only one point with coordinates (x, y, z) + is evaluated. If three arrays xyz=(x, y, z) are given, the evaluation is done at each + point (x[i], y[i], z[i]). l_max: int, optional The maximum angular momentum to use for the expansion. If no value @@ -277,9 +281,38 @@ def eval(self, xyz, l_max=None): raise ValueError( "Multipole expansion only contains multipoles up to l_max={}.".format(self.l_max) ) - contribs = self._multipole_contribs(xyz) - return sum(contribs[:l_max + 1]) - + assert isinstance(xyz, tuple) and len(xyz) == 3 + + if any(hasattr(item, "__len__") for item in xyz): # some args are array-like + # All passed arrays must have same length. Validate this and determine this + # length. + len_array = None + for item in xyz: + if hasattr(item, "__len__"): + if len_array is None: + len_array = len(item) + else: + if len(item) != len_array: + raise ValueError(f"All arrays must have the same length. Received: {len_array} != {len(item)}") + + # If numbers are passed along with arrays, expand the numbers to constant + # arrays of the right size. + xyz = list(xyz) + for i in range(3): + if isinstance(xyz[i], numbers.Number): + xyz[i] = np.ones(len_array) * xyz[i] + xyz = tuple(xyz) + + # Now do the evaluation + return np.array( + [sum(self._multipole_contribs((x, y, z))[:l_max + 1]) for x, y, z in zip(*xyz)] + ) + elif all(isinstance(item, numbers.Number) for item in xyz): + # only numbers were passed; evaluate at single point + return sum(self._multipole_contribs(xyz)[:l_max + 1]) + else: + raise ValueError("Only triple of floats or tripe of arrays allowed.") + def _multipole_contribs(self, xyz): if not isinstance(xyz, np.ndarray): xyz = np.array(xyz)
problem_statement:
Feature request: evaluate multipole expansion on a single axis

### Discussed in https://github.com/maroba/multipoles/discussions/10

Originally posted by **nordic-node**, July 14, 2023

Hi,

instead of evaluating the multipole expansion point by point, like

```python
phi = np.array([mpe(r, 0, 0, l_max=l_max) for r in rr])
```

where `mpe` is a multipole expansion object, I would rather like to write something like this:

```python
phi = mpe(rr, 0, 0, l_max=l_max)
```

As far as I can see, the call function just allows passing a point, and the getitem function wants slices or masks on the same grid that was used for the charge distribution. Is there a possibility to do the same outside of that grid?

Cheers,
Sara
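The patch above implements this by letting `eval` accept a mix of scalars and arrays in the coordinate tuple and broadcasting the scalars. A condensed sketch of that coordinate handling (simplified from the patch, not the full implementation):

```python
import numbers
import numpy as np

def broadcast_xyz(xyz):
    # Collect the lengths of all array-like coordinates; they must agree.
    lengths = {len(c) for c in xyz if hasattr(c, '__len__')}
    if not lengths:
        return xyz  # three scalars: a single evaluation point
    if len(lengths) > 1:
        raise ValueError('All arrays must have the same length.')
    n = lengths.pop()
    # Expand scalars (e.g. the two zeros in mpe(rr, 0, 0)) to constant arrays.
    return tuple(np.full(n, c) if isinstance(c, numbers.Number) else np.asarray(c)
                 for c in xyz)
```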
hints_text:
Let me try this one, myself, please.

Sure, go ahead! If you need help, just ask. :-)
created_at: 2023-07-14T09:58:19
version: 0.0
FAIL_TO_PASS: []
PASS_TO_PASS: []

repo: innolitics/pacsman
instance_id: innolitics__pacsman-41
base_commit: 15dbaec595c748662bdb64127b0eae2993a27f03
patch:
diff --git a/README.md b/README.md index 0a8ca26..468ce2c 100644 --- a/README.md +++ b/README.md @@ -17,3 +17,27 @@ files. In addition to the supplied backends, you can write your own backend implementing the `BaseDicomClient`. This can be a useful interface layer for non-PACS systems such as a cloud storage system. + +## Development +Linting is done with `flake8` and testing with `pytest`. + +GitHub actions has automatic checks for both linting and tests, using [`tox`](https://tox.wiki/en/latest/) as the runner (see [`./tox.ini`](tox.ini)). To replicate this locally, install `tox`, then run `tox .` in the root of the project. + +> If you get an error about "InterpreterNotFound", make sure you have that version of Python installed and in the path (e.g., discoverable with `which python{version}`). Or use `--skip-missing-interpreters` to skip those. + + +### Remote DICOM Testing - Using Orthanc +Tests marked *remote* rely on a live DICOM server. For GitHub actions, an instance of Orthanc will be used, and you can re-use this service locally as well. + +```bash +docker-compose up -d orthanc + +# If this is the first time, and test data files have not yet been loaded +python3 pacsman/upload_test.py remote + +# If you have DCMTK installed, here is a quick test +echoscu localhost 4242 + +# Or, with pynetdicom +python -m pynetdicom echoscu localhost 4242 +``` diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..2f94faa --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,29 @@ +# For use with GitHub Actions, or for local testing +# https://book.orthanc-server.com/users/docker-osimis.html +# Legacy docs: https://osimis.atlassian.net/wiki/spaces/OKB/pages/26738689/How+to+use+osimis+orthanc+Docker+images (some of these settings are still used) +version: '3' +services: + orthanc: + image: osimis/orthanc:22.6.2 + restart: unless-stopped + ports: + # For local testing, if you want to poke around the web interface + - "8042:8042" + # DICOM port + # If we ever dockerize the entire repo, then host-binding like this would not be necessary, and we could move to + # using `orthanc:4242` + - "4242:4242" + environment: + # This is the default value, just documenting + DICOM_AET: "ORTHANC" + # Syntax is [AET, Address, Port, optional_Vendor_Patch] + DICOM_MODALITIES: | + { + "testing": [ "TEST", "localhost", 4242 ] + } + AC_AUTHENTICATION_ENABLED: "false" + VERBOSE_ENABLED: "true" + volumes: + - "orthanc-storage-volume:/var/lib/orthanc/db" +volumes: + orthanc-storage-volume:
Start Using Orthanc in GitHub Actions Currently this repo uses `dicomserver.co.uk` for automated remote-based tests. Unfortunately, it looks like things have changed significantly with what query retrieve models or attributes they accept and without a conformance statement, it is very difficult to track down the right set of things to change to get these to work. > The `integration_test.py::test_remote_patient_search` test is failing with no changes to the codebase Even if we could fix this test to work with the existing remote site, it seems like a good strategy to switch to using a GitHub actions hosted Orthanc instance to avoid any future flakiness. I'll be working on this shortly, to unblock a future PR.
2022-06-27T18:53:37
0.0
[]
[]
Project-MONAI/MONAILabel
Project-MONAI__MONAILabel-763
e47a94d37fef6b3ab90a037e8a3aa5c978fe9188
diff --git a/monailabel/scribbles/infer.py b/monailabel/scribbles/infer.py index 439d013cc..7350c6a65 100644 --- a/monailabel/scribbles/infer.py +++ b/monailabel/scribbles/infer.py @@ -9,37 +9,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, Orientationd, ScaleIntensityRanged, Spacingd +from monai.transforms import Compose, EnsureChannelFirstd, LoadImaged, ScaleIntensityRanged, Spacingd from monailabel.interfaces.tasks.infer import InferTask, InferType from monailabel.scribbles.transforms import ( AddBackgroundScribblesFromROId, ApplyGraphCutOptimisationd, MakeISegUnaryd, + MakeLikelihoodFromScribblesGMMd, MakeLikelihoodFromScribblesHistogramd, ) from monailabel.transform.post import BoundingBoxd, Restored -class HistogramBasedGraphCut(InferTask): +class ScribblesLikelihoodInferTask(InferTask): """ - Defines histogram-based GraphCut task for Generic segmentation from the following paper: - - Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning." - IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf) - - This task takes as input 1) original image volume and 2) scribbles from user - indicating foreground and background regions. A likelihood volume is generated using histogram method. - User-scribbles are incorporated using Equation 7 on page 4 of the paper. - - numpymaxflow's GraphCut layer is used to optimise Equation 5 from the paper, where unaries come from Equation 7 - and pairwise is the original input volume. + Defines a generic Scribbles Likelihood based segmentor infertask """ def __init__( self, dimension=3, - description="A post processing step with histogram-based GraphCut for Generic segmentation", + description="A post processing step with likelihood + GraphCut for Generic segmentation", intensity_range=(-300, 200, 0.0, 1.0, True), pix_dim=(2.5, 2.5, 5.0), lamda=1.0, @@ -78,10 +69,7 @@ def pre_transforms(self, data): scribbles_bg_label=self.scribbles_bg_label, scribbles_fg_label=self.scribbles_fg_label, ), - # at the moment optimisers are bottleneck taking a long time, - # therefore scaling non-isotropic with big spacing Spacingd(keys=["image", "label"], pixdim=self.pix_dim, mode=["bilinear", "nearest"]), - Orientationd(keys=["image", "label"], axcodes="RAS"), ScaleIntensityRanged( keys="image", a_min=self.intensity_range[0], @@ -90,41 +78,153 @@ def pre_transforms(self, data): b_max=self.intensity_range[3], clip=self.intensity_range[4], ), - MakeLikelihoodFromScribblesHistogramd( + ] + + def inferer(self, data): + raise NotImplementedError("Inferer not implemented in ScribblesLikelihoodInferTask") + + def post_transforms(self, data): + return [ + # unary term maker + MakeISegUnaryd( image="image", + logits="prob", scribbles="label", - post_proc_label="prob", + unary="unary", scribbles_bg_label=self.scribbles_bg_label, scribbles_fg_label=self.scribbles_fg_label, - normalise=True, ), + # optimiser + ApplyGraphCutOptimisationd( + unary="unary", + pairwise="image", + post_proc_label="pred", + lamda=self.lamda, + sigma=self.sigma, + ), + Restored(keys="pred", ref_image="image"), + BoundingBoxd(keys="pred", result="result", bbox="bbox"), ] + +class HistogramBasedGraphCut(ScribblesLikelihoodInferTask): + """ + Defines histogram-based GraphCut task for Generic segmentation from the following paper: + + Wang, Guotai, et al. 
"Interactive medical image segmentation using deep learning with image-specific fine tuning." + IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf) + + This task takes as input 1) original image volume and 2) scribbles from user + indicating foreground and background regions. A likelihood volume is generated using histogram method. + User-scribbles are incorporated using Equation 7 on page 4 of the paper. + + numpymaxflow's GraphCut layer is used to optimise Equation 5 from the paper, where unaries come from Equation 7 + and pairwise is the original input volume. + """ + + def __init__( + self, + dimension=3, + description="A post processing step with histogram-based GraphCut for Generic segmentation", + intensity_range=(-300, 200, 0.0, 1.0, True), + pix_dim=(2.5, 2.5, 5.0), + lamda=1.0, + sigma=0.1, + num_bins=64, + labels=None, + config=None, + ): + if config: + config.update({"num_bins": num_bins}) + else: + config = {"num_bins": num_bins} + + super().__init__( + dimension=dimension, + description=description, + intensity_range=intensity_range, + pix_dim=pix_dim, + lamda=lamda, + sigma=sigma, + labels=labels, + config=config, + ) + self.num_bins = num_bins + def inferer(self, data): return Compose( [ - # unary term maker - MakeISegUnaryd( + MakeLikelihoodFromScribblesHistogramd( image="image", - logits="prob", scribbles="label", - unary="unary", + post_proc_label="prob", scribbles_bg_label=self.scribbles_bg_label, scribbles_fg_label=self.scribbles_fg_label, - ), - # optimiser - ApplyGraphCutOptimisationd( - unary="unary", - pairwise="image", - post_proc_label="pred", - lamda=self.lamda, - sigma=self.sigma, + num_bins=self.num_bins, + normalise=True, ), ] ) - def post_transforms(self, data): - return [ - Restored(keys="pred", ref_image="image"), - BoundingBoxd(keys="pred", result="result", bbox="bbox"), - ] + +class GMMBasedGraphCut(ScribblesLikelihoodInferTask): + """ + Defines Gaussian Mixture Model (GMM) based task for Generic segmentation from the following papers: + + Rother, Carsten, Vladimir Kolmogorov, and Andrew Blake. "" GrabCut" interactive foreground extraction using iterated graph cuts." + ACM transactions on graphics (TOG) 23.3 (2004): 309-314. + + Wang, Guotai, et al. "Interactive medical image segmentation using deep learning with image-specific fine tuning." + IEEE transactions on medical imaging 37.7 (2018): 1562-1573. (preprint: https://arxiv.org/pdf/1710.04043.pdf) + + This task takes as input 1) original image volume and 2) scribbles from user + indicating foreground and background regions. A likelihood volume is generated using GMM method. + User-scribbles are incorporated using Equation 7 on page 4 from Guotai et al. + + numpymaxflow's GraphCut layer is used to optimise Equation 5 from Guotai et al., where unaries come from Equation 7 + and pairwise is the original input volume. 
+ """ + + def __init__( + self, + dimension=3, + description="A post processing step with GMM-based GraphCut for Generic segmentation", + intensity_range=(-300, 200, 0.0, 1.0, True), + pix_dim=(2.5, 2.5, 5.0), + lamda=1.0, + sigma=0.1, + num_mixtures=20, + labels=None, + config=None, + ): + if config: + config.update({"num_mixtures": num_mixtures}) + else: + config = {"num_mixtures": num_mixtures} + + super().__init__( + dimension=dimension, + description=description, + intensity_range=intensity_range, + pix_dim=pix_dim, + lamda=lamda, + sigma=sigma, + labels=labels, + config=config, + ) + self.num_mixtures = num_mixtures + + def inferer(self, data): + return Compose( + [ + MakeLikelihoodFromScribblesGMMd( + image="image", + scribbles="label", + post_proc_label="prob", + scribbles_bg_label=self.scribbles_bg_label, + scribbles_fg_label=self.scribbles_fg_label, + num_mixtures=self.num_mixtures, + normalise=False, + ), + ] + ) diff --git a/monailabel/scribbles/transforms.py b/monailabel/scribbles/transforms.py index 13a7407a2..1cff6b58b 100644 --- a/monailabel/scribbles/transforms.py +++ b/monailabel/scribbles/transforms.py @@ -20,7 +20,7 @@ from monailabel.transform.writer import Writer -from .utils import make_iseg_unary, make_likelihood_image_histogram, maxflow +from .utils import make_iseg_unary, make_likelihood_image_gmm, make_likelihood_image_histogram, maxflow logger = logging.getLogger(__name__) @@ -161,6 +161,7 @@ def __init__( post_proc_label: str = "prob", scribbles_bg_label: int = 2, scribbles_fg_label: int = 3, + num_bins: int = 64, normalise: bool = True, ) -> None: super().__init__(meta_key_postfix) @@ -169,11 +170,15 @@ def __init__( self.scribbles_bg_label = scribbles_bg_label self.scribbles_fg_label = scribbles_fg_label self.post_proc_label = post_proc_label + self.num_bins = num_bins self.normalise = normalise def __call__(self, data): d = dict(data) + # attempt to fetch algorithmic parameters from app if present + self.num_bins = d.get("num_bins", self.num_bins) + # load scribbles idx from labels_info (if available) self._set_scribbles_idx_from_labelinfo(d) @@ -190,6 +195,62 @@ def __call__(self, data): scribbles, scribbles_bg_label=self.scribbles_bg_label, scribbles_fg_label=self.scribbles_fg_label, + num_bins=self.num_bins, + return_label=False, + ) + + if self.normalise: + post_proc_label = self._normalise_logits(post_proc_label, axis=0) + + d[self.post_proc_label] = post_proc_label + + return d + + +class MakeLikelihoodFromScribblesGMMd(InteractiveSegmentationTransform): + def __init__( + self, + image: str, + scribbles: str, + meta_key_postfix: str = "meta_dict", + post_proc_label: str = "prob", + scribbles_bg_label: int = 2, + scribbles_fg_label: int = 3, + num_mixtures: int = 20, + normalise: bool = False, + ) -> None: + super().__init__(meta_key_postfix) + self.image = image + self.scribbles = scribbles + self.scribbles_bg_label = scribbles_bg_label + self.scribbles_fg_label = scribbles_fg_label + self.post_proc_label = post_proc_label + self.num_mixtures = num_mixtures + self.normalise = normalise + + def __call__(self, data): + d = dict(data) + + # attempt to fetch algorithmic parameters from app if present + self.num_mixtures = d.get("num_mixtures", self.num_mixtures) + + # load scribbles idx from labels_info (if available) + self._set_scribbles_idx_from_labelinfo(d) + + # copy affine meta data from image input + d = self._copy_affine(d, src=self.image, dst=self.post_proc_label) + + # read relevant terms from data + image = self._fetch_data(d, self.image) + 
scribbles = self._fetch_data(d, self.scribbles) + + # make likelihood image + post_proc_label = make_likelihood_image_gmm( + image, + scribbles, + scribbles_bg_label=self.scribbles_bg_label, + scribbles_fg_label=self.scribbles_fg_label, + num_mixtures=self.num_mixtures, return_label=False, ) diff --git a/monailabel/scribbles/utils.py b/monailabel/scribbles/utils.py index e7d12aa1a..9864dee46 100644 --- a/monailabel/scribbles/utils.py +++ b/monailabel/scribbles/utils.py @@ -12,6 +12,8 @@ import numpy as np import numpymaxflow +import torch +from monai.networks.layers import GaussianMixtureModel logger = logging.getLogger(__name__) @@ -137,7 +139,9 @@ def expand_pseudocounts(alpha): return bg_hist.astype(np.float32), fg_hist.astype(np.float32), fg_bin_edges.astype(np.float32) -def make_likelihood_image_histogram(image, scrib, scribbles_bg_label, scribbles_fg_label, return_label=False): +def make_likelihood_image_histogram( + image, scrib, scribbles_bg_label, scribbles_fg_label, num_bins=64, return_label=False +): # normalise image in range [0, 1] if needed min_img = np.min(image) max_img = np.max(image) @@ -146,7 +150,7 @@ def make_likelihood_image_histogram(image, scrib, scribbles_bg_label, scribbles_ # generate histograms for background/foreground bg_hist, fg_hist, bin_edges = make_histograms( - image, scrib, scribbles_bg_label, scribbles_fg_label, alpha_bg=1, alpha_fg=1, bins=64 + image, scrib, scribbles_bg_label, scribbles_fg_label, alpha_bg=1, alpha_fg=1, bins=num_bins ) # lookup values for each voxel for generating background/foreground probabilities @@ -160,3 +164,77 @@ def make_likelihood_image_histogram(image, scrib, scribbles_bg_label, scribbles_ retprob = np.expand_dims(np.argmax(retprob, axis=0), axis=0).astype(np.float32) return retprob + + +def learn_and_apply_gmm_monai(image, scrib, scribbles_bg_label, scribbles_fg_label, num_mixtures): + # this function is limited to binary segmentation at the moment + n_classes = 2 + + # make trimap + trimap = np.zeros_like(scrib).astype(np.int32) + + # fetch anything that is not scribbles + not_scribbles = ~((scrib == scribbles_bg_label) | (scrib == scribbles_fg_label)) + + # set these to -1 == unused + trimap[not_scribbles] = -1 + + # set background scrib to 0 + trimap[scrib == scribbles_bg_label] = 0 + # set foreground scrib to 1 + trimap[scrib == scribbles_fg_label] = 1 + + # add empty channel to image and scrib to be inline with pytorch layout + image = np.expand_dims(image, axis=0) + trimap = np.expand_dims(trimap, axis=0) + + # transfer everything to pytorch tensor + # we use CUDA as GMM from MONAI is only available on CUDA atm (29/04/2022) + # if no cuda device found, then exit now + if not torch.cuda.is_available(): + raise OSError("Unable to find CUDA device, check your torch/monai installation") + + device = "cuda" + image = torch.from_numpy(image).type(torch.float32).to(device) + trimap = torch.from_numpy(trimap).type(torch.int32).to(device) + + # initialise our GMM + gmm = GaussianMixtureModel( + image.size(1), + mixture_count=n_classes, + mixture_size=num_mixtures, + verbose_build=False, + ) + + # learn gmm from image and trimap + gmm.learn(image, trimap) + + # apply gmm on image + gmm_output = gmm.apply(image) + + # return output + return gmm_output.squeeze(0).cpu().numpy() + + +def make_likelihood_image_gmm( + image, + scrib, + scribbles_bg_label, + scribbles_fg_label, + num_mixtures=20, + return_label=False, +): + # learn gmm and apply to image, return output label prob + retprob = learn_and_apply_gmm_monai( + image=image, + 
scrib=scrib, + scribbles_bg_label=scribbles_bg_label, + scribbles_fg_label=scribbles_fg_label, + num_mixtures=num_mixtures, + ) + + # if needed, convert to discrete labels instead of probability + if return_label: + retprob = np.expand_dims(np.argmax(retprob, axis=0), axis=0).astype(np.float32) + + return retprob diff --git a/requirements-dev.txt b/requirements-dev.txt index 3f17cb8ae..44da411c4 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -18,7 +18,6 @@ black isort pytype; sys_platform != 'win32' mypy -ninja parameterized types-PyYAML types-filelock diff --git a/requirements.txt b/requirements.txt index c4ac60408..e5be37fb1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,5 +26,6 @@ opencv-python-headless==4.5.5.64 Shapely==1.8.1.post1 girder_client==3.1.8 numpymaxflow==0.0.2 +ninja==1.10.2.3 #sudo apt-get install openslide-tools -y diff --git a/sample-apps/radiology/main.py b/sample-apps/radiology/main.py index e1e401918..728aae5c4 100644 --- a/sample-apps/radiology/main.py +++ b/sample-apps/radiology/main.py @@ -24,7 +24,7 @@ from monailabel.interfaces.tasks.scoring import ScoringMethod from monailabel.interfaces.tasks.strategy import Strategy from monailabel.interfaces.tasks.train import TrainTask -from monailabel.scribbles.infer import HistogramBasedGraphCut +from monailabel.scribbles.infer import GMMBasedGraphCut, HistogramBasedGraphCut from monailabel.tasks.activelearning.random import Random from monailabel.tasks.infer.deepgrow_pipeline import InferDeepgrowPipeline from monailabel.utils.others.class_utils import get_class_names @@ -122,6 +122,15 @@ def init_infers(self) -> Dict[str, InferTask]: pix_dim=(2.5, 2.5, 5.0), lamda=1.0, sigma=0.1, + num_bins=64, + labels=task_config.labels, + ), + "GMM+GraphCut": GMMBasedGraphCut( + intensity_range=(-300, 200, 0.0, 1.0, True), + pix_dim=(2.5, 2.5, 5.0), + lamda=5.0, + sigma=0.5, + num_mixtures=20, labels=task_config.labels, ), }
Scribbles - Implement a likelihood model using Gaussian Mixture Model We currently have a histogram-based likelihood model for estimating appearance-based proposals for foreground and background. While this works for most of the sample apps, it may be more relevant to implement and include more advanced likelihood models (see some discussion here: https://github.com/Project-MONAI/MONAILabel/issues/432#issuecomment-932309545). For example, the original GrabCut paper (https://www.microsoft.com/en-us/research/wp-content/uploads/2004/08/siggraph04-grabcut.pdf) uses a Gaussian Mixture Model (GMM), which may be useful and a bit more advanced than histograms. There are a number of implementations for GMMs out there; perhaps the most relevant ones are the GPU implementation from MONAI and scikit-learn's CPU implementation: - MONAI (GPU): https://docs.monai.io/en/latest/networks.html?highlight=gmm#monai.networks.layers.GaussianMixtureModel - Scikit-learn (CPU): https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html GMM can also naturally extend to multi-class problems, so it will be relevant when we extend scribbles to multi-class.
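For reference, a minimal sketch of what a scribble-driven GMM likelihood could look like with scikit-learn's CPU implementation (the issue's second pointer). The label values 2/3 match the `scribbles_bg_label`/`scribbles_fg_label` defaults in the patch above, but the function name and shapes are illustrative assumptions, not MONAILabel's actual API:

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def gmm_likelihood(image, scribbles, bg_label=2, fg_label=3, num_mixtures=20):
    # One intensity feature per voxel; multi-channel images would need more columns.
    voxels = image.reshape(-1, 1)
    log_likes = []
    for label in (bg_label, fg_label):
        # Assumes each class has at least num_mixtures scribbled voxels to fit on.
        samples = image[scribbles == label].reshape(-1, 1)
        gmm = GaussianMixture(n_components=num_mixtures).fit(samples)
        # Per-voxel log-likelihood under this class's mixture.
        log_likes.append(gmm.score_samples(voxels).reshape(image.shape))
    log_likes = np.stack(log_likes)      # (2, ...) background/foreground
    log_likes -= log_likes.max(axis=0)   # stabilise the exponentiation
    probs = np.exp(log_likes)
    return probs / probs.sum(axis=0)     # normalised probability-like map
```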
The underpinning mixture model feature probably belongs to MONAI rather than MONAI Label. That said, it makes sense to have it accessible for MONAI Label. Related pointers: - https://github.com/Project-MONAI/MONAI/issues/1601 - https://github.com/ldeecke/gmm-torch - https://pytorch.org/docs/stable/distributions.html#mixturesamefamily By the way, this might be interesting to represent the histogram-based likelihood: https://pytorch.org/docs/stable/distributions.html#categorical Thanks @tvercaut - I think most of the transforms in ./monailabel/scribbles/transforms.py belong to MONAI. They are kept here for updating/improving as much as possible before they are moved. In regard to GMM in MONAILabel, how I see it for now is that this will primarily be a transform that uses the GMM from MONAI. Once this is working as expected and is in final shape, we can move it to MONAI. Once we have GMM, GrabCut may be doable in MONAILabel. I am aware that this may be achieving something similar to the GrabCut issue in MONAI. Has there been any update on that after GMM? Thanks for sharing the PyTorch distribution function. I will check it out in the coming days. @SachidanandAlle resolving this issue will give us a new infer method for scribbles. It may potentially address some limitations of the histogram method, especially in ambiguous cases. Do you want me to include this alongside the Histogram+GraphCut method we have in the radiology app? Makes sense... already some were complaining about empty results for certain images... Also, multiple options will help others to follow and contribute more such methods. Okay, many thanks! I am looking into this now and will add this once it is ready.
2022-04-29T17:22:53
0.0
[]
[]
Project-MONAI/MONAILabel
Project-MONAI__MONAILabel-738
0479459d7036051bea30f4268cc79327a5d7af0b
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml index d7a0e5b1d..1e2a50885 100644 --- a/.github/workflows/pythonapp.yml +++ b/.github/workflows/pythonapp.yml @@ -82,7 +82,7 @@ jobs: run: | sudo apt-get install openslide-tools -y python -m pip install --user --upgrade pip setuptools wheel - python -m pip install torch>=1.5 torchvision + python -m pip install torch>=1.6 torchvision - name: Build Package run: | python setup.py sdist bdist_wheel diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0217981ca..c386ac3c4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -37,7 +37,7 @@ jobs: run: | sudo apt-get install openslide-tools -y python -m pip install --user --upgrade pip setuptools wheel - python -m pip install torch>=1.5 torchvision + python -m pip install torch>=1.6 torchvision - name: Build Package run: | python setup.py sdist bdist_wheel --build-number $(date +'%Y%m%d%H%M') diff --git a/monailabel/__init__.py b/monailabel/__init__.py index ed4f5c9d3..4f036d772 100644 --- a/monailabel/__init__.py +++ b/monailabel/__init__.py @@ -15,7 +15,7 @@ from ._version import get_versions PY_REQUIRED_MAJOR = 3 -PY_REQUIRED_MINOR = 6 +PY_REQUIRED_MINOR = 7 version_dict = get_versions() __version__ = version_dict.get("version", "0+unknown") diff --git a/pyproject.toml b/pyproject.toml index 9ecb20f12..de02d8cd2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "wheel", "setuptools", - "torch>=1.5", + "torch>=1.6", "ninja", ]
drop python 3.6 support This is a follow-up to #378 and is possible as the Slicer client upgraded from Python 3.6 to Python 3.9 (https://github.com/Project-MONAI/MONAILabel/pull/378#issuecomment-1021702308). MONAI has dropped Python 3.6 support (https://github.com/Project-MONAI/MONAI/commit/e655b4e4898c314cbc9d4d6da8f70ff1162cab7e). PyTorch dropped Python 3.6 starting in version 1.11.0 (https://github.com/pytorch/pytorch/commit/dc5cda0cca429a3080f63e7b30b3e87d18df8601). Python 3.6's official end-of-support date was 23 Dec 2021. Signed-off-by: James Butler <[email protected]>
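The `PY_REQUIRED_MAJOR`/`PY_REQUIRED_MINOR` constants bumped in the patch are presumably consumed by a startup check along these lines; this is an assumption for illustration, not MONAILabel's actual code:

```python
import sys

PY_REQUIRED_MAJOR = 3
PY_REQUIRED_MINOR = 7  # bumped from 6 in this change

# Hypothetical guard: refuse to start on interpreters older than 3.7.
if sys.version_info < (PY_REQUIRED_MAJOR, PY_REQUIRED_MINOR):
    raise RuntimeError(
        f"Python {PY_REQUIRED_MAJOR}.{PY_REQUIRED_MINOR}+ is required, "
        f"found {sys.version_info.major}.{sys.version_info.minor}"
    )
```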
2022-04-10T12:15:08
0.0
[]
[]
holoviz/datashader
holoviz__datashader-1326
846bf0851e076f8f6d5a681f04e2d11c2985a7bc
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index bcf3536df..e16052d57 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -47,7 +47,7 @@ jobs: - name: fetch data run: | conda activate test-environment - datashader fetch-data --path=examples --force + python scripts/download_data.py - name: generate rst run: | conda activate test-environment diff --git a/examples/user_guide/8_Polygons.ipynb b/examples/user_guide/8_Polygons.ipynb index cc9a6bf81..fddd00691 100644 --- a/examples/user_guide/8_Polygons.ipynb +++ b/examples/user_guide/8_Polygons.ipynb @@ -166,7 +166,7 @@ "metadata": {}, "source": [ "## Realistic Example\n", - "Here is a more realistic example, plotting the unemployment rate of the counties in Texas. " + "Here is a more realistic example, plotting the unemployment rate of the counties in Texas. To run you need to run `bokeh sampledata` or install the package `bokeh_sampledata`. " ] }, { @@ -175,11 +175,6 @@ "metadata": {}, "outputs": [], "source": [ - "import bokeh.sampledata\n", - "try:\n", - " from bokeh.sampledata.us_counties import data # noqa\n", - "except:\n", - " bokeh.sampledata.download()\n", "from bokeh.sampledata.us_counties import data as counties\n", "from bokeh.sampledata.unemployment import data as unemployment\n", "\n", diff --git a/scripts/download_data.py b/scripts/download_data.py new file mode 100644 index 000000000..9b7a494a9 --- /dev/null +++ b/scripts/download_data.py @@ -0,0 +1,23 @@ +from contextlib import suppress + +import pyct.cmd +from packaging.version import Version + +pyct.cmd.fetch_data(name="data", path="examples", datasets="datasets.yml") + + +with suppress(ImportError): + import bokeh + + # Replaced with bokeh_sampledata in 3.5 + if Version(bokeh.__version__) < Version("3.5"): + import bokeh.sampledata + + bokeh.sampledata.download() + + +with suppress(ImportError): + import geodatasets as gds + + gds.get_path("geoda.natregimes") + gds.get_path("nybb") diff --git a/setup.py b/setup.py index 78a09d0b9..f5c184250 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,11 @@ 'spatialpandas', ] +if sys.version_info[:2] >= (3, 10): + examples += [ + 'bokeh_sampledata', + ] + # Numpy 2 packages, should be removed when all commented out packages works with Numpy 2 numpy2 = [ 'numba ==0.60.0rc1', diff --git a/tox.ini b/tox.ini index a6bdc77b0..477ff0e3b 100644 --- a/tox.ini +++ b/tox.ini @@ -32,8 +32,7 @@ commands = pytest datashader -k "not benchmarks and not test_tiles" --cov=./data [_examples] description = Test that default examples run deps = .[examples, tests] -commands = datashader fetch-data --path=examples --use-test-data - pytest --nbsmoke-run -k ".ipynb" +commands = pytest --nbsmoke-run -k ".ipynb" # could add more, to test types of example other than nbs [_examples_extra]
Polygon User Guide Long CSV Download Output Under the "[Realistic Example](https://datashader.org/user_guide/Polygons.html#realistic-example)" section in the Polygon User Guide, the CSV download output takes up the majority of the page. ![image](https://github.com/holoviz/datashader/assets/67855069/db184602-9a3d-4812-9f80-66981a923856) It continues like this for about 80% of the entire page.
Thanks for the alert! Something changed about how chatty it was compared to when that page was first built. Gotta fix that!
2024-05-01T15:40:24
0.0
[]
[]
holoviz/datashader
holoviz__datashader-1271
ea163e9600a2659be3a8763982bdd0f57a035608
diff --git a/datashader/compiler.py b/datashader/compiler.py index b5c8b5f6d..bf55ac53f 100644 --- a/datashader/compiler.py +++ b/datashader/compiler.py @@ -335,6 +335,7 @@ def make_append(bases, cols, calls, glyph, antialias): subscript = None prev_local_cuda_mutex = False categorical_args = {} # Reuse categorical arguments if used in more than one reduction + where_selectors = {} # Reuse where.selector if used more than once in a summary reduction def get_cuda_mutex_call(lock: bool) -> str: func = "cuda_mutex_lock" if lock else "cuda_mutex_unlock" @@ -379,9 +380,18 @@ def get_cuda_mutex_call(lock: bool) -> str: # Avoid unnecessary mutex unlock and lock cycle body.pop() - where_reduction = len(bases) == 1 and bases[0].is_where() - if where_reduction: - update_index_arg_name = next(names) + is_where = len(bases) == 1 and bases[0].is_where() + if is_where: + where_reduction = bases[0] + if isinstance(where_reduction, by): + where_reduction = where_reduction.reduction + + selector_hash = hash(where_reduction.selector) + update_index_arg_name = where_selectors.get(selector_hash, None) + new_selector = update_index_arg_name is None + if new_selector: + update_index_arg_name = next(names) + where_selectors[selector_hash] = update_index_arg_name args.append(update_index_arg_name) # where reduction needs access to the return of the contained @@ -389,7 +399,10 @@ def get_cuda_mutex_call(lock: bool) -> str: prev_body = body.pop() if local_cuda_mutex and not prev_local_cuda_mutex: body.append(get_cuda_mutex_call(True)) - body.append(f'{update_index_arg_name} = {prev_body}') + if new_selector: + body.append(f'{update_index_arg_name} = {prev_body}') + else: + body.append(prev_body) # If nan_check_column is defined then need to check if value of # correct row in that column is NaN and if so do nothing. This
Bug in summary reduction using multiple where reductions with the same selector Using a `summary` reduction containing multiple `where` reductions that use the same `selector` gives an error. Code to reproduce: ```python import datashader as ds import pandas as pd df = pd.DataFrame(dict(x=[0, 1], y=[0, 1], value=[0, 1], other=[1, 0])) reduction = ds.summary( name1=ds.where(ds.max("value"), "other"), name2=ds.where(ds.max("value")), ) canvas = ds.Canvas(plot_height=3, plot_width=3) agg = canvas.points(source=df, x="x", y="y", agg=reduction) ``` Error produced is ``` File "/Users/iant/github/datashader/datashader/compiler.py", line 137, in compile_components append, any_uses_cuda_mutex = make_append(bases, cols, calls, glyph, antialias) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/compiler.py", line 415, in make_append exec(code, namespace) File "<string>", line 4 _10 = _7(x, y, _1, _4[i0], _8) ^ IndentationError: expected an indented block after 'if' statement on line 3 ``` The cause of the error is the `ds.max("value")` which is used in both `where` reductions and only needs to be calculated once, but its reuse in the second `where` is not correct.
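Stripped to its essence, the fix in the patch above makes the generated `append()` code reuse a single update-index variable per distinct selector, keyed by the selector's hash, instead of minting (and re-evaluating) one per `where`. A simplified sketch of that idea, with `names` standing in for the generator of fresh argument names used by `make_append`:

```python
# Simplified from the patch: one update-index variable per distinct selector.
where_selectors = {}  # hash(selector) -> generated variable name

def update_index_name(selector, names):
    """Return the variable name for this selector, minting one on first use."""
    key = hash(selector)
    name = where_selectors.get(key)
    if name is None:
        name = next(names)           # first `where` using this selector
        where_selectors[key] = name  # later `where`s share the same variable
    return name
```

With this in place, `ds.where(ds.max("value"), "other")` and `ds.where(ds.max("value"))` resolve to the same generated variable, so the shared `ds.max("value")` is evaluated only once.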
2023-08-16T10:21:26
0.0
[]
[]
holoviz/datashader
holoviz__datashader-1254
7bff9b440b7186da14d4bad1d6ae4c8f091d113d
diff --git a/datashader/compiler.py b/datashader/compiler.py index ffa69f214..a2c110bc5 100644 --- a/datashader/compiler.py +++ b/datashader/compiler.py @@ -116,8 +116,9 @@ def compile_components(agg, schema, glyph, *, antialias=False, cuda=False, parti temps = list(pluck(4, calls)) combine_temps = list(pluck(5, calls)) + categorical = agg.is_categorical() create = make_create(bases, dshapes, cuda) - append, uses_cuda_mutex = make_append(bases, cols, calls, glyph, isinstance(agg, by), antialias) + append, uses_cuda_mutex = make_append(bases, cols, calls, glyph, categorical, antialias) info = make_info(cols, uses_cuda_mutex) combine = make_combine(bases, dshapes, temps, combine_temps, antialias, cuda, partitioned) finalize = make_finalize(bases, agg, schema, cuda, partitioned) diff --git a/datashader/reductions.py b/datashader/reductions.py index d1d575853..9fd440297 100644 --- a/datashader/reductions.py +++ b/datashader/reductions.py @@ -324,6 +324,10 @@ def validate(self, in_dshape): def inputs(self): return (extract(self.column),) + def is_categorical(self): + """Return ``True`` if this is or contains a categorical reduction.""" + return False + def is_where(self): """Return ``True`` if this is a ``where`` reduction or directly wraps a where reduction.""" @@ -686,6 +690,9 @@ def out_dshape(self, input_dshape, antialias, cuda, partitioned): def inputs(self): return (self.preprocess,) + def is_categorical(self): + return True + def is_where(self): return self.reduction.is_where() @@ -1996,8 +2003,17 @@ def __init__(self, **kwargs): def __hash__(self): return hash((type(self), tuple(self.keys), tuple(self.values))) + def is_categorical(self): + for v in self.values: + if v.is_categorical(): + return True + return False + def uses_row_index(self, cuda, partitioned): - return any(v.uses_row_index(cuda, partitioned) for v in self.values) + for v in self.values: + if v.uses_row_index(cuda, partitioned): + return True + return False def validate(self, input_dshape): for v in self.values:
Cannot use by reduction within a summary reduction Aggregations that use a `by` reduction within a `summary`, such as `ds.summary(name=ds.by("cat"))` do not work. Tested with `datashader 0.15.1`. Test code: ```python import datashader as ds import numpy as np import pandas as pd x = np.arange(2) df = pd.DataFrame(dict( y_from = [0.0, 1.0, 0.0, 1.0, 0.0], y_to = [0.0, 1.0, 1.0, 0.0, 0.5], cat = ['a', 'b', 'a', 'b', 'a'], )) df["cat"] = df["cat"].astype("category") canvas = ds.Canvas(10, 10) agg = canvas.line( source=df, x=x, y=["y_from", "y_to"], axis=1, agg=ds.summary(other=ds.by("cat")), ) ``` Error produced is: ``` <snip> Internal error at <numba.core.typeinfer.CallConstraint object at 0x124115890>. too many positional arguments During: resolving callee type: type(CPUDispatcher(<function count._append_no_field at 0x1217eed40>)) <snip> ``` If `numba` is disabled the error message is more informative: ``` <snip> File "/Users/iant/.miniconda3/envs/ds0.15.1_3.11/lib/python3.11/site-packages/datashader/glyphs/line.py", line 897, in _bresenham append(i, x0, y0, *aggs_and_cols) File "<string>", line 2, in append TypeError: count._append_no_field() takes 3 positional arguments but 4 were given ``` The same error occurs using `datashader` `0.13.0` and `0.14.0`, so I assume it has never worked.
2023-07-24T09:45:12
0.0
[]
[]
holoviz/datashader
holoviz__datashader-1237
5a8982089c95958b6bb08be93f5f4eb6d28e1846
diff --git a/datashader/compiler.py b/datashader/compiler.py index bfdd01dcb..4bbdf15f8 100644 --- a/datashader/compiler.py +++ b/datashader/compiler.py @@ -6,7 +6,7 @@ import numpy as np import xarray as xr -from .reductions import SpecialColumn, by, category_codes, summary, where +from .reductions import SpecialColumn, by, category_codes, summary from .utils import isnull, ngjit try: @@ -206,12 +206,12 @@ def make_append(bases, cols, calls, glyph, categorical, antialias): namespace[func_name] = func args = [arg_lk[i] for i in bases] if categorical and isinstance(cols[0], category_codes): - categorical_arg = arg_lk[cols[0]] + categorical_arg = categorical_arg or arg_lk[cols[0]] args.extend('{0}[{1}]'.format(arg_lk[col], subscript) for col in cols[1:]) elif ndims is None: args.extend('{0}'.format(arg_lk[i]) for i in cols) elif categorical: - categorical_arg = arg_lk[cols[0]] + categorical_arg = categorical_arg or arg_lk[cols[0]] args.extend('{0}[{1}][1]'.format(arg_lk[i], subscript) for i in cols) else: @@ -226,7 +226,7 @@ def make_append(bases, cols, calls, glyph, categorical, antialias): # Avoid unnecessary mutex unlock and lock cycle body.pop() - where_reduction = len(bases) == 1 and isinstance(bases[0], where) + where_reduction = len(bases) == 1 and bases[0].is_where() if where_reduction: update_index_arg_name = next(names) args.append(update_index_arg_name) @@ -292,11 +292,14 @@ def make_append(bases, cols, calls, glyph, categorical, antialias): def make_combine(bases, dshapes, temps, combine_temps, antialias, cuda, partitioned): + # Lookup of base Reduction to argument index. arg_lk = dict((k, v) for (v, k) in enumerate(bases)) + # Also need lookup of by.reduction as the contained reduction is not aware of its wrapper. + arg_lk.update(dict((k.reduction, v) for (v, k) in enumerate(bases) if isinstance(k, by))) # where._combine() deals with combine of preceding reduction so exclude # it from explicit combine calls. - base_is_where = [isinstance(b, where) for b in bases] + base_is_where = [b.is_where() for b in bases] next_base_is_where = base_is_where[1:] + [False] calls = [(None if n else b._build_combine(d, antialias, cuda, partitioned), [arg_lk[i] for i in (b,) + t + ct]) for (b, d, t, ct, n) in zip(bases, dshapes, temps, combine_temps, next_base_is_where)] diff --git a/datashader/reductions.py b/datashader/reductions.py index 94f44f35e..2f4628c25 100644 --- a/datashader/reductions.py +++ b/datashader/reductions.py @@ -11,6 +11,7 @@ from datashader.enums import AntialiasCombination from datashader.utils import isminus1, isnull from numba import cuda as nb_cuda +from numba.typed import List try: from datashader.transfer_functions._cuda_utils import ( @@ -323,6 +324,11 @@ def validate(self, in_dshape): def inputs(self): return (extract(self.column),) + def is_where(self): + """Return ``True`` if this is a ``where`` reduction or directly wraps + a where reduction.""" + return False + def _antialias_requires_2_stages(self): # Return True if this Reduction must be processed with 2 stages, # False if it doesn't matter. 
@@ -636,12 +642,16 @@ def __init__(self, cat_column, reduction=count()): else: raise TypeError("first argument must be a column name or a CategoryPreprocess instance") - if isinstance(reduction, where): - raise TypeError( - "'by' reduction does not support 'where' reduction for its first argument") - self.column = self.categorizer.column # for backwards compatibility with count_cat - self.columns = (self.categorizer.column, getattr(reduction, 'column', None)) + + self.columns = (self.categorizer.column,) + if (columns := getattr(reduction, 'columns', None)) is not None: + # Must reverse columns (from where reduction) so that val_column property + # is the column that is returned to the user. + self.columns += columns[::-1] + else: + self.columns += (getattr(reduction, 'column', None),) + self.reduction = reduction # if a value column is supplied, set category_values preprocessor if self.val_column is not None: @@ -676,6 +686,13 @@ def out_dshape(self, input_dshape, antialias, cuda, partitioned): def inputs(self): return (self.preprocess,) + def is_where(self): + return self.reduction.is_where() + + @property + def nan_check_column(self): + return self.reduction.nan_check_column + def uses_cuda_mutex(self): return self.reduction.uses_cuda_mutex() @@ -705,6 +722,9 @@ def _build_append(self, dshape, schema, cuda, antialias, self_intersect): def _build_combine(self, dshape, antialias, cuda, partitioned): return self.reduction._build_combine(dshape, antialias, cuda, partitioned) + def _build_combine_temps(self, cuda, partitioned): + return self.reduction._build_combine_temps(cuda, partitioned) + def _build_finalize(self, dshape): cats = list(self.categorizer.categories(dshape)) @@ -1657,7 +1677,9 @@ def __init__(self, selector: Reduction, lookup_column: str | None=None): raise TypeError( "selector can only be a first, first_n, last, last_n, " "max, max_n, min or min_n reduction") - super().__init__(SpecialColumn.RowIndex if lookup_column is None else lookup_column) + if lookup_column is None: + lookup_column = SpecialColumn.RowIndex + super().__init__(lookup_column) self.selector = selector # List of all column names that this reduction uses. self.columns = (selector.column, lookup_column) @@ -1665,6 +1687,9 @@ def __init__(self, selector: Reduction, lookup_column: str | None=None): def __hash__(self): return hash((type(self), self._hashable_inputs(), self.selector)) + def is_where(self): + return True + def out_dshape(self, input_dshape, antialias, cuda, partitioned): if self.column == SpecialColumn.RowIndex: return dshape(ct.int64) @@ -1781,8 +1806,8 @@ def _build_bases(self, cuda, partitioned): return selector._build_bases(cuda, partitioned) + super()._build_bases(cuda, partitioned) def _build_combine(self, dshape, antialias, cuda, partitioned): - # Does not support categorical reductions. 
selector = self.selector + is_n_reduction = isinstance(selector, FloatingNReduction) if cuda: append = selector._append_cuda else: @@ -1793,32 +1818,34 @@ def _build_combine(self, dshape, antialias, cuda, partitioned): invalid = isminus1 if self.selector.uses_row_index(cuda, partitioned) else isnull @ngjit - def combine_cpu_2d(aggs, selector_aggs): - ny, nx = aggs[0].shape + def combine_cpu(aggs, selector_aggs): + ny, nx, ncat = aggs[0].shape for y in range(ny): for x in range(nx): - value = selector_aggs[1][y, x] - if not invalid(value) and append(x, y, selector_aggs[0], value) >= 0: - aggs[0][y, x] = aggs[1][y, x] + for cat in range(ncat): + value = selector_aggs[1][y, x, cat] + if not invalid(value) and append(x, y, selector_aggs[0][:, :, cat], value) >= 0: + aggs[0][y, x, cat] = aggs[1][y, x, cat] @ngjit - def combine_cpu_3d(aggs, selector_aggs): + def combine_cpu_n(aggs, selector_aggs): # Generic solution for combining dask partitions of a where # reduction with a selector that is a FloatingNReduction. - ny, nx, n = aggs[0].shape + ny, nx, ncat, n = aggs[0].shape for y in range(ny): for x in range(nx): - for i in range(n): - value = selector_aggs[1][y, x, i] - if invalid(value): - break - update_index = append(x, y, selector_aggs[0], value) - if update_index < 0: - break - # Bump values along in the same way that append() has done above. - for j in range(n-1, update_index, -1): - aggs[0][y, x, j] = aggs[0][y, x, j-1] - aggs[0][y, x, update_index] = aggs[1][y, x, i] + for cat in range(ncat): + for i in range(n): + value = selector_aggs[1][y, x, cat, i] + if invalid(value): + break + update_index = append(x, y, selector_aggs[0][:, :, cat, :], value) + if update_index < 0: + break + # Bump values along in the same way that append() has done above. + for j in range(n-1, update_index, -1): + aggs[0][y, x, cat, j] = aggs[0][y, x, cat, j-1] + aggs[0][y, x, cat, update_index] = aggs[1][y, x, cat, i] @nb_cuda.jit def combine_cuda_2d(aggs, selector_aggs): @@ -1831,6 +1858,15 @@ def combine_cuda_2d(aggs, selector_aggs): @nb_cuda.jit def combine_cuda_3d(aggs, selector_aggs): + ny, nx, ncat = aggs[0].shape + x, y, cat = nb_cuda.grid(3) + if x < nx and y < ny and cat < ncat: + value = selector_aggs[1][y, x, cat] + if not invalid(value) and append(x, y, selector_aggs[0][:, :, cat], value) >= 0: + aggs[0][y, x, cat] = aggs[1][y, x, cat] + + @nb_cuda.jit + def combine_cuda_n_3d(aggs, selector_aggs): ny, nx, n = aggs[0].shape x, y = nb_cuda.grid(2) if x < nx and y < ny: @@ -1846,21 +1882,57 @@ def combine_cuda_3d(aggs, selector_aggs): aggs[0][y, x, j] = aggs[0][y, x, j-1] aggs[0][y, x, update_index] = aggs[1][y, x, i] + @nb_cuda.jit + def combine_cuda_n_4d(aggs, selector_aggs): + ny, nx, ncat, n = aggs[0].shape + x, y, cat = nb_cuda.grid(3) + if x < nx and y < ny and cat < ncat: + for i in range(n): + value = selector_aggs[1][y, x, cat, i] + if invalid(value): + break + update_index = append(x, y, selector_aggs[0][:, :, cat, :], value) + if update_index < 0: + break + # Bump values along in the same way that append() has done above. 
+ for j in range(n-1, update_index, -1): + aggs[0][y, x, cat, j] = aggs[0][y, x, cat, j-1] + aggs[0][y, x, cat, update_index] = aggs[1][y, x, cat, i] + def wrapped_combine(aggs, selector_aggs): + ret = aggs[0], selector_aggs[0] + ndim = aggs[0].ndim + if len(aggs) == 1: pass - elif cuda: - if aggs[0].ndim == 3: - combine_cuda_3d[cuda_args(aggs[0].shape[:2])](aggs, selector_aggs) + elif is_n_reduction: + # ndim is either 3 (ny, nx, n) or 4 (ny, nx, ncat, n) + if cuda: + if ndim == 3: + combine_cuda_n_3d[cuda_args(aggs[0].shape[:2])](aggs, selector_aggs) + else: + combine_cuda_n_4d[cuda_args(aggs[0].shape[:3])](aggs, selector_aggs) else: - combine_cuda_2d[cuda_args(aggs[0].shape)](aggs, selector_aggs) + if ndim == 3: + # 4d view of each agg, note use of numba typed list. + aggs = List([np.expand_dims(agg, 2) for agg in aggs]) + selector_aggs = List([np.expand_dims(agg, 2) for agg in selector_aggs]) + combine_cpu_n(aggs, selector_aggs) else: - if aggs[0].ndim == 3: - combine_cpu_3d(aggs, selector_aggs) + # ndim is either 2 (ny, nx) or 3 (ny, nx, ncat) + if cuda: + if ndim == 2: + combine_cuda_2d[cuda_args(aggs[0].shape)](aggs, selector_aggs) + else: + combine_cuda_3d[cuda_args(aggs[0].shape)](aggs, selector_aggs) else: - combine_cpu_2d(aggs, selector_aggs) + if ndim == 2: + # 3d view of each agg, note use of numba typed list. + aggs = List([np.expand_dims(agg, 2) for agg in aggs]) + selector_aggs = List([np.expand_dims(agg, 2) for agg in selector_aggs]) + combine_cpu(aggs, selector_aggs) - return aggs[0], selector_aggs[0] + return ret return wrapped_combine
Categorical support for where and <whatever>_n reductions The extent to which this works already needs to be investigated, and whatever is missing needs to be implemented. See #1126.
2023-06-21T11:57:54
0.0
[]
[]
holoviz/datashader
holoviz__datashader-1236
9f5b4110c4153b431a4a6f0ec371ec2a948295a6
diff --git a/datashader/compiler.py b/datashader/compiler.py index 7487ef0a0..bfdd01dcb 100644 --- a/datashader/compiler.py +++ b/datashader/compiler.py @@ -6,7 +6,7 @@ import numpy as np import xarray as xr -from .reductions import by, category_codes, summary, where +from .reductions import SpecialColumn, by, category_codes, summary, where from .utils import isnull, ngjit try: @@ -46,33 +46,37 @@ def compile_components(agg, schema, glyph, *, antialias=False, cuda=False, parti Returns ------- - A tuple of the following functions: + A tuple of the following: ``create(shape)`` - Takes the aggregate shape, and returns a tuple of initialized numpy - arrays. + Function that takes the aggregate shape, and returns a tuple of + initialized numpy arrays. ``info(df, canvas_shape)`` - Takes a dataframe, and returns preprocessed 1D numpy arrays of the - needed columns. + Function that takes a dataframe, and returns preprocessed 1D numpy + arrays of the needed columns. ``append(i, x, y, *aggs_and_cols)`` - Appends the ``i``th row of the table to the ``(x, y)`` bin, given the - base arrays and columns in ``aggs_and_cols``. This does the bulk of the - work. + Function that appends the ``i``th row of the table to the ``(x, y)`` + bin, given the base arrays and columns in ``aggs_and_cols``. This does + the bulk of the work. ``combine(base_tuples)`` - Combine a list of base tuples into a single base tuple. This forms the - reducing step in a reduction tree. + Function that combines a list of base tuples into a single base tuple. + This forms the reducing step in a reduction tree. ``finalize(aggs, cuda)`` - Given a tuple of base numpy arrays, returns the finalized ``DataArray`` - or ``Dataset``. + Function that is given a tuple of base numpy arrays and returns the + finalized ``DataArray`` or ``Dataset``. ``antialias_stage_2`` If using antialiased lines this is a tuple of the ``AntialiasCombination`` values corresponding to the aggs. If not using antialiased lines then this is False. + + ``column_names`` + Names of DataFrame columns or DataArray variables that are used by the + agg. 
""" reds = list(traverse_aggregation(agg)) @@ -115,7 +119,9 @@ def compile_components(agg, schema, glyph, *, antialias=False, cuda=False, parti combine = make_combine(bases, dshapes, temps, combine_temps, antialias, cuda, partitioned) finalize = make_finalize(bases, agg, schema, cuda, partitioned) - return create, info, append, combine, finalize, antialias_stage_2 + column_names = [c.column for c in cols if c.column != SpecialColumn.RowIndex] + + return create, info, append, combine, finalize, antialias_stage_2, column_names def traverse_aggregation(agg): diff --git a/datashader/data_libraries/dask.py b/datashader/data_libraries/dask.py index 026220a53..e29849a1e 100644 --- a/datashader/data_libraries/dask.py +++ b/datashader/data_libraries/dask.py @@ -72,7 +72,7 @@ def default(glyph, df, schema, canvas, summary, *, antialias=False, cuda=False): # Compile functions partitioned = isinstance(df, dd.DataFrame) and df.npartitions > 1 - create, info, append, combine, finalize, antialias_stage_2 = compile_components( + create, info, append, combine, finalize, antialias_stage_2, column_names = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=partitioned) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper @@ -102,9 +102,10 @@ def func(partition: pd.DataFrame, cumulative_lens, partition_info=None): graph = df.__dask_graph__() # Guess a reasonable output dtype from combination of dataframe dtypes + # Only consider columns used, not all columns in dataframe (issue #1235) dtypes = [] - for dt in df.dtypes: + for dt in df.dtypes[column_names]: if isinstance(dt, pd.CategoricalDtype): continue elif isinstance(dt, pd.api.extensions.ExtensionDtype): @@ -119,7 +120,7 @@ def func(partition: pd.DataFrame, cumulative_lens, partition_info=None): else: dtypes.append(dt) - dtype = np.result_type(*dtypes) + dtype = np.result_type(*dtypes) if dtypes else np.float64 # Create a meta object so that dask.array doesn't try to look # too closely at the type of the chunks it's wrapping # they're actually dataframes, tell dask they're ndarrays @@ -209,7 +210,7 @@ def line(glyph, df, schema, canvas, summary, *, antialias=False, cuda=False): # Compile functions partitioned = isinstance(df, dd.DataFrame) and df.npartitions > 1 - create, info, append, combine, finalize, antialias_stage_2 = compile_components( + create, info, append, combine, finalize, antialias_stage_2, _ = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=partitioned) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper diff --git a/datashader/data_libraries/dask_xarray.py b/datashader/data_libraries/dask_xarray.py index 222098fe4..9ef9be5cf 100644 --- a/datashader/data_libraries/dask_xarray.py +++ b/datashader/data_libraries/dask_xarray.py @@ -59,7 +59,7 @@ def dask_rectilinear(glyph, xr_ds, schema, canvas, summary, *, antialias=False, shape, bounds, st, axis = shape_bounds_st_and_axis(xr_ds, canvas, glyph) # Compile functions - create, info, append, combine, finalize, antialias_stage_2 = compile_components( + create, info, append, combine, finalize, antialias_stage_2, _ = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=True) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper @@ -141,7 +141,7 @@ def dask_raster(glyph, xr_ds, schema, canvas, summary, *, antialias=False, cuda= shape, bounds, st, axis = shape_bounds_st_and_axis(xr_ds, canvas, glyph) # Compile functions - create, info, append, combine, 
finalize, antialias_stage_2 = compile_components( + create, info, append, combine, finalize, antialias_stage_2, _ = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=True) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper @@ -235,7 +235,7 @@ def dask_curvilinear(glyph, xr_ds, schema, canvas, summary, *, antialias=False, shape, bounds, st, axis = shape_bounds_st_and_axis(xr_ds, canvas, glyph) # Compile functions - create, info, append, combine, finalize, antialias_stage_2 = compile_components( + create, info, append, combine, finalize, antialias_stage_2, _ = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=True) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper diff --git a/datashader/data_libraries/pandas.py b/datashader/data_libraries/pandas.py index 4168fc67a..1289d71ef 100644 --- a/datashader/data_libraries/pandas.py +++ b/datashader/data_libraries/pandas.py @@ -24,7 +24,7 @@ def pandas_pipeline(df, schema, canvas, glyph, summary, *, antialias=False): @glyph_dispatch.register(_GeometryLike) @glyph_dispatch.register(_AreaToLineLike) def default(glyph, source, schema, canvas, summary, *, antialias=False, cuda=False): - create, info, append, _, finalize, antialias_stage_2 = compile_components( + create, info, append, _, finalize, antialias_stage_2, _ = compile_components( summary, schema, glyph, antialias=antialias, cuda=cuda, partitioned=False) x_mapper = canvas.x_axis.mapper y_mapper = canvas.y_axis.mapper
Error using dask dataframe with incompatible column dtypes Consider datashading a dask dataframe containing columns of different dtypes that are not actually used in the datashade operation: ```python import dask.dataframe as dd import datashader as ds import numpy as np import pandas as pd df = pd.DataFrame( data=dict( x = [0, 1, 2], y = [0, 1, 2], dates = np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64'), ) ) ddf = dd.from_pandas(df, npartitions=2) canvas = ds.Canvas(2, 2) agg = canvas.points(ddf, 'x', 'y', ds.count()) ``` Note the `dates` column is not used in the `canvas.points` call. Running this gives the following error: ``` Traceback (most recent call last): File "/Users/iant/github_temp/datashader_temp/dask_dtypes.py", line 16, in <module> agg = canvas.points(ddf, 'x', 'y', ds.count()) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/core.py", line 220, in points return bypixel(source, self, glyph, agg) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/core.py", line 1257, in bypixel return bypixel.pipeline(source, schema, canvas, glyph, agg, antialias=antialias) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/utils.py", line 109, in __call__ return lk[typ](head, *rest, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/data_libraries/dask.py", line 22, in dask_pipeline dsk, name = glyph_dispatch(glyph, df, schema, canvas, summary, antialias=antialias, cuda=cuda) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/utils.py", line 112, in __call__ return lk[cls](head, *rest, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/iant/github/datashader/datashader/data_libraries/dask.py", line 122, in default dtype = np.result_type(*dtypes) ^^^^^^^^^^^^^^^^^^^^^^^ File "<__array_function__ internals>", line 200, in result_type TypeError: The DType <class 'numpy.dtype[datetime64]'> could not be promoted by <class 'numpy.dtype[int64]'>. This means that no common DType exists for the given inputs. For example they cannot be stored in a single array unless the dtype is `object`. The full list of DTypes is: (<class 'numpy.dtype[int64]'>, <class 'numpy.dtype[int64]'>, <class 'numpy.dtype[datetime64]'>) ``` Internally in the code that handles dask dataframes there is an attempt to find a `dtype` that is compatible for all columns of the dataframe. This is unnecessary, we only need to consider the `x` and `y` columns here so we can ignore the others. First reported by @Hoxbro.
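The heart of the fix is restricting the dtype promotion to the columns the aggregation actually uses. A small sketch of the difference, reusing the frame from the reproduction above:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(dict(
    x=[0, 1, 2],
    y=[0, 1, 2],
    dates=np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64'),
))

# Promotion over only the used columns succeeds:
print(np.result_type(*df.dtypes[['x', 'y']]))  # int64

# Promotion over every column reproduces the reported failure:
try:
    np.result_type(*df.dtypes)
except TypeError as exc:
    print(exc)  # datetime64 cannot be promoted with int64
```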
2023-06-16T10:32:47
0.0
[]
[]
pysnippet/thumbnails
pysnippet__thumbnails-67
427c952b9c88e6f814a14eaf90e74fa791e60680
diff --git a/.circleci/config.yml b/.circleci/config.yml index 4e553cb..f285a75 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,7 +5,7 @@ orbs: jobs: test: - executor: win/default + executor: win/server-2019 steps: - checkout diff --git a/setup.cfg b/setup.cfg index a4a3ead..f2a7328 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,13 +3,21 @@ name = thumbnails version = attr: thumbnails.__version__ author = Artyom Vancyan author_email = [email protected] -description = Video thumbnail generator for modern web video players +description = Video thumbnail generator for modern web video players such as Plyr, Videojs, Flowplayer, Fluid Player, etc. long_description = file: README.md long_description_content_type = text/markdown url = https://github.com/pysnippet/thumbnails keywords = vtt json + plyr + video + fluid + player + videojs + thumbnail + generator + flowplayer thumbnails thumbnail-generator license = Apache 2.0 @@ -19,18 +27,18 @@ classifiers = License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python :: 3 - Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Topic :: Multimedia :: Video [options] packages = thumbnails install_requires = - av==8.0.0;python_version=='3.7' - av==9.2.0;python_version>'3.7' + av==8.0.0;sys_platform=='win32' + av==9.2.0;sys_platform!='win32' click>=8.0.3 imageio-ffmpeg>=0.4.7 imageio>=2.23.0 diff --git a/src/thumbnails/__init__.py b/src/thumbnails/__init__.py index 40a7849..fb9fd69 100644 --- a/src/thumbnails/__init__.py +++ b/src/thumbnails/__init__.py @@ -22,7 +22,7 @@ from .thumbnail import ThumbnailVTT from .thumbnail import register_thumbnail -__version__ = "0.1.9" +__version__ = "0.1.11" __all__ = ( "Generator", "Thumbnail", diff --git a/src/thumbnails/constants.py b/src/thumbnails/constants.py index 1af809e..430ac33 100644 --- a/src/thumbnails/constants.py +++ b/src/thumbnails/constants.py @@ -1,4 +1,4 @@ -DEFAULT_BASE = "" +DEFAULT_BASE = None DEFAULT_SKIP = False DEFAULT_OUTPUT = None DEFAULT_FORMAT = "vtt" diff --git a/src/thumbnails/thumbnail.py b/src/thumbnails/thumbnail.py index bfaca2e..9aeaa16 100644 --- a/src/thumbnails/thumbnail.py +++ b/src/thumbnails/thumbnail.py @@ -119,7 +119,7 @@ def format_time(secs): return ("0%s.000" % delta)[:12] metadata = ["WEBVTT\n\n"] - prefix = self.base or os.path.relpath(self.thumbnail_dir) + prefix = self.base if self.base is not None else os.path.relpath(self.thumbnail_dir) route = os.path.join(prefix, extract_name(self.filepath) + ".png") route = pathlib.Path(route).as_posix() @@ -157,8 +157,8 @@ def generate(self): with Progress("Saving thumbnail metadata at '%s'" % self.metadata_path): for frame, start, *_ in self.thumbnails(): frame = os.path.join(self.thumbnail_dir, os.path.basename(frame)) - base = os.path.join(self.base, os.path.basename(self.thumbnail_dir)) - prefix = base if self.base else os.path.relpath(self.thumbnail_dir) + base = os.path.join(self.base or "", os.path.basename(self.thumbnail_dir)) + prefix = base if self.base is not None else os.path.relpath(self.thumbnail_dir) route = os.path.join(prefix, os.path.basename(frame)) route = pathlib.Path(route).as_posix() thumbnail_data = {
Base should override the output The `base` option was supposed to override the whole route prefix in the `.vtt` and `.json` files. Before these changes, this did not happen when `base` was set to an empty string. This PR also includes tests to keep these cases stable. - #65
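The subtlety the fix addresses is Python truthiness: an empty-string `base` is falsy, so the old `self.base or os.path.relpath(...)` expression silently discarded it, while the patched `is not None` check only falls back when `base` was never set. A minimal illustration (the directory name is made up):

```python
import os

thumbnail_dir = "output/thumbs"  # illustrative path
base = ""                        # user explicitly asks for an empty route prefix

# Old behaviour: "" is falsy, so the relative thumbnail dir wins.
old_prefix = base or os.path.relpath(thumbnail_dir)                         # -> "output/thumbs"

# Fixed behaviour: only a missing (None) base falls back.
new_prefix = base if base is not None else os.path.relpath(thumbnail_dir)  # -> ""
```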
2023-04-18T16:21:33
0.0
[]
[]
csdms/bmi-topography
csdms__bmi-topography-67
6efb04176285b4d6e49ea389ec82fe570dfd46a8
diff --git a/environment.yml b/environment.yml index cb8bdd7..d56d02a 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,7 @@ name: topography channels: - conda-forge dependencies: - - python >=3.9 + - python >=3.10 - pip - requests - numpy diff --git a/noxfile.py b/noxfile.py index b2e6337..4e94e1c 100644 --- a/noxfile.py +++ b/noxfile.py @@ -10,7 +10,7 @@ HERE = pathlib.Path(__file__) ROOT = HERE.parent PATHS = [PACKAGE, "docs", "examples", "tests", HERE.name] -PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12"] +PYTHON_VERSIONS = ["3.10", "3.11", "3.12"] @nox.session(python=PYTHON_VERSIONS) @@ -36,15 +36,15 @@ def test(session: nox.Session) -> None: @nox.session(name="test-bmi", python=PYTHON_VERSIONS, venv_backend="conda") def test_bmi(session: nox.Session) -> None: """Test the Basic Model Interface.""" - session.conda_install("bmi-tester", "pymt>=1.3") + session.install("bmi-tester>=0.5.9") session.install(".") session.run( "bmi-test", - "bmi_topography:BmiTopography", + f"{PACKAGE}:BmiTopography", "--config-file", - "./examples/config.yaml", + f"{ROOT}/examples/config.yaml", "--root-dir", - "./examples", + "examples", "-vvv", ) diff --git a/pyproject.toml b/pyproject.toml index d96d7ca..327d3d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,7 +27,6 @@ keywords = [ license = {text = "MIT License"} classifiers = [ "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -37,7 +36,7 @@ classifiers = [ "Intended Audience :: Science/Research", "Intended Audience :: Education", ] -requires-python = ">=3.9" +requires-python = ">=3.10" dependencies = [ "requests", "numpy",
Set minimum Python >= 3.10 In keeping with our informal policy of supporting the latest three minor Python versions, the minimum Python version should be bumped to 3.10.
2024-03-27T17:26:18
0.0
[]
[]
csdms/bmi-topography
csdms__bmi-topography-36
6d47074340e29d2613b21c88ff42743f6472709e
diff --git a/bmi_topography/cli.py b/bmi_topography/cli.py index 20c2121..4bbf24a 100644 --- a/bmi_topography/cli.py +++ b/bmi_topography/cli.py @@ -8,7 +8,7 @@ @click.version_option() @click.option("-q", "--quiet", is_flag=True, help="Enables quiet mode.") @click.option( - "--dem_type", + "--dem-type", type=click.Choice(Topography.VALID_DEM_TYPES, case_sensitive=True), default=Topography.DEFAULT["dem_type"], help="The global raster dataset.", @@ -43,19 +43,19 @@ show_default=True, ) @click.option( - "--output_format", + "--output-format", type=click.Choice(Topography.VALID_OUTPUT_FORMATS.keys(), case_sensitive=True), default=Topography.DEFAULT["output_format"], help="Output file format.", show_default=True, ) @click.option( - "--api_key", + "--api-key", type=str, help="OpenTopography API key.", show_default=True, ) [email protected]("--no_fetch", is_flag=True, help="Do not fetch data from server.") [email protected]("--no-fetch", is_flag=True, help="Do not fetch data from server.") def main(quiet, dem_type, south, north, west, east, output_format, api_key, no_fetch): """Fetch and cache NASA SRTM and JAXA ALOS land elevation data @@ -73,9 +73,12 @@ def main(quiet, dem_type, south, north, west, east, output_format, api_key, no_f topo = Topography(dem_type, south, north, west, east, output_format) if not no_fetch: if not quiet: - click.secho("Fetching data...", fg="yellow") - topo.fetch() + click.secho("Fetching data...", fg="yellow", err=True) + path_to_dem = topo.fetch() if not quiet: click.secho( - "File downloaded to {}".format(getattr(topo, "cache_dir")), fg="green" + "File downloaded to {}".format(getattr(topo, "cache_dir")), + fg="green", + err=True, ) + print(path_to_dem) diff --git a/examples/bmi-topography_ex.sh b/examples/bmi-topography_ex.sh index 4234a7c..389ca57 100644 --- a/examples/bmi-topography_ex.sh +++ b/examples/bmi-topography_ex.sh @@ -12,9 +12,9 @@ bmi-topography --version bmi-topography --help bmi-topography \ - --dem_type=$DEM_TYPE \ + --dem-type=$DEM_TYPE \ --south=$SOUTH \ --north=$NORTH \ --west=$WEST \ --east=$EAST \ - --output_format=$OUTPUT_FORMAT + --output-format=$OUTPUT_FORMAT
Underscores in bmi-topography command line options I'm wondering about the `_`s in the *bmi-topography* command line options (e.g. `--no_fetch`, `--api_key`). It seems more standard to use `-`s in such cases (e.g. `--no-fetch`, `--api-key`).
I agree. I'm not sure what moved me to use underscores instead of hyphens.
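A short sketch of the convention the patch adopts: click maps hyphens in option names to underscores in the Python parameter names, so only the CLI surface changes while the function signature stays the same (option names and defaults below mirror the diff):

```python
import click

@click.command()
@click.option("--dem-type", default="SRTMGL3", help="The global raster dataset.")
@click.option("--no-fetch", is_flag=True, help="Do not fetch data from server.")
def main(dem_type, no_fetch):
    # click converts "--dem-type" to the parameter name "dem_type",
    # so callers see idiomatic hyphenated flags with no code churn.
    click.echo(f"dem_type={dem_type}, no_fetch={no_fetch}")

if __name__ == "__main__":
    main()
```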
2022-02-16T20:38:13
0.0
[]
[]
csdms/bmi-topography
csdms__bmi-topography-14
60fb16b6ffbc643691422581746fcdded3b46a0f
diff --git a/CHANGES.md b/CHANGES.md index 1a99a55..8a2a3e0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,8 @@ Changes for bmi-topography 0.3.3 (unreleased) ------------------ -- Support ALOS World 3D 30m products +- Support ALOS World 3D 30m products (#13) +- Support .asc and .img files (#14) - Add format job to CI - Use CITATION.cff file diff --git a/Makefile b/Makefile index a60b96f..cd88c25 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ pretty: ## reformat files to make them look pretty black bmi_topography tests docs examples test: ## run tests quickly with the default Python - pytest --disable-warnings --cov=bmi_topography --cov-report=xml:./coverage.xml -vvv + pytest --disable-warnings --cov=bmi_topography --cov-config=./setup.cfg --cov-report=xml:./coverage.xml -vvv bmi-test: ## run the bmi-tester bmi-test bmi_topography:BmiTopography --config-file=./examples/config.yaml --root-dir=./examples -vvv diff --git a/bmi_topography/cli.py b/bmi_topography/cli.py index 06016bd..3a8eb59 100644 --- a/bmi_topography/cli.py +++ b/bmi_topography/cli.py @@ -44,14 +44,14 @@ ) @click.option( "--output_format", - type=click.Choice(Topography.VALID_OUTPUT_FORMATS, case_sensitive=True), + type=click.Choice(Topography.VALID_OUTPUT_FORMATS.keys(), case_sensitive=True), default=Topography.DEFAULT["output_format"], help="Output file format.", show_default=True, ) @click.option("--no_fetch", is_flag=True, help="Do not fetch data from server.") def main(quiet, dem_type, south, north, west, east, output_format, no_fetch): - """Fetch and cache Shuttle Radar Topography Mission (SRTM) elevation data""" + """Fetch and cache NASA SRTM and JAXA ALOS land elevation data""" topo = Topography(dem_type, south, north, west, east, output_format) if not no_fetch: if not quiet: diff --git a/bmi_topography/topography.py b/bmi_topography/topography.py index c509f78..8865d66 100644 --- a/bmi_topography/topography.py +++ b/bmi_topography/topography.py @@ -27,7 +27,7 @@ class Topography: } VALID_DEM_TYPES = ("SRTMGL3", "SRTMGL1", "SRTMGL1_E", "AW3D30", "AW3D30_E") - VALID_OUTPUT_FORMATS = ("GTiff", "AAIGrid", "HFA") + VALID_OUTPUT_FORMATS = {"GTiff": "tif", "AAIGrid": "asc", "HFA": "img"} def __init__( self, @@ -47,11 +47,13 @@ def __init__( "dem_type must be one of %s." % (Topography.VALID_DEM_TYPES,) ) - if output_format in Topography.VALID_OUTPUT_FORMATS: + if output_format in Topography.VALID_OUTPUT_FORMATS.keys(): self._output_format = output_format + self._file_extension = Topography.VALID_OUTPUT_FORMATS[output_format] else: raise ValueError( - "output_format must be one of %s." % (Topography.VALID_OUTPUT_FORMATS,) + "output_format must be one of %s." + % [k for k in Topography.VALID_OUTPUT_FORMATS.keys()] ) self._bbox = BoundingBox((south, west), (north, east)) @@ -70,6 +72,10 @@ def dem_type(self): def output_format(self): return str(self._output_format) + @property + def file_extension(self): + return str(self._file_extension) + @property def bbox(self): return self._bbox @@ -92,12 +98,13 @@ def fetch(self): """ fname = Path( self.cache_dir - ) / "{dem_type}_{south}_{west}_{north}_{east}.tif".format( + ) / "{dem_type}_{south}_{west}_{north}_{east}.{ext}".format( dem_type=self.dem_type, south=self.bbox.south, north=self.bbox.north, west=self.bbox.west, east=self.bbox.east, + ext=self.file_extension, ) if not fname.is_file():
Extension of AAIGrid When you download a DEM using `output_format="AAIGrid"`, the data format of the file is OK, but its extension still says `.tif`, which is a little confusing.
Add support for both `.asc` and `.img` files, since *xarray.open_rasterio* works with both.
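A condensed sketch of the fix in the patch above: `VALID_OUTPUT_FORMATS` becomes a dict mapping each GDAL output format to its file extension, so the cached filename no longer hard-codes `.tif` (the helper function here is hypothetical; the mapping and filename pattern come from the diff):

```python
# Map each supported GDAL output format to its file extension.
VALID_OUTPUT_FORMATS = {"GTiff": "tif", "AAIGrid": "asc", "HFA": "img"}

def cached_filename(dem_type, south, west, north, east, output_format):
    ext = VALID_OUTPUT_FORMATS[output_format]  # KeyError for unknown formats
    return f"{dem_type}_{south}_{west}_{north}_{east}.{ext}"

print(cached_filename("SRTMGL3", 36.7, -120.2, 38.1, -118.5, "AAIGrid"))
# -> SRTMGL3_36.7_-120.2_38.1_-118.5.asc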
2021-09-03T17:02:30
0.0
[]
[]
csdms/bmi-topography
csdms__bmi-topography-13
43722512570c68ddca9696f2a53d0f30dd947d1c
diff --git a/CHANGES.md b/CHANGES.md index feb77c6..1a99a55 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,6 +4,8 @@ Changes for bmi-topography 0.3.3 (unreleased) ------------------ +- Support ALOS World 3D 30m products +- Add format job to CI - Use CITATION.cff file diff --git a/Makefile b/Makefile index 302c2aa..a60b96f 100644 --- a/Makefile +++ b/Makefile @@ -52,11 +52,11 @@ clean-test: ## remove test and coverage artifacts rm -fr .pytest_cache lint: ## check style with flake8 - flake8 bmi_topography tests + flake8 bmi_topography tests docs examples pretty: ## reformat files to make them look pretty - find bmi_topography tests docs -name '*.py' | xargs isort - black bmi_topography tests docs + find bmi_topography tests docs examples -name '*.py' | xargs isort + black bmi_topography tests docs examples test: ## run tests quickly with the default Python pytest --disable-warnings --cov=bmi_topography --cov-report=xml:./coverage.xml -vvv @@ -100,4 +100,4 @@ fullrelease: clean setup ## generate a full release with zest.releaser fullrelease install: clean ## install the package to the active Python's site-packages - pip install . + pip install -e . diff --git a/README.md b/README.md index 1581c22..eb62173 100644 --- a/README.md +++ b/README.md @@ -7,16 +7,19 @@ # bmi-topography *bmi-topography* is a Python library for fetching and caching -NASA [Shuttle Radar Topography Mission](https://www2.jpl.nasa.gov/srtm/) (SRTM) -land elevation data -using the [OpenTopography](https://opentopography.org/) -[REST API](https://portal.opentopography.org/apidocs/). +land elevation data from the +NASA [Shuttle Radar Topography Mission][srtm] (SRTM) +and the +JAXA [Advanced Land Observing Satellite][alos] (ALOS) +using the [OpenTopography][ot] [REST API][ot-rest]. The *bmi-topography* library provides access to the following global raster datasets: * SRTM GL3 (90m) * SRTM GL1 (30m) -* SRTM GL1 (Ellipsoidal) +* SRTM GL1 (30m, Ellipsoidal) +* ALOS World 3D (30m) +* ALOS World 3D (30m, Ellipsoidal) The library includes an API and a CLI that accept the dataset type, @@ -25,16 +28,14 @@ the output file format. Data are downloaded from OpenTopography and cached locally. The cache is checked before downloading new data. Data from a cached file can optionally be loaded into an -[xarray](http://xarray.pydata.org/en/stable/) -[DataArray](http://xarray.pydata.org/en/stable/api.html#dataarray) -using the experimental [open_rasterio](http://xarray.pydata.org/en/stable/generated/xarray.open_rasterio.html#xarray.open_rasterio) method. +[xarray][xarray] [DataArray][xarray-da] +using the experimental [open_rasterio][xarray-or] method. The *bmi-topography* API is wrapped with a -[Basic Model Interface](https://bmi.readthedocs.io) (BMI), +[Basic Model Interface][bmi] (BMI), which provides a standard set of functions for coupling with data or models that also expose a BMI. -More information on the BMI can found in its -[documentation](https://bmi.readthedocs.io). +More information on the BMI can found in its [documentation][bmi]. ## Installation @@ -50,9 +51,9 @@ conda install -c conda-forge bmi-topography The *bmi-topography* library can also be built and installed from source. The library uses several other open source libraries, so a convenient way of building and installing it is within a -[conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). +[conda environment][conda-env]. 
After cloning or downloading the *bmi-topography* -[repository](https://github.com/csdms/bmi-topography), +[repository][bmi-topo-repo], change into the repository directory and set up a conda environment with the included environment file: ``` @@ -130,8 +131,22 @@ Attributes: For examples with more detail, see the two Jupyter Notebooks, Python script, and shell script -included in the [examples](https://github.com/csdms/bmi-topography/tree/main/examples) directory +included in the [examples][bmi-topo-examples] directory of the *bmi-topography* repository. User and developer documentation for *bmi-topography* is available at https://bmi-topography.readthedocs.io. + +<!-- Links (by alpha) --> + +[alos]: https://www.eorc.jaxa.jp/ALOS/en/aw3d30/index.htm +[bmi]: https://bmi.readthedocs.io +[bmi-topo-examples]: https://github.com/csdms/bmi-topography/tree/main/examples +[bmi-topo-repo]: https://github.com/csdms/bmi-topography +[conda-env]: https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html +[ot]: https://opentopography.org/ +[ot-rest]: https://portal.opentopography.org/apidocs/ +[srtm]: https://www2.jpl.nasa.gov/srtm/ +[xarray]: http://xarray.pydata.org/en/stable/ +[xarray-da]: http://xarray.pydata.org/en/stable/api.html#dataarray +[xarray-or]: http://xarray.pydata.org/en/stable/generated/xarray.open_rasterio.html#xarray.open_rasterio diff --git a/bmi_topography/topography.py b/bmi_topography/topography.py index 94ffaa4..c509f78 100644 --- a/bmi_topography/topography.py +++ b/bmi_topography/topography.py @@ -26,7 +26,7 @@ class Topography: "cache_dir": "~/.bmi_topography", } - VALID_DEM_TYPES = ("SRTMGL3", "SRTMGL1", "SRTMGL1_E") + VALID_DEM_TYPES = ("SRTMGL3", "SRTMGL1", "SRTMGL1_E", "AW3D30", "AW3D30_E") VALID_OUTPUT_FORMATS = ("GTiff", "AAIGrid", "HFA") def __init__( diff --git a/docs/source/README.rst b/docs/source/README.rst index 84acb2c..881680b 100644 --- a/docs/source/README.rst +++ b/docs/source/README.rst @@ -4,10 +4,12 @@ bmi-topography ============== -*bmi-topography* is a Python library for fetching and caching NASA -`Shuttle Radar Topography Mission <https://www2.jpl.nasa.gov/srtm/>`__ -(SRTM) land elevation data using the -`OpenTopography <https://opentopography.org/>`__ `REST +*bmi-topography* is a Python library for fetching and caching land +elevation data from the NASA `Shuttle Radar Topography +Mission <https://www2.jpl.nasa.gov/srtm/>`__ (SRTM) and the JAXA +`Advanced Land Observing +Satellite <https://www.eorc.jaxa.jp/ALOS/en/aw3d30/index.htm>`__ (ALOS) +using the `OpenTopography <https://opentopography.org/>`__ `REST API <https://portal.opentopography.org/apidocs/>`__. The *bmi-topography* library provides access to the following global @@ -15,7 +17,9 @@ raster datasets: - SRTM GL3 (90m) - SRTM GL1 (30m) -- SRTM GL1 (Ellipsoidal) +- SRTM GL1 (30m, Ellipsoidal) +- ALOS World 3D (30m) +- ALOS World 3D (30m, Ellipsoidal) The library includes an API and a CLI that accept the dataset type, a latitude-longitude bounding box, and the output file format. Data are @@ -149,6 +153,10 @@ directory of the *bmi-topography* repository. User and developer documentation for *bmi-topography* is available at https://bmi-topography.readthedocs.io. +.. raw:: html + + <!-- Links (by alpha) --> + .. |Basic Model Interface| image:: https://img.shields.io/badge/CSDMS-Basic%20Model%20Interface-green.svg :target: https://bmi.readthedocs.io/ .. 
|Conda Version| image:: https://img.shields.io/conda/vn/conda-forge/bmi-topography.svg diff --git a/docs/source/_templates/sidebarintro.html b/docs/source/_templates/sidebarintro.html index 7d15c61..4acd63b 100644 --- a/docs/source/_templates/sidebarintro.html +++ b/docs/source/_templates/sidebarintro.html @@ -1,5 +1,9 @@ <h3>About bmi-topography</h3> <p> - Fetch and cache NASA Shuttle Radar Topography Mission (SRTM) land elevation - data through an API, CLI, or BMI. + Fetch and cache land elevation data + from the + NASA Shuttle Radar Topography Mission (SRTM) + or the + JAXA Advanced Land Observing Satellite (ALOS) + through an API, CLI, or BMI. </p> diff --git a/examples/readme_example.py b/examples/readme_example.py index ef3d358..03d86f7 100644 --- a/examples/readme_example.py +++ b/examples/readme_example.py @@ -1,7 +1,7 @@ """An example used in the README and documentation.""" import matplotlib.pyplot as plt -from bmi_topography import Topography +from bmi_topography import Topography params = Topography.DEFAULT.copy() params["south"] = 39.75 diff --git a/setup.cfg b/setup.cfg index aa5e62e..4e0f722 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,10 +3,10 @@ name = bmi-topography version = 0.3.3.dev0 author = Mark Piper author_email = [email protected] -description = Fetch and cache NASA SRTM land elevation data +description = Fetch and cache NASA SRTM and JAXA ALOS land elevation data long_description = file: README.md, CHANGES.md, CONTRIBUTING.md, CREDITS.md, CITATION.cff, LICENSE.md long_description_content_type = text/markdown -keywords = bmi, srtm, topography, elevation, dem, data +keywords = bmi, srtm, alos, topography, elevation, dem, data license = MIT License url = https://github.com/csdms/bmi-topography classifiers = @@ -37,6 +37,9 @@ exclude = console_scripts = bmi-topography = bmi_topography.cli:main +[coverage:run] +relative_files = True + [flake8] exclude = docs ignore =
Support download of ALOS data The [OpenTopography API](https://portal.opentopography.org/apidocs/#/Public/getGlobalDem), on which bmi-topography is built, supports not only SRTM, but also [ALOS](https://www.eorc.jaxa.jp/ALOS/en/aw3d/index_e.htm). bmi-topography should be enhanced to include downloads of * ALOS World 3D 30m * ALOS World 3D (Ellipsoidal) from the OpenTopography API.
Tagging @BCampforts, who suggested this.
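A hedged usage sketch of the enhancement, following the `Topography.DEFAULT.copy()` pattern from `examples/readme_example.py` in the diff above (the exact constructor keywords accepted by `Topography(**params)` are assumed from that example): with `"AW3D30"` added to `VALID_DEM_TYPES`, an ALOS request looks just like an SRTM one.

```python
from bmi_topography import Topography

params = Topography.DEFAULT.copy()
params["dem_type"] = "AW3D30"  # ALOS World 3D 30m, newly valid per this patch

topo = Topography(**params)
topo.fetch()  # downloads from the OpenTopography API and caches locally
```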
2021-09-02T20:23:01
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-748
335ed3860a160359f26ea792cc455a8213fe1840
diff --git a/src/telliot_feeds/utils/query_search_utils.py b/src/telliot_feeds/utils/query_search_utils.py index cf17a155..f1b492ea 100644 --- a/src/telliot_feeds/utils/query_search_utils.py +++ b/src/telliot_feeds/utils/query_search_utils.py @@ -4,6 +4,7 @@ from clamfig.base import Registry from eth_abi import decode_single +from eth_abi.exceptions import NonEmptyPaddingBytes from web3 import Web3 as w3 from telliot_feeds.feeds import CATALOG_FEEDS @@ -11,6 +12,9 @@ from telliot_feeds.feeds import DATAFEED_BUILDER_MAPPING from telliot_feeds.queries.query import OracleQuery from telliot_feeds.queries.query_catalog import query_catalog +from telliot_feeds.utils.log import get_logger + +logger = get_logger(__name__) def decode_typ_name(qdata: bytes) -> str: @@ -27,6 +31,12 @@ def decode_typ_name(qdata: bytes) -> str: except OverflowError: # string query for some reason encoding isn't the same as the others qtype_name = ast.literal_eval(qdata.decode("utf-8"))["type"] + except NonEmptyPaddingBytes: + logger.error(f"NonEmptyPaddingBytes error for query data: {qdata.hex()}") + return "" + except Exception as e: + logger.error(f"Error decoding query type name for query data: {qdata.hex()}; error: {e}") + return "" return qtype_name
Error out on Arbitrum Mainnet start command: `telliot report -a arbitrumAccount -mnb 0` full error log: [telliot_error.txt](https://github.com/tellor-io/telliot-feeds/files/14240062/telliot_error.txt) let me know if more info is needed to reproduce
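A condensed sketch of the guard the patch adds. Only the exception handlers appear in the diff; the inner `decode_single` call is an assumption about the existing decode, shown here so the control flow is clear. Malformed on-chain query data raised `NonEmptyPaddingBytes` and crashed the reporter; returning `""` lets the caller skip that query instead.

```python
import logging

from eth_abi import decode_single
from eth_abi.exceptions import NonEmptyPaddingBytes

logger = logging.getLogger(__name__)

def decode_typ_name_guarded(qdata: bytes) -> str:
    """Return the query type name, or "" if the query data is malformed."""
    try:
        # Assumed shape of the existing decode; the diff only adds the guards.
        return str(decode_single("(string,bytes)", qdata)[0])
    except NonEmptyPaddingBytes:
        # Padding error seen in some on-chain query data (e.g. Arbitrum Mainnet).
        logger.error(f"NonEmptyPaddingBytes error for query data: {qdata.hex()}")
        return ""
    except Exception as e:
        logger.error(f"Error decoding query type name: {qdata.hex()}; error: {e}")
        return ""
```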
2024-02-12T15:39:54
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-675
304be5efd6e4e0946294f9dc2ee7fd238dcd8cef
diff --git a/contracts/SampleReporterContract/SampleFlexReporter.sol b/contracts/SampleReporterContract/SampleFlexReporter.sol index e5d1817d..ece3e4b1 100644 --- a/contracts/SampleReporterContract/SampleFlexReporter.sol +++ b/contracts/SampleReporterContract/SampleFlexReporter.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity ^0.8.15; interface IFlex { function balanceOf(address account) external view returns (uint256); diff --git a/contracts/TellorFlex/Autopay.sol b/contracts/Tellor/Autopay.sol similarity index 87% rename from contracts/TellorFlex/Autopay.sol rename to contracts/Tellor/Autopay.sol index fd25ed7a..2007e694 100644 --- a/contracts/TellorFlex/Autopay.sol +++ b/contracts/Tellor/Autopay.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.3; +pragma solidity ^0.8.0; -import "contracts/TellorFlex/UsingTellor.sol"; -import "../../interfaces/IERC20.sol"; -import "contracts/TellorFlex/QueryDataStorage.sol"; +import { UsingTellor } from "./UsingTellor.sol"; +import { IERC20 } from "../../interfaces/IERC20.sol"; +import "../../interfaces/IQueryDataStorage.sol"; /** @author Tellor Inc. @@ -14,7 +14,7 @@ import "contracts/TellorFlex/QueryDataStorage.sol"; contract Autopay is UsingTellor { // Storage IERC20 public token; // TRB token address - QueryDataStorage public queryDataStorage; // Query data storage contract + IQueryDataStorage public queryDataStorage; // Query data storage contract uint256 public fee; // 1000 is 100%, 50 is 5%, etc. mapping(bytes32 => bytes32[]) currentFeeds; // mapping queryId to dataFeedIds array @@ -101,12 +101,11 @@ contract Autopay is UsingTellor { */ constructor( address payable _tellor, - address _token, address _queryDataStorage, uint256 _fee ) UsingTellor(_tellor) { - token = IERC20(_token); - queryDataStorage = QueryDataStorage(_queryDataStorage); + token = IERC20(tellor.token()); + queryDataStorage = IQueryDataStorage(_queryDataStorage); fee = _fee; } @@ -125,7 +124,8 @@ contract Autopay is UsingTellor { uint256 _cumulativeReward; for (uint256 _i = 0; _i < _timestamps.length; _i++) { _cumulativeReward += _getOneTimeTipAmount( - _queryId, _timestamps[_i] + _queryId, + _timestamps[_i] ); } require( @@ -135,6 +135,7 @@ contract Autopay is UsingTellor { ) ); token.approve(address(tellor), (_cumulativeReward * fee) / 1000); + tellor.addStakingRewards((_cumulativeReward * fee) / 1000); if (getCurrentTip(_queryId) == 0) { if (queryIdsWithFundingIndex[_queryId] != 0) { uint256 _idx = queryIdsWithFundingIndex[_queryId] - 1; @@ -155,83 +156,68 @@ contract Autopay is UsingTellor { * @dev Allows Tellor reporters to claim their tips in batches * @param _feedId unique feed identifier * @param _queryId ID of reported data - * @param _timestamps[] batch of timestamps array of reported data eligible for reward + * @param _timestamps batch of timestamps array of reported data eligible for reward */ function claimTip( bytes32 _feedId, bytes32 _queryId, uint256[] calldata _timestamps ) external { - uint256 _reward; + Feed storage _feed = dataFeed[_queryId][_feedId]; + uint256 _balance = _feed.details.balance; + require(_balance > 0, "no funds available for this feed"); uint256 _cumulativeReward; for (uint256 _i = 0; _i < _timestamps.length; _i++) { - _reward = _claimTip(_feedId, _queryId, _timestamps[_i]); - _cumulativeReward += _reward; - } - emit TipClaimed(_feedId, _queryId, _cumulativeReward, msg.sender); - } - /** - * @dev Internal function which allows Tellor reporters to claim their autopay tips - * 
@param _feedId of dataFeed - * @param _queryId id of reported data - * @param _timestamp timestamp of reported data eligible for reward - * @return uint256 reward amount - */ - function _claimTip( - bytes32 _feedId, - bytes32 _queryId, - uint256 _timestamp - ) internal returns (uint256) { - Feed storage _feed = dataFeed[_queryId][_feedId]; - bytes memory _valueRetrieved = retrieveData(_queryId, _timestamp); - uint256 _n = (_timestamp - _feed.details.startTime) / - _feed.details.interval; // finds closest interval _n to timestamp - uint256 _c = _feed.details.startTime + _feed.details.interval * _n; // finds timestamp _c of interval _n - ( - bytes memory _valueRetrievedBefore, - uint256 _timestampBefore - ) = getDataBefore(_queryId, _timestamp); - uint256 _priceChange = 0; //price change from last value to current value - if (_feed.details.priceThreshold != 0) { - uint256 _v1 = _bytesToUint(_valueRetrieved); - uint256 _v2 = _bytesToUint(_valueRetrievedBefore); - if (_v2 == 0) { - _priceChange = 10000; - } else if (_v1 >= _v2) { - _priceChange = (10000 * (_v1 - _v2)) / _v2; - } else { - _priceChange = (10000 * (_v2 - _v1)) / _v2; - } - } - if (_priceChange <= _feed.details.priceThreshold) { - } - uint256 _rewardAmount; - if (_feed.details.balance > _feed.details.reward) { - _rewardAmount = _feed.details.reward; - _feed.details.balance -= _feed.details.reward; - } else { - _rewardAmount = _feed.details.balance; - _feed.details.balance = 0; - // Adjust currently funded feeds - if (feedsWithFunding.length > 1) { - uint256 _idx = _feed.details.feedsWithFundingIndex - 1; - // Replace unfunded feed in array with last element - feedsWithFunding[_idx] = feedsWithFunding[ - feedsWithFunding.length - 1 - ]; - bytes32 _feedIdLastFunded = feedsWithFunding[_idx]; - bytes32 _queryIdLastFunded = queryIdFromDataFeedId[ - _feedIdLastFunded - ]; - dataFeed[_queryIdLastFunded][_feedIdLastFunded] - .details - .feedsWithFundingIndex = _idx + 1; + require( + block.timestamp - _timestamps[_i] > 12 hours, + "buffer time has not passed" + ); + require( + getReporterByTimestamp(_queryId, _timestamps[_i]) == msg.sender, + "message sender not reporter for given queryId and timestamp" + ); + _cumulativeReward += _getRewardAmount( + _feedId, + _queryId, + _timestamps[_i] + ); + if (_cumulativeReward >= _balance) { + // Balance runs out + require( + _i == _timestamps.length - 1, + "insufficient balance for all submitted timestamps" + ); + _cumulativeReward = _balance; + // Adjust currently funded feeds + if (feedsWithFunding.length > 1) { + uint256 _idx = _feed.details.feedsWithFundingIndex - 1; + // Replace unfunded feed in array with last element + feedsWithFunding[_idx] = feedsWithFunding[ + feedsWithFunding.length - 1 + ]; + bytes32 _feedIdLastFunded = feedsWithFunding[_idx]; + bytes32 _queryIdLastFunded = queryIdFromDataFeedId[ + _feedIdLastFunded + ]; + dataFeed[_queryIdLastFunded][_feedIdLastFunded] + .details + .feedsWithFundingIndex = _idx + 1; + } + feedsWithFunding.pop(); + _feed.details.feedsWithFundingIndex = 0; } - feedsWithFunding.pop(); - _feed.details.feedsWithFundingIndex = 0; + _feed.rewardClaimed[_timestamps[_i]] = true; } - _feed.rewardClaimed[_timestamp] = true; - return _rewardAmount; + _feed.details.balance -= _cumulativeReward; + require( + token.transfer( + msg.sender, + _cumulativeReward - ((_cumulativeReward * fee) / 1000) + ) + ); + token.approve(address(tellor), (_cumulativeReward * fee) / 1000); + tellor.addStakingRewards((_cumulativeReward * fee) / 1000); + emit TipClaimed(_feedId, _queryId, 
_cumulativeReward, msg.sender); } /** @@ -286,7 +272,7 @@ contract Autopay is UsingTellor { uint256 _amount ) external returns (bytes32 _feedId) { require( - _queryId == keccak256(_queryData) || uint256(_queryId) <= 100, + _queryId == keccak256(_queryData), "id must be hash of bytes data" ); _feedId = keccak256( @@ -336,7 +322,7 @@ contract Autopay is UsingTellor { bytes calldata _queryData ) external { require( - _queryId == keccak256(_queryData) || uint256(_queryId) <= 100, + _queryId == keccak256(_queryData), "id must be hash of bytes data" ); require(_amount > 0, "tip must be greater than zero"); @@ -420,6 +406,7 @@ contract Autopay is UsingTellor { { return (dataFeed[queryIdFromDataFeedId[_feedId]][_feedId].details); } + /** * @dev Getter function for currently funded feed details * @return FeedDetailsWithQueryData[] array of details for funded feeds @@ -455,6 +442,7 @@ contract Autopay is UsingTellor { function getFundedQueryIds() external view returns (bytes32[] memory) { return queryIdsWithFunding; } + /** * @dev Getter function for currently funded single tips with queryData * @return SingleTipsWithQueryData[] array of current tips @@ -478,6 +466,7 @@ contract Autopay is UsingTellor { } return _query; } + /** * @dev Getter function to get number of past tips * @param _queryId id of reported data @@ -568,6 +557,7 @@ contract Autopay is UsingTellor { ) external view returns (bool) { return dataFeed[_queryId][_feedId].rewardClaimed[_timestamp]; } + /** * @dev Getter function for reading whether a reward has been claimed * @param _feedId feedId of dataFeed @@ -588,6 +578,7 @@ contract Autopay is UsingTellor { } return _status; } + /** * @dev Getter function for retrieving the total amount of tips paid by a given address * @param _user address of user to query @@ -627,10 +618,9 @@ contract Autopay is UsingTellor { block.timestamp - _timestamp > 12 hours, "buffer time has not passed" ); - - // require(!isInDispute(_queryId, _timestamp), "value disputed"); + require(!isInDispute(_queryId, _timestamp), "value disputed"); require( - msg.sender == tellor.getReporterByTimestamp(_queryId, _timestamp), + msg.sender == getReporterByTimestamp(_queryId, _timestamp), "msg sender must be reporter address" ); Tip[] storage _tips = tips[_queryId]; @@ -770,6 +760,7 @@ contract Autopay is UsingTellor { "price threshold not met" ); } + if (_feed.details.balance < _rewardAmount) { _rewardAmount = _feed.details.balance; } diff --git a/contracts/Tellor/Governance.sol b/contracts/Tellor/Governance.sol new file mode 100644 index 00000000..89975cf3 --- /dev/null +++ b/contracts/Tellor/Governance.sol @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.3; + +import "../../interfaces/IOracle.sol"; +import "../../interfaces/IERC20.sol"; +import "./UsingTellor.sol"; + +/** + @author Tellor Inc. + @title Governance + @dev This is a governance contract to be used with TellorFlex. 
It handles disputing + * Tellor oracle data and voting on those disputes +*/ +contract Governance is UsingTellor { + // Storage + IOracle public oracle; // Tellor oracle contract + IERC20 public token; // token used for dispute fees, same as reporter staking token + address public oracleAddress; //tellorFlex address + address public teamMultisig; // address of team multisig wallet, one of four stakeholder groups + uint256 public voteCount; // total number of votes initiated + bytes32 public autopayAddrsQueryId = + keccak256(abi.encode("AutopayAddresses", abi.encode(bytes("")))); // query id for autopay addresses array + mapping(uint256 => Dispute) private disputeInfo; // mapping of dispute IDs to the details of the dispute + mapping(bytes32 => uint256) private openDisputesOnId; // mapping of a query ID to the number of disputes on that query ID + mapping(uint256 => Vote) private voteInfo; // mapping of dispute IDs to the details of the vote + mapping(bytes32 => uint256[]) private voteRounds; // mapping of vote identifier hashes to an array of dispute IDs + mapping(address => uint256) private voteTallyByAddress; // mapping of addresses to the number of votes they have cast + mapping(address => uint256[]) private disputeIdsByReporter; // mapping of reporter addresses to an array of dispute IDs + + enum VoteResult { + FAILED, + PASSED, + INVALID + } // status of a potential vote + + // Structs + struct Dispute { + bytes32 queryId; // query ID of disputed value + uint256 timestamp; // timestamp of disputed value + bytes value; // disputed value + address disputedReporter; // reporter who submitted the disputed value + uint256 slashedAmount; // amount of tokens slashed from reporter + } + + struct Tally { + uint256 doesSupport; // number of votes in favor + uint256 against; // number of votes against + uint256 invalidQuery; // number of votes for invalid + } + + struct Vote { + bytes32 identifierHash; // identifier hash of the vote + uint256 voteRound; // the round of voting on a given dispute or proposal + uint256 startDate; // timestamp of when vote was initiated + uint256 blockNumber; // block number of when vote was initiated + uint256 fee; // fee paid to initiate the vote round + uint256 tallyDate; // timestamp of when the votes were tallied + Tally tokenholders; // vote tally of tokenholders + Tally users; // vote tally of users + Tally reporters; // vote tally of reporters + Tally teamMultisig; // vote tally of teamMultisig + bool executed; // boolean of whether the vote was executed + VoteResult result; // VoteResult after votes were tallied + address initiator; // address which initiated dispute/proposal + mapping(address => bool) voted; // mapping of address to whether or not they voted + } + + // Events + event NewDispute( + uint256 _disputeId, + bytes32 _queryId, + uint256 _timestamp, + address _reporter + ); // Emitted when a new dispute is opened + + event Voted( + uint256 _disputeId, + bool _supports, + address _voter, + bool _invalidQuery + ); // Emitted when an address casts their vote + event VoteExecuted(uint256 _disputeId, VoteResult _result); // Emitted when a vote is executed + event VoteTallied( + uint256 _disputeId, + VoteResult _result, + address _initiator, + address _reporter + ); // Emitted when all casting for a vote is tallied + + /** + * @dev Initializes contract parameters + * @param _tellor address of tellor oracle contract to be governed + * @param _teamMultisig address of tellor team multisig, one of four voting + * stakeholder groups + */ + 
constructor(address payable _tellor, address _teamMultisig) + UsingTellor(_tellor) + { + oracle = IOracle(_tellor); + token = IERC20(oracle.getTokenAddress()); + oracleAddress = _tellor; + teamMultisig = _teamMultisig; + } + + /** + * @dev Initializes a dispute/vote in the system + * @param _queryId being disputed + * @param _timestamp being disputed + */ + function beginDispute(bytes32 _queryId, uint256 _timestamp) external { + // Ensure value actually exists + require( + oracle.getBlockNumberByTimestamp(_queryId, _timestamp) != 0, + "no value exists at given timestamp" + ); + bytes32 _hash = keccak256(abi.encodePacked(_queryId, _timestamp)); + // Push new vote round + uint256 _disputeId = voteCount + 1; + uint256[] storage _voteRounds = voteRounds[_hash]; + _voteRounds.push(_disputeId); + + // Create new vote and dispute + Vote storage _thisVote = voteInfo[_disputeId]; + Dispute storage _thisDispute = disputeInfo[_disputeId]; + + // Initialize dispute information - query ID, timestamp, value, etc. + _thisDispute.queryId = _queryId; + _thisDispute.timestamp = _timestamp; + _thisDispute.disputedReporter = oracle.getReporterByTimestamp( + _queryId, + _timestamp + ); + // Initialize vote information - hash, initiator, block number, etc. + _thisVote.identifierHash = _hash; + _thisVote.initiator = msg.sender; + _thisVote.blockNumber = block.number; + _thisVote.startDate = block.timestamp; + _thisVote.voteRound = _voteRounds.length; + disputeIdsByReporter[_thisDispute.disputedReporter].push(_disputeId); + uint256 _disputeFee = getDisputeFee(); + if (_voteRounds.length == 1) { + require( + block.timestamp - _timestamp < 12 hours, + "Dispute must be started within reporting lock time" + ); + openDisputesOnId[_queryId]++; + // calculate dispute fee based on number of open disputes on query ID + _disputeFee = _disputeFee * 2**(openDisputesOnId[_queryId] - 1); + // slash a single stakeAmount from reporter + _thisDispute.slashedAmount = oracle.slashReporter(_thisDispute.disputedReporter, address(this)); + _thisDispute.value = oracle.retrieveData(_queryId, _timestamp); + oracle.removeValue(_queryId, _timestamp); + } else { + uint256 _prevId = _voteRounds[_voteRounds.length - 2]; + require( + block.timestamp - voteInfo[_prevId].tallyDate < 1 days, + "New dispute round must be started within a day" + ); + _disputeFee = _disputeFee * 2**(_voteRounds.length - 1); + _thisDispute.slashedAmount = disputeInfo[_voteRounds[0]].slashedAmount; + _thisDispute.value = disputeInfo[_voteRounds[0]].value; + } + if (_disputeFee > oracle.getStakeAmount()) { + _disputeFee = oracle.getStakeAmount(); + } + _thisVote.fee = _disputeFee; + voteCount++; + require( + token.transferFrom(msg.sender, address(this), _disputeFee), + "Fee must be paid" + ); // This is the dispute fee. 
Returned if dispute passes + emit NewDispute( + _disputeId, + _queryId, + _timestamp, + _thisDispute.disputedReporter + ); + } + + /** + * @dev Executes vote and transfers corresponding balances to initiator/reporter + * @param _disputeId is the ID of the vote being executed + */ + function executeVote(uint256 _disputeId) external { + // Ensure validity of vote ID, vote has been executed, and vote must be tallied + Vote storage _thisVote = voteInfo[_disputeId]; + require(_disputeId <= voteCount && _disputeId > 0, "Dispute ID must be valid"); + require(!_thisVote.executed, "Vote has already been executed"); + require(_thisVote.tallyDate > 0, "Vote must be tallied"); + // Ensure vote must be final vote and that time has to be pass (86400 = 24 * 60 * 60 for seconds in a day) + require( + voteRounds[_thisVote.identifierHash].length == _thisVote.voteRound, + "Must be the final vote" + ); + //The time has to pass after the vote is tallied + require( + block.timestamp - _thisVote.tallyDate >= 1 days, + "1 day has to pass after tally to allow for disputes" + ); + _thisVote.executed = true; + Dispute storage _thisDispute = disputeInfo[_disputeId]; + openDisputesOnId[_thisDispute.queryId]--; + uint256 _i; + uint256 _voteID; + if (_thisVote.result == VoteResult.PASSED) { + // If vote is in dispute and passed, iterate through each vote round and transfer the dispute to initiator + for ( + _i = voteRounds[_thisVote.identifierHash].length; + _i > 0; + _i-- + ) { + _voteID = voteRounds[_thisVote.identifierHash][_i - 1]; + _thisVote = voteInfo[_voteID]; + // If the first vote round, also make sure to transfer the reporter's slashed stake to the initiator + if (_i == 1) { + token.transfer( + _thisVote.initiator, + _thisDispute.slashedAmount + ); + } + token.transfer(_thisVote.initiator, _thisVote.fee); + } + } else if (_thisVote.result == VoteResult.INVALID) { + // If vote is in dispute and is invalid, iterate through each vote round and transfer the dispute fee to initiator + for ( + _i = voteRounds[_thisVote.identifierHash].length; + _i > 0; + _i-- + ) { + _voteID = voteRounds[_thisVote.identifierHash][_i - 1]; + _thisVote = voteInfo[_voteID]; + token.transfer(_thisVote.initiator, _thisVote.fee); + } + // Transfer slashed tokens back to disputed reporter + token.transfer( + _thisDispute.disputedReporter, + _thisDispute.slashedAmount + ); + } else if (_thisVote.result == VoteResult.FAILED) { + // If vote is in dispute and fails, iterate through each vote round and transfer the dispute fee to disputed reporter + uint256 _reporterReward = 0; + for ( + _i = voteRounds[_thisVote.identifierHash].length; + _i > 0; + _i-- + ) { + _voteID = voteRounds[_thisVote.identifierHash][_i - 1]; + _thisVote = voteInfo[_voteID]; + _reporterReward += _thisVote.fee; + } + _reporterReward += _thisDispute.slashedAmount; + token.transfer(_thisDispute.disputedReporter, _reporterReward); + } + emit VoteExecuted(_disputeId, voteInfo[_disputeId].result); + } + + /** + * @dev Tallies the votes and begins the 1 day challenge period + * @param _disputeId is the dispute id + */ + function tallyVotes(uint256 _disputeId) external { + // Ensure vote has not been executed and that vote has not been tallied + Vote storage _thisVote = voteInfo[_disputeId]; + require(_thisVote.tallyDate == 0, "Vote has already been tallied"); + require(_disputeId <= voteCount && _disputeId > 0, "Vote does not exist"); + // Determine appropriate vote duration dispute round + // Vote time increases as rounds increase but only up to 6 days (withdrawal period) + 
require( + block.timestamp - _thisVote.startDate >= + 86400 * _thisVote.voteRound || + block.timestamp - _thisVote.startDate >= 86400 * 6, + "Time for voting has not elapsed" + ); + // Get total votes from each separate stakeholder group. This will allow + // normalization so each group's votes can be combined and compared to + // determine the vote outcome. + uint256 _tokenVoteSum = _thisVote.tokenholders.doesSupport + + _thisVote.tokenholders.against + + _thisVote.tokenholders.invalidQuery; + uint256 _reportersVoteSum = _thisVote.reporters.doesSupport + + _thisVote.reporters.against + + _thisVote.reporters.invalidQuery; + uint256 _multisigVoteSum = _thisVote.teamMultisig.doesSupport + + _thisVote.teamMultisig.against + + _thisVote.teamMultisig.invalidQuery; + uint256 _usersVoteSum = _thisVote.users.doesSupport + + _thisVote.users.against + + _thisVote.users.invalidQuery; + // Cannot divide by zero + if (_tokenVoteSum == 0) { + _tokenVoteSum++; + } + if (_reportersVoteSum == 0) { + _reportersVoteSum++; + } + if (_multisigVoteSum == 0) { + _multisigVoteSum++; + } + if (_usersVoteSum == 0) { + _usersVoteSum++; + } + // Normalize and combine each stakeholder group votes + uint256 _scaledDoesSupport = ((_thisVote.tokenholders.doesSupport * + 1e18) / _tokenVoteSum) + + ((_thisVote.reporters.doesSupport * 1e18) / _reportersVoteSum) + + ((_thisVote.teamMultisig.doesSupport * 1e18) / _multisigVoteSum) + + ((_thisVote.users.doesSupport * 1e18) / _usersVoteSum); + uint256 _scaledAgainst = ((_thisVote.tokenholders.against * 1e18) / + _tokenVoteSum) + + ((_thisVote.reporters.against * 1e18) / _reportersVoteSum) + + ((_thisVote.teamMultisig.against * 1e18) / _multisigVoteSum) + + ((_thisVote.users.against * 1e18) / _usersVoteSum); + uint256 _scaledInvalid = ((_thisVote.tokenholders.invalidQuery * 1e18) / + _tokenVoteSum) + + ((_thisVote.reporters.invalidQuery * 1e18) / _reportersVoteSum) + + ((_thisVote.teamMultisig.invalidQuery * 1e18) / _multisigVoteSum) + + ((_thisVote.users.invalidQuery * 1e18) / _usersVoteSum); + + // If votes in support outweight the sum of against and invalid, result is passed + if (_scaledDoesSupport > _scaledAgainst + _scaledInvalid) { + _thisVote.result = VoteResult.PASSED; + // If votes in against outweight the sum of support and invalid, result is failed + } else if (_scaledAgainst > _scaledDoesSupport + _scaledInvalid) { + _thisVote.result = VoteResult.FAILED; + // Otherwise, result is invalid + } else { + _thisVote.result = VoteResult.INVALID; + } + + _thisVote.tallyDate = block.timestamp; // Update time vote was tallied + emit VoteTallied( + _disputeId, + _thisVote.result, + _thisVote.initiator, + disputeInfo[_disputeId].disputedReporter + ); + } + + /** + * @dev Enables the sender address to cast a vote + * @param _disputeId is the ID of the vote + * @param _supports is the address's vote: whether or not they support or are against + * @param _invalidQuery is whether or not the dispute is valid + */ + function vote( + uint256 _disputeId, + bool _supports, + bool _invalidQuery + ) public { + // Ensure that dispute has not been executed and that vote does not exist and is not tallied + require(_disputeId <= voteCount && _disputeId > 0, "Vote does not exist"); + Vote storage _thisVote = voteInfo[_disputeId]; + require(_thisVote.tallyDate == 0, "Vote has already been tallied"); + require(!_thisVote.voted[msg.sender], "Sender has already voted"); + // Update voting status and increment total queries for support, invalid, or against based on vote + 
_thisVote.voted[msg.sender] = true; + uint256 _tokenBalance = token.balanceOf(msg.sender); + (, uint256 _stakedBalance, uint256 _lockedBalance, , , , , ) = oracle.getStakerInfo(msg.sender); + _tokenBalance += _stakedBalance + _lockedBalance; + if (_invalidQuery) { + _thisVote.tokenholders.invalidQuery += _tokenBalance; + _thisVote.reporters.invalidQuery += oracle + .getReportsSubmittedByAddress(msg.sender); + _thisVote.users.invalidQuery += _getUserTips(msg.sender); + if (msg.sender == teamMultisig) { + _thisVote.teamMultisig.invalidQuery += 1; + } + } else if (_supports) { + _thisVote.tokenholders.doesSupport += _tokenBalance; + _thisVote.reporters.doesSupport += oracle.getReportsSubmittedByAddress(msg.sender); + _thisVote.users.doesSupport += _getUserTips(msg.sender); + if (msg.sender == teamMultisig) { + _thisVote.teamMultisig.doesSupport += 1; + } + } else { + _thisVote.tokenholders.against += _tokenBalance; + _thisVote.reporters.against += oracle.getReportsSubmittedByAddress( + msg.sender + ); + _thisVote.users.against += _getUserTips(msg.sender); + if (msg.sender == teamMultisig) { + _thisVote.teamMultisig.against += 1; + } + } + voteTallyByAddress[msg.sender]++; + emit Voted(_disputeId, _supports, msg.sender, _invalidQuery); + } + + /** + * @dev Enables the sender address to cast votes for multiple disputes + * @param _disputeIds is an array of vote IDs + * @param _supports is an array of the address's votes: whether or not they support or are against + * @param _invalidQuery is array of whether or not the dispute is valid + */ + function voteOnMultipleDisputes( + uint256[] memory _disputeIds, + bool[] memory _supports, + bool[] memory _invalidQuery + ) external { + for (uint256 _i = 0; _i < _disputeIds.length; _i++) { + vote(_disputeIds[_i], _supports[_i], _invalidQuery[_i]); + } + } + + // ***************************************************************************** + // * * + // * Getters * + // * * + // ***************************************************************************** + + /** + * @dev Determines if an address voted for a specific vote + * @param _disputeId is the ID of the vote + * @param _voter is the address of the voter to check for + * @return bool of whether or note the address voted for the specific vote + */ + function didVote(uint256 _disputeId, address _voter) + external + view + returns (bool) + { + return voteInfo[_disputeId].voted[_voter]; + } + + /** + * @dev Get the latest dispute fee + */ + function getDisputeFee() public view returns (uint256) { + return (oracle.getStakeAmount() / 10); + } + + + function getDisputesByReporter(address _reporter) external view returns (uint256[] memory) { + return disputeIdsByReporter[_reporter]; + } + + /** + * @dev Returns info on a dispute for a given ID + * @param _disputeId is the ID of a specific dispute + * @return bytes32 of the data ID of the dispute + * @return uint256 of the timestamp of the dispute + * @return bytes memory of the value being disputed + * @return address of the reporter being disputed + */ + function getDisputeInfo(uint256 _disputeId) + external + view + returns ( + bytes32, + uint256, + bytes memory, + address + ) + { + Dispute storage _d = disputeInfo[_disputeId]; + return (_d.queryId, _d.timestamp, _d.value, _d.disputedReporter); + } + + /** + * @dev Returns the number of open disputes for a specific query ID + * @param _queryId is the ID of a specific data feed + * @return uint256 of the number of open disputes for the query ID + */ + function getOpenDisputesOnId(bytes32 _queryId) + 
external + view + returns (uint256) + { + return openDisputesOnId[_queryId]; + } + + /** + * @dev Returns the total number of votes + * @return uint256 of the total number of votes + */ + function getVoteCount() external view returns (uint256) { + return voteCount; + } + + /** + * @dev Returns info on a vote for a given vote ID + * @param _disputeId is the ID of a specific vote + * @return bytes32 identifier hash of the vote + * @return uint256[17] memory of the pertinent round info (vote rounds, start date, fee, etc.) + * @return bool memory of both whether or not the vote was executed + * @return VoteResult result of the vote + * @return address memory of the vote initiator + */ + function getVoteInfo(uint256 _disputeId) + external + view + returns ( + bytes32, + uint256[17] memory, + bool, + VoteResult, + address + ) + { + Vote storage _v = voteInfo[_disputeId]; + return ( + _v.identifierHash, + [ + _v.voteRound, + _v.startDate, + _v.blockNumber, + _v.fee, + _v.tallyDate, + _v.tokenholders.doesSupport, + _v.tokenholders.against, + _v.tokenholders.invalidQuery, + _v.users.doesSupport, + _v.users.against, + _v.users.invalidQuery, + _v.reporters.doesSupport, + _v.reporters.against, + _v.reporters.invalidQuery, + _v.teamMultisig.doesSupport, + _v.teamMultisig.against, + _v.teamMultisig.invalidQuery + ], + _v.executed, + _v.result, + _v.initiator + ); + } + + /** + * @dev Returns an array of voting rounds for a given vote + * @param _hash is the identifier hash for a vote + * @return uint256[] memory dispute IDs of the vote rounds + */ + function getVoteRounds(bytes32 _hash) + external + view + returns (uint256[] memory) + { + return voteRounds[_hash]; + } + + /** + * @dev Returns the total number of votes cast by an address + * @param _voter is the address of the voter to check for + * @return uint256 of the total number of votes cast by the voter + */ + function getVoteTallyByAddress(address _voter) + external + view + returns (uint256) + { + return voteTallyByAddress[_voter]; + } + + // Internal + /** + * @dev Retrieves total tips contributed to autopay by a given address + * @param _user address of the user to check the tip count for + * @return _userTipTally uint256 of total tips contributed to autopay by the address + */ + function _getUserTips(address _user) internal returns (uint256 _userTipTally) { + // get autopay addresses array from oracle + (bytes memory _autopayAddrsBytes, uint256 _timestamp) = getDataBefore( + autopayAddrsQueryId, + block.timestamp - 12 hours + ); + if (_timestamp > 0) { + address[] memory _autopayAddrs = abi.decode( + _autopayAddrsBytes, + (address[]) + ); + // iterate through autopay addresses retrieve tips by user address + for (uint256 _i = 0; _i < _autopayAddrs.length; _i++) { + (bool _success, bytes memory _returnData) = _autopayAddrs[_i] + .call( + abi.encodeWithSignature( + "getTipsByAddress(address)", + _user + ) + ); + if (_success) { + _userTipTally += abi.decode(_returnData, (uint256)); + } + } + } + } +} diff --git a/contracts/TellorFlex/QueryDataStorage.sol b/contracts/Tellor/QueryDataStorage.sol similarity index 97% rename from contracts/TellorFlex/QueryDataStorage.sol rename to contracts/Tellor/QueryDataStorage.sol index b6efe295..7ef68506 100644 --- a/contracts/TellorFlex/QueryDataStorage.sol +++ b/contracts/Tellor/QueryDataStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.3; +pragma solidity ^0.8.3; /** @author Tellor Inc. 
diff --git a/contracts/TellorFlex/TellorFlex360.sol b/contracts/Tellor/TellorFlex.sol similarity index 65% rename from contracts/TellorFlex/TellorFlex360.sol rename to contracts/Tellor/TellorFlex.sol index 43ceee64..3537419e 100644 --- a/contracts/TellorFlex/TellorFlex360.sol +++ b/contracts/Tellor/TellorFlex.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.3; +pragma solidity ^0.8.3; import "../../interfaces/IERC20.sol"; @@ -11,12 +11,13 @@ import "../../interfaces/IERC20.sol"; * by a single address known as 'governance', which could be an externally owned * account or a contract, allowing for a flexible, modular design. */ -contract TellorFlex360 { +contract TellorFlex { // Storage IERC20 public token; // token used for staking and rewards address public governance; // address with ability to remove values and slash reporters address public owner; // contract deployer, can call init function once uint256 public accumulatedRewardPerShare; // accumulated staking reward per staked token + uint256 public minimumStakeAmount; // minimum amount of tokens required to stake uint256 public reportingLock; // base amount of time before a reporter is able to submit a value again uint256 public rewardRate; // total staking rewards released per second uint256 public stakeAmount; // minimum amount required to be a staker @@ -29,6 +30,7 @@ contract TellorFlex360 { uint256 public totalRewardDebt; // staking reward debt, used to calculate real staking rewards balance uint256 public totalStakeAmount; // total amount of tokens locked in contract (via stake) uint256 public totalStakers; // total number of stakers with at least stakeAmount staked, not exact + uint256 public toWithdraw; //amountLockedForWithdrawal mapping(bytes32 => Report) private reports; // mapping of query IDs to a report mapping(address => StakeInfo) private stakerDetails; // mapping from a persons address to their staking info @@ -56,6 +58,26 @@ contract TellorFlex360 { mapping(bytes32 => uint256) reportsSubmittedByQueryId; // mapping of queryId to number of reports submitted by reporter } + // Events + event NewReport( + bytes32 indexed _queryId, + uint256 indexed _time, + bytes _value, + uint256 _nonce, + bytes _queryData, + address indexed _reporter + ); + event NewStakeAmount(uint256 _newStakeAmount); + event NewStaker(address indexed _staker, uint256 indexed _amount); + event ReporterSlashed( + address indexed _reporter, + address _recipient, + uint256 _slashAmount + ); + event StakeWithdrawn(address _staker); + event StakeWithdrawRequested(address _staker, uint256 _amount); + event ValueRemoved(bytes32 _queryId, uint256 _timestamp); + // Functions /** * @dev Initializes system parameters @@ -70,6 +92,7 @@ contract TellorFlex360 { uint256 _reportingLock, uint256 _stakeAmountDollarTarget, uint256 _stakingTokenPrice, + uint256 _minimumStakeAmount, bytes32 _stakingTokenPriceQueryId ) { require(_token != address(0), "must set token address"); @@ -80,7 +103,13 @@ contract TellorFlex360 { owner = msg.sender; reportingLock = _reportingLock; stakeAmountDollarTarget = _stakeAmountDollarTarget; - stakeAmount = (_stakeAmountDollarTarget * 1e18) / _stakingTokenPrice; + minimumStakeAmount = _minimumStakeAmount; + uint256 _potentialStakeAmount = (_stakeAmountDollarTarget * 1e18) / _stakingTokenPrice; + if(_potentialStakeAmount < _minimumStakeAmount) { + stakeAmount = _minimumStakeAmount; + } else { + stakeAmount = _potentialStakeAmount; + } stakingTokenPriceQueryId = _stakingTokenPriceQueryId; } @@ -89,7 +118,7 @@ contract 
TellorFlex360 { * @param _governanceAddress address of governance contract (github.com/tellor-io/governance) */ function init(address _governanceAddress) external { - // require(msg.sender == owner, "only owner can set governance address"); + require(msg.sender == owner, "only owner can set governance address"); require(governance == address(0), "governance address already set"); require( _governanceAddress != address(0), @@ -98,6 +127,23 @@ contract TellorFlex360 { governance = _governanceAddress; } + /** + * @dev Funds the Flex contract with staking rewards (paid by autopay and minting) + * @param _amount amount of tokens to fund contract with + */ + function addStakingRewards(uint256 _amount) external { + require(token.transferFrom(msg.sender, address(this), _amount)); + _updateRewards(); + stakingRewardsBalance += _amount; + // update reward rate = real staking rewards balance / 30 days + rewardRate = + (stakingRewardsBalance - + ((accumulatedRewardPerShare * totalStakeAmount) / + 1e18 - + totalRewardDebt)) / + 30 days; + } + /** * @dev Allows a reporter to submit stake * @param _amount amount of tokens to stake @@ -111,6 +157,7 @@ contract TellorFlex360 { if (_lockedBalance >= _amount) { // if staker's locked balance covers full _amount, use that _staker.lockedBalance -= _amount; + toWithdraw -= _amount; } else { // otherwise, stake the whole locked balance and transfer the // remaining amount from the staker's address @@ -121,12 +168,108 @@ contract TellorFlex360 { _amount - _lockedBalance ) ); + toWithdraw -= _staker.lockedBalance; _staker.lockedBalance = 0; } + } else { + if (_stakedBalance == 0) { + // if staked balance and locked balance equal 0, save current vote tally. + // voting participation used for calculating rewards + (bool _success, bytes memory _returnData) = governance.call( + abi.encodeWithSignature("getVoteCount()") + ); + if (_success) { + _staker.startVoteCount = uint256(abi.decode(_returnData, (uint256))); + } + (_success,_returnData) = governance.call( + abi.encodeWithSignature("getVoteTallyByAddress(address)",msg.sender) + ); + if(_success){ + _staker.startVoteTally = abi.decode(_returnData,(uint256)); + } + } require(token.transferFrom(msg.sender, address(this), _amount)); } _updateStakeAndPayRewards(msg.sender, _stakedBalance + _amount); _staker.startDate = block.timestamp; // This resets the staker start date to now + emit NewStaker(msg.sender, _amount); + } + + /** + * @dev Removes a value from the oracle. + * Note: this function is only callable by the Governance contract. 
+ * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp of the data value to remove + */ + function removeValue(bytes32 _queryId, uint256 _timestamp) external { + require(msg.sender == governance, "caller must be governance address"); + Report storage _report = reports[_queryId]; + require(!_report.isDisputed[_timestamp], "value already disputed"); + uint256 _index = _report.timestampIndex[_timestamp]; + require(_timestamp == _report.timestamps[_index], "invalid timestamp"); + _report.valueByTimestamp[_timestamp] = ""; + _report.isDisputed[_timestamp] = true; + emit ValueRemoved(_queryId, _timestamp); + } + + /** + * @dev Allows a reporter to request to withdraw their stake + * @param _amount amount of staked tokens requesting to withdraw + */ + function requestStakingWithdraw(uint256 _amount) external { + StakeInfo storage _staker = stakerDetails[msg.sender]; + require( + _staker.stakedBalance >= _amount, + "insufficient staked balance" + ); + _updateStakeAndPayRewards(msg.sender, _staker.stakedBalance - _amount); + _staker.startDate = block.timestamp; + _staker.lockedBalance += _amount; + toWithdraw += _amount; + emit StakeWithdrawRequested(msg.sender, _amount); + } + + /** + * @dev Slashes a reporter and transfers their stake amount to the given recipient + * Note: this function is only callable by the governance address. + * @param _reporter is the address of the reporter being slashed + * @param _recipient is the address receiving the reporter's stake + * @return _slashAmount uint256 amount of token slashed and sent to recipient address + */ + function slashReporter(address _reporter, address _recipient) + external + returns (uint256 _slashAmount) + { + require(msg.sender == governance, "only governance can slash reporter"); + StakeInfo storage _staker = stakerDetails[_reporter]; + uint256 _stakedBalance = _staker.stakedBalance; + uint256 _lockedBalance = _staker.lockedBalance; + require(_stakedBalance + _lockedBalance > 0, "zero staker balance"); + if (_lockedBalance >= stakeAmount) { + // if locked balance is at least stakeAmount, slash from locked balance + _slashAmount = stakeAmount; + _staker.lockedBalance -= stakeAmount; + toWithdraw -= stakeAmount; + } else if (_lockedBalance + _stakedBalance >= stakeAmount) { + // if locked balance + staked balance is at least stakeAmount, + // slash from locked balance and slash remainder from staked balance + _slashAmount = stakeAmount; + _updateStakeAndPayRewards( + _reporter, + _stakedBalance - (stakeAmount - _lockedBalance) + ); + toWithdraw -= _lockedBalance; + _staker.lockedBalance = 0; + } else { + // if sum(locked balance + staked balance) is less than stakeAmount, + // slash sum + _slashAmount = _stakedBalance + _lockedBalance; + toWithdraw -= _lockedBalance; + _updateStakeAndPayRewards(_reporter, 0); + _staker.lockedBalance = 0; + } + require(token.transfer(_recipient, _slashAmount)); + emit ReporterSlashed(_reporter, _recipient, _slashAmount); } /** @@ -160,7 +303,7 @@ contract TellorFlex360 { "still in reporter time lock, please wait!" 
); require( - _queryId == keccak256(_queryData) || uint256(_queryId) <= 100, + _queryId == keccak256(_queryData), "query id must be hash of query data" ); _staker.reporterLastTimestamp = block.timestamp; @@ -177,24 +320,112 @@ contract TellorFlex360 { _report.reporterByTimestamp[block.timestamp] = msg.sender; // Disperse Time Based Reward uint256 _reward = ((block.timestamp - timeOfLastNewValue) * timeBasedReward) / 300; //.5 TRB per 5 minutes + uint256 _totalTimeBasedRewardsBalance = + token.balanceOf(address(this)) - + (totalStakeAmount + stakingRewardsBalance + toWithdraw); + if (_totalTimeBasedRewardsBalance > 0 && _reward > 0) { + if (_totalTimeBasedRewardsBalance < _reward) { + token.transfer(msg.sender, _totalTimeBasedRewardsBalance); + } else { + token.transfer(msg.sender, _reward); + } + } + // Update last oracle value and number of values submitted by a reporter timeOfLastNewValue = block.timestamp; _staker.reportsSubmitted++; _staker.reportsSubmittedByQueryId[_queryId]++; + emit NewReport( + _queryId, + block.timestamp, + _value, + _nonce, + _queryData, + msg.sender + ); } /** * @dev Updates the stake amount after retrieving the latest * 12+-hour-old staking token price from the oracle */ - function updateStakeAmount(uint256 _amount) external{ - stakeAmount = _amount; + function updateStakeAmount() external { + // get staking token price + (bool _valFound, bytes memory _val, ) = getDataBefore( + stakingTokenPriceQueryId, + block.timestamp - 12 hours + ); + if (_valFound) { + uint256 _stakingTokenPrice = abi.decode(_val, (uint256)); + require( + _stakingTokenPrice >= 0.01 ether && _stakingTokenPrice < 1000000 ether, + "invalid staking token price" + ); + + uint256 _adjustedStakeAmount = (stakeAmountDollarTarget * 1e18) / _stakingTokenPrice; + if(_adjustedStakeAmount < minimumStakeAmount) { + stakeAmount = minimumStakeAmount; + } else { + stakeAmount = _adjustedStakeAmount; + } + emit NewStakeAmount(stakeAmount); } + } + + /** + * @dev Withdraws a reporter's stake after the lock period expires + */ + function withdrawStake() external { + StakeInfo storage _staker = stakerDetails[msg.sender]; + // Ensure reporter is locked and that enough time has passed + require( + block.timestamp - _staker.startDate >= 7 days, + "7 days didn't pass" + ); + require( + _staker.lockedBalance > 0, + "reporter not locked for withdrawal" + ); + require(token.transfer(msg.sender, _staker.lockedBalance)); + toWithdraw -= _staker.lockedBalance; + _staker.lockedBalance = 0; + emit StakeWithdrawn(msg.sender); + } + // ***************************************************************************** // * * // * Getters * // * * // ***************************************************************************** + /** + * @dev Returns the block number at a given timestamp + * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp to find the corresponding block number for + * @return uint256 block number of the timestamp for the given data ID + */ + function getBlockNumberByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (uint256) + { + return reports[_queryId].timestampToBlockNum[_timestamp]; + } + + /** + * @dev Returns the current value of a data feed given a specific ID + * @param _queryId is the ID of the specific data feed + * @return _value the latest submitted value for the given queryId + */ + function getCurrentValue(bytes32 _queryId) + external + view + returns (bytes memory _value) + { + bool _didGet; + (_didGet, _value, ) = 
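// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] updateStakeAmount above targets a
// fixed dollar value for the stake and floors it at minimumStakeAmount. The
// sketch below reproduces that clamp; the two constants are assumed values
// for illustration only (the real ones are set in the constructor).
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

contract StakeAmountSketch {
    uint256 public constant minimumStakeAmount = 10e18;       // assumed floor
    uint256 public constant stakeAmountDollarTarget = 500e18; // assumed USD target, 18 decimals
    uint256 public stakeAmount;

    function adjust(uint256 _stakingTokenPrice) external {
        // mirror the sanity bounds applied to the reported price
        require(
            _stakingTokenPrice >= 0.01 ether && _stakingTokenPrice < 1000000 ether,
            "invalid staking token price"
        );
        uint256 _adjusted = (stakeAmountDollarTarget * 1e18) / _stakingTokenPrice;
        // e.g. target $500 at a $25 token price => 20 tokens required to stake
        stakeAmount = _adjusted < minimumStakeAmount ? minimumStakeAmount : _adjusted;
    }
}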
getDataBefore(_queryId, block.timestamp + 1); + if(!_didGet){revert();} + } + /** * @dev Retrieves the latest value for the queryId before the specified timestamp * @param _queryId is the queryId to look up the value for @@ -222,6 +453,14 @@ contract TellorFlex360 { return (true, _value, _timestampRetrieved); } + /** + * @dev Returns governance address + * @return address governance + */ + function getGovernanceAddress() external view returns (address) { + return governance; + } + /** * @dev Counts the number of values that have been submitted for the request. * @param _queryId the id to look up @@ -235,6 +474,39 @@ contract TellorFlex360 { return reports[_queryId].timestamps.length; } + /** + * @dev Returns the pending staking reward for a given address + * @param _stakerAddress staker address to look up + * @return _pendingReward - pending reward for given staker + */ + function getPendingRewardByStaker(address _stakerAddress) + external + returns (uint256 _pendingReward) + { + StakeInfo storage _staker = stakerDetails[_stakerAddress]; + _pendingReward = (_staker.stakedBalance * + _getUpdatedAccumulatedRewardPerShare()) / + 1e18 - + _staker.rewardDebt; + (bool _success, bytes memory _returnData) = governance.call( + abi.encodeWithSignature("getVoteCount()") + ); + uint256 _numberOfVotes; + if (_success) { + _numberOfVotes = uint256(abi.decode(_returnData, (uint256))) - _staker.startVoteCount; + } + if (_numberOfVotes > 0) { + (_success,_returnData) = governance.call( + abi.encodeWithSignature("getVoteTallyByAddress(address)",_stakerAddress) + ); + if(_success){ + _pendingReward = + (_pendingReward * (abi.decode(_returnData,(uint256)) - _staker.startVoteTally)) + / _numberOfVotes; + } + } + } + /** * @dev Returns the real staking rewards balance after accounting for unclaimed rewards * @return uint256 real staking rewards balance @@ -493,12 +765,50 @@ contract TellorFlex360 { return (false, 0); } + /** + * @dev Returns the index of a reporter timestamp in the timestamp array for a specific data ID + * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp to find in the timestamps array + * @return uint256 of the index of the reporter timestamp in the array for specific ID + */ + function getTimestampIndexByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (uint256) + { + return reports[_queryId].timestampIndex[_timestamp]; + } + + /** + * @dev Returns the address of the token used for staking + * @return address of the token used for staking + */ + function getTokenAddress() external view returns (address) { + return address(token); + } + + /** + * @dev Returns total amount of token staked for reporting + * @return uint256 total amount of token staked + */ + function getTotalStakeAmount() external view returns (uint256) { + return totalStakeAmount; + } + + /** + * @dev Returns total number of current stakers. 
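// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] getPendingRewardByStaker above
// pro-rates rewards by the share of governance votes the staker participated
// in since staking. The pure helper below isolates that scaling step.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

contract PendingRewardSketch {
    function scaleByParticipation(
        uint256 _pendingReward,
        uint256 _votesSinceStaking, // getVoteCount() - startVoteCount
        uint256 _tallySinceStaking  // getVoteTallyByAddress(staker) - startVoteTally
    ) external pure returns (uint256) {
        if (_votesSinceStaking == 0) {
            // no governance votes occurred: full pending reward is claimable
            return _pendingReward;
        }
        // e.g. 10 votes held, staker voted in 4 of them => 40% of the reward
        return (_pendingReward * _tallySinceStaking) / _votesSinceStaking;
    }
}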
Reporters with stakedBalance less than stakeAmount are excluded from this total + * @return uint256 total stakers + */ + function getTotalStakers() external view returns (uint256) { + return totalStakers; + } + /** * @dev Returns total balance of time based rewards in contract * @return uint256 amount of trb */ function getTotalTimeBasedRewardsBalance() external view returns (uint256) { - return token.balanceOf(address(this)) - (totalStakeAmount + stakingRewardsBalance); + return token.balanceOf(address(this)) - (totalStakeAmount + stakingRewardsBalance + toWithdraw); } /** @@ -529,6 +839,14 @@ contract TellorFlex360 { return reports[_queryId].valueByTimestamp[_timestamp]; } + /** + * @dev Used during the upgrade process to verify valid Tellor contracts + * @return bool value used to verify valid Tellor contracts + */ + function verify() external pure returns (uint256) { + return 9999; + } + // ***************************************************************************** // * * // * Internal functions * @@ -591,6 +909,32 @@ contract TellorFlex360 { accumulatedRewardPerShare) / 1e18 - _staker.rewardDebt; + // get staker voting participation rate + uint256 _numberOfVotes; + (bool _success, bytes memory _returnData) = governance.call( + abi.encodeWithSignature("getVoteCount()") + ); + if (_success) { + _numberOfVotes = + uint256(abi.decode(_returnData, (uint256))) - + _staker.startVoteCount; + } + if (_numberOfVotes > 0) { + // staking reward = pending reward * voting participation rate + (_success, _returnData) = governance.call( + abi.encodeWithSignature("getVoteTallyByAddress(address)",_stakerAddress) + ); + if(_success){ + uint256 _voteTally = abi.decode(_returnData,(uint256)); + uint256 _tempPendingReward = + (_pendingReward * + (_voteTally - _staker.startVoteTally)) / + _numberOfVotes; + if (_tempPendingReward < _pendingReward) { + _pendingReward = _tempPendingReward; + } + } + } stakingRewardsBalance -= _pendingReward; require(token.transfer(msg.sender, _pendingReward)); totalRewardDebt -= _staker.rewardDebt; @@ -615,6 +959,16 @@ contract TellorFlex360 { 1e18; totalRewardDebt += _staker.rewardDebt; totalStakeAmount += _staker.stakedBalance; + // update reward rate if staking rewards are available + // given staker's updated parameters + if(rewardRate == 0) { + rewardRate = + (stakingRewardsBalance - + ((accumulatedRewardPerShare * totalStakeAmount) / + 1e18 - + totalRewardDebt)) / + 30 days; + } } /** diff --git a/contracts/TellorFlex/TellorPlayground.sol b/contracts/Tellor/TellorPlayground.sol similarity index 63% rename from contracts/TellorFlex/TellorPlayground.sol rename to contracts/Tellor/TellorPlayground.sol index 5c690bbb..345e4b96 100644 --- a/contracts/TellorFlex/TellorPlayground.sol +++ b/contracts/Tellor/TellorPlayground.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.3; +pragma solidity ^0.8.3; contract TellorPlayground { @@ -18,19 +18,11 @@ contract TellorPlayground { address _reporter ); event NewStaker(address _staker, uint256 _amount); - event TipAdded( - address indexed _user, - bytes32 indexed _queryId, - uint256 _tip, - uint256 _totalTip, - bytes _queryData - ); event StakeWithdrawRequested(address _staker, uint256 _amount); event StakeWithdrawn(address _staker); event Transfer(address indexed from, address indexed to, uint256 value); // Storage - mapping(bytes32 => address) public addresses; mapping(bytes32 => mapping(uint256 => bool)) public isDisputed; //queryId -> timestamp -> value mapping(bytes32 => mapping(uint256 => address)) public 
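// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] getTotalTimeBasedRewardsBalance
// above now also subtracts toWithdraw: the contract's token balance is
// partitioned into stakes, staking rewards and pending withdrawals, and only
// the remainder funds time-based rewards. A pure restatement of that split:
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

contract TbrBalanceSketch {
    function timeBasedRewardsBalance(
        uint256 _contractTokenBalance, // token.balanceOf(address(this))
        uint256 _totalStakeAmount,     // tokens locked as stakes
        uint256 _stakingRewardsBalance,// tokens earmarked as staking rewards
        uint256 _toWithdraw            // tokens locked for withdrawal (new in this patch)
    ) external pure returns (uint256) {
        return _contractTokenBalance -
            (_totalStakeAmount + _stakingRewardsBalance + _toWithdraw);
    }
}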
reporterByTimestamp; mapping(address => StakeInfo) stakerDetails; //mapping from a persons address to their staking info @@ -41,9 +33,11 @@ contract TellorPlayground { mapping(address => mapping(address => uint256)) private _allowances; mapping(address => uint256) private _balances; + uint256 public stakeAmount; uint256 public constant timeBasedReward = 5e17; // time based reward for a reporter for successfully submitting a value uint256 public tipsInContract; // number of tips within the contract uint256 public voteCount; + address public token; uint256 private _totalSupply; string private _name; string private _symbol; @@ -66,14 +60,12 @@ contract TellorPlayground { _name = "TellorPlayground"; _symbol = "TRBP"; _decimals = 18; - addresses[ - keccak256(abi.encodePacked("_GOVERNANCE_CONTRACT")) - ] = address(this); + token = address(this); } /** - * @dev Mock function for adding staking rewards - * @param _amount quantity of tokens to transfer to this contract + * @dev Mock function for adding staking rewards. No rewards actually given to stakers + * @param _amount Amount of TRB to be added to the contract */ function addStakingRewards(uint256 _amount) external { require(_transferFrom(msg.sender, address(this), _amount)); @@ -86,11 +78,7 @@ contract TellorPlayground { * @return bool Whether the transaction succeeded * */ - function approve(address _spender, uint256 _amount) - public - virtual - returns (bool) - { + function approve(address _spender, uint256 _amount) external returns (bool){ _approve(msg.sender, _spender, _amount); return true; } @@ -109,12 +97,55 @@ contract TellorPlayground { ); } + /** + * @dev Allows a reporter to submit stake + * @param _amount amount of tokens to stake + */ + function depositStake(uint256 _amount) external { + StakeInfo storage _staker = stakerDetails[msg.sender]; + if (_staker.lockedBalance > 0) { + if (_staker.lockedBalance >= _amount) { + _staker.lockedBalance -= _amount; + } else { + require( + _transferFrom( + msg.sender, + address(this), + _amount - _staker.lockedBalance + ) + ); + _staker.lockedBalance = 0; + } + } else { + require(_transferFrom(msg.sender, address(this), _amount)); + } + _staker.startDate = block.timestamp; // This resets their stake start date to now + _staker.stakedBalance += _amount; + emit NewStaker(msg.sender, _amount); + } + /** * @dev Public function to mint tokens to the given address * @param _user The address which will receive the tokens */ function faucet(address _user) external { - _mint(_user, 1000 ether); + _mint(_user, 1000000 ether); + } + + /** + * @dev Allows a reporter to request to withdraw their stake + * @param _amount amount of staked tokens requesting to withdraw + */ + function requestStakingWithdraw(uint256 _amount) external { + StakeInfo storage _staker = stakerDetails[msg.sender]; + require( + _staker.stakedBalance >= _amount, + "insufficient staked balance" + ); + _staker.startDate = block.timestamp; + _staker.lockedBalance += _amount; + _staker.stakedBalance -= _amount; + emit StakeWithdrawRequested(msg.sender, _amount); } /** @@ -131,6 +162,7 @@ contract TellorPlayground { uint256 _nonce, bytes memory _queryData ) external { + require(keccak256(_value) != keccak256(""), "value must be submitted"); require( _nonce == timestamps[_queryId].length || _nonce == 0, "nonce must match timestamp index" @@ -154,35 +186,6 @@ contract TellorPlayground { ); } - /** - * @dev Adds a tip to a given query ID. 
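// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] A hypothetical usage harness for
// the TellorPlayground mock above: mint test TRB with the faucet, approve the
// playground, then deposit a stake. The interface is trimmed to the three
// calls exercised here; all three exist in the playground as patched.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

interface ITellorPlayground {
    function faucet(address _user) external;
    function approve(address _spender, uint256 _amount) external returns (bool);
    function depositStake(uint256 _amount) external;
}

contract PlaygroundUserSketch {
    ITellorPlayground public immutable playground;

    constructor(address _playground) {
        playground = ITellorPlayground(_playground);
    }

    function stakeFromFaucet(uint256 _amount) external {
        playground.faucet(address(this));            // mints 1,000,000 TRB to this contract
        playground.approve(address(playground), _amount);
        playground.depositStake(_amount);            // stake held by the mock
    }
}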
- * @param _queryId is the queryId to look up - * @param _amount is the amount of tips - * @param _queryData is the extra bytes data needed to fulfill the request - */ - function tipQuery( - bytes32 _queryId, - uint256 _amount, - bytes memory _queryData - ) external { - require( - _queryId == keccak256(_queryData) || uint256(_queryId) <= 100, - "id must be hash of bytes data" - ); - _transfer(msg.sender, address(this), _amount); - _amount = _amount / 2; - _burn(address(this), _amount); - tipsInContract += _amount; - tips[_queryId] += _amount; - emit TipAdded( - msg.sender, - _queryId, - _amount, - tips[_queryId], - _queryData - ); - } - /** * @dev Transfer tokens from one user to another * @param _recipient The destination address @@ -191,7 +194,6 @@ contract TellorPlayground { */ function transfer(address _recipient, uint256 _amount) public - virtual returns (bool) { _transfer(msg.sender, _recipient, _amount); @@ -209,7 +211,7 @@ contract TellorPlayground { address _sender, address _recipient, uint256 _amount - ) public virtual returns (bool) { + ) public returns (bool) { _transfer(_sender, _recipient, _amount); _approve( _sender, @@ -219,50 +221,6 @@ contract TellorPlayground { return true; } - // Tellor Flex - /** - * @dev Allows a reporter to submit stake - * @param _amount amount of tokens to stake - */ - function depositStake(uint256 _amount) external { - StakeInfo storage _staker = stakerDetails[msg.sender]; - if (_staker.lockedBalance > 0) { - if (_staker.lockedBalance >= _amount) { - _staker.lockedBalance -= _amount; - } else { - require( - _transferFrom( - msg.sender, - address(this), - _amount - _staker.lockedBalance - ) - ); - _staker.lockedBalance = 0; - } - } else { - require(_transferFrom(msg.sender, address(this), _amount)); - } - _staker.startDate = block.timestamp; // This resets their stake start date to now - _staker.stakedBalance += _amount; - emit NewStaker(msg.sender, _amount); - } - - /** - * @dev Allows a reporter to request to withdraw their stake - * @param _amount amount of staked tokens requesting to withdraw - */ - function requestStakingWithdraw(uint256 _amount) external { - StakeInfo storage _staker = stakerDetails[msg.sender]; - require( - _staker.stakedBalance >= _amount, - "insufficient staked balance" - ); - _staker.startDate = block.timestamp; - _staker.lockedBalance += _amount; - _staker.stakedBalance -= _amount; - emit StakeWithdrawRequested(msg.sender, _amount); - } - /** * @dev Withdraws a reporter's stake */ @@ -276,49 +234,6 @@ contract TellorPlayground { emit StakeWithdrawn(msg.sender); } - /** - * @dev Returns the reporter for a given timestamp and queryId - * @param _queryId bytes32 version of the queryId - * @param _timestamp uint256 timestamp of report - * @return address of data reporter - */ - function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) - external - view - returns (address) - { - return reporterByTimestamp[_queryId][_timestamp]; - } - - /** - * @dev Allows users to retrieve all information about a staker - * @param _staker address of staker inquiring about - * @return uint startDate of staking - * @return uint current amount staked - * @return uint current amount locked for withdrawal - * @return uint reporter's last reported timestamp - * @return uint total number of reports submitted by reporter - */ - function getStakerInfo(address _staker) - external - view - returns ( - uint256, - uint256, - uint256, - uint256, - uint256 - ) - { - return ( - stakerDetails[_staker].startDate, - 
stakerDetails[_staker].stakedBalance, - stakerDetails[_staker].lockedBalance, - stakerDetails[_staker].reporterLastTimestamp, - stakerDetails[_staker].reportsSubmitted - ); - } - // Getters /** * @dev Returns the amount that an address is alowed to spend of behalf of another @@ -326,12 +241,7 @@ contract TellorPlayground { * @param _spender The address that will use the tokens * @return uint256 The amount of allowed tokens */ - function allowance(address _owner, address _spender) - public - view - virtual - returns (uint256) - { + function allowance(address _owner, address _spender) external view returns (uint256){ return _allowances[_owner][_spender]; } @@ -340,7 +250,7 @@ contract TellorPlayground { * @param _account user address * @return uint256 user's token balance */ - function balanceOf(address _account) public view returns (uint256) { + function balanceOf(address _account) external view returns (uint256) { return _balances[_account]; } @@ -348,10 +258,144 @@ contract TellorPlayground { * @dev Returns the number of decimals used to get its user representation. * @return uint8 the number of decimals; used only for display purposes */ - function decimals() public view returns (uint8) { + function decimals() external view returns (uint8) { return _decimals; } + /** + * @dev Retrieves the latest value for the queryId before the specified timestamp + * @param _queryId is the queryId to look up the value for + * @param _timestamp before which to search for latest value + * @return _ifRetrieve bool true if able to retrieve a non-zero value + * @return _value the value retrieved + * @return _timestampRetrieved the value's timestamp + */ + function getDataBefore(bytes32 _queryId, uint256 _timestamp) + external + view + returns ( + bool _ifRetrieve, + bytes memory _value, + uint256 _timestampRetrieved + ) + { + (bool _found, uint256 _index) = getIndexForDataBefore( + _queryId, + _timestamp + ); + if (!_found) return (false, bytes(""), 0); + _timestampRetrieved = getTimestampbyQueryIdandIndex(_queryId, _index); + _value = values[_queryId][_timestampRetrieved]; + return (true, _value, _timestampRetrieved); + } + + /** + * @dev Retrieves latest array index of data before the specified timestamp for the queryId + * @param _queryId is the queryId to look up the index for + * @param _timestamp is the timestamp before which to search for the latest index + * @return _found whether the index was found + * @return _index the latest index found before the specified timestamp + */ + // slither-disable-next-line calls-loop + function getIndexForDataBefore(bytes32 _queryId, uint256 _timestamp) + public + view + returns (bool _found, uint256 _index) + { + uint256 _count = getNewValueCountbyQueryId(_queryId); + if (_count > 0) { + uint256 _middle; + uint256 _start = 0; + uint256 _end = _count - 1; + uint256 _time; + //Checking Boundaries to short-circuit the algorithm + _time = getTimestampbyQueryIdandIndex(_queryId, _start); + if (_time >= _timestamp) return (false, 0); + _time = getTimestampbyQueryIdandIndex(_queryId, _end); + if (_time < _timestamp) { + while (isInDispute(_queryId, _time) && _end > 0) { + _end--; + _time = getTimestampbyQueryIdandIndex(_queryId, _end); + } + if (_end == 0 && isInDispute(_queryId, _time)) { + return (false, 0); + } + return (true, _end); + } + //Since the value is within our boundaries, do a binary search + while (true) { + _middle = (_end - _start) / 2 + 1 + _start; + _time = getTimestampbyQueryIdandIndex(_queryId, _middle); + if (_time < _timestamp) { + //get immediate 
next value + uint256 _nextTime = getTimestampbyQueryIdandIndex( + _queryId, + _middle + 1 + ); + if (_nextTime >= _timestamp) { + if (!isInDispute(_queryId, _time)) { + // _time is correct + return (true, _middle); + } else { + // iterate backwards until we find a non-disputed value + while ( + isInDispute(_queryId, _time) && _middle > 0 + ) { + _middle--; + _time = getTimestampbyQueryIdandIndex( + _queryId, + _middle + ); + } + if (_middle == 0 && isInDispute(_queryId, _time)) { + return (false, 0); + } + // _time is correct + return (true, _middle); + } + } else { + //look from middle + 1(next value) to end + _start = _middle + 1; + } + } else { + uint256 _prevTime = getTimestampbyQueryIdandIndex( + _queryId, + _middle - 1 + ); + if (_prevTime < _timestamp) { + if (!isInDispute(_queryId, _prevTime)) { + // _prevTime is correct + return (true, _middle - 1); + } else { + // iterate backwards until we find a non-disputed value + _middle--; + while ( + isInDispute(_queryId, _prevTime) && _middle > 0 + ) { + _middle--; + _prevTime = getTimestampbyQueryIdandIndex( + _queryId, + _middle + ); + } + if ( + _middle == 0 && isInDispute(_queryId, _prevTime) + ) { + return (false, 0); + } + // _prevtime is correct + return (true, _middle); + } + } else { + //look from start to middle -1(prev value) + _end = _middle - 1; + } + } + } + } + return (false, 0); + } + /** * @dev Counts the number of values that have been submitted for a given ID * @param _queryId the ID to look up @@ -365,6 +409,70 @@ contract TellorPlayground { return timestamps[_queryId].length; } + /** + * @dev Returns the reporter for a given timestamp and queryId + * @param _queryId bytes32 version of the queryId + * @param _timestamp uint256 timestamp of report + * @return address of data reporter + */ + function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (address) + { + return reporterByTimestamp[_queryId][_timestamp]; + } + + /** + * @dev Returns mock stake amount + * @return uint256 stake amount + */ + function getStakeAmount() external view returns (uint256) { + return stakeAmount; + } + + /** + * @dev Allows users to retrieve all information about a staker + * @param _stakerAddress address of staker inquiring about + * @return uint startDate of staking + * @return uint current amount staked + * @return uint current amount locked for withdrawal + * @return uint reward debt used to calculate staking reward + * @return uint reporter's last reported timestamp + * @return uint total number of reports submitted by reporter + * @return uint governance vote count when first staked + * @return uint number of votes case by staker when first staked + * @return uint whether staker is counted in totalStakers + */ + function getStakerInfo(address _stakerAddress) + external + view + returns ( + uint256, + uint256, + uint256, + uint256, + uint256, + uint256, + uint256, + uint256, + bool + ) + { + StakeInfo storage _staker = stakerDetails[_stakerAddress]; + return ( + _staker.startDate, + _staker.stakedBalance, + _staker.lockedBalance, + 0, // reward debt + _staker.reporterLastTimestamp, + _staker.reportsSubmitted, + 0, // start vote count + 0, // start vote tally + false + ); + } + /** * @dev Gets the timestamp for the value based on their index * @param _queryId is the queryId to look up @@ -376,8 +484,8 @@ contract TellorPlayground { view returns (uint256) { - uint256 len = timestamps[_queryId].length; - if (len == 0 || len <= _index) return 0; + uint256 _len = timestamps[_queryId].length; + if 
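// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The getIndexForDataBefore hunk
// above is a binary search plus a backwards walk past disputed entries. The
// simplified version below captures the same idea over a plain sorted array,
// with dispute flags reduced to a mapping.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

contract IndexBeforeSketch {
    uint256[] public timestamps;              // sorted ascending
    mapping(uint256 => bool) public disputed; // timestamp => disputed?

    function indexBefore(uint256 _target) external view returns (bool, uint256) {
        uint256 _count = timestamps.length;
        if (_count == 0 || timestamps[0] >= _target) return (false, 0);
        uint256 _start = 0;
        uint256 _end = _count - 1;
        // binary search: narrow to the last index with timestamp < _target
        while (_start < _end) {
            uint256 _mid = (_start + _end + 1) / 2;
            if (timestamps[_mid] < _target) _start = _mid;
            else _end = _mid - 1;
        }
        // walk backwards past disputed values, as the patched oracle does
        while (disputed[timestamps[_start]]) {
            if (_start == 0) return (false, 0);
            _start--;
        }
        return (true, _start);
    }
}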
(_len == 0 || _len <= _index) return 0; return timestamps[_queryId][_index]; } @@ -386,11 +494,7 @@ contract TellorPlayground { * @param _hash is the identifier hash for a vote * @return uint256[] memory dispute IDs of the vote rounds */ - function getVoteRounds(bytes32 _hash) - public - view - returns (uint256[] memory) - { + function getVoteRounds(bytes32 _hash) public view returns (uint256[] memory){ return voteRounds[_hash]; } @@ -398,15 +502,29 @@ contract TellorPlayground { * @dev Returns the governance address of the contract * @return address (this address) */ - function governance() external view returns(address){ + function governance() external view returns (address) { return address(this); } + /** + * @dev Returns whether a given value is disputed + * @param _queryId unique ID of the data feed + * @param _timestamp timestamp of the value + * @return bool whether the value is disputed + */ + function isInDispute(bytes32 _queryId, uint256 _timestamp) + public + view + returns (bool) + { + return isDisputed[_queryId][_timestamp]; + } + /** * @dev Returns the name of the token. * @return string name of the token */ - function name() public view returns (string memory) { + function name() external view returns (string memory) { return _name; } @@ -417,7 +535,7 @@ contract TellorPlayground { * @return bytes value for queryId/timestamp submitted */ function retrieveData(bytes32 _queryId, uint256 _timestamp) - public + external view returns (bytes memory) { @@ -428,7 +546,7 @@ contract TellorPlayground { * @dev Returns the symbol of the token. * @return string symbol of the token */ - function symbol() public view returns (string memory) { + function symbol() external view returns (string memory) { return _symbol; } @@ -436,7 +554,7 @@ contract TellorPlayground { * @dev Returns the total supply of the token. 
* @return uint256 total supply of token */ - function totalSupply() public view returns (uint256) { + function totalSupply() external view returns (uint256) { return _totalSupply; } @@ -451,7 +569,7 @@ contract TellorPlayground { address _owner, address _spender, uint256 _amount - ) internal virtual { + ) internal { require(_owner != address(0), "ERC20: approve from the zero address"); require(_spender != address(0), "ERC20: approve to the zero address"); _allowances[_owner][_spender] = _amount; @@ -463,7 +581,7 @@ contract TellorPlayground { * @param _account The address whose tokens to burn * @param _amount The quantity of tokens to burn */ - function _burn(address _account, uint256 _amount) internal virtual { + function _burn(address _account, uint256 _amount) internal{ require(_account != address(0), "ERC20: burn from the zero address"); _balances[_account] -= _amount; _totalSupply -= _amount; @@ -475,7 +593,7 @@ contract TellorPlayground { * @param _account The address which receives minted tokens * @param _amount The quantity of tokens to min */ - function _mint(address _account, uint256 _amount) internal virtual { + function _mint(address _account, uint256 _amount) internal{ require(_account != address(0), "ERC20: mint to the zero address"); _totalSupply += _amount; _balances[_account] += _amount; @@ -492,12 +610,9 @@ contract TellorPlayground { address _sender, address _recipient, uint256 _amount - ) internal virtual { + ) internal{ require(_sender != address(0), "ERC20: transfer from the zero address"); - require( - _recipient != address(0), - "ERC20: transfer to the zero address" - ); + require( _recipient != address(0),"ERC20: transfer to the zero address"); _balances[_sender] -= _amount; _balances[_recipient] += _amount; emit Transfer(_sender, _recipient, _amount); @@ -514,7 +629,7 @@ contract TellorPlayground { address _sender, address _recipient, uint256 _amount - ) internal virtual returns (bool) { + ) internal returns (bool) { _transfer(_sender, _recipient, _amount); _approve( _sender, diff --git a/contracts/TellorFlex/UsingTellor.sol b/contracts/Tellor/UsingTellor.sol similarity index 51% rename from contracts/TellorFlex/UsingTellor.sol rename to contracts/Tellor/UsingTellor.sol index dcf8f0d9..c67c3d46 100644 --- a/contracts/TellorFlex/UsingTellor.sol +++ b/contracts/Tellor/UsingTellor.sol @@ -1,20 +1,23 @@ // SPDX-License-Identifier: MIT -pragma solidity >=0.8.0; +pragma solidity ^0.8.0; import "../../interfaces/ITellor.sol"; +import "../../interfaces/IERC2362.sol"; +import "../../interfaces/IMappingContract.sol"; /** - * @title UserContract - * This contract allows for easy integration with the Tellor System - * by helping smart contracts to read data from Tellor + @author Tellor Inc + @title UsingTellor + @dev This contract helps smart contracts read data from Tellor */ -contract UsingTellor { +contract UsingTellor is IERC2362 { ITellor public tellor; + IMappingContract public idMappingContract; /*Constructor*/ /** - * @dev the constructor sets the tellor address in storage - * @param _tellor is the TellorMaster address + * @dev the constructor sets the oracle address in storage + * @param _tellor is the Tellor Oracle address */ constructor(address payable _tellor) { tellor = ITellor(_tellor); @@ -55,62 +58,103 @@ contract UsingTellor { function getDataBefore(bytes32 _queryId, uint256 _timestamp) public view - returns ( - bytes memory _value, - uint256 _timestampRetrieved - ) + returns (bytes memory _value, uint256 _timestampRetrieved) { - (bool _found, uint256 _index) 
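// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The reworked UsingTellor above now
// delegates getDataBefore straight to the oracle. A hypothetical consumer
// reading a value at least 15 minutes old might look like the contract below;
// the ETH/USD queryId follows Tellor's published SpotPrice convention, and the
// interface is trimmed to the single call used here.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

interface ITellorOracle {
    function getDataBefore(bytes32 _queryId, uint256 _timestamp)
        external
        view
        returns (bool, bytes memory, uint256);
}

contract PriceConsumerSketch {
    ITellorOracle public immutable tellor;
    bytes32 public constant ethUsdQueryId =
        keccak256(abi.encode("SpotPrice", abi.encode("eth", "usd")));

    constructor(address _tellor) {
        tellor = ITellorOracle(_tellor);
    }

    function latestEthUsd() external view returns (uint256 _price, uint256 _reportedAt) {
        // ask for the newest value reported at least 15 minutes ago
        (bool _found, bytes memory _value, uint256 _ts) =
            tellor.getDataBefore(ethUsdQueryId, block.timestamp - 15 minutes);
        require(_found, "no value");
        return (abi.decode(_value, (uint256)), _ts);
    }
}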
= getIndexForDataBefore( + (, _value, _timestampRetrieved) = tellor.getDataBefore( _queryId, _timestamp ); - if (!_found) return (bytes(""), 0); - uint256 _time = tellor.getTimestampbyQueryIdandIndex(_queryId, _index); - _value = tellor.retrieveData(_queryId, _time); - if (keccak256(_value) != keccak256(bytes(""))) - return (_value, _time); - return (bytes(""), 0); } /** - * @dev Retrieves next array index of data after the specified timestamp for the queryId + * @dev Retrieves latest array index of data before the specified timestamp for the queryId * @param _queryId is the queryId to look up the index for - * @param _timestamp is the timestamp after which to search for the next index + * @param _timestamp is the timestamp before which to search for the latest index * @return _found whether the index was found - * @return _index the next index found after the specified timestamp + * @return _index the latest index found before the specified timestamp */ + // slither-disable-next-line calls-loop function getIndexForDataAfter(bytes32 _queryId, uint256 _timestamp) public view returns (bool _found, uint256 _index) { - (_found, _index) = tellor.getIndexForDataBefore(_queryId, _timestamp); - if (_found) { - _index++; - } - uint256 _valCount = tellor.getNewValueCountbyQueryId(_queryId); - // no value after timestamp - if (_valCount <= _index) { - return (false, 0); - } - uint256 _timestampRetrieved = tellor.getTimestampbyQueryIdandIndex( - _queryId, - _index - ); + uint256 _count = getNewValueCountbyQueryId(_queryId); + if (_count == 0) return (false, 0); + _count--; + bool _search = true; // perform binary search + uint256 _middle = 0; + uint256 _start = 0; + uint256 _end = _count; + uint256 _timestampRetrieved; + // checking boundaries to short-circuit the algorithm + _timestampRetrieved = getTimestampbyQueryIdandIndex(_queryId, _end); + if (_timestampRetrieved <= _timestamp) return (false, 0); + _timestampRetrieved = getTimestampbyQueryIdandIndex(_queryId, _start); if (_timestampRetrieved > _timestamp) { - return (true, _index); + // candidate found, check for disputes + _search = false; } - // if _timestampRetrieved equals _timestamp, try next value - _index++; - // no value after timestamp - if (_valCount <= _index) { - return (false, 0); + // since the value is within our boundaries, do a binary search + while (_search) { + _middle = (_end + _start) / 2; + _timestampRetrieved = getTimestampbyQueryIdandIndex( + _queryId, + _middle + ); + if (_timestampRetrieved > _timestamp) { + // get immediate previous value + uint256 _prevTime = getTimestampbyQueryIdandIndex( + _queryId, + _middle - 1 + ); + if (_prevTime <= _timestamp) { + // candidate found, check for disputes + _search = false; + } else { + // look from start to middle -1(prev value) + _end = _middle - 1; + } + } else { + // get immediate next value + uint256 _nextTime = getTimestampbyQueryIdandIndex( + _queryId, + _middle + 1 + ); + if (_nextTime > _timestamp) { + // candidate found, check for disputes + _search = false; + _middle++; + _timestampRetrieved = _nextTime; + } else { + // look from middle + 1(next value) to end + _start = _middle + 1; + } + } + } + // candidate found, check for disputed values + if (!isInDispute(_queryId, _timestampRetrieved)) { + // _timestampRetrieved is correct + return (true, _middle); + } else { + // iterate forward until we find a non-disputed value + while ( + isInDispute(_queryId, _timestampRetrieved) && _middle < _count + ) { + _middle++; + _timestampRetrieved = getTimestampbyQueryIdandIndex( + 
_queryId, + _middle + ); + } + if ( + _middle == _count && isInDispute(_queryId, _timestampRetrieved) + ) { + return (false, 0); + } + // _timestampRetrieved is correct + return (true, _middle); } - _timestampRetrieved = tellor.getTimestampbyQueryIdandIndex( - _queryId, - _index - ); - return (true, _index); } /** @@ -148,6 +192,7 @@ contract UsingTellor { view returns (bytes[] memory _values, uint256[] memory _timestamps) { + // get index of first possible value (bool _ifRetrieve, uint256 _startIndex) = getIndexForDataAfter( _queryId, _timestamp - _maxAge @@ -157,27 +202,34 @@ contract UsingTellor { return (new bytes[](0), new uint256[](0)); } uint256 _endIndex; + // get index of last possible value (_ifRetrieve, _endIndex) = getIndexForDataBefore(_queryId, _timestamp); // no value before _timestamp if (!_ifRetrieve) { return (new bytes[](0), new uint256[](0)); } - uint256 _valCount = _endIndex - _startIndex + 1; - // more than _maxCount values found within range - if (_valCount > _maxCount) { - _startIndex = _endIndex - _maxCount + 1; - _valCount = _maxCount; + uint256 _valCount = 0; + uint256 _index = 0; + uint256[] memory _timestampsArrayTemp = new uint256[](_maxCount); + // generate array of non-disputed timestamps within range + while (_valCount < _maxCount && _endIndex + 1 - _index > _startIndex) { + uint256 _timestampRetrieved = getTimestampbyQueryIdandIndex( + _queryId, + _endIndex - _index + ); + if (!isInDispute(_queryId, _timestampRetrieved)) { + _timestampsArrayTemp[_valCount] = _timestampRetrieved; + _valCount++; + } + _index++; } + bytes[] memory _valuesArray = new bytes[](_valCount); uint256[] memory _timestampsArray = new uint256[](_valCount); - bytes memory _valueRetrieved; + // retrieve values and reverse timestamps order for (uint256 _i = 0; _i < _valCount; _i++) { - _timestampsArray[_i] = getTimestampbyQueryIdandIndex( - _queryId, - (_startIndex + _i) - ); - _valueRetrieved = retrieveData(_queryId, _timestampsArray[_i]); - _valuesArray[_i] = _valueRetrieved; + _timestampsArray[_i] = _timestampsArrayTemp[_valCount - 1 - _i]; + _valuesArray[_i] = retrieveData(_queryId, _timestampsArray[_i]); } return (_valuesArray, _timestampsArray); } @@ -223,7 +275,19 @@ contract UsingTellor { return tellor.getTimestampbyQueryIdandIndex(_queryId, _index); } - + /** + * @dev Determines whether a value with a given queryId and timestamp has been disputed + * @param _queryId is the value id to look up + * @param _timestamp is the timestamp of the value to look up + * @return bool true if queryId/timestamp is under dispute + */ + function isInDispute(bytes32 _queryId, uint256 _timestamp) + public + view + returns (bool) + { + return tellor.isInDispute(_queryId, _timestamp); + } /** * @dev Retrieve value from oracle based on queryId/timestamp @@ -239,6 +303,61 @@ contract UsingTellor { return tellor.retrieveData(_queryId, _timestamp); } + /** + * @dev allows dev to set mapping contract for valueFor (EIP2362) + * @param _addy address of mapping contract + */ + function setIdMappingContract(address _addy) external { + require(address(idMappingContract) == address(0)); + idMappingContract = IMappingContract(_addy); + } + + /** + * @dev Retrieve most recent int256 value from oracle based on queryId + * @param _id being requested + * @return _value most recent value submitted + * @return _timestamp timestamp of most recent value + * @return _statusCode 200 if value found, 404 if not found + */ + function valueFor(bytes32 _id) + external + view + override + returns ( + int256 _value, + uint256 
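// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] getMultipleValuesBefore above now
// skips disputed values and returns timestamps in chronological order. A
// hypothetical caller averaging up to four non-disputed samples from the last
// hour; the interface is trimmed to the one function exercised.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

interface IUsingTellorView {
    function getMultipleValuesBefore(
        bytes32 _queryId,
        uint256 _timestamp,
        uint256 _maxAge,
        uint256 _maxCount
    ) external view returns (bytes[] memory, uint256[] memory);
}

contract HourlyAverageSketch {
    IUsingTellorView public immutable reader;

    constructor(address _reader) {
        reader = IUsingTellorView(_reader);
    }

    function hourlyAverage(bytes32 _queryId) external view returns (uint256 _avg) {
        (bytes[] memory _values, ) = reader.getMultipleValuesBefore(
            _queryId,
            block.timestamp, // values strictly before now
            1 hours,         // ignore anything older than one hour
            4                // at most four samples
        );
        require(_values.length > 0, "no recent values");
        uint256 _sum;
        for (uint256 _i = 0; _i < _values.length; _i++) {
            _sum += abi.decode(_values[_i], (uint256));
        }
        _avg = _sum / _values.length;
    }
}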
_timestamp, + uint256 _statusCode + ) + { + bytes32 _queryId = idMappingContract.getTellorID(_id); + bytes memory _valueBytes; + (_valueBytes, _timestamp) = getDataBefore( + _queryId, + block.timestamp + 1 + ); + if (_timestamp == 0) { + return (0, 0, 404); + } + uint256 _valueUint = _sliceUint(_valueBytes); + _value = int256(_valueUint); + return (_value, _timestamp, 200); + } + + // Internal functions + /** + * @dev Convert bytes to uint256 + * @param _b bytes value to convert to uint256 + * @return _number uint256 converted from bytes + */ + function _sliceUint(bytes memory _b) + internal + pure + returns (uint256 _number) + { + for (uint256 _i = 0; _i < _b.length; _i++) { + _number = _number * 256 + uint8(_b[_i]); + } + } /** * @dev Allows the user to get the latest value for the queryId specified * @param _queryId is the id to look up the value for diff --git a/contracts/TellorFlex/TellorFlex.sol b/contracts/TellorFlex/TellorFlex.sol deleted file mode 100644 index 537a4236..00000000 --- a/contracts/TellorFlex/TellorFlex.sol +++ /dev/null @@ -1,594 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.3; - -import "../../interfaces/IERC20.sol"; - -/** - @author Tellor Inc. - @title TellorFlex - @dev This is a streamlined Tellor oracle system which handles staking, reporting, - * slashing, and user data getters in one contract. This contract is controlled - * by a single address known as 'governance', which could be an externally owned - * account or a contract, allowing for a flexible, modular design. -*/ -contract TellorFlex { - IERC20 public token; - address public governance; - uint256 public stakeAmount; //amount required to be a staker - uint256 public totalStakeAmount; //total amount of tokens locked in contract (via stake) - uint256 public reportingLock; // base amount of time before a reporter is able to submit a value again - uint256 public timeOfLastNewValue = block.timestamp; // time of the last new submitted value, originally set to the block timestamp - mapping(bytes32 => Report) private reports; // mapping of query IDs to a report - mapping(address => StakeInfo) stakerDetails; //mapping from a persons address to their staking info - - // Structs - struct Report { - uint256[] timestamps; // array of all newValueTimestamps reported - mapping(uint256 => uint256) timestampIndex; // mapping of timestamps to respective indices - mapping(uint256 => uint256) timestampToBlockNum; // mapping of timestamp to block number - mapping(uint256 => bytes) valueByTimestamp; // mapping of timestamps to values - mapping(uint256 => address) reporterByTimestamp; // mapping of timestamps to reporters - } - - struct StakeInfo { - uint256 startDate; //stake start date - uint256 stakedBalance; // staked balance - uint256 lockedBalance; // amount locked for withdrawal - uint256 reporterLastTimestamp; // timestamp of reporter's last reported value - uint256 reportsSubmitted; // total number of reports submitted by reporter - mapping(bytes32 => uint256) reportsSubmittedByQueryId; - } - - // Events - event NewGovernanceAddress(address _newGovernanceAddress); - event NewReport( - bytes32 _queryId, - uint256 _time, - bytes _value, - uint256 _nonce, - bytes _queryData, - address _reporter - ); - event NewReportingLock(uint256 _newReportingLock); - event NewStakeAmount(uint256 _newStakeAmount); - event NewStaker(address _staker, uint256 _amount); - event ReporterSlashed( - address _reporter, - address _recipient, - uint256 _slashAmount - ); - event StakeWithdrawRequested(address _staker, uint256 
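// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] valueFor above relies on
// _sliceUint to turn the oracle's raw bytes into a uint256. The conversion is
// a big-endian accumulate: each byte shifts the result left by 8 bits, so a
// 32-byte abi-encoded uint256 round-trips exactly, as the demo shows.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

contract SliceUintSketch {
    function sliceUint(bytes memory _b) public pure returns (uint256 _number) {
        for (uint256 _i = 0; _i < _b.length; _i++) {
            _number = _number * 256 + uint8(_b[_i]);
        }
    }

    function demo() external pure returns (uint256) {
        // abi.encode(uint256(42)) is 32 big-endian bytes, so this returns 42
        return sliceUint(abi.encode(uint256(42)));
    }
}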
_amount); - event StakeWithdrawn(address _staker); - event ValueRemoved(bytes32 _queryId, uint256 _timestamp); - - /** - * @dev Initializes system parameters - * @param _token address of token used for staking - * @param _governance address which controls system - * @param _stakeAmount amount of token needed to report oracle values - * @param _reportingLock base amount of time (seconds) before reporter is able to report again - */ - constructor( - address _token, - address _governance, - uint256 _stakeAmount, - uint256 _reportingLock - ) { - require(_token != address(0), "must set token address"); - require(_governance != address(0), "must set governance address"); - token = IERC20(_token); - governance = _governance; - stakeAmount = _stakeAmount; - reportingLock = _reportingLock; - } - - /** - * @dev Changes governance address - * @param _newGovernanceAddress new governance address - */ - function changeGovernanceAddress(address _newGovernanceAddress) external { - require(msg.sender == governance, "caller must be governance address"); - require( - _newGovernanceAddress != address(0), - "must set governance address" - ); - governance = _newGovernanceAddress; - emit NewGovernanceAddress(_newGovernanceAddress); - } - - /** - * @dev Changes base amount of time (seconds) before reporter is allowed to report again - * @param _newReportingLock new reporter lock time in seconds - */ - function changeReportingLock(uint256 _newReportingLock) external { - require(msg.sender == governance, "caller must be governance address"); - require( - _newReportingLock > 0, - "reporting lock must be greater than zero" - ); - reportingLock = _newReportingLock; - emit NewReportingLock(_newReportingLock); - } - - /** - * @dev Changes amount of token stake required to report values - * @param _newStakeAmount new reporter stake amount - */ - function changeStakeAmount(uint256 _newStakeAmount) external { - require(msg.sender == governance, "caller must be governance address"); - require(_newStakeAmount > 0, "stake amount must be greater than zero"); - stakeAmount = _newStakeAmount; - emit NewStakeAmount(_newStakeAmount); - } - - /** - * @dev Allows a reporter to submit stake - * @param _amount amount of tokens to stake - */ - function depositStake(uint256 _amount) external { - StakeInfo storage _staker = stakerDetails[msg.sender]; - if (_staker.lockedBalance > 0) { - if (_staker.lockedBalance >= _amount) { - _staker.lockedBalance -= _amount; - } else { - require( - token.transferFrom( - msg.sender, - address(this), - _amount - _staker.lockedBalance - ) - ); - _staker.lockedBalance = 0; - } - } else { - require(token.transferFrom(msg.sender, address(this), _amount)); - } - _staker.startDate = block.timestamp; // This resets their stake start date to now - _staker.stakedBalance += _amount; - totalStakeAmount += _amount; - emit NewStaker(msg.sender, _amount); - } - - /** - * @dev Removes a value from the oracle. - * Note: this function is only callable by the Governance contract. 
- * @param _queryId is ID of the specific data feed - * @param _timestamp is the timestamp of the data value to remove - */ - function removeValue(bytes32 _queryId, uint256 _timestamp) external { - require(msg.sender == governance, "caller must be governance address"); - Report storage rep = reports[_queryId]; - uint256 _index = rep.timestampIndex[_timestamp]; - require(_timestamp == rep.timestamps[_index], "invalid timestamp"); - // Shift all timestamps back to reflect deletion of value - for (uint256 _i = _index; _i < rep.timestamps.length - 1; _i++) { - rep.timestamps[_i] = rep.timestamps[_i + 1]; - rep.timestampIndex[rep.timestamps[_i]] -= 1; - } - // Delete and reset timestamp and value - delete rep.timestamps[rep.timestamps.length - 1]; - rep.timestamps.pop(); - rep.valueByTimestamp[_timestamp] = ""; - rep.timestampIndex[_timestamp] = 0; - emit ValueRemoved(_queryId, _timestamp); - } - - /** - * @dev Allows a reporter to request to withdraw their stake - * @param _amount amount of staked tokens requesting to withdraw - */ - function requestStakingWithdraw(uint256 _amount) external { - StakeInfo storage _staker = stakerDetails[msg.sender]; - require( - _staker.stakedBalance >= _amount, - "insufficient staked balance" - ); - _staker.startDate = block.timestamp; - _staker.lockedBalance += _amount; - _staker.stakedBalance -= _amount; - totalStakeAmount -= _amount; - emit StakeWithdrawRequested(msg.sender, _amount); - } - - /** - * @dev Slashes a reporter and transfers their stake amount to the given recipient - * Note: this function is only callable by the governance address. - * @param _reporter is the address of the reporter being slashed - * @param _recipient is the address receiving the reporter's stake - * @return uint256 amount of token slashed and sent to recipient address - */ - function slashReporter(address _reporter, address _recipient) - external - returns (uint256) - { - require(msg.sender == governance, "only governance can slash reporter"); - StakeInfo storage _staker = stakerDetails[_reporter]; - require( - _staker.stakedBalance + _staker.lockedBalance > 0, - "zero staker balance" - ); - uint256 _slashAmount; - if (_staker.lockedBalance >= stakeAmount) { - _slashAmount = stakeAmount; - _staker.lockedBalance -= stakeAmount; - } else if ( - _staker.lockedBalance + _staker.stakedBalance >= stakeAmount - ) { - _slashAmount = stakeAmount; - _staker.stakedBalance -= stakeAmount - _staker.lockedBalance; - totalStakeAmount -= stakeAmount - _staker.lockedBalance; - _staker.lockedBalance = 0; - } else { - _slashAmount = _staker.stakedBalance + _staker.lockedBalance; - totalStakeAmount -= _staker.stakedBalance; - _staker.stakedBalance = 0; - _staker.lockedBalance = 0; - } - token.transfer(_recipient, _slashAmount); - emit ReporterSlashed(_reporter, _recipient, _slashAmount); - return (_slashAmount); - } - - /** - * @dev Allows a reporter to submit a value to the oracle - * @param _queryId is ID of the specific data feed. 
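// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The deleted TellorFlex.removeValue
// above shifted the entire timestamps array on each removal, while the new
// TellorFlex360 simply flags the timestamp as disputed. The contrast:
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.3;

contract RemovalStrategySketch {
    uint256[] public timestamps;
    mapping(uint256 => bool) public isDisputed;

    // new approach: constant-time tombstone, array untouched
    function removeByFlag(uint256 _timestamp) external {
        isDisputed[_timestamp] = true;
    }

    // old approach: shift every later element back one slot, then pop (O(n))
    function removeByShift(uint256 _index) external {
        for (uint256 _i = _index; _i < timestamps.length - 1; _i++) {
            timestamps[_i] = timestamps[_i + 1];
        }
        timestamps.pop();
    }
}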
Equals keccak256(_queryData) for non-legacy IDs - * @param _value is the value the user submits to the oracle - * @param _nonce is the current value count for the query id - * @param _queryData is the data used to fulfill the data query - */ - function submitValue( - bytes32 _queryId, - bytes calldata _value, - uint256 _nonce, - bytes memory _queryData - ) external { - Report storage rep = reports[_queryId]; - require( - _nonce == rep.timestamps.length || _nonce == 0, - "nonce must match timestamp index" - ); - StakeInfo storage _staker = stakerDetails[msg.sender]; - require( - _staker.stakedBalance >= stakeAmount, - "balance must be greater than stake amount" - ); - // Require reporter to abide by given reporting lock - // require( - // (block.timestamp - _staker.reporterLastTimestamp) * 1000 > - // (reportingLock * 1000) / (_staker.stakedBalance / stakeAmount), - // "still in reporter time lock, please wait!" - // ); - require( - _queryId == keccak256(_queryData) || uint256(_queryId) <= 100, - "id must be hash of bytes data" - ); - _staker.reporterLastTimestamp = block.timestamp; - // Checks for no double reporting of timestamps - require( - rep.reporterByTimestamp[block.timestamp] == address(0), - "timestamp already reported for" - ); - // Update number of timestamps, value for given timestamp, and reporter for timestamp - rep.timestampIndex[block.timestamp] = rep.timestamps.length; - rep.timestamps.push(block.timestamp); - rep.timestampToBlockNum[block.timestamp] = block.number; - rep.valueByTimestamp[block.timestamp] = _value; - rep.reporterByTimestamp[block.timestamp] = msg.sender; - // Update last oracle value and number of values submitted by a reporter - timeOfLastNewValue = block.timestamp; - _staker.reportsSubmitted++; - _staker.reportsSubmittedByQueryId[_queryId]++; - emit NewReport( - _queryId, - block.timestamp, - _value, - _nonce, - _queryData, - msg.sender - ); - } - - /** - * @dev Withdraws a reporter's stake - */ - function withdrawStake() external { - StakeInfo storage _s = stakerDetails[msg.sender]; - // Ensure reporter is locked and that enough time has passed - require(block.timestamp - _s.startDate >= 7 days, "7 days didn't pass"); - require(_s.lockedBalance > 0, "reporter not locked for withdrawal"); - token.transfer(msg.sender, _s.lockedBalance); - _s.lockedBalance = 0; - emit StakeWithdrawn(msg.sender); - } - - //Getters - /** - * @dev Returns the block number at a given timestamp - * @param _queryId is ID of the specific data feed - * @param _timestamp is the timestamp to find the corresponding block number for - * @return uint256 block number of the timestamp for the given data ID - */ - function getBlockNumberByTimestamp(bytes32 _queryId, uint256 _timestamp) - external - view - returns (uint256) - { - return reports[_queryId].timestampToBlockNum[_timestamp]; - } - - /** - * @dev Returns the current value of a data feed given a specific ID - * @param _queryId is the ID of the specific data feed - * @return bytes memory of the current value of data - */ - function getCurrentValue(bytes32 _queryId) - external - view - returns (bytes memory) - { - return - reports[_queryId].valueByTimestamp[ - reports[_queryId].timestamps[ - reports[_queryId].timestamps.length - 1 - ] - ]; - } - - /** - * @dev Returns governance address - * @return address governance - */ - function getGovernanceAddress() external view returns (address) { - return governance; - } - - /** - * @dev Counts the number of values that have been submitted for the request. 
- * @param _queryId the id to look up - * @return uint256 count of the number of values received for the id - */ - function getNewValueCountbyQueryId(bytes32 _queryId) - external - view - returns (uint256) - { - return reports[_queryId].timestamps.length; - } - - /** - * @dev Returns reporter address and whether a value was removed for a given queryId and timestamp - * @param _queryId the id to look up - * @param _timestamp is the timestamp of the value to look up - * @return address reporter who submitted the value - * @return bool true if the value was removed - */ - function getReportDetails(bytes32 _queryId, uint256 _timestamp) - external - view - returns (address, bool) - { - bool _wasRemoved = reports[_queryId].timestampIndex[_timestamp] == 0 && - keccak256(reports[_queryId].valueByTimestamp[_timestamp]) == - keccak256(bytes("")) && - reports[_queryId].reporterByTimestamp[_timestamp] != address(0); - return (reports[_queryId].reporterByTimestamp[_timestamp], _wasRemoved); - } - - /** - * @dev Returns the address of the reporter who submitted a value for a data ID at a specific time - * @param _queryId is ID of the specific data feed - * @param _timestamp is the timestamp to find a corresponding reporter for - * @return address of the reporter who reported the value for the data ID at the given timestamp - */ - function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) - external - view - returns (address) - { - return reports[_queryId].reporterByTimestamp[_timestamp]; - } - - /** - * @dev Returns the timestamp of the reporter's last submission - * @param _reporter is address of the reporter - * @return uint256 timestamp of the reporter's last submission - */ - function getReporterLastTimestamp(address _reporter) - external - view - returns (uint256) - { - return stakerDetails[_reporter].reporterLastTimestamp; - } - - /** - * @dev Returns the reporting lock time, the amount of time a reporter must wait to submit again - * @return uint256 reporting lock time - */ - function getReportingLock() external view returns (uint256) { - return reportingLock; - } - - /** - * @dev Returns the number of values submitted by a specific reporter address - * @param _reporter is the address of a reporter - * @return uint256 of the number of values submitted by the given reporter - */ - function getReportsSubmittedByAddress(address _reporter) - external - view - returns (uint256) - { - return stakerDetails[_reporter].reportsSubmitted; - } - - /** - * @dev Returns the number of values submitted to a specific queryId by a specific reporter address - * @param _reporter is the address of a reporter - * @param _queryId is the ID of the specific data feed - * @return uint256 of the number of values submitted by the given reporter to the given queryId - */ - function getReportsSubmittedByAddressAndQueryId( - address _reporter, - bytes32 _queryId - ) external view returns (uint256) { - return stakerDetails[_reporter].reportsSubmittedByQueryId[_queryId]; - } - - /** - * @dev Returns amount required to report oracle values - * @return uint256 stake amount - */ - function getStakeAmount() external view returns (uint256) { - return stakeAmount; - } - - /** - * @dev Allows users to retrieve all information about a staker - * @param _staker address of staker inquiring about - * @return uint startDate of staking - * @return uint current amount staked - * @return uint current amount locked for withdrawal - * @return uint reporter's last reported timestamp - * @return uint total number of reports submitted by 
reporter - */ - function getStakerInfo(address _staker) - external - view - returns ( - uint256, - uint256, - uint256, - uint256, - uint256 - ) - { - return ( - stakerDetails[_staker].startDate, - stakerDetails[_staker].stakedBalance, - stakerDetails[_staker].lockedBalance, - stakerDetails[_staker].reporterLastTimestamp, - stakerDetails[_staker].reportsSubmitted - ); - } - - /** - * @dev Returns the timestamp for the last value of any ID from the oracle - * @return uint256 of timestamp of the last oracle value - */ - function getTimeOfLastNewValue() external view returns (uint256) { - return timeOfLastNewValue; - } - - /** - * @dev Gets the timestamp for the value based on their index - * @param _queryId is the id to look up - * @param _index is the value index to look up - * @return uint256 timestamp - */ - function getTimestampbyQueryIdandIndex(bytes32 _queryId, uint256 _index) - external - view - returns (uint256) - { - return reports[_queryId].timestamps[_index]; - } - - /** - * @dev Returns the index of a reporter timestamp in the timestamp array for a specific data ID - * @param _queryId is ID of the specific data feed - * @param _timestamp is the timestamp to find in the timestamps array - * @return uint256 of the index of the reporter timestamp in the array for specific ID - */ - function getTimestampIndexByTimestamp(bytes32 _queryId, uint256 _timestamp) - external - view - returns (uint256) - { - return reports[_queryId].timestampIndex[_timestamp]; - } - - /** - * @dev Returns the address of the token used for staking - * @return address of the token used for staking - */ - function getTokenAddress() external view returns (address) { - return address(token); - } - - /** - * @dev Returns total amount of token staked for reporting - * @return uint256 total amount of token staked - */ - function getTotalStakeAmount() external view returns (uint256) { - return totalStakeAmount; - } - - /** - * @dev Retrieve value from oracle based on timestamp - * @param _queryId being requested - * @param _timestamp to retrieve data/value from - * @return bytes value for timestamp submitted - */ - function retrieveData(bytes32 _queryId, uint256 _timestamp) - external - view - returns (bytes memory) - { - return reports[_queryId].valueByTimestamp[_timestamp]; - } - - function getIndexForDataBefore(bytes32 _queryId, uint256 _timestamp) - public - view - returns (bool _found, uint256 _index) - { - uint256 _count = this.getNewValueCountbyQueryId(_queryId); - if (_count > 0) { - uint256 _middle; - uint256 _start = 0; - uint256 _end = _count - 1; - uint256 _time; - //Checking Boundaries to short-circuit the algorithm - _time = this.getTimestampbyQueryIdandIndex(_queryId, _start); - if (_time >= _timestamp) return (false, 0); - _time = this.getTimestampbyQueryIdandIndex(_queryId, _end); - if (_time < _timestamp) { - return (true, _end); - } - //Since the value is within our boundaries, do a binary search - while (true) { - _middle = (_end - _start) / 2 + 1 + _start; - _time = this.getTimestampbyQueryIdandIndex(_queryId, _middle); - if (_time < _timestamp) { - //get immediate next value - uint256 _nextTime = this.getTimestampbyQueryIdandIndex( - _queryId, - _middle + 1 - ); - if (_nextTime >= _timestamp) { - return (true, _middle); - - } else { - //look from middle + 1(next value) to end - _start = _middle + 1; - } - } else { - uint256 _prevTime = this.getTimestampbyQueryIdandIndex( - _queryId, - _middle - 1 - ); - if (_prevTime < _timestamp) { - // _prevtime is correct - return (true, _middle); - - } else { 
- //look from start to middle -1(prev value) - _end = _middle - 1; - } - } - } - } - return (false, 0); - } -} diff --git a/interfaces/IERC20.sol b/interfaces/IERC20.sol index 476d928d..9dc25850 100644 --- a/interfaces/IERC20.sol +++ b/interfaces/IERC20.sol @@ -1,9 +1,8 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.3; +pragma solidity ^0.8.3; interface IERC20 { function balanceOf(address account) external view returns (uint256); - function approve(address account, uint256 amount) external; function transfer(address recipient, uint256 amount) external @@ -14,4 +13,8 @@ interface IERC20 { address recipient, uint256 amount ) external returns (bool); + + function approve(address _spender, uint256 _amount) + external + returns (bool); } diff --git a/interfaces/IERC2362.sol b/interfaces/IERC2362.sol new file mode 100644 index 00000000..f474f913 --- /dev/null +++ b/interfaces/IERC2362.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +pragma solidity >=0.8.0; + +/** + * @dev EIP2362 Interface for pull oracles + * https://github.com/tellor-io/EIP-2362 +*/ +interface IERC2362 +{ + /** + * @dev Exposed function pertaining to EIP standards + * @param _id bytes32 ID of the query + * @return int,uint,uint returns the value, timestamp, and status code of query + */ + function valueFor(bytes32 _id) external view returns(int256,uint256,uint256); +} diff --git a/interfaces/IMappingContract.sol b/interfaces/IMappingContract.sol new file mode 100644 index 00000000..88685a70 --- /dev/null +++ b/interfaces/IMappingContract.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IMappingContract{ + function getTellorID(bytes32 _id) external view returns(bytes32); +} diff --git a/interfaces/IOracle.sol b/interfaces/IOracle.sol new file mode 100644 index 00000000..57f19f18 --- /dev/null +++ b/interfaces/IOracle.sol @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.3; + +/** + @author Tellor Inc. + @title TellorFlex + @dev This is a streamlined Tellor oracle system which handles staking, reporting, + * slashing, and user data getters in one contract. This contract is controlled + * by a single address known as 'governance', which could be an externally owned + * account or a contract, allowing for a flexible, modular design. +*/ +interface IOracle { + /** + * @dev Removes a value from the oracle. + * Note: this function is only callable by the Governance contract. + * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp of the data value to remove + */ + function removeValue(bytes32 _queryId, uint256 _timestamp) external; + + /** + * @dev Slashes a reporter and transfers their stake amount to the given recipient + * Note: this function is only callable by the governance address. 
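// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] A hypothetical minimal
// implementation of the new IMappingContract interface above, translating
// EIP-2362 ids into Tellor query ids for UsingTellor.valueFor. Access control
// on setMapping is omitted in this sketch.
// ---------------------------------------------------------------------------
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

contract IdMappingSketch {
    mapping(bytes32 => bytes32) private _tellorIds;

    function setMapping(bytes32 _erc2362Id, bytes32 _tellorQueryId) external {
        // anyone may write in this sketch; a real deployment would restrict this
        _tellorIds[_erc2362Id] = _tellorQueryId;
    }

    function getTellorID(bytes32 _id) external view returns (bytes32) {
        return _tellorIds[_id];
    }
}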
+ * @param _reporter is the address of the reporter being slashed + * @param _recipient is the address receiving the reporter's stake + * @return uint256 amount of token slashed and sent to recipient address + */ + function slashReporter(address _reporter, address _recipient) + external + returns (uint256); + + // ***************************************************************************** + // * * + // * Getters * + // * * + // ***************************************************************************** + + /** + * @dev Returns the block number at a given timestamp + * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp to find the corresponding block number for + * @return uint256 block number of the timestamp for the given data ID + */ + function getBlockNumberByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (uint256); + + /** + * @dev Returns the address of the reporter who submitted a value for a data ID at a specific time + * @param _queryId is ID of the specific data feed + * @param _timestamp is the timestamp to find a corresponding reporter for + * @return address of the reporter who reported the value for the data ID at the given timestamp + */ + function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (address); + + /** + * @dev Returns the number of values submitted by a specific reporter address + * @param _reporter is the address of a reporter + * @return uint256 of the number of values submitted by the given reporter + */ + function getReportsSubmittedByAddress(address _reporter) + external + view + returns (uint256); + + /** + * @dev Returns amount required to report oracle values + * @return uint256 stake amount + */ + function getStakeAmount() external view returns (uint256); + + /** + * @dev Allows users to retrieve all information about a staker + * @param _stakerAddress address of staker inquiring about + * @return uint startDate of staking + * @return uint current amount staked + * @return uint current amount locked for withdrawal + * @return uint reward debt used to calculate staking rewards + * @return uint reporter's last reported timestamp + * @return uint total number of reports submitted by reporter + * @return uint governance vote count when first staked + * @return uint number of votes cast by staker when first staked + */ + function getStakerInfo(address _stakerAddress) + external + view + returns ( + uint256, + uint256, + uint256, + uint256, + uint256, + uint256, + uint256, + uint256 + ); + + /** + * @dev Retrieves the latest value for the queryId before the specified timestamp + * @param _queryId is the queryId to look up the value for + * @param _timestamp before which to search for latest value + * @return _ifRetrieve bool true if able to retrieve a non-zero value + * @return _value the value retrieved + * @return _timestampRetrieved the value's timestamp + */ + function getDataBefore(bytes32 _queryId, uint256 _timestamp) + external + view + returns ( + bool _ifRetrieve, + bytes memory _value, + uint256 _timestampRetrieved + ); + + /** + * @dev Returns the address of the token used for staking + * @return address of the token used for staking + */ + function getTokenAddress() external view returns (address); + + /** + * @dev Retrieve value from oracle based on timestamp + * @param _queryId being requested + * @param _timestamp to retrieve data/value from + * @return bytes value for timestamp submitted + */ + function retrieveData(bytes32 _queryId, 
uint256 _timestamp) + external + view + returns (bytes memory); +} diff --git a/interfaces/IQueryDataStorage.sol b/interfaces/IQueryDataStorage.sol new file mode 100644 index 00000000..ac9fe99f --- /dev/null +++ b/interfaces/IQueryDataStorage.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface IQueryDataStorage { + function storeData(bytes memory _queryData) external; + function getQueryData(bytes32 _queryId) external view returns (bytes memory); +} diff --git a/interfaces/ITellor.sol b/interfaces/ITellor.sol index fce4d10a..1d165e99 100644 --- a/interfaces/ITellor.sol +++ b/interfaces/ITellor.sol @@ -1,90 +1,291 @@ // SPDX-License-Identifier: MIT pragma solidity >=0.8.0; -interface ITellor{ +interface ITellor { //Controller - function addresses(bytes32) external view returns(address); - function uints(bytes32) external view returns(uint256); + function addresses(bytes32) external view returns (address); + + function uints(bytes32) external view returns (uint256); + function burn(uint256 _amount) external; + function changeDeity(address _newDeity) external; + function changeOwner(address _newOwner) external; - function changeTellorContract(address _tContract) external; - function changeControllerContract(address _newController) external; - function changeGovernanceContract(address _newGovernance) external; - function changeOracleContract(address _newOracle) external; - function changeTreasuryContract(address _newTreasury) external; function changeUint(bytes32 _target, uint256 _amount) external; + function migrate() external; + function mint(address _reciever, uint256 _amount) external; + function init() external; - function getAllDisputeVars(uint256 _disputeId) external view returns (bytes32,bool,bool,bool,address,address,address,uint256[9] memory,int256); - function getDisputeIdByDisputeHash(bytes32 _hash) external view returns (uint256); - function getDisputeUintVars(uint256 _disputeId, bytes32 _data) external view returns(uint256); - function getLastNewValueById(uint256 _requestId) external view returns (uint256, bool); - function retrieveData(uint256 _requestId, uint256 _timestamp) external view returns (uint256); - function getNewValueCountbyRequestId(uint256 _requestId) external view returns (uint256); + + function getAllDisputeVars(uint256 _disputeId) + external + view + returns ( + bytes32, + bool, + bool, + bool, + address, + address, + address, + uint256[9] memory, + int256 + ); + + function getDisputeIdByDisputeHash(bytes32 _hash) + external + view + returns (uint256); + + function getDisputeUintVars(uint256 _disputeId, bytes32 _data) + external + view + returns (uint256); + + function getLastNewValueById(uint256 _requestId) + external + view + returns (uint256, bool); + + function retrieveData(uint256 _requestId, uint256 _timestamp) + external + view + returns (uint256); + + function getNewValueCountbyRequestId(uint256 _requestId) + external + view + returns (uint256); + function getAddressVars(bytes32 _data) external view returns (address); + function getUintVar(bytes32 _data) external view returns (uint256); + function totalSupply() external view returns (uint256); + function name() external pure returns (string memory); + function symbol() external pure returns (string memory); + function decimals() external pure returns (uint8); + function isMigrated(address _addy) external view returns (bool); - function allowance(address _user, address _spender) external view returns (uint256); - function allowedToTrade(address _user, uint256 
_amount) external view returns (bool); + + function allowance(address _user, address _spender) + external + view + returns (uint256); + + function allowedToTrade(address _user, uint256 _amount) + external + view + returns (bool); + function approve(address _spender, uint256 _amount) external returns (bool); - function approveAndTransferFrom(address _from, address _to, uint256 _amount) external returns(bool); + + function approveAndTransferFrom( + address _from, + address _to, + uint256 _amount + ) external returns (bool); + function balanceOf(address _user) external view returns (uint256); - function balanceOfAt(address _user, uint256 _blockNumber)external view returns (uint256); - function transfer(address _to, uint256 _amount)external returns (bool success); - function transferFrom(address _from,address _to,uint256 _amount) external returns (bool success) ; + + function balanceOfAt(address _user, uint256 _blockNumber) + external + view + returns (uint256); + + function transfer(address _to, uint256 _amount) + external + returns (bool success); + + function transferFrom( + address _from, + address _to, + uint256 _amount + ) external returns (bool success); + function depositStake() external; + function requestStakingWithdraw() external; + function withdrawStake() external; - function changeStakingStatus(address _reporter, uint _status) external; + + function changeStakingStatus(address _reporter, uint256 _status) external; + function slashReporter(address _reporter, address _disputer) external; - function getStakerInfo(address _staker) external view returns (uint256, uint256); - function getTimestampbyRequestIDandIndex(uint256 _requestId, uint256 _index) external view returns (uint256); - function getNewCurrentVariables()external view returns (bytes32 _c,uint256[5] memory _r,uint256 _d,uint256 _t); - function getNewValueCountbyQueryId(bytes32 _queryId) external view returns(uint256); - function getTimestampbyQueryIdandIndex(bytes32 _queryId, uint256 _index) external view returns(uint256); - function retrieveData(bytes32 _queryId, uint256 _timestamp) external view returns(bytes memory); + + function getStakerInfo(address _staker) + external + view + returns (uint256, uint256); + + function getTimestampbyRequestIDandIndex(uint256 _requestId, uint256 _index) + external + view + returns (uint256); + + function getNewCurrentVariables() + external + view + returns ( + bytes32 _c, + uint256[5] memory _r, + uint256 _d, + uint256 _t + ); + + function getNewValueCountbyQueryId(bytes32 _queryId) + external + view + returns (uint256); + + function getTimestampbyQueryIdandIndex(bytes32 _queryId, uint256 _index) + external + view + returns (uint256); + + function retrieveData(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bytes memory); + //Governance - enum VoteResult {FAILED,PASSED,INVALID} + enum VoteResult { + FAILED, + PASSED, + INVALID + } + function setApprovedFunction(bytes4 _func, bool _val) external; - function beginDispute(bytes32 _queryId,uint256 _timestamp) external; + + function beginDispute(bytes32 _queryId, uint256 _timestamp) external; + function delegate(address _delegate) external; - function delegateOfAt(address _user, uint256 _blockNumber) external view returns (address); + + function delegateOfAt(address _user, uint256 _blockNumber) + external + view + returns (address); + function executeVote(uint256 _disputeId) external; - function proposeVote(address _contract,bytes4 _function, bytes calldata _data, uint256 _timestamp) external; + + function proposeVote( + 
address _contract, + bytes4 _function, + bytes calldata _data, + uint256 _timestamp + ) external; + function tallyVotes(uint256 _disputeId) external; + + function governance() external view returns (address); + function updateMinDisputeFee() external; - function verify() external pure returns(uint); - function vote(uint256 _disputeId, bool _supports, bool _invalidQuery) external; - function voteFor(address[] calldata _addys,uint256 _disputeId, bool _supports, bool _invalidQuery) external; - function getDelegateInfo(address _holder) external view returns(address,uint); - function isFunctionApproved(bytes4 _func) external view returns(bool); - function isApprovedGovernanceContract(address _contract) external returns (bool); - function getVoteRounds(bytes32 _hash) external view returns(uint256[] memory); - function getVoteCount() external view returns(uint256); - function getVoteInfo(uint256 _disputeId) external view returns(bytes32,uint256[9] memory,bool[2] memory,VoteResult,bytes memory,bytes4,address[2] memory); - function getDisputeInfo(uint256 _disputeId) external view returns(uint256,uint256,bytes memory, address); - function getOpenDisputesOnId(bytes32 _queryId) external view returns(uint256); - function didVote(uint256 _disputeId, address _voter) external view returns(bool); + + function verify() external pure returns (uint256); + + function vote( + uint256 _disputeId, + bool _supports, + bool _invalidQuery + ) external; + + function voteFor( + address[] calldata _addys, + uint256 _disputeId, + bool _supports, + bool _invalidQuery + ) external; + + function getDelegateInfo(address _holder) + external + view + returns (address, uint256); + + function isFunctionApproved(bytes4 _func) external view returns (bool); + + function isApprovedGovernanceContract(address _contract) + external + returns (bool); + + function getVoteRounds(bytes32 _hash) + external + view + returns (uint256[] memory); + + function getVoteCount() external view returns (uint256); + + function getVoteInfo(uint256 _disputeId) + external + view + returns ( + bytes32, + uint256[9] memory, + bool[2] memory, + VoteResult, + bytes memory, + bytes4, + address[2] memory + ); + + function getDisputeInfo(uint256 _disputeId) + external + view + returns ( + uint256, + uint256, + bytes memory, + address + ); + + function getOpenDisputesOnId(bytes32 _queryId) + external + view + returns (uint256); + + function didVote(uint256 _disputeId, address _voter) + external + view + returns (bool); + //Oracle - function getReportTimestampByIndex(bytes32 _queryId, uint256 _index) external view returns(uint256); - function getValueByTimestamp(bytes32 _queryId, uint256 _timestamp) external view returns(bytes memory); - function getBlockNumberByTimestamp(bytes32 _queryId, uint256 _timestamp) external view returns(uint256); - function getReportingLock() external view returns(uint256); - function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) external view returns(address); - function reportingLock() external view returns(uint256); + function getReportTimestampByIndex(bytes32 _queryId, uint256 _index) + external + view + returns (uint256); + + function getValueByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bytes memory); + + function getBlockNumberByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns (uint256); + + function getReportingLock() external view returns (uint256); + + function getReporterByTimestamp(bytes32 _queryId, uint256 _timestamp) + external + view + returns 
(address); + + function reportingLock() external view returns (uint256); + function removeValue(bytes32 _queryId, uint256 _timestamp) external; - function getReportsSubmittedByAddress(address _reporter) external view returns(uint256); function getTipsByUser(address _user) external view returns(uint256); function tipQuery(bytes32 _queryId, uint256 _tip, bytes memory _queryData) external; function submitValue(bytes32 _queryId, bytes calldata _value, uint256 _nonce, bytes memory _queryData) external; function burnTips() external; + function changeReportingLock(uint256 _newReportingLock) external; + function getReportsSubmittedByAddress(address _reporter) external view returns(uint256); function changeTimeBasedReward(uint256 _newTimeBasedReward) external; function getReporterLastTimestamp(address _reporter) external view returns(uint256); function getTipsById(bytes32 _queryId) external view returns(uint256); @@ -95,24 +296,189 @@ interface ITellor{ function getCurrentValue(bytes32 _queryId) external view returns(bytes memory); function getDataBefore(bytes32 _queryId, uint256 _timestamp) external view returns(bool _ifRetrieve, bytes memory _value, uint256 _timestampRetrieved); function getTimeOfLastNewValue() external view returns(uint256); - function getIndexForDataBefore(bytes32 _queryId, uint256 _timestamp) external view returns (bool _found, uint256 _index); - //Treasury - function issueTreasury(uint256 _maxAmount, uint256 _rate, uint256 _duration) external; - function payTreasury(address _investor,uint256 _id) external; - function buyTreasury(uint256 _id,uint256 _amount) external; - function getTreasuryDetails(uint256 _id) external view returns(uint256,uint256,uint256,uint256); - function getTreasuryFundsByUser(address _user) external view returns(uint256); - function getTreasuryAccount(uint256 _id, address _investor) external view returns(uint256,uint256,bool); - function getTreasuryCount() external view returns(uint256); - function getTreasuryOwners(uint256 _id) external view returns(address[] memory); - function wasPaid(uint256 _id, address _investor) external view returns(bool); + function depositStake(uint256 _amount) external; + function requestStakingWithdraw(uint256 _amount) external; + //Test functions function changeAddressVar(bytes32 _id, address _addy) external; //parachute functions function killContract() external; - function migrateFor(address _destination,uint256 _amount) external; + + function migrateFor(address _destination, uint256 _amount) external; + function rescue51PercentAttack(address _tokenHolder) external; + function rescueBrokenDataReporting() external; + function rescueFailedUpdate() external; + + //Tellor 360 + function addStakingRewards(uint256 _amount) external; + + function _sliceUint(bytes memory _b) + external + pure + returns (uint256 _number); + + function claimOneTimeTip(bytes32 _queryId, uint256[] memory _timestamps) + external; + + function claimTip( + bytes32 _feedId, + bytes32 _queryId, + uint256[] memory _timestamps + ) external; + + function fee() external view returns (uint256); + + function feedsWithFunding(uint256) external view returns (bytes32); + + function fundFeed( + bytes32 _feedId, + bytes32 _queryId, + uint256 _amount + ) external; + + function getCurrentFeeds(bytes32 _queryId) + external + view + returns (bytes32[] memory); + + function getCurrentTip(bytes32 _queryId) external view returns (uint256); + + function getDataAfter(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bytes memory _value, uint256 
_timestampRetrieved); + + function getDataFeed(bytes32 _feedId) + external + view + returns (Autopay.FeedDetails memory); + + function getFundedFeeds() external view returns (bytes32[] memory); + + function getFundedQueryIds() external view returns (bytes32[] memory); + + function getIndexForDataAfter(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bool _found, uint256 _index); + + function getIndexForDataBefore(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bool _found, uint256 _index); + + function getMultipleValuesBefore( + bytes32 _queryId, + uint256 _timestamp, + uint256 _maxAge, + uint256 _maxCount + ) + external + view + returns (uint256[] memory _values, uint256[] memory _timestamps); + + function getPastTipByIndex(bytes32 _queryId, uint256 _index) + external + view + returns (Autopay.Tip memory); + + function getPastTipCount(bytes32 _queryId) external view returns (uint256); + + function getPastTips(bytes32 _queryId) + external + view + returns (Autopay.Tip[] memory); + + function getQueryIdFromFeedId(bytes32 _feedId) + external + view + returns (bytes32); + + function getRewardAmount( + bytes32 _feedId, + bytes32 _queryId, + uint256[] memory _timestamps + ) external view returns (uint256 _cumulativeReward); + + function getRewardClaimedStatus( + bytes32 _feedId, + bytes32 _queryId, + uint256 _timestamp + ) external view returns (bool); + + function getTipsByAddress(address _user) external view returns (uint256); + + function isInDispute(bytes32 _queryId, uint256 _timestamp) + external + view + returns (bool); + + function queryIdFromDataFeedId(bytes32) external view returns (bytes32); + + function queryIdsWithFunding(uint256) external view returns (bytes32); + + function queryIdsWithFundingIndex(bytes32) external view returns (uint256); + + function setupDataFeed( + bytes32 _queryId, + uint256 _reward, + uint256 _startTime, + uint256 _interval, + uint256 _window, + uint256 _priceThreshold, + uint256 _rewardIncreasePerSecond, + bytes memory _queryData, + uint256 _amount + ) external; + + function tellor() external view returns (address); + + function tip( + bytes32 _queryId, + uint256 _amount, + bytes memory _queryData + ) external; + + function tips(bytes32, uint256) + external + view + returns (uint256 amount, uint256 timestamp); + + function token() external view returns (address); + + function userTipsTotal(address) external view returns (uint256); + + function valueFor(bytes32 _id) + external + view + returns ( + int256 _value, + uint256 _timestamp, + uint256 _statusCode + ); +} + +interface Autopay { + struct FeedDetails { + uint256 reward; + uint256 balance; + uint256 startTime; + uint256 interval; + uint256 window; + uint256 priceThreshold; + uint256 rewardIncreasePerSecond; + uint256 feedsWithFundingIndex; + } + + struct Tip { + uint256 amount; + uint256 timestamp; + } + function getStakeAmount() external view returns(uint256); + function stakeAmount() external view returns(uint256); + function token() external view returns(address); } diff --git a/src/telliot_feeds/queries/catalog.py b/src/telliot_feeds/queries/catalog.py index 2341f24d..fcfc235d 100644 --- a/src/telliot_feeds/queries/catalog.py +++ b/src/telliot_feeds/queries/catalog.py @@ -103,7 +103,7 @@ def find( def to_yaml(self) -> str: all_entries = self.find() - return yaml.dump(clamfig.serialize(all_entries), sort_keys=False) # type: ignore + return yaml.dump(clamfig.serialize(all_entries), sort_keys=False) def to_markdown(self) -> str: lines = ["# TellorX Query Catalog", 
""]
Check for Disputed Value Tip availability

When a reporter submits a value that qualifies for a tip, that tip is made available again if the submitted value is disputed. Currently the telliot tip listener may not be hearing these tips. This issue is to make sure that the data keeps flowing in these situations.
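A minimal sketch of the behavior the issue describes, using hypothetical names (`Submission`, `tip_still_available`) for illustration rather than the actual telliot implementation: a one-time tip is consumed only by an undisputed submission inside the window, so if every submission was disputed the listener should surface the tip again.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Submission:
    timestamp: int
    disputed: bool  # set when governance removes the reported value


def tip_still_available(submissions_in_window: List[Submission]) -> bool:
    """The tip stays claimable when no undisputed submission exists in the window."""
    return all(s.disputed for s in submissions_in_window)


# The only report in the window was disputed -> the tip should be heard again
print(tip_still_available([Submission(1000, disputed=True)]))  # True
```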
2023-05-09T15:08:50
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-667
e03d2bfc24afe4ae8780262e59f64732924ddc7c
diff --git a/src/telliot_feeds/reporters/tips/listener/assemble_call.py b/src/telliot_feeds/reporters/tips/listener/assemble_call.py index 446d20c7..05aeb913 100644 --- a/src/telliot_feeds/reporters/tips/listener/assemble_call.py +++ b/src/telliot_feeds/reporters/tips/listener/assemble_call.py @@ -13,6 +13,9 @@ class AssembleCall: Assemble call object for autopay functions to batch call them using multicall """ + # set gas limit to None since this will mostly be used for read-only calls + gas_limit: Optional[int] = None + def __init__(self) -> None: self.autopay: TellorFlexAutopayContract @@ -29,7 +32,9 @@ async def multi_call( - dictionary of of Any type key, could be tuple, string, or number """ status = ResponseStatus() - multi_call = Multicall(calls=calls, _w3=self.autopay.node._web3, require_success=success) + multi_call = Multicall( + calls=calls, _w3=self.autopay.node._web3, require_success=success, gas_limit=self.gas_limit + ) try: data: dict[Any, Any] = await multi_call.coroutine() return data, status diff --git a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py index 8d0b9ad6..847b6937 100644 --- a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py +++ b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py @@ -104,6 +104,16 @@ async def price_change(self, query_data: bytes, value_before: bytes) -> Optional val = getattr(query, param) setattr(datafeed.source, param, val) + # if no value before return 100% change which indicates that price threshold is met + if not value_before: + logger.debug(f"No value before for {query.type}") + return _get_price_change(previous_val=0, current_val=0) + + # before value has to be of length 32 since uint256 has to be 32 bytes to be decoded by eth_abi + if len(value_before) != 32: + logger.info(f"Before value type isn't uint256 {value_before.hex()}; can't calculate price change") + return None + # TODO: handle case when value_before is not uint256 but is exactly 32 bytes value_before_decoded = query.value_type.decode(value_before) if not isinstance(value_before_decoded, (int, float)): logger.info(f"Before value is not a number {value_before_decoded} can't calculate price change") diff --git a/src/telliot_feeds/sources/mimicry/mashup_source.py b/src/telliot_feeds/sources/mimicry/mashup_source.py index f7d4dd42..602bdf19 100644 --- a/src/telliot_feeds/sources/mimicry/mashup_source.py +++ b/src/telliot_feeds/sources/mimicry/mashup_source.py @@ -1,4 +1,5 @@ import asyncio +from asyncio.exceptions import TimeoutError from dataclasses import dataclass from typing import Any from typing import Optional @@ -34,6 +35,7 @@ class NFTMashupSource(DataSource[str]): currency: Optional[str] = None # ("usd") collections: Optional[list[tuple[str, str]]] = None # ("chain-name", "contract-address") tokens: Optional[list[tuple[str, str, str]]] = None # ("chain-name", "token-symbol", "contract-address") + retries: int = 3 async def fetch(self, url: str, headers: Optional[dict[str, str]]) -> Optional[Any]: """ @@ -67,9 +69,7 @@ async def fetch_urls(self, urls: list[str], headers: Optional[dict[str, str]] = responses = await asyncio.gather(*tasks) return responses - async def fetch_url_with_retry( - self, url: str, headers: Optional[dict[str, str]], retries: int = 3 - ) -> Optional[Any]: + async def fetch_url_with_retry(self, url: str, headers: Optional[dict[str, str]]) -> Optional[Any]: """ Fetches a response from an api with retry logic. 
@@ -80,13 +80,14 @@ async def fetch_url_with_retry( Returns: Any: A list of responses. """ + retries = self.retries for attempt in range(retries): try: response = await self.fetch(url=url, headers=headers) return response - except (ClientError, ClientConnectionError, ClientResponseError) as e: + except (ClientError, ClientConnectionError, ClientResponseError, TimeoutError) as e: if attempt == retries - 1: - logger.warning(f"Failed to fetch from {url} after {retries} attempts: {e}.") + logger.warning(f"Failed to fetch from {url} after {retries} attempts: {type(e).__name__}.") return None wait_time = 2**attempt await asyncio.sleep(wait_time) @@ -130,6 +131,9 @@ async def fetch_collections_mcap(self) -> Optional[Any]: if response is not None: try: market_cap = response[metric] + if not market_cap: + logger.debug(f"No value fetched for collection: {self.collections[idx]}") + return None collection_mcaps.append(market_cap) except KeyError as e: logger.warning( @@ -167,10 +171,13 @@ async def fetch_tokens_mcap(self) -> Optional[Any]: responses = await self.fetch_urls(urls=urls) token_mcaps = [] - for response in responses: + for idx, response in enumerate(responses): if response is not None: try: market_cap = response["market_data"]["market_cap"][self.currency.lower()] + if not market_cap: + logger.debug(f"No value fetched for token: {self.tokens[idx]}") + return None token_mcaps.append(market_cap) except Exception as e: logger.warning(f"Failed to fetch token's market cap: {e}.") @@ -185,7 +192,7 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[Any]: collections_mcaps = await self.fetch_collections_mcap() tokens_mcaps = await self.fetch_tokens_mcap() - if collections_mcaps is None or tokens_mcaps is None: + if not collections_mcaps or not tokens_mcaps: logger.warning( f"Failed to fetch all market caps. tokens = {tokens_mcaps}, collections = {collections_mcaps}" )
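The retry loop in the patch above backs off exponentially (`wait_time = 2**attempt`) and now also catches `asyncio.exceptions.TimeoutError`. A generic sketch of that pattern, assuming only `aiohttp` and an arbitrary async `fetch` callable:

```python
import asyncio
from typing import Any, Awaitable, Callable, Optional

from aiohttp import ClientError


async def fetch_with_retry(
    fetch: Callable[[], Awaitable[Any]], retries: int = 3
) -> Optional[Any]:
    """Retry an async fetch, sleeping 1s, 2s, 4s, ... between attempts."""
    for attempt in range(retries):
        try:
            return await fetch()
        except (ClientError, asyncio.TimeoutError) as e:
            if attempt == retries - 1:
                # Log only the exception type, as the patch does, because a bare
                # asyncio.TimeoutError stringifies to an empty message.
                print(f"failed after {retries} attempts: {type(e).__name__}")
                return None
            await asyncio.sleep(2**attempt)
    return None
```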
NFTgo Mimicry Mashup related Error start command: `telliot report -a polygon1 -p 10 -wp 120` version: telliot-core #037abbd1d2dd27d5c2278d434d1375d62731c584 telliot-feeds #54908ccba1ed65a2cbfd2062418266d661242e76 log: ``` ERROR | telliot_core | Exception occurred in telliot-core app ERROR | telliot_core | <class 'asyncio.exceptions.TimeoutError'> ERROR | telliot_core | ERROR | telliot_core | [' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report\n await reporter.report()\n', ' File "/home/ubuntu/tell iot-feeds/src/telliot_feeds/reporters/interval.py", line 511, in report\n _, _ = await self.report_once()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interva l.py", line 366, in report_once\n datafeed = await self.fetch_datafeed()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tellor_360.py", line 282, in fetch_dataf eed\n suggested_feed, tip_amount = await get_feed_and_tip(self.autopay)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/suggest_datafeed.py", line 43, in ge t_feed_and_tip\n feed_tips = await funded_feeds.querydata_and_tip(current_time=current_timestamp)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/f unded_feeds.py", line 112, in querydata_and_tip\n eligible_funded_feeds, status = await self.filtered_funded_feeds(\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporte rs/tips/listener/funded_feeds.py", line 76, in filtered_funded_feeds\n feeds_timestsamps_and_values_filtered = await self.window_and_priceThreshold_unmet_filter(\n', ' File "/home/ ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 241, in window_and_priceThreshold_unmet_filter\n price_change = await self.price_change( \n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 112, in price_change\n value_now = await datafeed.source.fetch_new_d atapoint()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 186, in fetch_new_datapoint\n tokens_mcaps = await self.fetch_tokens_mca p()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 168, in fetch_tokens_mcap\n responses = await self.fetch_urls(urls=urls)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 67, in fetch_urls\n responses = await asyncio.gather(*tasks)\n', ' File "/home/ubuntu/te lliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 85, in fetch_url_with_retry\n response = await self.fetch(url=url, headers=headers)\n', ' File "/home/ubuntu/t elliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 52, in fetch\n async with session.get(url, timeout=timeout) as response:\n', ' File "/home/ubuntu/tenv/lib/py thon3.9/site-packages/aiohttp/client.py", line 1141, in __aenter__\n self._resp = await self._coro\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/client.py", lin e 560, in _request\n await resp.start(conn)\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/client_reqrep.py", line 914, in start\n self._continue = None\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/helpers.py", line 720, in __exit__\n raise asyncio.TimeoutError from None\n'] Traceback (most recent call last): File "/home/ubuntu/tenv/bin/telliot", line 8, in <module> sys.exit(main()) File 
"/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1657, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/decorators.py", line 26, in new_func return f(get_current_context(), *args, **kwargs) File "/home/ubuntu/telliot-core/src/telliot_core/cli/utils.py", line 18, in wrapper return asyncio.run(f(*args, **kwargs)) File "/usr/lib/python3.9/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/usr/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete return future.result() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report await reporter.report() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 511, in report _, _ = await self.report_once() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 366, in report_once datafeed = await self.fetch_datafeed() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tellor_360.py", line 282, in fetch_datafeed suggested_feed, tip_amount = await get_feed_and_tip(self.autopay) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/suggest_datafeed.py", line 43, in get_feed_and_tip feed_tips = await funded_feeds.querydata_and_tip(current_time=current_timestamp) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 112, in querydata_and_tip eligible_funded_feeds, status = await self.filtered_funded_feeds( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 76, in filtered_funded_feeds feeds_timestsamps_and_values_filtered = await self.window_and_priceThreshold_unmet_filter( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 241, in window_and_priceThreshold_unmet_filter price_change = await self.price_change( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 112, in price_change value_now = await datafeed.source.fetch_new_datapoint() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 186, in fetch_new_datapoint tokens_mcaps = await self.fetch_tokens_mcap() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 168, in fetch_tokens_mcap responses = await self.fetch_urls(urls=urls) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 67, in fetch_urls responses = await asyncio.gather(*tasks) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 85, in fetch_url_with_retry response = await self.fetch(url=url, headers=headers) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/sources/mimicry/mashup_source.py", line 52, in fetch async with session.get(url, timeout=timeout) as response: File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/client.py", line 1141, in __aenter__ 
self._resp = await self._coro File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/client.py", line 560, in _request await resp.start(conn) File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/client_reqrep.py", line 914, in start self._continue = None File "/home/ubuntu/tenv/lib/python3.9/site-packages/aiohttp/helpers.py", line 720, in __exit__ raise asyncio.TimeoutError from None asyncio.exceptions.TimeoutError ```
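The timeout escapes the old `except (ClientError, ClientConnectionError, ClientResponseError)` clause because the exception raised by aiohttp's timer context (`raise asyncio.TimeoutError from None` in the traceback) is a plain `asyncio.TimeoutError`, which is not an `aiohttp.ClientError` subclass:

```python
import asyncio

import aiohttp

print(issubclass(asyncio.TimeoutError, aiohttp.ClientError))  # False
```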
2023-04-26T11:46:42
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-649
13289b0c3eb50642462c701011f63b8d15af1ad5
diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index 61bade33..cd3ca07b 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -131,6 +131,21 @@ def valid_diva_chain(chain_id: int) -> bool: return True +def convert_input(target_type: Callable[[Union[str, int, float]], Any], input_str: str) -> Any: + """Convert an input string from cli to whatever type is needed for the QueryParameter""" + if target_type == bytes: + if input_str.startswith("0x"): + return bytes.fromhex(input_str[2:]) + if target_type == int: + return int(input_str) + elif target_type == float: + return float(input_str) + elif target_type == str: + return input_str + else: + return target_type(input_str) + + def build_feed_from_input() -> Optional[DataFeed[Any]]: """ Build a DataFeed from CLI input @@ -180,17 +195,11 @@ def build_feed_from_input() -> Optional[DataFeed[Any]]: param_dtype = type_hints[query_param] val = input(f"Enter value for QueryParameter {query_param}: ") - try: - val = eval(val) # try to evaluate input if it's not string type - except NameError: - pass if val is not None: try: # cast input from string to datatype of query parameter - if param_dtype == bytes and val.startswith("0x"): - val = bytes.fromhex(val[2:]) - val = param_dtype(val) + val = convert_input(param_dtype, val) setattr(feed.query, query_param, val) setattr(feed.source, query_param, val) except ValueError: diff --git a/src/telliot_feeds/feeds/__init__.py b/src/telliot_feeds/feeds/__init__.py index f0977776..f1159277 100644 --- a/src/telliot_feeds/feeds/__init__.py +++ b/src/telliot_feeds/feeds/__init__.py @@ -74,7 +74,7 @@ from telliot_feeds.feeds.yfi_usd_feed import yfi_usd_median_feed -CATALOG_FEEDS = { +CATALOG_FEEDS: Dict[str, DataFeed[Any]] = { "ampleforth-custom": ampl_usd_vwap_feed, "ampleforth-uspce": uspce_feed, "eth-jpy-spot": eth_jpy_median_feed, diff --git a/src/telliot_feeds/feeds/mimicry/macro_market_mashup_feed.py b/src/telliot_feeds/feeds/mimicry/macro_market_mashup_feed.py index e9d4fc4f..e9cf9d98 100644 --- a/src/telliot_feeds/feeds/mimicry/macro_market_mashup_feed.py +++ b/src/telliot_feeds/feeds/mimicry/macro_market_mashup_feed.py @@ -11,9 +11,9 @@ ("ethereum-mainnet", "0x34d85c9cdeb23fa97cb08333b511ac86e1c4e258"), ] TOKENS = [ - ("ethereum-mainnet", "sand", "0x3845badAde8e6dFF049820680d1F14bD3903a5d0"), - ("ethereum-mainnet", "mana", "0x0F5D2fB29fb7d3CFeE444a200298f468908cC942"), - ("ethereum-mainnet", "ape", "0x4d224452801ACEd8B2F0aebE155379bb5D594381"), + ("ethereum-mainnet", "sand", "0x3845badade8e6dff049820680d1f14bd3903a5d0"), + ("ethereum-mainnet", "mana", "0x0f5d2fb29fb7d3cfee444a200298f468908cc942"), + ("ethereum-mainnet", "ape", "0x4d224452801aced8b2f0aebe155379bb5d594381"), ] mimicry_mashup_example_feed = DataFeed( diff --git a/src/telliot_feeds/reporters/interval.py b/src/telliot_feeds/reporters/interval.py index 51383af1..3e9b57b4 100644 --- a/src/telliot_feeds/reporters/interval.py +++ b/src/telliot_feeds/reporters/interval.py @@ -302,7 +302,7 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: if suggested_qtag is None: logger.warning("Could not get suggested query") return None - self.datafeed = CATALOG_FEEDS[suggested_qtag] # type: ignore + self.datafeed = CATALOG_FEEDS[suggested_qtag] return self.datafeed diff --git a/src/telliot_feeds/reporters/reporter_autopay_utils.py b/src/telliot_feeds/reporters/reporter_autopay_utils.py index a0aad605..8fc87b58 100644 --- a/src/telliot_feeds/reporters/reporter_autopay_utils.py +++ 
b/src/telliot_feeds/reporters/reporter_autopay_utils.py @@ -453,7 +453,7 @@ async def _get_feed_suggestion(feeds: Any, current_values: Any) -> Any: query_id_with_tips[query_tag] += feed_details.reward else: datafeed = CATALOG_FEEDS[query_tag] - value_now = await datafeed.source.fetch_new_datapoint() # type: ignore + value_now = await datafeed.source.fetch_new_datapoint() # value is always a number for a price oracle submission # convert bytes value to int try: @@ -461,20 +461,20 @@ async def _get_feed_suggestion(feeds: Any, current_values: Any) -> Any: except ValueError: logger.info("Can't check price threshold, oracle price submission not a number") continue - if not value_now: + if value_now[0] is None or value_before_now is None: note = f"Unable to fetch {datafeed} price for tip calculation" error_status(note=note, log=logger.warning) continue - value_now = value_now[0] + current_value = value_now[0] if value_before_now == 0: price_change = 10000 - elif value_now >= value_before_now: - price_change = (10000 * (value_now - value_before_now)) / value_before_now + elif current_value >= value_before_now: + price_change = (10000 * (current_value - value_before_now)) / value_before_now else: - price_change = (10000 * (value_before_now - value_now)) / value_before_now + price_change = (10000 * (value_before_now - current_value)) / value_before_now if price_change > feed_details.priceThreshold: if query_tag not in query_id_with_tips: diff --git a/src/telliot_feeds/reporters/tellor_flex.py b/src/telliot_feeds/reporters/tellor_flex.py index bac4fc18..b53f2936 100644 --- a/src/telliot_feeds/reporters/tellor_flex.py +++ b/src/telliot_feeds/reporters/tellor_flex.py @@ -249,7 +249,7 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: suggested_qtag, autopay_tip = await autopay_suggested_report(self.autopay) if suggested_qtag: self.autopaytip = autopay_tip - self.datafeed = CATALOG_FEEDS[suggested_qtag] # type: ignore + self.datafeed = CATALOG_FEEDS[suggested_qtag] return self.datafeed if suggested_qtag is None: @@ -261,7 +261,7 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: logger.warning(f"Suggested query tag not in catalog: {suggested_qtag}") return None else: - self.datafeed = CATALOG_FEEDS[suggested_qtag] # type: ignore + self.datafeed = CATALOG_FEEDS[suggested_qtag] self.autopaytip = await self.rewards() return self.datafeed return None diff --git a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py index 8861e559..28247d56 100644 --- a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py +++ b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py @@ -1,4 +1,5 @@ import math +from typing import Any from typing import Optional from eth_abi import encode_single @@ -6,6 +7,7 @@ from telliot_core.utils.response import error_status from web3 import Web3 as w3 +from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import CATALOG_FEEDS from telliot_feeds.reporters.tips import CATALOG_QUERY_IDS from telliot_feeds.reporters.tips.listener.dtypes import QueryIdandFeedDetails @@ -81,15 +83,16 @@ async def price_change(self, query_id: bytes, value_before: float) -> Optional[f query_tag = CATALOG_QUERY_IDS[query_id] if query_tag in CATALOG_FEEDS: - datafeed = CATALOG_FEEDS[query_tag] + datafeed: DataFeed[Any] = CATALOG_FEEDS[query_tag] else: logger.info(f"No Api source found for {query_tag} to check priceThreshold") return None if query_id not in self.prices: - 
value_now = await datafeed.source.fetch_new_datapoint() # type: ignore - - if not value_now: - note = f"Unable to fetch {datafeed} price for tip calculation" + value_now = await datafeed.source.fetch_new_datapoint() + if value_now[0] is None: + note = ( + f"Unable to fetch data from API for {datafeed.query.descriptor}, to check if price threshold is met" + ) _ = error_status(note=note, log=logger.warning) return None @@ -228,6 +231,7 @@ async def window_and_priceThreshold_unmet_filter( ) if price_change is None: # unable to fetch price data + feeds.remove(feed) continue if price_change < price_threshold: feeds.remove(feed) diff --git a/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py b/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py index 713ef202..8af23e30 100644 --- a/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py +++ b/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py @@ -70,7 +70,7 @@ def qtag_in_feed_mapping(self, qdata: bytes) -> Optional[DataFeed[Any]]: return None if qtag in CATALOG_FEEDS: datafeed = CATALOG_FEEDS[qtag] - return datafeed # type: ignore + return datafeed else: return None diff --git a/src/telliot_feeds/utils/reporter_utils.py b/src/telliot_feeds/utils/reporter_utils.py index 7fdc96da..3f5228fe 100644 --- a/src/telliot_feeds/utils/reporter_utils.py +++ b/src/telliot_feeds/utils/reporter_utils.py @@ -69,7 +69,7 @@ async def tellor_suggested_report( def suggest_random_feed() -> DataFeed[Any]: """Suggest a random feed to report against.""" - return random.choice(list(CATALOG_FEEDS.values())) # type: ignore + return random.choice(list(CATALOG_FEEDS.values())) async def is_online() -> bool:
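The guard change in this patch matters because `fetch_new_datapoint()` returns a `(value, timestamp)` tuple and returns `(None, None)` when the upstream fetch fails; a two-element tuple is truthy, so the old `if not value_now:` check never fired:

```python
value_now = (None, None)  # what a failed fetch_new_datapoint() returns

print(bool(value_now))       # True  -> old guard `if not value_now:` is skipped
print(value_now[0] is None)  # True  -> patched guard catches the failed fetch
```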
TypeError: '>=' not supported between instances of 'NoneType' and 'float' Just had this error pop up while reporting on polygon (listening for tips) with start command: `telliot report -a polygon1 -wp 60` versions: telliot-core.git@7adc3a4bb83e313c45b4936837db51ca9f014e0a telliot-feeds.git@13289b0c3eb50642462c701011f63b8d15af1ad5 ``` ERROR | telliot_feeds.sources.mimicry.nft_market_index | Failed to fetch collections details from NFTGo API. ERROR | telliot_core | Exception occurred in telliot-core app ERROR | telliot_core | <class 'TypeError'> ERROR | telliot_core | '>=' not supported between instances of 'NoneType' and 'float' ERROR | telliot_core | [' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report\n await reporter.report()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 511, in report\n _, _ = await self.report_once()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 366, in report_once\n datafeed = await self.fetch_datafeed()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tellor_360.py", line 282, in fetch_datafeed\n suggested_feed, tip_amount = await get_feed_and_tip(self.autopay)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/suggest_datafeed.py", line 44, in get_feed_and_tip\n feed_tips = await funded_feeds.querydata_and_tip(current_time=current_timestamp)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 133, in querydata_and_tip\n eligible_funded_feeds, status = await self.filtered_funded_feeds(\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 97, in filtered_funded_feeds\n feeds_timestsamps_and_values_filtered = await self.window_and_priceThreshold_unmet_filter(\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 225, in window_and_priceThreshold_unmet_filter\n price_change = await self.price_change(\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 98, in price_change\n return _get_price_change(previous_val=value_before, current_val=self.prices[query_id])\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 251, in _get_price_change\n elif current_val >= previous_val:\n'] Traceback (most recent call last): File "/home/ubuntu/tenv/bin/telliot", line 8, in <module> sys.exit(main()) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1657, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/decorators.py", line 26, in new_func return f(get_current_context(), *args, **kwargs) File "/home/ubuntu/telliot-core/src/telliot_core/cli/utils.py", line 18, in wrapper return asyncio.run(f(*args, **kwargs)) File "/usr/lib/python3.9/asyncio/runners.py", line 44, in run 
return loop.run_until_complete(main) File "/usr/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete return future.result() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report await reporter.report() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 511, in report _, _ = await self.report_once() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 366, in report_once datafeed = await self.fetch_datafeed() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tellor_360.py", line 282, in fetch_datafeed suggested_feed, tip_amount = await get_feed_and_tip(self.autopay) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/suggest_datafeed.py", line 44, in get_feed_and_tip feed_tips = await funded_feeds.querydata_and_tip(current_time=current_timestamp) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 133, in querydata_and_tip eligible_funded_feeds, status = await self.filtered_funded_feeds( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds.py", line 97, in filtered_funded_feeds feeds_timestsamps_and_values_filtered = await self.window_and_priceThreshold_unmet_filter( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 225, in window_and_priceThreshold_unmet_filter price_change = await self.price_change( File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 98, in price_change return _get_price_change(previous_val=value_before, current_val=self.prices[query_id]) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py", line 251, in _get_price_change elif current_val >= previous_val: TypeError: '>=' not supported between instances of 'NoneType' and 'float' ```
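The failure is easy to reproduce in isolation: after the NFTGo fetch fails, the cached price for the query id is `None`, and `None` does not support ordering comparisons against a float:

```python
current_val = None   # self.prices[query_id] after the API fetch failed
previous_val = 1.0

current_val >= previous_val  # TypeError: '>=' not supported between instances of 'NoneType' and 'float'
```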
I believe this was resolved by adding the NFTgo API key to the `api_keys.yaml` config file. After adding the key, this telliot instance picked up the Mimicry tip and submitted. It's running smoothly.
2023-04-07T12:41:37
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-629
49fb0131b578ab2c0d8d4a74ed7ac4a8174d99ae
diff --git a/src/telliot_feeds/feeds/mimicry_feed.py b/src/telliot_feeds/feeds/mimicry_feed.py index 195daafb..07e94b09 100644 --- a/src/telliot_feeds/feeds/mimicry_feed.py +++ b/src/telliot_feeds/feeds/mimicry_feed.py @@ -1,7 +1,7 @@ """DataFeed for MimicryCollectionStat query type. Calculates TAMI index or NFT market cap""" from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.mimicry import MimicryCollectionStat -from telliot_feeds.sources.mimicry import MimicryCollectionStatSource +from telliot_feeds.sources.mimicry.collection_stat import MimicryCollectionStatSource chain_id = None collection_address = None diff --git a/src/telliot_feeds/sources/mimicry.py b/src/telliot_feeds/sources/mimicry.py deleted file mode 100644 index 436d6d79..00000000 --- a/src/telliot_feeds/sources/mimicry.py +++ /dev/null @@ -1,117 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any -from typing import Optional - -import requests -from requests import JSONDecodeError -from requests import Response -from requests.adapters import HTTPAdapter -from urllib3.util import Retry - -from telliot_feeds.datasource import DataSource -from telliot_feeds.dtypes.datapoint import datetime_now_utc -from telliot_feeds.dtypes.datapoint import OptionalDataPoint -from telliot_feeds.utils.log import get_logger - - -logger = get_logger(__name__) -retry_strategy = Retry( - total=3, - backoff_factor=1, - status_forcelist=[429, 500, 502, 503, 504], - allowed_methods=["GET"], -) -adapter = HTTPAdapter(max_retries=retry_strategy) - - -@dataclass -class MimicryCollectionStatSource(DataSource[str]): - """DataSource for MimicryCollectionStat expected response data.""" - - chainId: Optional[int] = None - collectionAddress: Optional[str] = None - metric: Optional[int] = None - - async def fetch_mimicry_api(self) -> Optional[Response]: - """ - Request data from hosted api - - see https://github.com/Mimicry-Protocol/TAMI/ - """ - - if self.collectionAddress is not None: - self.collectionAddress = self.collectionAddress.lower() - - url = f"http://50.112.84.236:3000/api/stats?address={self.collectionAddress}&stat={self.metric}" - - with requests.Session() as s: - s.mount("https://", adapter) - try: - return s.get(url=url, timeout=10) - - except requests.exceptions.RequestException as e: - logger.error(f"Mimicry API error: {e}") - return None - - except requests.exceptions.Timeout as e: - logger.error(f"Mimicry API timed out: {e}") - return None - - async def fetch_new_datapoint( - self, - ) -> OptionalDataPoint[Any]: - """ - Calculates desired metric for a collection on the chosen chain id. 
- - Returns: - float -- the desired metric - """ - - if not self.collectionAddress: - logger.error("Missing a collection address for Mimicry NFT index calculation") - return None, None - - if self.metric is None: - logger.error("Missing a metric for Mimicry NFT index calculation") - return None, None - - rsp = await self.fetch_mimicry_api() - - if rsp is None: - logger.warning("No response from Mimicry API") - return None, None - - if rsp.status_code // 100 != 2: - logger.warning("Invalid response from Mimicry API: " + str(rsp.status_code)) - return None, None - - try: - mimicry_dict = rsp.json() - except JSONDecodeError as e: - logger.error("Mimicry API returned invalid JSON:", e.strerror) - return None, None - - if mimicry_dict == {}: - logger.warning("Mimicry API returned no data.") - return None, None - - try: - mimicry_stat = mimicry_dict["value"] - except KeyError: - logger.error("Unable to parse Mimicry API JSON response") - return None, None - - datapoint = (mimicry_stat, datetime_now_utc()) - self.store_datapoint(datapoint) - - logger.info( - f""" - Mimicry API data retrieved at time - {datapoint[1]} for metric {self.metric} - on collection {self.collectionAddress} - """ - ) - - return datapoint diff --git a/src/telliot_feeds/sources/mimicry/__init__.py b/src/telliot_feeds/sources/mimicry/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/telliot_feeds/sources/mimicry/collection_stat.py b/src/telliot_feeds/sources/mimicry/collection_stat.py new file mode 100644 index 00000000..1520ec09 --- /dev/null +++ b/src/telliot_feeds/sources/mimicry/collection_stat.py @@ -0,0 +1,232 @@ +from dataclasses import dataclass +from dataclasses import field +from datetime import datetime +from datetime import timezone +from typing import Any +from typing import List +from typing import Optional +from typing import Union + +import requests +from dateutil.relativedelta import relativedelta +from requests.adapters import HTTPAdapter +from requests.exceptions import RequestException +from requests.exceptions import Timeout +from urllib3.util import Retry + +from telliot_feeds.datasource import DataSource +from telliot_feeds.dtypes.datapoint import datetime_now_utc +from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.sources.mimicry.tami import tami +from telliot_feeds.sources.mimicry.types import Transaction +from telliot_feeds.sources.mimicry.utils import sort_transactions +from telliot_feeds.utils.log import get_logger + + +logger = get_logger(__name__) +retry_strategy = Retry( + total=3, + backoff_factor=1, + status_forcelist=[429, 500, 502, 503, 504], + allowed_methods=["GET"], +) +adapter = HTTPAdapter(max_retries=retry_strategy) + + +@dataclass +class TransactionList: + + transactions: List[Transaction] = field(default_factory=list) + floor_price: float = 0.0 + + +@dataclass +class MimicryCollectionStatSource(DataSource[float]): + """DataSource for MimicryCollectionStat expected response data.""" + + chainId: Optional[int] = None + collectionAddress: Optional[str] = None + metric: Optional[int] = None + + def get_collection_market_cap(self, transaction_history: TransactionList) -> Optional[float]: + """calculate the market cap of an NFT series based on a list of Transactions.""" + + values: List[Union[int, float]] = [] + + sorted_transactions = sort_transactions(transaction_history.transactions) + + sorted_transactions.reverse() + + last_sale_found = [] + + for sale in sorted_transactions: + + if sale.itemId in last_sale_found: + continue + + # 
For each token in the collection + # calculate its value by taking the greater value + # between the collection's floor price and last sale price of that NFT + if transaction_history.floor_price < sale.price: + values.append(sale.price) + else: + values.append(transaction_history.floor_price) + + last_sale_found.append(sale.itemId) + + return sum(values) + + async def request_historical_sales_data(self, contract: str, all: bool = True) -> Optional[TransactionList]: + """Requests historical sales + data of the selected collection. + Data retrieved from Reservoir. + + Agruments: + all (bool): if True, see all data for the selected collection (if False, only 12 months) + + Returns: + TransactionList: formatted historical sales data of a collection retrieved from Reservoir + + """ + continuation_token = "" + tx_list = TransactionList() + while True: + url = f"https://api.reservoir.tools/sales/v4?contract={contract}" + headers = {"accept": "*/*", "x-api-key": "demo-api-key"} + with requests.Session() as s: + s.mount("https://", adapter) + if not all: + one_year_ago = datetime.utcnow() - relativedelta(years=1) + start_timestamp = int(one_year_ago.timestamp()) + url += f"&startTimestamp={start_timestamp}" + else: + url += "&startTimestamp=0" + # paginate + if continuation_token: + url += "&continuation=" + continuation_token + # 1000 sales per page + url += "&limit=1000" + + try: + request = s.get(url, timeout=10, headers=headers) + request.raise_for_status() + except (RequestException, Timeout) as e: + logger.error(f"Request to Reservoir Sales API failed: {str(e)}") + return None + except Exception as e: + logger.error(f"Reservoir API request unsuccessful: {e}") + return None + + try: + sales_data = request.json()["sales"] + continuation_token = request.json()["continuation"] + except requests.exceptions.JSONDecodeError as e: + logger.error(f"Unable to parse Reservoir Sales API response: {str(e)}") + return None + + for sale in sales_data: + + try: + price = sale["price"]["amount"]["usd"] + item_id = sale["token"]["tokenId"] + timestamp = sale["timestamp"] + except KeyError as e: + logger.error("Mimicry: Reservoir Sales API KeyError: " + str(e)) + return None + tx = Transaction( + price=price, itemId=item_id, date=datetime.fromtimestamp(timestamp, tz=timezone.utc) + ) + tx_list.transactions.append(tx) + + # if on last page + if len(sales_data) < 1000: + break + + if self.metric == 1: + url = ( + "https://api.reservoir.tools/oracle/collections/floor-ask/v4?kind=" + f"spot&currency=0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48&twapSeconds=1&collection={contract}" + ) + headers = {"accept": "*/*", "x-api-key": "demo-api-key"} + + try: + request = s.get(url, timeout=10, headers=headers) + request.raise_for_status() + except (RequestException, Timeout) as e: + logger.error(f"Request to Reservoir FloorPrice API failed: {str(e)}") + return None + + try: + tx_list.floor_price = request.json()["price"] + except requests.exceptions.JSONDecodeError as e: + logger.error(f"Unable to parse price from Reservoir FloorPrice API response: {str(e)}") + return None + + return tx_list + + async def fetch_new_datapoint( + self, + ) -> OptionalDataPoint[Any]: + """ + Calculates desired metric for a collection on the chosen chain id. 
+ + Returns: + float -- the desired metric + """ + + if not self.collectionAddress: + logger.error("Missing a collection address for Mimicry NFT index calculation") + return None, None + + if self.metric == 0: + past_year_sales_data = await self.request_historical_sales_data(contract=self.collectionAddress, all=True) + if past_year_sales_data: + tami_value = tami(past_year_sales_data.transactions) + + if not tami_value: + logger.info( + f"Unable to calculate TAMI index" + f"for collection {self.collectionAddress} on chain id {self.chainId}" + ) + return None, None + datapoint = (tami_value, datetime_now_utc()) + self.store_datapoint(datapoint=datapoint) + return datapoint + else: + logger.error( + f"unable to retrieve NFT collection historical sales data for TAMI " + f"for collection {self.collectionAddress} on chain id {self.chainId}" + ) + return None, None + + elif self.metric == 1: + all_sales_data = await self.request_historical_sales_data(contract=self.collectionAddress) + if all_sales_data: + market_cap = self.get_collection_market_cap(all_sales_data) + + if not market_cap: + logger.info( + f"Unable to calculate NFT market cap" + f"for collection {self.collectionAddress} on chain id {self.chainId}" + ) + return None, None + datapoint = (market_cap, datetime_now_utc()) + self.store_datapoint(datapoint=datapoint) + return datapoint + else: + logger.error("unable to retrieve NFT collection historical sales data for total market cap") + return None, None + + else: + logger.info(msg=f"Invalid metric for Mimicry Protocol: {self.metric}") + return None, None + + +if __name__ == "__main__": + + import asyncio + + source = MimicryCollectionStatSource( + chainId=1, collectionAddress="0x5180db8F5c931aaE63c74266b211F580155ecac8", metric=0 + ) + print(asyncio.run(source.fetch_new_datapoint())) diff --git a/src/telliot_feeds/sources/mimicry/tami.py b/src/telliot_feeds/sources/mimicry/tami.py new file mode 100644 index 00000000..a0c6933f --- /dev/null +++ b/src/telliot_feeds/sources/mimicry/tami.py @@ -0,0 +1,107 @@ +from typing import Any +from typing import Dict +from typing import List +from typing import Optional +from typing import Union + +from telliot_feeds.sources.mimicry.types import IndexValueHistoryItem +from telliot_feeds.sources.mimicry.types import Transaction +from telliot_feeds.sources.mimicry.utils import filter_valid_transactions +from telliot_feeds.sources.mimicry.utils import sort_transactions + + +def create_index_value_history(transaction_history: List[Transaction]) -> List[IndexValueHistoryItem]: + """Given a list of transactions, this creates a list that contains the index value at the + time of each transaction, and includes the transaction as well. + + Args: + - transaction_history: A list of transactions sorted by date. 
+ + Returns: + - A list of IndexValueHistoryItem objects (itemId, price, indexValue, Transaction).""" + transaction_map: Dict[Union[float, int, str], Transaction] = {} + + last_index_value = 0.0 + last_divisor = 1.0 + + result = [] + + for i in range(len(transaction_history)): + transaction = transaction_history[i] + + is_first_sale = transaction_map.get(transaction.itemId) is None + + transaction_map[transaction.itemId] = transaction + + item_count = len(transaction_map) + + all_last_sold_value = sum([transaction_map[item].price for item in transaction_map]) + + index_value = all_last_sold_value / (item_count * last_divisor) + + if i == 0: + last_index_value = index_value + + result.append( + IndexValueHistoryItem( + itemId=transaction.itemId, price=transaction.price, indexValue=index_value, transaction=transaction + ) + ) + + continue + + next_divisor = last_divisor * (index_value / last_index_value) if is_first_sale else last_divisor + + weighted_index_value = all_last_sold_value / (item_count * next_divisor) + + last_index_value = weighted_index_value + last_divisor = next_divisor + + result.append( + IndexValueHistoryItem( + itemId=transaction.itemId, + price=transaction.price, + indexValue=weighted_index_value, + transaction=transaction, + ) + ) + + return result + + +def get_index_value(index_value_history: List[IndexValueHistoryItem]) -> Union[float, int]: + """Given a list of IndexValueHistoryItem, returns the index value of the last item.""" + return index_value_history[-1].indexValue if index_value_history else 0 + + +def get_index_ratios(index_valueHistory: List[IndexValueHistoryItem]) -> List[Dict[str, Any]]: + """Given a list of IndexValueHistoryItem, calculates the index ratio for the last transaction + of each item in the collection. Returns a list of dict objects where each object is items from + IndexValueHistoryItem with an additional `indexRatio` item added.""" + last_sale_map: Dict[Union[float, int, str], IndexValueHistoryItem] = {} + + for history_item in index_valueHistory: + last_sale_map[history_item.itemId] = history_item + + return [{**item.__dict__, "indexRatio": item.price / item.indexValue} for item in last_sale_map.values()] + + +def tami(transaction_history: List[Transaction]) -> Optional[float]: + """Given a list of transactions for a given collection, this calculates the + Time Adjusted Market Index for that collection. 
+ + Returns: + - Optional[float] + """ + sorted_transactions = sort_transactions(transaction_history) + valid_transactions = filter_valid_transactions(sorted_transactions) + index_value_history = create_index_value_history(valid_transactions) + + if len(index_value_history) == 0: + return None + + index_value = get_index_value(index_value_history) + index_ratios = get_index_ratios(index_value_history) + time_adjusted_values = [index_value * item["indexRatio"] for item in index_ratios] + time_adjusted_market_index: float = sum(time_adjusted_values) + return time_adjusted_market_index diff --git a/src/telliot_feeds/sources/mimicry/types.py b/src/telliot_feeds/sources/mimicry/types.py new file mode 100644 index 00000000..7bc6ffd8 --- /dev/null +++ b/src/telliot_feeds/sources/mimicry/types.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass +from datetime import datetime +from typing import Union + + +@dataclass +class Transaction: + itemId: Union[float, int] + price: Union[float, int] + date: datetime + + +@dataclass +class InclusionMapValue: + past_year_sale_count: int + has_sale_in_last_six_months: bool + is_valid: bool + + +@dataclass +class IndexRatiosByCollection: + price: Union[float, int] + indexPrice: Union[float, int] + indexRatio: Union[float, int] + + +@dataclass +class IndexValueHistoryItem: + itemId: Union[float, int, str] + price: Union[float, int] + indexValue: Union[float, int] + transaction: Transaction diff --git a/src/telliot_feeds/sources/mimicry/utils.py b/src/telliot_feeds/sources/mimicry/utils.py new file mode 100644 index 00000000..1cdaf8c2 --- /dev/null +++ b/src/telliot_feeds/sources/mimicry/utils.py @@ -0,0 +1,53 @@ +import operator +from datetime import datetime +from datetime import timezone +from typing import Dict +from typing import List +from typing import Union + +from dateutil.relativedelta import relativedelta + +from telliot_feeds.sources.mimicry.types import InclusionMapValue +from telliot_feeds.sources.mimicry.types import Transaction + + +def sort_transactions(transaction_history: List[Transaction]) -> List[Transaction]: + """Given a list of transactions, this returns those transactions sorted in chronological order.""" + return sorted(transaction_history, key=operator.attrgetter("date")) + + +def filter_valid_transactions(transaction_history: List[Transaction]) -> List[Transaction]: + """Given a list of transactions, this returns only transactions that have at least + 2 sales in the last year, and at least one sale in the last 6 months.""" + now = datetime.utcnow() + one_year_ago = (now - relativedelta(years=1)).replace(tzinfo=timezone.utc) + six_months_ago = (now - relativedelta(months=6)).replace(tzinfo=timezone.utc) + + inclusion_map: Dict[Union[float, int], InclusionMapValue] = {} + for transaction in transaction_history: + + item_id = transaction.itemId + date = transaction.date + + if item_id not in inclusion_map: + inclusion_map[item_id] = InclusionMapValue(0, False, False) + + current_map_item = inclusion_map[item_id] + + if current_map_item.is_valid: + continue + + if date < one_year_ago: + continue + + current_map_item.past_year_sale_count += 1 + + if date < six_months_ago: + continue + + current_map_item.has_sale_in_last_six_months = True + + if current_map_item.past_year_sale_count >= 2: + current_map_item.is_valid = True + + return [transaction for transaction in transaction_history if inclusion_map[transaction.itemId].is_valid]
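A quick, illustrative sanity check of the TAMI pipeline and market-cap helper introduced by this patch (not part of the patch itself; module paths follow the new files, and the sample transactions are invented):
```python
# Minimal sketch exercising tami() and get_collection_market_cap() on
# invented data; assumes the mimicry modules from the patch are importable.
from datetime import datetime, timezone

from dateutil.relativedelta import relativedelta

from telliot_feeds.sources.mimicry.collection_stat import (
    MimicryCollectionStatSource,
    TransactionList,
)
from telliot_feeds.sources.mimicry.tami import tami
from telliot_feeds.sources.mimicry.types import Transaction

now = datetime.now(timezone.utc)

# Item 1 has two sales inside the past year, one of them inside the last
# six months, so filter_valid_transactions() keeps it; item 2's only sale
# is two years old and is dropped.
sales = [
    Transaction(itemId=1, price=1.0, date=now - relativedelta(months=8)),
    Transaction(itemId=1, price=2.0, date=now - relativedelta(months=2)),
    Transaction(itemId=2, price=9.0, date=now - relativedelta(years=2)),
]

# Only item 1 survives the filter; its last sale sets the index, so this
# prints 2.0.
print(tami(sales))

# Market cap values each item at max(last sale, floor price):
# item 1 -> floor 3.0 (last sale 2.0 < floor), item 2 -> 9.0, total 12.0.
history = TransactionList(transactions=sales, floor_price=3.0)
source = MimicryCollectionStatSource(chainId=1, collectionAddress="0x0", metric=1)
print(source.get_collection_market_cap(history))
```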
mimicry feed test failing - [x] unskip test and fix ```console tests/feeds/test_mimicry_feed.py::test_fetch_new_datapoint ---------------------------------------------------------------------------- live log call ----------------------------------------------------------------------------- ERROR telliot_feeds.sources.mimicry:mimicry.py:55 Mimicry API error: HTTPConnectionPool(host='50.112.84.236', port=3000): Read timed out. (read timeout=10) WARNING telliot_feeds.sources.mimicry:mimicry.py:83 No response from Mimicry API FAILED ```
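One way to unskip this test without re-introducing the live-API flakiness is to stub the source's network call. A sketch under stated assumptions (pytest-asyncio installed, test name invented, and the Reservoir-backed `MimicryCollectionStatSource` from the patch above):
```python
# Sketch of a network-free test for the new source; assumes pytest-asyncio.
from datetime import datetime, timezone
from unittest.mock import AsyncMock, patch

import pytest

from telliot_feeds.sources.mimicry.collection_stat import (
    MimicryCollectionStatSource,
    TransactionList,
)
from telliot_feeds.sources.mimicry.types import Transaction


@pytest.mark.asyncio
async def test_fetch_new_datapoint_offline() -> None:
    source = MimicryCollectionStatSource(
        chainId=1,
        collectionAddress="0x5180db8F5c931aaE63c74266b211F580155ecac8",
        metric=1,
    )
    fake_history = TransactionList(
        transactions=[Transaction(itemId=1, price=12.0, date=datetime.now(timezone.utc))],
        floor_price=10.0,
    )
    # Replace the Reservoir call so no real HTTP request is made.
    with patch.object(
        MimicryCollectionStatSource,
        "request_historical_sales_data",
        new=AsyncMock(return_value=fake_history),
    ):
        value, timestamp = await source.fetch_new_datapoint()

    assert value == 12.0  # max(last sale 12.0, floor 10.0)
    assert timestamp is not None
```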
2023-03-17T13:59:53
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-626
dc31ee39568c6696ff5d23df1f413e39c275db71
diff --git a/src/telliot_feeds/reporters/flashbot.py b/src/telliot_feeds/reporters/flashbot.py index 36e466bb..3858ddb3 100644 --- a/src/telliot_feeds/reporters/flashbot.py +++ b/src/telliot_feeds/reporters/flashbot.py @@ -10,6 +10,7 @@ from eth_account.account import Account from eth_account.signers.local import LocalAccount from eth_utils import to_checksum_address +from requests.exceptions import HTTPError from telliot_core.utils.response import error_status from telliot_core.utils.response import ResponseStatus from web3 import Web3 @@ -179,7 +180,11 @@ async def report_once( # ) # ) # result = results[-1] - result = self.endpoint._web3.flashbots.send_bundle(bundle, target_block_number=block + 1) + try: + result = self.endpoint._web3.flashbots.send_bundle(bundle, target_block_number=block + 1) + except HTTPError as e: + msg = "Unable to send bundle to miners due to HTTP error" + return None, error_status(note=msg, e=e, log=logger.error) logger.info(f"Bundle sent to miners in block {block}") # Wait for transaction confirmation
Error Out with Flashbots "Internal Server Error" Message This error was encountered reporting on mainnet through the Flashbots relay. Start Command: `telliot report -a mainnet -sa flashbots -qt eth-usd-spot -mnb 0.1 -gps average` Versions: `telliot-core.git@cc6f165b0724bc70c6f5d96948bfb706e2046d1a` `telliot-feeds.git@91513444fbbcde6729d59b7e3c8ed1f0eeb38ce8` log: ``` ERROR | telliot_core | Exception occurred in telliot-core app ERROR | telliot_core | <class 'requests.exceptions.HTTPError'> ERROR | telliot_core | 500 Server Error: Internal Server Error for url: https://relay.flashbots.net/ ERROR | telliot_core | [' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report\n await reporter.report()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 464, in report\n _, _ = await self.report_once()\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/flashbot.py", line 156, in report_once\n result = self.endpoint._web3.flashbots.send_bundle(bundle, target_block_number=block + 1)\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/module.py", line 57, in caller\n result = w3.manager.request_blocking(method_str,\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/manager.py", line 197, in request_blocking\n response = self._make_request(method, params)\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/manager.py", line 150, in _make_request\n return request_func(method, params)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/flashbots/middleware.py", line 41, in middleware\n return flashbots_provider.make_request(method, params)\n', ' File "/home/ubuntu/telliot-feeds/src/telliot_feeds/flashbots/provider.py", line 57, in make_request\n raw_response = make_post_request(self.endpoint_uri, request_data, headers=headers)\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/_utils/request.py", line 49, in make_post_request\n response.raise_for_status()\n', ' File "/home/ubuntu/tenv/lib/python3.9/site-packages/requests/models.py", line 1021, in raise_for_status\n raise HTTPError(http_error_msg, response=self)\n'] Traceback (most recent call last): File "/home/ubuntu/tenv/bin/telliot", line 8, in <module> sys.exit(main()) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1130, in __call__ return self.main(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1055, in main rv = self.invoke(ctx) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1657, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/core.py", line 760, in invoke return __callback(*args, **kwargs) File "/home/ubuntu/tenv/lib/python3.9/site-packages/click/decorators.py", line 26, in new_func return f(get_current_context(), *args, **kwargs) File "/home/ubuntu/telliot-core/src/telliot_core/cli/utils.py", line 18, in wrapper return asyncio.run(f(*args, **kwargs)) File "/usr/lib/python3.9/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/usr/lib/python3.9/asyncio/base_events.py", line 647, in run_until_complete return future.result() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/cli/commands/report.py", line 496, in report await reporter.report() File
"/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/interval.py", line 464, in report _, _ = await self.report_once() File "/home/ubuntu/telliot-feeds/src/telliot_feeds/reporters/flashbot.py", line 156, in report_once result = self.endpoint._web3.flashbots.send_bundle(bundle, target_block_number=block + 1) File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/module.py", line 57, in caller result = w3.manager.request_blocking(method_str, File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/manager.py", line 197, in request_blocking response = self._make_request(method, params) File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/manager.py", line 150, in _make_request return request_func(method, params) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/flashbots/middleware.py", line 41, in middleware return flashbots_provider.make_request(method, params) File "/home/ubuntu/telliot-feeds/src/telliot_feeds/flashbots/provider.py", line 57, in make_request raw_response = make_post_request(self.endpoint_uri, request_data, headers=headers) File "/home/ubuntu/tenv/lib/python3.9/site-packages/web3/_utils/request.py", line 49, in make_post_request response.raise_for_status() File "/home/ubuntu/tenv/lib/python3.9/site-packages/requests/models.py", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: https://relay.flashbots.net/ ```
2023-03-14T12:32:41
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-618
7e49e8ea64625a47042070e68176724e66a78603
diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index e03c073d..b58086e8 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -369,7 +369,7 @@ async def report( return chosen_feed = None elif rng_timestamp is not None: - chosen_feed = await assemble_rng_datafeed(timestamp=rng_timestamp, node=core.endpoint, account=account) + chosen_feed = await assemble_rng_datafeed(timestamp=rng_timestamp) else: chosen_feed = None diff --git a/src/telliot_feeds/feeds/tellor_rng_feed.py b/src/telliot_feeds/feeds/tellor_rng_feed.py index 66eabe50..47c002ed 100644 --- a/src/telliot_feeds/feeds/tellor_rng_feed.py +++ b/src/telliot_feeds/feeds/tellor_rng_feed.py @@ -1,9 +1,6 @@ """Datafeed for pseudorandom number from hashing multiple blockhashes together.""" from typing import Optional -import chained_accounts -from telliot_core.model.endpoints import RPCEndpoint - from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.tellor_rng import TellorRNG from telliot_feeds.sources.blockhash_aggregator import TellorRNGManualSource @@ -13,11 +10,10 @@ tellor_rng_feed = DataFeed(source=local_source, query=TellorRNG(timestamp=local_source.timestamp)) -async def assemble_rng_datafeed( - timestamp: int, node: RPCEndpoint, account: chained_accounts -) -> Optional[DataFeed[float]]: +async def assemble_rng_datafeed(timestamp: int) -> Optional[DataFeed[float]]: """Assembles a TellorRNG datafeed for the given timestamp.""" - local_source.set_timestamp(timestamp) - feed = DataFeed(source=local_source, query=TellorRNG(timestamp=timestamp)) + source = TellorRNGManualSource() + source.set_timestamp(timestamp) + feed = DataFeed(source=source, query=TellorRNG(timestamp=timestamp)) return feed diff --git a/src/telliot_feeds/reporters/rng_interval.py b/src/telliot_feeds/reporters/rng_interval.py index f6cb9ead..27ab8e6d 100644 --- a/src/telliot_feeds/reporters/rng_interval.py +++ b/src/telliot_feeds/reporters/rng_interval.py @@ -53,7 +53,7 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: logger.info(status.error) return None - datafeed = await assemble_rng_datafeed(timestamp=rng_timestamp, node=self.endpoint, account=self.account) + datafeed = await assemble_rng_datafeed(timestamp=rng_timestamp) if datafeed is None: msg = "Unable to assemble RNG datafeed" error_status(note=msg, log=logger.warning)
Tellor RNG reporting the Same Number as a Random Number Every 15 Minutes start command: `telliot report -a mumbai1 --rng-auto -p YOLO -ncr -wp 60 -mnb 0.1` versions: `telliot-core.git@c1b98fd3371a2bfc25d594a561e4a28ff837c75a` `telliot-feeds.git@93a86468c6b00bf089aa74c159b7025b0025500e` I was browsing the data feed and noticed that the random numbers all looked kind of the same; when I decoded the values, I discovered that they were all exactly the same.
update on this issue: I updated telliot-feeds and telliot-core this morning and confirmed that the bug is still there. The RNG submitted a different number, but then reported that same number again. The start command was the same. versions: telliot-core.git@16c5d9dcd14c6a37054e718177f3c80ca09e7654 telliot-feeds.git@d5cf93df28ec5f18094475c6633d0a596dc6119b transactions: https://mumbai.polygonscan.com/tx/0x9d3ee50e5547bbb58e5f9cba6245afd2f1e5484f705f2be3589f00a30eef3625 https://mumbai.polygonscan.com/tx/0x2011c670dce3e104fb29a08b872236dd57aa016ffacb24d73589242bbf0248ca
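The patch above addresses this by constructing a fresh `TellorRNGManualSource` inside `assemble_rng_datafeed` rather than mutating the module-level `local_source` shared by every call. A toy reproduction of that shared-singleton pitfall (invented class, not telliot code):
```python
# Toy model of the shared-singleton bug: one module-level source keeps
# stale state across feeds, so every report re-uses the first value.
from dataclasses import dataclass, field
from typing import List


@dataclass
class FakeManualSource:
    history: List[int] = field(default_factory=list)

    def set_timestamp(self, ts: int) -> None:
        # Stand-in for "derive a value once and cache it".
        if not self.history:
            self.history.append(ts % 97)

    def latest(self) -> int:
        return self.history[-1]


shared = FakeManualSource()  # module-level, like the old local_source


def assemble_feed_buggy(ts: int) -> FakeManualSource:
    shared.set_timestamp(ts)  # mutates the shared instance
    return shared


def assemble_feed_fixed(ts: int) -> FakeManualSource:
    source = FakeManualSource()  # fresh state per call, as in the patch
    source.set_timestamp(ts)
    return source


print([assemble_feed_buggy(t).latest() for t in (100, 200, 300)])  # [3, 3, 3]
print([assemble_feed_fixed(t).latest() for t in (100, 200, 300)])  # [3, 6, 9]
```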
2023-03-09T18:19:42
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-609
d5cf93df28ec5f18094475c6633d0a596dc6119b
diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index bd409067..e03c073d 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -1,4 +1,3 @@ -import getpass from typing import Any from typing import Optional @@ -99,7 +98,6 @@ def reporter() -> None: help="use custom gas limit", nargs=1, type=int, - default=350000, ) @click.option( "--max-fee", @@ -107,7 +105,7 @@ def reporter() -> None: "max_fee", help="use custom maxFeePerGas (gwei)", nargs=1, - type=int, + type=float, required=False, ) @click.option( @@ -116,7 +114,7 @@ def reporter() -> None: "priority_fee", help="use custom maxPriorityFeePerGas (gwei)", nargs=1, - type=int, + type=float, required=False, ) @click.option( @@ -148,19 +146,7 @@ def reporter() -> None: type=click.UNPROCESSED, required=False, callback=valid_transaction_type, - default=0, -) [email protected]( - "--gas-price-speed", - "-gps", - "gas_price_speed", - help="gas price speed for eth gas station API", - nargs=1, - type=click.Choice( - ["safeLow", "average", "fast", "fastest"], - case_sensitive=True, - ), - default="fast", + default=2, ) @click.option( "-wp", @@ -279,6 +265,15 @@ def reporter() -> None: default=False, help="Reporter will use a random datafeed from the catalog.", ) [email protected]( + "--gas-multiplier", + "-gm", + "gas_multiplier", + help="increase gas price by this percentage (default 1%) ie 5 = 5%", + nargs=1, + type=int, + default=1, # 1% above the gas price by web3 +) @click.option("--rng-auto/--rng-auto-off", default=False) @click.option("--submit-once/--submit-continuous", default=False) @click.option("-pwd", "--password", type=str) @@ -291,13 +286,12 @@ async def report( build_feed: bool, tx_type: int, gas_limit: int, - max_fee: Optional[int], - priority_fee: Optional[int], + max_fee: Optional[float], + priority_fee: Optional[float], legacy_gas_price: Optional[int], expected_profit: str, submit_once: bool, wait_period: int, - gas_price_speed: str, reporting_diva_protocol: bool, diva_diamond_address: Optional[str], diva_middleware_address: Optional[str], @@ -315,6 +309,7 @@ async def report( signature_account: str, check_rewards: bool, use_random_feeds: bool, + gas_multiplier: int, ) -> None: """Report values to Tellor oracle""" ctx.obj["ACCOUNT_NAME"] = account_str @@ -325,14 +320,9 @@ async def report( return ctx.obj["CHAIN_ID"] = accounts[0].chains[0] # used in reporter_cli_core - - if signature_account is not None: - try: - if not signature_password: - signature_password = getpass.getpass(f"Enter password for {signature_account} keyfile: ") - except ValueError: - click.echo("Invalid Password") - + # if max_fee flag is set, then priority_fee must also be set + if (max_fee is not None and priority_fee is None) or (max_fee is None and priority_fee is not None): + raise click.UsageError("Must specify both max fee and priority fee") # Initialize telliot core app using CLI context async with reporter_cli_core(ctx) as core: @@ -353,7 +343,7 @@ async def report( if signature_account is not None: sig_account = find_accounts(name=signature_account)[0] if not sig_account.is_unlocked: - sig_account.unlock(password) + sig_account.unlock(signature_password) sig_acct_addr = to_checksum_address(sig_account.address) else: sig_acct_addr = "" @@ -393,7 +383,6 @@ async def report( legacy_gas_price=legacy_gas_price, expected_profit=expected_profit, chain_id=core.config.main.chain_id, - gas_price_speed=gas_price_speed, 
reporting_diva_protocol=reporting_diva_protocol, stake_amount=stake, min_native_token_balance=min_native_token_balance, @@ -447,7 +436,6 @@ async def report( "max_fee": max_fee, "priority_fee": priority_fee, "legacy_gas_price": legacy_gas_price, - "gas_price_speed": gas_price_speed, "chain_id": core.config.main.chain_id, "wait_period": wait_period, "oracle": contracts.oracle, @@ -459,6 +447,7 @@ async def report( "min_native_token_balance": int(min_native_token_balance * 10**18), "check_rewards": check_rewards, "use_random_feeds": use_random_feeds, + "gas_multiplier": gas_multiplier, } if sig_acct_addr: diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index 36009592..683a3ef3 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -29,13 +29,12 @@ def print_reporter_settings( signature_address: str, query_tag: str, gas_limit: int, - priority_fee: Optional[int], + priority_fee: Optional[float], expected_profit: str, chain_id: int, - max_fee: Optional[int], + max_fee: Optional[float], transaction_type: int, legacy_gas_price: Optional[int], - gas_price_speed: str, reporting_diva_protocol: bool, stake_amount: float, min_native_token_balance: float, @@ -66,7 +65,6 @@ def print_reporter_settings( click.echo(f"Legacy gas price (gwei): {legacy_gas_price}") click.echo(f"Max fee (gwei): {max_fee}") click.echo(f"Priority fee (gwei): {priority_fee}") - click.echo(f"Gas price speed: {gas_price_speed}") click.echo(f"Desired stake amount: {stake_amount}") click.echo(f"Minimum native token balance (e.g. ETH if on Ethereum mainnet): {min_native_token_balance}") click.echo("\n") diff --git a/src/telliot_feeds/integrations/diva_protocol/report.py b/src/telliot_feeds/integrations/diva_protocol/report.py index e708fbfe..675fe0ed 100644 --- a/src/telliot_feeds/integrations/diva_protocol/report.py +++ b/src/telliot_feeds/integrations/diva_protocol/report.py @@ -146,13 +146,14 @@ async def set_final_ref_value(self, pool_id: int, gas_price: int) -> ResponseSta async def settle_pool(self, pool_id: int) -> ResponseStatus: """Settle pool""" if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(self.gas_price_speed) + gas_price = await self.fetch_gas_price() if not gas_price: msg = "Unable to fetch gas price for tx type 0" return error_status(note=msg, log=logger.warning) else: gas_price = self.legacy_gas_price + gas_price = int(gas_price) if gas_price >= 1 else 1 status = await self.set_final_ref_value(pool_id=pool_id, gas_price=gas_price) if status is not None and status.ok: logger.info(f"Pool {pool_id} settled.") @@ -267,24 +268,27 @@ async def report_once( _nonce=report_count, _queryData=query_data, ) + # Estimate gas usage amount + gas_limit, status = self.submit_val_tx_gas_limit(submit_val_tx=submit_val_tx) + if not status.ok or gas_limit is None: + return None, status + acc_nonce, nonce_status = self.get_acct_nonce() if not nonce_status.ok: return None, nonce_status # Add transaction type 2 (EIP-1559) data if self.transaction_type == 2: - logger.info(f"maxFeePerGas: {self.max_fee}") - logger.info(f"maxPriorityFeePerGas: {self.priority_fee}") + priority_fee, max_fee = self.get_fee_info() + if priority_fee is None or max_fee is None: + return None, error_status("Unable to suggest type 2 txn fees", log=logger.error) built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "maxFeePerGas": Web3.toWei(self.max_fee, "gwei"), # type: ignore - # TODO: Investigate more why etherscan txs using Flashbots 
have - # the same maxFeePerGas and maxPriorityFeePerGas. Example: - # https://etherscan.io/tx/0x0bd2c8b986be4f183c0a2667ef48ab1d8863c59510f3226ef056e46658541288 # noqa: E501 - "maxPriorityFeePerGas": Web3.toWei(self.priority_fee, "gwei"), # noqa: E501 + "gas": gas_limit, + "maxFeePerGas": Web3.toWei(max_fee, "gwei"), + "maxPriorityFeePerGas": Web3.toWei(priority_fee, "gwei"), "chainId": self.chain_id, } ) @@ -292,8 +296,8 @@ async def report_once( else: # Fetch legacy gas price if not provided by user if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(self.gas_price_speed) - if not gas_price: + gas_price = await self.fetch_gas_price() + if gas_price is None: note = "Unable to fetch gas price for tx type 0" return None, error_status(note, log=logger.warning) else: @@ -302,7 +306,7 @@ async def report_once( built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, + "gas": gas_limit, "gasPrice": Web3.toWei(gas_price, "gwei"), "chainId": self.chain_id, } diff --git a/src/telliot_feeds/reporters/custom_reporter.py b/src/telliot_feeds/reporters/custom_reporter.py index e4813270..76fc7d39 100644 --- a/src/telliot_feeds/reporters/custom_reporter.py +++ b/src/telliot_feeds/reporters/custom_reporter.py @@ -10,7 +10,6 @@ from telliot_core.utils.key_helpers import lazy_unlock_account from telliot_core.utils.response import error_status from telliot_core.utils.response import ResponseStatus -from web3 import Web3 from web3.datastructures import AttributeDict from telliot_feeds.datafeed import DataFeed @@ -40,11 +39,10 @@ def __init__( datafeed: Optional[DataFeed[Any]] = None, expected_profit: Union[str, float] = 100.0, transaction_type: int = 0, - gas_limit: int = 350000, - max_fee: Optional[int] = None, - priority_fee: int = 5, + gas_limit: Optional[int] = None, + max_fee: Optional[float] = None, + priority_fee: Optional[float] = None, legacy_gas_price: Optional[int] = None, - gas_price_speed: str = "fast", ) -> None: self.endpoint = endpoint @@ -61,10 +59,10 @@ def __init__( self.max_fee = max_fee self.priority_fee = priority_fee self.legacy_gas_price = legacy_gas_price - self.gas_price_speed = [gas_price_speed] self.trb_usd_median_feed = trb_usd_median_feed self.eth_usd_median_feed = eth_usd_median_feed self.custom_contract = custom_contract + self.gas_info: dict[str, Union[float, int]] = {} async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: """Make sure the current user is staked @@ -147,20 +145,12 @@ async def report_once( return None, error_status(note=msg, log=logger.info) logger.info(f"Current query: {datafeed.query.descriptor}") - - status = await self.ensure_profitable(datafeed) - if not status.ok: - return None, status - - status = ResponseStatus() - # Update datafeed value await datafeed.source.fetch_new_datapoint() latest_data = datafeed.source.latest if latest_data[0] is None: msg = "Unable to retrieve updated datafeed value." 
return None, error_status(msg, log=logger.info) - # Get query info & encode value to bytes query = datafeed.query query_id = query.query_id @@ -170,16 +160,13 @@ async def report_once( except Exception as e: msg = f"Error encoding response value {latest_data[0]}" return None, error_status(msg, e=e, log=logger.error) - # Get nonce report_count, read_status = await self.get_num_reports_by_id(query_id) - if not read_status.ok: status.error = "Unable to retrieve report count: " + read_status.error # error won't be none # noqa: E501 logger.error(status.error) status.e = read_status.e return None, status - # Start transaction build submit_val_func = self.custom_contract.contract.get_function_by_name("submitValue") submit_val_tx = submit_val_func( @@ -188,24 +175,38 @@ async def report_once( _nonce=report_count, _queryData=query_data, ) + # Estimate gas usage amount + gas_limit, status = self.submit_val_tx_gas_limit(submit_val_tx=submit_val_tx) + if not status.ok or gas_limit is None: + return None, status + # Set gas limit to dict + self.gas_info["gas_limit"] = gas_limit + acc_nonce, nonce_status = self.get_acct_nonce() if not nonce_status.ok: return None, nonce_status # Add transaction type 2 (EIP-1559) data if self.transaction_type == 2: - logger.info(f"maxFeePerGas: {self.max_fee}") - logger.info(f"maxPriorityFeePerGas: {self.priority_fee}") + priority_fee, max_fee = self.get_fee_info() + if priority_fee is None or max_fee is None: + return None, error_status("Unable to suggest type 2 txn fees", log=logger.error) + + # Set gas price to max fee used for profitability check + self.gas_info["type"] = 2 + self.gas_info["max_fee"] = max_fee + self.gas_info["priority_fee"] = priority_fee + self.gas_info["base_fee"] = max_fee - priority_fee built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "maxFeePerGas": Web3.toWei(self.max_fee, "gwei"), # type: ignore + "gas": gas_limit, + "maxFeePerGas": self.web3.toWei(max_fee, "gwei"), # TODO: Investigate more why etherscan txs using Flashbots have # the same maxFeePerGas and maxPriorityFeePerGas. 
Example: # https://etherscan.io/tx/0x0bd2c8b986be4f183c0a2667ef48ab1d8863c59510f3226ef056e46658541288 # noqa: E501 - "maxPriorityFeePerGas": Web3.toWei(self.priority_fee, "gwei"), # noqa: E501 + "maxPriorityFeePerGas": self.web3.toWei(priority_fee, "gwei"), # noqa: E501 "chainId": self.chain_id, } ) @@ -213,22 +214,31 @@ async def report_once( else: # Fetch legacy gas price if not provided by user if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(self.gas_price_speed) + gas_price = await self.fetch_gas_price() if not gas_price: note = "Unable to fetch gas price for tx type 0" return None, error_status(note, log=logger.warning) else: gas_price = self.legacy_gas_price + self.gas_info["type"] = 0 + self.gas_info["gas_price"] = gas_price + built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "gasPrice": Web3.toWei(gas_price, "gwei"), + "gas": gas_limit, + "gasPrice": self.web3.toWei(gas_price, "gwei"), "chainId": self.chain_id, } ) + status = await self.ensure_profitable(datafeed) + if not status.ok: + return None, status + + status = ResponseStatus() + lazy_unlock_account(self.account) local_account = self.account.local_account tx_signed = local_account.sign_transaction(built_submit_val_tx) @@ -238,14 +248,14 @@ async def report_once( try: logger.debug("Sending submitValue transaction") - tx_hash = self.endpoint._web3.eth.send_raw_transaction(tx_signed.rawTransaction) + tx_hash = self.web3.eth.send_raw_transaction(tx_signed.rawTransaction) except Exception as e: note = "Send transaction failed" return None, error_status(note, log=logger.error, e=e) try: # Confirm transaction - tx_receipt = self.endpoint._web3.eth.wait_for_transaction_receipt(tx_hash, timeout=360) + tx_receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash, timeout=360) tx_url = f"{self.endpoint.explorer}/tx/{tx_hash.hex()}" diff --git a/src/telliot_feeds/reporters/flashbot.py b/src/telliot_feeds/reporters/flashbot.py index c87bb694..36e466bb 100644 --- a/src/telliot_feeds/reporters/flashbot.py +++ b/src/telliot_feeds/reporters/flashbot.py @@ -65,12 +65,6 @@ async def report_once( logger.info(f"Current query: {datafeed.query.descriptor}") - status = await self.ensure_profitable(datafeed) - if not status.ok: - return None, status - - status = ResponseStatus() - # Update datafeed value await datafeed.source.fetch_new_datapoint() latest_data = datafeed.source.latest @@ -104,38 +98,70 @@ async def report_once( _nonce=timestamp_count, _queryData=query_data, ) + # Estimate gas usage amount + gas_limit, status = self.submit_val_tx_gas_limit(submit_val_tx=submit_val_tx) + if not status.ok or gas_limit is None: + return None, status + + self.gas_info["gas_limit"] = gas_limit + # Get account nonce acc_nonce, nonce_status = self.get_acct_nonce() if not nonce_status.ok: return None, nonce_status # Add transaction type 2 (EIP-1559) data if self.transaction_type == 2: - logger.info(f"maxFeePerGas: {self.max_fee}") - logger.info(f"maxPriorityFeePerGas: {self.priority_fee}") + priority_fee, max_fee = self.get_fee_info() + if priority_fee is None or max_fee is None: + return None, error_status("Unable to suggest type 2 txn fees", log=logger.error) + + logger.info(f"maxFeePerGas: {max_fee}") + logger.info(f"maxPriorityFeePerGas: {priority_fee}") + + # Set gas price to max fee used for profitability check + self.gas_info["type"] = 2 + self.gas_info["max_fee"] = max_fee + self.gas_info["priority_fee"] = priority_fee + self.gas_info["base_fee"] = max_fee - priority_fee 
built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "maxFeePerGas": Web3.toWei(self.max_fee, "gwei"), # type: ignore + "gas": gas_limit, + "maxFeePerGas": Web3.toWei(max_fee, "gwei"), # TODO: Investigate more why etherscan txs using Flashbots have # the same maxFeePerGas and maxPriorityFeePerGas. Example: # https://etherscan.io/tx/0x0bd2c8b986be4f183c0a2667ef48ab1d8863c59510f3226ef056e46658541288 # noqa: E501 - "maxPriorityFeePerGas": Web3.toWei(self.priority_fee, "gwei"), # noqa: E501 + "maxPriorityFeePerGas": Web3.toWei(priority_fee, "gwei"), # noqa: E501 "chainId": self.chain_id, } ) # Add transaction type 0 (legacy) data else: + if not self.legacy_gas_price: + gas_price = await self.fetch_gas_price() + if gas_price is None: + note = "Unable to fetch gas price for tx type 0" + return None, error_status(note, log=logger.warning) + else: + gas_price = self.legacy_gas_price + + self.gas_info["type"] = 0 + self.gas_info["gas_price"] = gas_price built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "gasPrice": Web3.toWei(self.legacy_gas_price, "gwei"), # type: ignore + "gas": gas_limit, + "gasPrice": Web3.toWei(gas_price, "gwei"), "chainId": self.chain_id, } ) + status = await self.ensure_profitable(datafeed) + if not status.ok: + return None, status + status = ResponseStatus() + submit_val_tx_signed = self.account.sign_transaction(built_submit_val_tx) # type: ignore # Create bundle of one pre-signed, EIP-1559 (type 2) transaction diff --git a/src/telliot_feeds/reporters/interval.py b/src/telliot_feeds/reporters/interval.py index eee7742d..ff2fdb89 100644 --- a/src/telliot_feeds/reporters/interval.py +++ b/src/telliot_feeds/reporters/interval.py @@ -11,23 +11,23 @@ from chained_accounts import ChainedAccount from eth_utils import to_checksum_address from telliot_core.contract.contract import Contract -from telliot_core.gas.legacy_gas import legacy_gas_station from telliot_core.model.endpoints import RPCEndpoint from telliot_core.utils.key_helpers import lazy_unlock_account from telliot_core.utils.response import error_status from telliot_core.utils.response import ResponseStatus -from web3 import Web3 +from web3._utils.fee_utils import _fee_history_priority_fee_estimate +from web3.contract import ContractFunction from web3.datastructures import AttributeDict from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import CATALOG_FEEDS from telliot_feeds.feeds.eth_usd_feed import eth_usd_median_feed from telliot_feeds.feeds.trb_usd_feed import trb_usd_median_feed -from telliot_feeds.sources.etherscan_gas import EtherscanGasPriceSource from telliot_feeds.utils.log import get_logger from telliot_feeds.utils.reporter_utils import has_native_token_funds from telliot_feeds.utils.reporter_utils import is_online from telliot_feeds.utils.reporter_utils import tellor_suggested_report +from telliot_feeds.utils.reporter_utils import tkn_symbol logger = get_logger(__name__) @@ -47,11 +47,11 @@ def __init__( datafeed: Optional[DataFeed[Any]] = None, expected_profit: Union[str, float] = 100.0, transaction_type: int = 0, - gas_limit: int = 350000, - max_fee: Optional[int] = None, - priority_fee: int = 5, + gas_limit: Optional[int] = None, + max_fee: Optional[float] = None, + priority_fee: Optional[float] = None, legacy_gas_price: Optional[int] = None, - gas_price_speed: Union[tuple[str], str] = "fast", + gas_multiplier: int = 1, wait_period: int = 10, min_native_token_balance: int = 10**18, ) -> 
None: @@ -70,11 +70,14 @@ def __init__( self.max_fee = max_fee self.priority_fee = priority_fee self.legacy_gas_price = legacy_gas_price - self.gas_price_speed = [gas_price_speed] + self.gas_multiplier = gas_multiplier self.trb_usd_median_feed = trb_usd_median_feed self.eth_usd_median_feed = eth_usd_median_feed self.wait_period = wait_period self.min_native_token_balance = min_native_token_balance + self.web3 = self.endpoint._web3 + + self.gas_info: dict[str, Union[float, int]] = {} logger.info(f"Reporting with account: {self.acct_addr}") @@ -108,9 +111,24 @@ async def check_reporter_lock(self) -> ResponseStatus: return status - async def fetch_gas_price(self, speed: Optional[Any] = None) -> Optional[int]: - """Fetch gas price from ethgasstation in gwei.""" - return await legacy_gas_station(chain_id=self.chain_id, speed_parse_lis=speed) # type: ignore + async def fetch_gas_price(self) -> Optional[float]: + """Fetches the current gas price from an EVM network and returns + an adjusted gas price. + + Returns: + An optional integer representing the adjusted gas price in wei, or + None if the gas price could not be retrieved. + """ + try: + price = self.web3.eth.gas_price + priceGwei = self.web3.fromWei(price, "gwei") + except Exception as e: + logger.error(f"Error fetching gas price: {e}") + return None + # increase gas price by 1.0 + gas_multiplier + multiplier = 1.0 + (self.gas_multiplier / 100.0) + gas_price = (float(priceGwei) * multiplier) if priceGwei else None + return gas_price async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: """Make sure the current user is staked @@ -168,10 +186,7 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: msg = "Current address is locked in dispute or for withdrawal." # noqa: E501 return False, error_status(msg, log=logger.info) - async def ensure_profitable( - self, - datafeed: DataFeed[Any], - ) -> ResponseStatus: + async def ensure_profitable(self, datafeed: DataFeed[Any]) -> ResponseStatus: """Estimate profitability Returns a bool signifying whether submitting for a given @@ -205,63 +220,44 @@ async def ensure_profitable( tips, tb_reward = rewards - # Using transaction type 2 (EIP-1559) - if self.transaction_type == 2: - fee_info = await self.get_fee_info() - base_fee = fee_info[0].suggestBaseFee - - # No miner tip provided by user - if self.priority_fee is None: - # From etherscan docs: - # "Safe/Proposed/Fast gas price recommendations are now modeled as Priority Fees." 
# noqa: E501 - # Source: https://docs.etherscan.io/api-endpoints/gas-tracker - priority_fee = fee_info[0].SafeGasPrice - self.priority_fee = priority_fee - - if self.max_fee is None: - # From Alchemy docs: - # "maxFeePerGas = baseFeePerGas + maxPriorityFeePerGas" - # Source: https://docs.alchemy.com/alchemy/guides/eip-1559/maxpriorityfeepergas-vs-maxfeepergas # noqa: E501 - self.max_fee = self.priority_fee + base_fee + if not self.gas_info: + return error_status("Gas info not set", log=logger.warning) + + gas_info = self.gas_info + txn_fee = gas_info["gas_price"] * gas_info["gas_limit"] + if gas_info["type"] == 0: + txn_fee = gas_info["gas_price"] * gas_info["gas_limit"] logger.info( f""" - tips: {tips / 1e18} TRB - time-based reward: {tb_reward / 1e18} TRB - gas limit: {self.gas_limit} - base fee: {base_fee} - priority fee: {self.priority_fee} - max fee: {self.max_fee} + + Tips: {tips / 1e18} + Time-based reward: {tb_reward / 1e18} TRB + Transaction fee: {self.web3.fromWei(txn_fee, 'gwei'):.09f} {tkn_symbol(self.chain_id)} + Gas price: {gas_info["gas_price"]} gwei + Gas limit: {gas_info["gas_limit"]} + Txn type: 0 (Legacy) """ ) - - costs = self.gas_limit * self.max_fee - - # Using transaction type 0 (legacy) - else: - # Fetch legacy gas price if not provided by user - if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(speed=self.gas_price_speed) - self.legacy_gas_price = gas_price - - if not self.legacy_gas_price: - note = "Unable to fetch gas price for tx type 0" - return error_status(note, log=logger.warning) - + if gas_info["type"] == 2: + txn_fee = gas_info["max_fee"] * gas_info["gas_limit"] logger.info( f""" - tips: {tips / 1e18} TRB - time-based reward: {tb_reward / 1e18} TRB - gas limit: {self.gas_limit} - legacy gas price: {self.legacy_gas_price} + + Tips: {tips / 1e18} + Time-based reward: {tb_reward / 1e18} TRB + Max transaction fee: {self.web3.fromWei(txn_fee, 'gwei')} {tkn_symbol(self.chain_id)} + Max fee per gas: {gas_info["max_fee"]} gwei + Max priority fee per gas: {gas_info["priority_fee"]} gwei + Gas limit: {gas_info["gas_limit"]} + Txn type: 2 (EIP-1559) """ ) - costs = self.gas_limit * self.legacy_gas_price # Calculate profit revenue = tb_reward + tips rev_usd = revenue / 1e18 * price_trb_usd - costs_usd = costs / 1e9 * price_eth_usd + costs_usd = txn_fee / 1e9 * price_eth_usd profit_usd = rev_usd - costs_usd logger.info(f"Estimated profit: ${round(profit_usd, 2)}") @@ -276,12 +272,26 @@ async def ensure_profitable( return status - async def get_fee_info(self) -> Any: - """Fetch fee into from Etherscan API. 
- Source: https://etherscan.io/apis""" - c = EtherscanGasPriceSource() - result = await c.fetch_new_datapoint() - return result + def get_fee_info(self) -> Tuple[Optional[float], Optional[float]]: + """Calculate max fee and priority fee if not set + for more info: + https://web3py.readthedocs.io/en/v5/web3.eth.html?highlight=fee%20history#web3.eth.Eth.fee_history + """ + if self.max_fee is None: + try: + fee_history = self.web3.eth.fee_history( + block_count=5, newest_block="latest", reward_percentiles=[25, 50, 75] + ) + # "base fee for the next block after the newest of the returned range" + base_fee = fee_history.baseFeePerGas[-1] / 1e9 + # estimate priority fee from fee history + priority_fee = _fee_history_priority_fee_estimate(fee_history) / 1e9 + max_fee = base_fee + priority_fee + return priority_fee, max_fee + except Exception as e: + logger.warning(f"Error in calculating gas fees: {e}") + return None, None + return self.priority_fee, self.max_fee async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: if self.datafeed is None: @@ -301,17 +311,36 @@ async def is_online(self) -> bool: return await is_online() def has_native_token(self) -> bool: - return has_native_token_funds(self.acct_addr, self.endpoint._web3, min_balance=self.min_native_token_balance) + """Check if account has native token funds for a network for gas fees + of at least min_native_token_balance that is set in the cli""" + return has_native_token_funds(self.acct_addr, self.web3, min_balance=self.min_native_token_balance) def get_acct_nonce(self) -> tuple[Optional[int], ResponseStatus]: """Get transaction count for an address""" try: - return self.endpoint._web3.eth.get_transaction_count(self.acct_addr), ResponseStatus() + return self.web3.eth.get_transaction_count(self.acct_addr), ResponseStatus() except ValueError as e: return None, error_status("Account nonce request timed out", e=e, log=logger.warning) except Exception as e: return None, error_status("Unable to retrieve account nonce", e=e, log=logger.error) + # Estimate gas usage and set the gas limit if not provided + def submit_val_tx_gas_limit(self, submit_val_tx: ContractFunction) -> tuple[Optional[int], ResponseStatus]: + """Estimate gas usage for submitValue transaction + Args: + submit_val_tx: The submitValue transaction object + Returns a tuple of the gas limit and a ResponseStatus object""" + if self.gas_limit is None: + try: + gas_limit: int = submit_val_tx.estimateGas({"from": self.acct_addr}) + if not gas_limit: + return None, error_status("Unable to estimate gas for submitValue transaction") + return gas_limit, ResponseStatus() + except Exception as e: + msg = "Unable to estimate gas for submitValue transaction" + return None, error_status(msg, e=e, log=logger.error) + return self.gas_limit, ResponseStatus() + async def report_once( self, ) -> Tuple[Optional[AttributeDict[Any, Any]], ResponseStatus]: @@ -338,12 +367,6 @@ async def report_once( logger.info(f"Current query: {datafeed.query.descriptor}") - status = await self.ensure_profitable(datafeed) - if not status.ok: - return None, status - - status = ResponseStatus() - # Update datafeed value await datafeed.source.fetch_new_datapoint() latest_data = datafeed.source.latest @@ -379,24 +402,36 @@ async def report_once( _nonce=report_count, _queryData=query_data, ) + # Estimate gas usage amount + gas_limit, status = self.submit_val_tx_gas_limit(submit_val_tx=submit_val_tx) + if not status.ok or gas_limit is None: + return None, status + + self.gas_info["gas_limit"] = gas_limit + # Get account 
nonce acc_nonce, nonce_status = self.get_acct_nonce() if not nonce_status.ok: return None, nonce_status - # Add transaction type 2 (EIP-1559) data if self.transaction_type == 2: - logger.info(f"maxFeePerGas: {self.max_fee}") - logger.info(f"maxPriorityFeePerGas: {self.priority_fee}") + priority_fee, max_fee = self.get_fee_info() + if priority_fee is None or max_fee is None: + return None, error_status("Unable to suggest type 2 txn fees", log=logger.error) + # Set gas price to max fee used for profitability check + self.gas_info["type"] = 2 + self.gas_info["max_fee"] = max_fee + self.gas_info["priority_fee"] = priority_fee + self.gas_info["base_fee"] = max_fee - priority_fee built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "maxFeePerGas": Web3.toWei(self.max_fee, "gwei"), # type: ignore + "gas": gas_limit, + "maxFeePerGas": self.web3.toWei(max_fee, "gwei"), # TODO: Investigate more why etherscan txs using Flashbots have # the same maxFeePerGas and maxPriorityFeePerGas. Example: # https://etherscan.io/tx/0x0bd2c8b986be4f183c0a2667ef48ab1d8863c59510f3226ef056e46658541288 # noqa: E501 - "maxPriorityFeePerGas": Web3.toWei(self.priority_fee, "gwei"), # noqa: E501 + "maxPriorityFeePerGas": self.web3.toWei(priority_fee, "gwei"), # noqa: E501 "chainId": self.chain_id, } ) @@ -404,22 +439,30 @@ async def report_once( else: # Fetch legacy gas price if not provided by user if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(self.gas_price_speed) + gas_price = await self.fetch_gas_price() if not gas_price: note = "Unable to fetch gas price for tx type 0" return None, error_status(note, log=logger.warning) + else: gas_price = self.legacy_gas_price - + # Set gas price to legacy gas price used for profitability check + self.gas_info["type"] = 0 + self.gas_info["gas_price"] = gas_price built_submit_val_tx = submit_val_tx.buildTransaction( { "nonce": acc_nonce, - "gas": self.gas_limit, - "gasPrice": Web3.toWei(gas_price, "gwei"), + "gas": gas_limit, + "gasPrice": self.web3.toWei(gas_price, "gwei"), "chainId": self.chain_id, } ) + # Check if profitable if not YOLO + status = await self.ensure_profitable(datafeed) + if not status.ok: + return None, status + lazy_unlock_account(self.account) local_account = self.account.local_account tx_signed = local_account.sign_transaction(built_submit_val_tx) @@ -429,14 +472,14 @@ async def report_once( try: logger.debug("Sending submitValue transaction") - tx_hash = self.endpoint._web3.eth.send_raw_transaction(tx_signed.rawTransaction) + tx_hash = self.web3.eth.send_raw_transaction(tx_signed.rawTransaction) except Exception as e: note = "Send transaction failed" return None, error_status(note, log=logger.error, e=e) try: # Confirm transaction - tx_receipt = self.endpoint._web3.eth.wait_for_transaction_receipt(tx_hash, timeout=360) + tx_receipt = self.web3.eth.wait_for_transaction_receipt(tx_hash, timeout=360) tx_url = f"{self.endpoint.explorer}/tx/{tx_hash.hex()}" diff --git a/src/telliot_feeds/reporters/rng_interval.py b/src/telliot_feeds/reporters/rng_interval.py index c53fa3ab..f6cb9ead 100644 --- a/src/telliot_feeds/reporters/rng_interval.py +++ b/src/telliot_feeds/reporters/rng_interval.py @@ -1,7 +1,6 @@ """TellorRNG auto submitter. 
submits TellorRNG values at a fixed time interval """ -import asyncio import calendar import time from typing import Any @@ -11,9 +10,7 @@ from telliot_core.utils.response import ResponseStatus from telliot_feeds.datafeed import DataFeed -from telliot_feeds.feeds.matic_usd_feed import matic_usd_median_feed from telliot_feeds.feeds.tellor_rng_feed import assemble_rng_datafeed -from telliot_feeds.feeds.trb_usd_feed import trb_usd_median_feed from telliot_feeds.queries.tellor_rng import TellorRNG from telliot_feeds.reporters.reporter_autopay_utils import get_feed_tip from telliot_feeds.reporters.tellor_360 import Tellor360Reporter @@ -79,83 +76,5 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: error_status(msg, log=logger.warning) return None tip += feed_tip - - # Fetch token prices in USD - price_feeds = [matic_usd_median_feed, trb_usd_median_feed] - _ = await asyncio.gather(*[feed.source.fetch_new_datapoint() for feed in price_feeds]) - price_matic_usd = matic_usd_median_feed.source.latest[0] - price_trb_usd = trb_usd_median_feed.source.latest[0] - if price_matic_usd is None or price_trb_usd is None: - msg = "Unable to fetch token prices" - error_status(msg, log=logger.warning) - return None - - # Using transaction type 2 (EIP-1559) - if self.transaction_type == 2: - fee_info = await self.get_fee_info() - base_fee = fee_info[0].suggestBaseFee - - # No miner tip provided by user - if self.priority_fee is None: - # From etherscan docs: - # "Safe/Proposed/Fast gas price recommendations are now modeled as Priority Fees." # noqa: E501 - # Source: https://docs.etherscan.io/api-endpoints/gas-tracker - priority_fee = fee_info[0].SafeGasPrice - self.priority_fee = priority_fee - - if self.max_fee is None: - # From Alchemy docs: - # "maxFeePerGas = baseFeePerGas + maxPriorityFeePerGas" - # Source: https://docs.alchemy.com/alchemy/guides/eip-1559/maxpriorityfeepergas-vs-maxfeepergas # noqa: E501 - self.max_fee = self.priority_fee + base_fee - - logger.info( - f""" - tips: {tip} TRB - gas limit: {self.gas_limit} - base fee: {base_fee} - priority fee: {self.priority_fee} - max fee: {self.max_fee} - """ - ) - - costs = self.gas_limit * self.max_fee - - # Using transaction type 0 (legacy) - else: - # Fetch legacy gas price if not provided by user - if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price() - self.legacy_gas_price = gas_price - - if not self.legacy_gas_price: - note = "unable to fetch gas price from api" - error_status(note, log=logger.info) - return None - logger.info( - f""" - tips: {tip/1e18} TRB - gas limit: {self.gas_limit} - legacy gas price: {self.legacy_gas_price} - """ - ) - costs = self.gas_limit * self.legacy_gas_price - - # Calculate profit - rev_usd = tip / 1e18 * price_trb_usd - costs_usd = costs / 1e9 * price_matic_usd - profit_usd = rev_usd - costs_usd - logger.info(f"Estimated profit: ${round(profit_usd, 2)}") - logger.info(f"tip price: {round(rev_usd, 2)}, gas costs: {costs_usd}") - - percent_profit = ((profit_usd) / costs_usd) * 100 - logger.info(f"Estimated percent profit: {round(percent_profit, 2)}%") - if (self.expected_profit != "YOLO") and ( - isinstance(self.expected_profit, float) and percent_profit < self.expected_profit - ): - status.ok = False - status.error = "Estimated profitability below threshold." 
- logger.info(status.error) - return None - + logger.debug(f"Current tip for RNG query: {tip}") return datafeed diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py index b8d0eb7a..862b6e11 100644 --- a/src/telliot_feeds/reporters/tellor_360.py +++ b/src/telliot_feeds/reporters/tellor_360.py @@ -101,6 +101,7 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: logger.info( f""" + STAKER INFO start date: {staker_info[0]} stake_balance: {staker_info[1] / 1e18!r} @@ -126,10 +127,6 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: if self.stake_amount > account_staked_bal or (self.stake * 1e18) > account_staked_bal: logger.info("Approving and depositing stake...") - gas_price_gwei = await self.fetch_gas_price() - if gas_price_gwei is None: - return False, error_status("Unable to fetch gas price for staking", log=logger.info) - # amount to deposit whichever largest difference either chosen stake or stakeAmount to keep reporting stake_diff = max(int(self.stake_amount - account_staked_bal), int((self.stake * 1e18) - account_staked_bal)) @@ -145,27 +142,74 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: if stake_diff > wallet_balance: msg = "Not enough TRB in the account to cover the stake" return False, error_status(msg, log=logger.warning) - - txn_kwargs = {"gas_limit": self.gas_limit, "legacy_gas_price": gas_price_gwei} - # approve token spending - _, approve_status = await self.token.write( - func_name="approve", spender=self.oracle.address, amount=stake_diff, **txn_kwargs - ) - if not approve_status.ok: - msg = "Unable to approve staking" - return False, error_status(msg, log=logger.error) - # deposit stake - _, deposit_status = await self.oracle.write("depositStake", _amount=stake_diff, **txn_kwargs) - - if not deposit_status.ok: - msg = ( - "Unable to stake deposit: " - + deposit_status.error - + f"Make sure {self.acct_addr} has enough of the current chain's " - + "currency and the oracle's currency (TRB)" + if self.transaction_type == 2: + priority_fee, max_fee = self.get_fee_info() + if priority_fee is None or max_fee is None: + return False, error_status("Unable to suggest type 2 txn fees", log=logger.error) + # Approve token spending for a transaction type 2 + receipt, approve_status = await self.token.write( + func_name="approve", + gas_limit=self.gas_limit, + max_priority_fee_per_gas=priority_fee, + max_fee_per_gas=max_fee, + spender=self.oracle.address, + amount=stake_diff, + ) + if not approve_status.ok: + msg = "Unable to approve staking" + return False, error_status(msg, log=logger.error) + logger.debug(f"Approve transaction status: {receipt.status}, block: {receipt.blockNumber}") + # deposit stake for a transaction type 2 + _, deposit_status = await self.oracle.write( + func_name="depositStake", + gas_limit=self.gas_limit, + max_priority_fee_per_gas=priority_fee, + max_fee_per_gas=max_fee, + _amount=stake_diff, + ) + + if not deposit_status.ok: + msg = "Unable to deposit stake" + return False, error_status(msg, log=logger.error) + else: + # Fetch legacy gas price if not provided by user + if self.legacy_gas_price is None: + gas_price_in_gwei = await self.fetch_gas_price() + if not gas_price_in_gwei: + note = "Unable to fetch gas price for tx type 0" + return False, error_status(note, log=logger.warning) + else: + gas_price_in_gwei = self.legacy_gas_price + # Approve token spending for a transaction type 0 and deposit stake + receipt, approve_status = await self.token.write( + 
func_name="approve", + gas_limit=self.gas_limit, + legacy_gas_price=gas_price_in_gwei, + spender=self.oracle.address, + amount=stake_diff, + ) + if not approve_status.ok: + msg = "Unable to approve staking" + return False, error_status(msg, log=logger.error) + # Add this to avoid nonce error from txn happening too fast + time.sleep(1) + logger.debug(f"Approve transaction status: {receipt.status}, block: {receipt.blockNumber}") + # Deposit stake to oracle contract + _, deposit_status = await self.oracle.write( + func_name="depositStake", + gas_limit=self.gas_limit, + legacy_gas_price=gas_price_in_gwei, + _amount=stake_diff, ) - return False, error_status(msg, log=logger.error) + if not deposit_status.ok: + msg = ( + "Unable to stake deposit: " + + deposit_status.error + + f"Make sure {self.acct_addr} has enough of the current chain's " + + "currency and the oracle's currency (TRB)" + ) + return False, error_status(msg, log=logger.error) # add staked balance after successful stake deposit self.staker_info.stake_balance += stake_diff diff --git a/src/telliot_feeds/reporters/tellor_flex.py b/src/telliot_feeds/reporters/tellor_flex.py index d4c17471..10334822 100644 --- a/src/telliot_feeds/reporters/tellor_flex.py +++ b/src/telliot_feeds/reporters/tellor_flex.py @@ -11,7 +11,6 @@ from eth_abi.exceptions import EncodingTypeError from eth_utils import to_checksum_address from telliot_core.contract.contract import Contract -from telliot_core.gas.legacy_gas import legacy_gas_station from telliot_core.model.endpoints import RPCEndpoint from telliot_core.utils.response import error_status from telliot_core.utils.response import ResponseStatus @@ -26,6 +25,7 @@ from telliot_feeds.utils.log import get_logger from telliot_feeds.utils.reporter_utils import get_native_token_feed from telliot_feeds.utils.reporter_utils import tellor_suggested_report +from telliot_feeds.utils.reporter_utils import tkn_symbol logger = get_logger(__name__) @@ -46,11 +46,11 @@ def __init__( datafeed: Optional[DataFeed[Any]] = None, expected_profit: Union[str, float] = "YOLO", transaction_type: int = 2, - gas_limit: int = 350000, - max_fee: Optional[int] = None, - priority_fee: int = 100, + gas_limit: Optional[int] = None, + max_fee: int = 0, + priority_fee: float = 0.0, legacy_gas_price: Optional[int] = None, - gas_price_speed: Union[tuple[str], str] = ("safeLow",), + gas_multiplier: int = 1, # 1 percent wait_period: int = 7, min_native_token_balance: int = 10**18, check_rewards: bool = True, @@ -72,24 +72,20 @@ def __init__( self.wait_period = wait_period self.priority_fee = priority_fee self.legacy_gas_price = legacy_gas_price - self.gas_price_speed = [gas_price_speed] + self.gas_multiplier = gas_multiplier self.autopaytip = 0 self.staked_amount: Optional[float] = None self.qtag_selected = False if self.datafeed is None else True self.min_native_token_balance = min_native_token_balance self.check_rewards: bool = check_rewards + self.web3 = self.endpoint.web3 + self.gas_info: dict[str, Union[float, int]] = {} logger.info(f"Reporting with account: {self.acct_addr}") self.account: ChainedAccount = account assert self.acct_addr == to_checksum_address(self.account.address) - async def fetch_gas_price(self, speed: Optional[Any] = None) -> Optional[int]: - """Fetch estimated gas prices. 
- - Expected to return gas price in gwei.""" - return await legacy_gas_station(chain_id=self.chain_id, speed_parse_lis=speed) # type: ignore - async def in_dispute(self, new_stake_amount: Any) -> bool: """Check if staker balance decreased""" if self.staked_amount is not None and self.staked_amount > new_stake_amount: @@ -118,6 +114,7 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: logger.info( f""" + STAKER INFO start date: {staker_startdate} desired stake: {self.stake} @@ -267,77 +264,57 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: return self.datafeed return None - async def ensure_profitable( - self, - datafeed: DataFeed[Any], - ) -> ResponseStatus: + async def ensure_profitable(self, datafeed: DataFeed[Any]) -> ResponseStatus: + status = ResponseStatus() if not self.check_rewards: return status - tip = self.autopaytip + tip = self.autopaytip # Fetch token prices in USD native_token_feed = get_native_token_feed(self.chain_id) price_feeds = [native_token_feed, trb_usd_median_feed] _ = await asyncio.gather(*[feed.source.fetch_new_datapoint() for feed in price_feeds]) price_native_token = native_token_feed.source.latest[0] price_trb_usd = trb_usd_median_feed.source.latest[0] + if price_native_token is None or price_trb_usd is None: - logger.warning("Unable to fetch token price") - return None - - # Using transaction type 2 (EIP-1559) - if self.transaction_type == 2: - fee_info = await self.get_fee_info() - base_fee = fee_info[0].suggestBaseFee - - # No miner tip provided by user - if self.priority_fee is None: - # From etherscan docs: - # "Safe/Proposed/Fast gas price recommendations are now modeled as Priority Fees." # noqa: E501 - # Source: https://docs.etherscan.io/api-endpoints/gas-tracker - priority_fee = fee_info[0].SafeGasPrice - self.priority_fee = priority_fee - - if self.max_fee is None: - # From Alchemy docs: - # "maxFeePerGas = baseFeePerGas + maxPriorityFeePerGas" - # Source: https://docs.alchemy.com/alchemy/guides/eip-1559/maxpriorityfeepergas-vs-maxfeepergas # noqa: E501 - self.max_fee = self.priority_fee + base_fee + return error_status("Unable to fetch token price", log=logger.warning) + + if not self.gas_info: + return error_status("Gas info not set", log=logger.warning) + gas_info = self.gas_info + + if gas_info["type"] == 0: + txn_fee = gas_info["gas_price"] * gas_info["gas_limit"] logger.info( f""" - tips: {tip} TRB - gas limit: {self.gas_limit} - base fee: {base_fee} - priority fee: {self.priority_fee} - max fee: {self.max_fee} + + Tips: {tip/1e18} + Transaction fee: {self.web3.fromWei(txn_fee, 'gwei'):.09f} {tkn_symbol(self.chain_id)} + Gas price: {gas_info["gas_price"]} gwei + Gas limit: {gas_info["gas_limit"]} + Txn type: 0 (Legacy) """ ) - - costs = self.gas_limit * self.max_fee # in gwei - - # Using transaction type 0 (legacy) - else: - # Fetch legacy gas price if not provided by user - if not self.legacy_gas_price: - self.legacy_gas_price = await self.fetch_gas_price() - - if self.legacy_gas_price is None: - return error_status("Unable to fetch gas price", log=logger.warning) - + if gas_info["type"] == 2: + txn_fee = gas_info["max_fee"] * gas_info["gas_limit"] logger.info( f""" - tips: {tip/1e18} TRB - gas limit: {self.gas_limit} - legacy gas price: {self.legacy_gas_price} + + Tips: {tip/1e18} + Max transaction fee: {self.web3.fromWei(txn_fee, 'gwei'):.18f} {tkn_symbol(self.chain_id)} + Max fee per gas: {gas_info["max_fee"]} gwei + Max priority fee per gas: {gas_info["priority_fee"]} gwei + Gas limit: {gas_info["gas_limit"]} + 
Txn type: 2 (EIP-1559) """ ) - costs = self.gas_limit * self.legacy_gas_price # Calculate profit rev_usd = tip / 1e18 * price_trb_usd - costs_usd = costs / 1e9 * price_native_token # convert gwei costs to eth, then to usd + costs_usd = txn_fee / 1e9 * price_native_token # convert gwei costs to eth, then to usd profit_usd = rev_usd - costs_usd logger.info(f"Estimated profit: ${round(profit_usd, 2)}") logger.info(f"tip price: {round(rev_usd, 2)}, gas costs: {costs_usd}") diff --git a/src/telliot_feeds/reporters/tips/__init__.py b/src/telliot_feeds/reporters/tips/__init__.py index 959dcd91..564e3359 100644 --- a/src/telliot_feeds/reporters/tips/__init__.py +++ b/src/telliot_feeds/reporters/tips/__init__.py @@ -1,3 +1,4 @@ +import logging from typing import Optional from multicall.constants import MULTICALL2_ADDRESSES @@ -6,9 +7,8 @@ from multicall.constants import NO_STATE_OVERRIDE from telliot_feeds.queries.query_catalog import query_catalog -from telliot_feeds.utils.log import get_logger -logger = get_logger(__name__) +logger = logging.getLogger(__name__) # add testnet support for multicall that aren't avaialable in the package @@ -33,7 +33,7 @@ def add_multicall_support( else: MULTICALL3_ADDRESSES[attr] = multicall3_address else: - logger.warning(f"Network {network} already exists in multicall package") + print(f"Network {network} already exists in multicall package") add_multicall_support( diff --git a/src/telliot_feeds/utils/reporter_utils.py b/src/telliot_feeds/utils/reporter_utils.py index cbbc198b..e21a6955 100644 --- a/src/telliot_feeds/utils/reporter_utils.py +++ b/src/telliot_feeds/utils/reporter_utils.py @@ -168,3 +168,14 @@ def get_native_token_feed(chain_id: int) -> DataFeed[float]: return xdai_usd_median_feed else: raise ValueError(f"Cannot fetch native token feed. Invalid chain ID: {chain_id}") + + +def tkn_symbol(chain_id: int) -> str: + if chain_id in POLYGON_CHAINS: + return "MATIC" + elif chain_id in GNOSIS_CHAINS: + return "XDAI" + elif chain_id in ETHEREUM_CHAINS: + return "ETH" + else: + return "Unknown native token"
TellorflexReporter doesn't use gas price speed selected by user. Only default speeds!

When a user selects a gas price speed in the CLI, e.g. "safeLow", it's not passed to the TellorflexReporter class. The default is always used.
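For context on what the patch above feeds into: the refactored `ensure_profitable` derives a USD profit estimate from a `gas_info` dict instead of re-fetching gas prices per speed. A minimal sketch of that arithmetic, using the formulas from the diff; the tip, prices, and gas figures below are illustrative assumptions, not real data:

```python
# Illustrative numbers only; the formulas are the ones in the diff.
gas_info = {"type": 0, "gas_price": 40, "gas_limit": 350_000}  # gwei, gas units

tip = 1 * 10**18          # 1 TRB tip, 18-decimal units
price_trb_usd = 14.0      # assumed TRB/USD price
price_native_token = 0.8  # assumed native token (e.g. MATIC) USD price

txn_fee = gas_info["gas_price"] * gas_info["gas_limit"]  # total fee in gwei

rev_usd = tip / 1e18 * price_trb_usd
costs_usd = txn_fee / 1e9 * price_native_token  # gwei -> native token -> USD
profit_usd = rev_usd - costs_usd
percent_profit = profit_usd / costs_usd * 100
print(f"Estimated profit: ${profit_usd:.2f} ({percent_profit:.2f}%)")
```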
2023-03-01T13:45:24
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-586
91513444fbbcde6729d59b7e3c8ed1f0eeb38ce8
diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py index 8ae63b57..7f06d33d 100644 --- a/src/telliot_feeds/reporters/tellor_360.py +++ b/src/telliot_feeds/reporters/tellor_360.py @@ -6,6 +6,7 @@ from typing import Optional from typing import Tuple +from eth_abi.exceptions import EncodingTypeError from eth_utils import to_checksum_address from telliot_core.utils.response import error_status from telliot_core.utils.response import ResponseStatus @@ -198,7 +199,12 @@ async def rewards(self) -> int: total_rewards: int = 0 if self.datafeed is not None: - fetch_autopay_tip = await fetch_feed_tip(self.autopay, self.datafeed.query.query_id) + try: + qid = self.datafeed.query.query_id + fetch_autopay_tip = await fetch_feed_tip(self.autopay, qid) + except EncodingTypeError: + logger.warning(f"Unable to generate data/id for query: {self.datafeed.query}") + if fetch_autopay_tip is not None: total_rewards += fetch_autopay_tip diff --git a/src/telliot_feeds/reporters/tellor_flex.py b/src/telliot_feeds/reporters/tellor_flex.py index a24b333a..0faf4bf9 100644 --- a/src/telliot_feeds/reporters/tellor_flex.py +++ b/src/telliot_feeds/reporters/tellor_flex.py @@ -8,6 +8,7 @@ from typing import Union from chained_accounts import ChainedAccount +from eth_abi.exceptions import EncodingTypeError from eth_utils import to_checksum_address from telliot_core.contract.contract import Contract from telliot_core.gas.legacy_gas import legacy_gas_station @@ -236,8 +237,13 @@ async def fetch_datafeed(self) -> Optional[DataFeed[Any]]: if query tag is selected fetches the rewards, if any, for that query tag""" if self.datafeed: # add query id to catalog to fetch tip for legacy autopay - if self.datafeed.query.query_id not in CATALOG_QUERY_IDS: - CATALOG_QUERY_IDS[self.datafeed.query.query_id] = self.datafeed.query.descriptor + try: + qid = self.datafeed.query.query_id + except EncodingTypeError: + logger.warning(f"Unable to generate data/id for query: {self.datafeed.query}") + return None + if qid not in CATALOG_QUERY_IDS: + CATALOG_QUERY_IDS[qid] = self.datafeed.query.descriptor self.autopaytip = await self.rewards() return self.datafeed
"eth_abi.exceptions.EncodingTypeError" This error was encountered running with the start command: `telliot report -a goerli6 -s 10000 -p YOLO -wp 300 -rf` versions: telliot-core.git@c4898d70ce7b035e59d118e170e9d0a977993b7b telliot-feeds.git@30597c491f6c6b25e48cbc605ad842fa465bdc32 Full traceback attached: [eth_abi.exception.txt](https://github.com/tellor-io/telliot-feeds/files/10548853/eth_abi.exception.txt)
2023-02-01T00:58:43
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-581
dd30173cfccbeb318ec10c9ccfb6550904d56746
diff --git a/src/telliot_feeds/utils/cfg.py b/src/telliot_feeds/utils/cfg.py index 0f5069b8..d285a27d 100644 --- a/src/telliot_feeds/utils/cfg.py +++ b/src/telliot_feeds/utils/cfg.py @@ -15,7 +15,7 @@ logger = get_logger(__name__) -def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, ChainedAccount]: +def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, Optional[ChainedAccount]]: """Setup TelliotConfig via CLI if not already configured Inputs: @@ -50,12 +50,11 @@ def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, else: click.echo("No accounts set.") - no_update = click.confirm("Proceed with current settings (y) or update (n)?", default=True) + keep_settings = click.confirm("Proceed with current settings (y) or update (n)?", default=True) - if no_update: - if not accounts or not endpoint: - return cfg, None - return cfg, accounts[0] + if keep_settings: + click.echo("Keeping current settings...") + return cfg, accounts[0] if accounts else None want_to_update_chain_id = click.confirm(f"Chain_id is {cfg.main.chain_id}. Do you want to update it?") @@ -68,7 +67,7 @@ def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, cfg.endpoints.endpoints.insert(0, new_endpoint) click.echo(f"{new_endpoint} added!") - click.echo(f"Your account name: {accounts[0].name}") + click.echo(f"Your account name: {accounts[0].name if accounts else None}") new_account = setup_account(cfg.main.chain_id) if new_account is not None: @@ -93,7 +92,7 @@ def setup_endpoint(cfg: TelliotConfig, chain_id: int) -> RPCEndpoint: return prompt_for_endpoint(chain_id) else: - click.echo(f"No endpoints are available for chain_id {chain_id}. Pleae add one") + click.echo(f"No endpoints are available for chain_id {chain_id}. Please add one:") return prompt_for_endpoint(chain_id) @@ -114,14 +113,12 @@ def check_accounts(cfg: TelliotConfig, account_name: str) -> List[ChainedAccount def prompt_for_endpoint(chain_id: int) -> Optional[RPCEndpoint]: - - network_name = click.prompt("Enter network name", type=str) - provider = click.prompt("Enter Provider", type=str) - rpc_url = click.prompt("Enter RPC url", type=str) - explorer_url = click.prompt("Enter Explorer url", type=str) + """Take user input to create a new RPCEndpoint""" + rpc_url = click.prompt("Enter RPC URL", type=str) + explorer_url = click.prompt("Enter block explorer URL", type=str) try: - return RPCEndpoint(chain_id, network_name, provider, rpc_url, explorer_url) + return RPCEndpoint(chain_id, "n/a", "n/a", rpc_url, explorer_url) except Exception as e: click.echo("Cannot add endpoint: invalid endpoint properties" + str(e)) return None
When configuring an endpoint via the CLI, only ask for the node/RPC URL. Get rid of the prompts that ask for the provider and other details; just ask for the URL. Simpler.
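A minimal sketch of the reduced prompt flow the issue asks for. It returns a plain dict rather than telliot's `RPCEndpoint` so the example stays self-contained; the "n/a" placeholders mirror the patch's approach of stubbing the fields that are no longer prompted:

```python
import click

def prompt_for_endpoint(chain_id: int) -> dict:
    """Ask only for the two URLs; stub everything else."""
    rpc_url = click.prompt("Enter RPC URL", type=str)
    explorer_url = click.prompt("Enter block explorer URL", type=str)
    return {
        "chain_id": chain_id,
        "network": "n/a",   # no longer prompted
        "provider": "n/a",  # no longer prompted
        "url": rpc_url,
        "explorer": explorer_url,
    }
```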
2023-01-26T14:45:18
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-573
3baf9a7a37c8e36e7d06e138324e5ea2884a226e
diff --git a/src/telliot_feeds/sources/price/spot/bittrex.py b/src/telliot_feeds/sources/price/spot/bittrex.py index 89e7022c..3eeed82d 100644 --- a/src/telliot_feeds/sources/price/spot/bittrex.py +++ b/src/telliot_feeds/sources/price/spot/bittrex.py @@ -52,11 +52,15 @@ async def get_price(self, asset: str, currency: str) -> OptionalDataPoint[float] logger.error("Unable to decode Bittrex JSON") return None, None - if "restrictions that prevent you from accessing the site" in d["exception"].strerror: + rate_limit_conditions = ( + "exception" in d, + "restrictions that prevent you from accessing the site" in str(d["exception"]), + ) + if all(rate_limit_conditions): logger.warning("Bittrex API rate limit exceeded") return None, None - logger.error(d) + logger.error(str(d)) return None, None else:
TypeError: argument of type 'NoneType' is not iterable (listening for tips)

Got this error after updating to the latest main branch last night (01-21-2023) for one of our testnet reporters. This one was set to listen for tips with the start command: `telliot report -a mumbai3 -wp 60` All the other reporters are stable so far. See attached log for traceback. [traceback_log.txt](https://github.com/tellor-io/telliot-feeds/files/10474890/traceback_log.txt)
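The traceback comes from a substring test against `d["exception"].strerror` when that value is `None`. A hedged sketch of the safer check, close to the patch above but using `dict.get` so a missing key can't raise either (a small deviation from the patch, which still indexes the key directly):

```python
def is_rate_limited(d: dict) -> bool:
    """True only when the payload both has the key and contains the phrase."""
    rate_limit_conditions = (
        "exception" in d,
        "restrictions that prevent you from accessing the site" in str(d.get("exception")),
    )
    return all(rate_limit_conditions)

print(is_rate_limited({"exception": "restrictions that prevent you from accessing the site"}))  # True
print(is_rate_limited({}))                   # False, no KeyError
print(is_rate_limited({"exception": None}))  # False, no TypeError on None
```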
2023-01-22T20:43:38
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-571
428b6692e2b4d15047af429894fb8698d692d083
diff --git a/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py b/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py index 3bc2a4fb..713ef202 100644 --- a/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py +++ b/src/telliot_feeds/reporters/tips/listener/tip_listener_filter.py @@ -44,9 +44,6 @@ def qtype_name_in_registry(self, qdata: bytes) -> bool: """ qtyp_name = self.decode_typ_name(qdata) - if qtyp_name == "TellorRNG": - return False - return qtyp_name in Registry.registry def qtag_from_feed_catalog(self, qdata: bytes) -> Optional[str]: diff --git a/src/telliot_feeds/sources/blockhash_aggregator.py b/src/telliot_feeds/sources/blockhash_aggregator.py index d9a902d5..66a98694 100644 --- a/src/telliot_feeds/sources/blockhash_aggregator.py +++ b/src/telliot_feeds/sources/blockhash_aggregator.py @@ -1,4 +1,5 @@ import asyncio +import time from dataclasses import dataclass from datetime import datetime from datetime import timezone @@ -179,6 +180,8 @@ def parse_user_val(self) -> int: try: inpt = int(inpt) + if not self.is_valid_timestamp(inpt): + continue except ValueError: print("Invalid input. Enter decimal value (int).") continue @@ -190,14 +193,30 @@ def parse_user_val(self) -> int: self.timestamp = data return data + def is_valid_timestamp(self, timestamp: int) -> bool: + """Check if timestamp is valid.""" + try: + _ = datetime.fromtimestamp(timestamp) + except ValueError: + logger.info(f"Invalid timestamp: {timestamp}") + return False + + if 1438269973 <= timestamp <= int(time.time()): + return True + else: + logger.info( + f"Invalid timestamp: {timestamp}, should be greater than eth genesis block timestamp" + "and less than current time" + ) + return False + async def fetch_new_datapoint(self) -> OptionalDataPoint[bytes]: """Update current value with time-stamped value fetched from user input. Returns: Current time-stamped value """ - - if self.timestamp == 0: + if not self.is_valid_timestamp(self.timestamp): try: timestamp = self.parse_user_val() except TimeoutOccurred:
TellorRNG bitcoin blockhash source consistently failing

Command to reproduce: `telliot -a mumbaiacct1 report -p YOLO`

```console
WARNING | telliot_feeds.sources.blockhash_aggregator | Blockchain.info API returned no blocks
WARNING | telliot_feeds.sources.blockhash_aggregator | Unable to retrieve Bitcoin blockhash
```
- [ ] When fixing this, make sure to remove this line from `tip_listener_filter.py`: ```py if qtyp_name == "TellorRNG": return False ``` I was not able to replicate this bug. It reported correctly when I used `telliot -a my-mumbai-acct report -p YOLO -qt tellor-rng-example` > * [ ] When fixing this, make sure to remove this line from `tip_listener_filter.py`: > > ```python > if qtyp_name == "TellorRNG": > return False > ``` Does this mean the error is occurring on the fix-multicall-err branch? The https://blockchain.info/blocks... api does appear to return an empty array in cases where the inputted `timestamp` is too far into the future, which would cause the `Blockchain.info API returned no blocks` error. This seems to be expected behavior as there is no answer to the query yet and no data should be reported. Do we know what timestamp was inputted to create this bug? > The https://blockchain.info/blocks... api does appear to return an empty array in cases where the inputted `timestamp` is too far into the future, which would cause the `Blockchain.info API returned no blocks` error. This seems to be expected behavior as there is no answer to the query yet and no data should be reported. Do we know what timestamp was inputted to create this bug? It's using the automatic rng source, so the timestamp was fetched automatically. I didin't enter it > > * [ ] When fixing this, make sure to remove this line from `tip_listener_filter.py`: > > > > ```python > > if qtyp_name == "TellorRNG": > > return False > > ``` > > Does this mean the error is occurring on the fix-multicall-err branch? That branch's changes are fixing the fact that telliot gets stuck trying to report a TellorRNG value, the branch isn't causing this issue. I'm able to produce it on the main branch w/ the command `telliot -a mymumbaiacct report -p YOLO`: ```console INFO | telliot_feeds.reporters.interval | Current query: {"type":"TellorRNG","timestamp":112233} ERROR | telliot_feeds.sources.price.spot.bittrex | Unable to decode Bittrex JSON WARNING | telliot_feeds.sources.price.spot.binance | Service unavailable from a restricted location according to 'b. Eligibility' in https://www.binance.com/en/terms. Please contact customer service if you believe you received this message in error. INFO | telliot_feeds.sources.price_aggregator | Running median on [0.80127, 0.8018, 0.80199, 0.8011] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.801535 reported at time 2022-12-23 19:35:14.201142+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 4 INFO | telliot_feeds.sources.price_aggregator | Running median on [13.97, 13.97] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 13.97 reported at time 2022-12-23 19:35:14.201340+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 2 INFO | telliot_feeds.reporters.tellor_flex | tips: 1e-17 TRB gas limit: 350000 legacy gas price: 39 INFO | telliot_feeds.reporters.tellor_flex | Estimated profit: $-0.01 INFO | telliot_feeds.reporters.tellor_flex | tip price: 0.0, gas costs: 0.01094095275 INFO | telliot_feeds.reporters.tellor_flex | Estimated percent profit: -100.0% WARNING | telliot_feeds.sources.blockhash_aggregator | Blockchain.info API returned no blocks WARNING | telliot_feeds.sources.blockhash_aggregator | Unable to retrieve Bitcoin blockhash INFO | telliot_feeds.reporters.interval | Unable to retrieve updated datafeed value. ``` I don't know where it's getting timestamp `112233` from. 
But it will repeat trying to get a val using that timestamp over and over again, hence how telliot gets stuck. The versions I'm using: ``` telliot-core==0.1.7 -e git+https://github.com/tellor-io/telliot-feed-examples.git@86c1e3487c1cd93044477f0f3eee5c2d3adb3859#egg=telliot_feeds ```
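The stray `{"type":"TellorRNG","timestamp":112233}` query in the logs points to a bogus timestamp keeping the reporter stuck. A runnable sketch of the validity check the patch adds (the genesis constant 1438269973 is taken from the diff; the extra exception types are a defensive assumption beyond the patch's ValueError):

```python
import time
from datetime import datetime

ETH_GENESIS_TS = 1438269973  # constant taken from the patch

def is_valid_timestamp(timestamp: int) -> bool:
    """Reject timestamps before Ethereum genesis or in the future."""
    try:
        datetime.fromtimestamp(timestamp)  # the patch catches ValueError here
    except (ValueError, OverflowError, OSError):  # extra platform errors, for safety
        return False
    return ETH_GENESIS_TS <= timestamp <= int(time.time())

print(is_valid_timestamp(112233))            # False: predates eth genesis
print(is_valid_timestamp(int(time.time())))  # True
```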
2023-01-20T14:48:14
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-559
2f76f171e0297738db4a3e19e971a7e04ca74edd
diff --git a/docs/getting-started.md b/docs/getting-started.md index 61b8928a..e9c76405 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -109,8 +109,6 @@ To configure your endpoint via the CLI, use the following command: If you don't have your own node URL, a free one can be obtained at [Infura.io](http://www.infura.io). Simply replace `INFURA_API_KEY` with the one provided by Infura. -For the funcitonality of telliot feeds, Endpoints should be configured for both Ethereum mainnet and Goerli testnet. (even if you don't plan on reporting oracle data on those networks) - **Warning! All telliot software and reporter feeds should be validated on testnets prior to deploying on mainnet.** Note that endpoints should use the websocket (wss) protocol because HTTPS endpoints do not support event listeners. (If reporting on Polygon, websockets are not supported, so the HTTPS endpoint is fine.) diff --git a/src/telliot_feeds/sources/blockhash_aggregator.py b/src/telliot_feeds/sources/blockhash_aggregator.py index 1836aa06..d9a902d5 100644 --- a/src/telliot_feeds/sources/blockhash_aggregator.py +++ b/src/telliot_feeds/sources/blockhash_aggregator.py @@ -23,11 +23,6 @@ logger = get_logger(__name__) -cfg = TelliotConfig() -cfg.main.chain_id = 1 -cfg.get_endpoint().connect() -w3 = cfg.get_endpoint().web3 - retry_strategy = Retry( total=3, backoff_factor=1, @@ -37,6 +32,17 @@ adapter = HTTPAdapter(max_retries=retry_strategy) +def get_mainnet_web3() -> Any: + """Get mainnet TelliotConfig.""" + cfg = TelliotConfig() + cfg.main.chain_id = 1 + try: + cfg.get_endpoint().connect() + return cfg.get_endpoint().web3 + except ValueError: + return None + + def block_num_from_timestamp(timestamp: int) -> Optional[int]: with requests.Session() as s: s.mount("https://", adapter) @@ -81,6 +87,10 @@ def block_num_from_timestamp(timestamp: int) -> Optional[int]: async def get_eth_hash(timestamp: int) -> Optional[str]: """Fetches next Ethereum blockhash after timestamp from API.""" + w3 = get_mainnet_web3() + if w3 is None: + logger.warning("Web3 not connected") + return None try: this_block = w3.eth.get_block("latest") diff --git a/src/telliot_feeds/utils/cfg.py b/src/telliot_feeds/utils/cfg.py index a1ddb469..0f5069b8 100644 --- a/src/telliot_feeds/utils/cfg.py +++ b/src/telliot_feeds/utils/cfg.py @@ -1,4 +1,3 @@ -import os from typing import List from typing import Optional from typing import Tuple @@ -16,27 +15,6 @@ logger = get_logger(__name__) -def mainnet_config() -> Optional[TelliotConfig]: - cfg = TelliotConfig() - cfg.main.chain_id = 1 - endpoint = cfg.get_endpoint() - - if "INFURA_API_KEY" in endpoint.url: - endpoint.url = f'wss://mainnet.infura.io/ws/v3/{os.environ["INFURA_API_KEY"]}' - - accounts = find_accounts(chain_id=1) - if not accounts: - # Create a test account using PRIVATE_KEY defined on github. - key = os.getenv("PRIVATE_KEY", None) - if key: - ChainedAccount.add("git-mainnet-key", chains=1, key=os.environ["PRIVATE_KEY"], password="") - else: - logger.warning("No mainnet account added!") - return None - - return cfg - - def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, ChainedAccount]: """Setup TelliotConfig via CLI if not already configured
Make it so the user only has to set up endpoints for the chain they'll be using

Take this out of the getting started docs:
```
For the funcitonality of telliot feeds, Endpoints should be configured for both Ethereum mainnet and Goerli testnet. (even if you don't plan on reporting oracle data on those networks)
```
Throws this error if you try to delete all the other endpoints: ```console $ telliot config show Traceback (most recent call last): File "/Users/***/tenv/bin/telliot", line 5, in <module> from telliot_feeds.cli.main import main File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/cli/main.py", line 15, in <module> from telliot_feeds.cli.commands.query import query File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/cli/commands/query.py", line 3, in <module> from telliot_feeds.cli.utils import build_query File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/cli/utils.py", line 21, in <module> from telliot_feeds.feeds import DATAFEED_BUILDER_MAPPING File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/feeds/__init__.py", line 32, in <module> from telliot_feeds.feeds.tellor_rng_feed import tellor_rng_feed File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/feeds/tellor_rng_feed.py", line 9, in <module> from telliot_feeds.sources.blockhash_aggregator import TellorRNGManualSource File "/Users/***/tenv/lib/python3.9/site-packages/telliot_feeds/sources/blockhash_aggregator.py", line 28, in <module> cfg.get_endpoint().connect() File "/Users/***/tenv/lib/python3.9/site-packages/telliot_core/apps/telliot_config.py", line 93, in get_endpoint raise ValueError(f"Endpoint not found for chain_id={self.main.chain_id}") ValueError: Endpoint not found for chain_id=1 ```
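The traceback shows the failure happens at import time, because `blockhash_aggregator.py` connected to a mainnet endpoint at module scope. A sketch of the lazy variant from the patch, which defers the connection until a blockhash is actually needed and returns `None` when no chain-1 endpoint exists (the `TelliotConfig` import path is the one visible in the traceback):

```python
from typing import Any, Optional

from telliot_core.apps.telliot_config import TelliotConfig  # path from the traceback

def get_mainnet_web3() -> Optional[Any]:
    """Connect to a chain-1 endpoint on demand; None if none is configured."""
    cfg = TelliotConfig()
    cfg.main.chain_id = 1
    try:
        cfg.get_endpoint().connect()  # raises ValueError when no endpoint exists
        return cfg.get_endpoint().web3
    except ValueError:
        return None
```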
2023-01-10T16:23:05
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-558
384066fa1fa2b7e0df0157031c74d202d12f4f0e
diff --git a/README.md b/README.md index 37e6dac1..cd76e1d8 100644 --- a/README.md +++ b/README.md @@ -12,5 +12,5 @@ This package contains reporting tools and datafeeds for Tellor oracles. Report using the CLI: ``` -telliot -a myaccount report +telliot report -a myacct ``` diff --git a/docs/getting-started.md b/docs/getting-started.md index fefd859e..61b8928a 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -105,7 +105,7 @@ You can add your RPC endpoints via the command line or by editing the `endpoints To configure your endpoint via the CLI, use the following command: - telliot -a myacct report + telliot report -a myacct If you don't have your own node URL, a free one can be obtained at [Infura.io](http://www.infura.io). Simply replace `INFURA_API_KEY` with the one provided by Infura. diff --git a/docs/usage.md b/docs/usage.md index 57ededbd..2a1de0f8 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -5,7 +5,7 @@ Prerequisites: [Getting Started](https://tellor-io.github.io/telliot-feeds/getti To report data to Tellor oracles, or access any other functionality, use the `telliot` CLI. A basic example: ``` -$ telliot -a acct1 report -ncr -qt trb-usd-spot +$ telliot report -a acct1 -ncr -qt trb-usd-spot ``` **Be sure to always confirm the correct settings when prompted and read chain-specific usage sections before setting up your reporter!** @@ -107,7 +107,7 @@ telliot --account acct1 report Use the `report` command to submit data to Tellor oracles. Example `report` command usage: ``` -telliot -a acct2 report +telliot report -a acct2 ``` When calling the `report` command, `telliot` will ask you to confirm the reporter's settings: @@ -133,7 +133,7 @@ The default settings are probably fine to use on testnets, but you may want to a By default, the reporter will continue to attempt reporting whenever out of reporting lock. Use the `--submit-once` flag to only report once: ``` -telliot -a staker1 report --submit-once +telliot report -a staker1 --submit-once ``` ### Build Feed Flag @@ -141,7 +141,7 @@ telliot -a staker1 report --submit-once Use the build-a-feed flag (`--build-feed`) to build a DataFeed of a QueryType with one or more QueryParameters. When reporting, the CLI will list the QueryTypes this flag supports. To select a QueryType, enter a type from the list provided. Then, enter in the corresponding QueryParameters for the QueryType you have selected, and telliot will build the Query and select the appropriate source. ``` -telliot -a staker1 report --build-feed --submit-once -p YOLO +telliot report -a staker1 --build-feed --submit-once -p YOLO ``` ## Profit Flag @@ -151,13 +151,13 @@ telliot -a staker1 report --build-feed --submit-once -p YOLO Use this flag (`--profit/-p`) to set an expected profit. The default is 100%, which will likely result in your reporter never attempting to report unless you're on a testnet. To bypass profitability checks, use the `"YOLO"` string: ``` -telliot -a acct1 report -p YOLO +telliot report -a acct1 -p YOLO ``` Normal profit flag usage: ``` -telliot -a acct4 report -p 2 +telliot report -a acct4 -p 2 ``` **Note: Skipping profit checks does not skip checks for tips on the [AutoPay contract](https://github.com/tellor-io/autoPay). 
If you'd like to skip these checks as well, use the `--no-check-rewards/-ncr` flag.** @@ -171,7 +171,7 @@ The `--gas-price/-gp` flag is for legacy transactions, while the `--max-fee/-mf` Example usage: ``` -telliot -a acct3 report -tx 0 -gl 310000 -gp 9001 -p 22 +telliot report -a acct3 -tx 0 -gl 310000 -gp 9001 -p 22 ``` # Reporting on Ethereum @@ -182,13 +182,7 @@ Both transaction types (0 & 2) are supported for reporting. It's not advised to report without Flashbots, unless on a testnet like Goerli, because transactions sent to the public mempool on Ethereum mainnet will most likely be [front-run](https://www.paradigm.xyz/2020/08/ethereum-is-a-dark-forest/), so you'll lose money. -If you want to report without flashbots on Ethereum mainnet, use the `--no-flashbots/-nfb` flag. - -Example usage: - -``` -telliot -a acct1 -nfb report -``` +By default, `telliot` will report without Flashbots. You need to use the signature account flag (`--signature-account/-sa`) to report with Flashbots. See [below](#using-flashbots) for more info. ## Using Flashbots @@ -209,7 +203,7 @@ When reporting, select your signatory account by tag as well as your staked main Example usage: ``` -telliot -a acct2 -sgt sigacct -fb report +telliot report -a acct2 -sgt sigacct ``` ## Staking @@ -219,7 +213,7 @@ If reporting to Tellor360 oracles, reporters can stake multiple times. Each stak The reporter will automatically attempt to stake the required amount, but if you'd like to stake more than the current minimum, use the `--stake/-s` flag. ``` -telliot -a acct1 report -s 2000 -ncr -rf +telliot report -a acct1 -s 2000 -ncr -rf ``` If the reporter account's actual stake is reduced after a dispute, the reporter will attempt to stake the difference in TRB to return to the original desired stake amount. diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index 48520380..bd409067 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -10,6 +10,7 @@ from telliot_core.cli.utils import async_run from telliot_feeds.cli.utils import build_feed_from_input +from telliot_feeds.cli.utils import get_accounts_from_name from telliot_feeds.cli.utils import parse_profit_input from telliot_feeds.cli.utils import print_reporter_settings from telliot_feeds.cli.utils import reporter_cli_core @@ -56,6 +57,24 @@ def reporter() -> None: pass [email protected]( + "--account", + "-a", + "account_str", + help="Name of account used for reporting, staking, etc. 
More info: run `telliot account --help`", + required=True, + nargs=1, + type=str, +) [email protected]( + "--signature-account", + "-sa", + "signature_account", + help="Name of signature account used for reporting with Flashbots.", + required=False, + nargs=1, + type=str, +) @reporter.command() @click.option( "--build-feed", @@ -292,24 +311,32 @@ async def report( custom_autopay_contract: Optional[ChecksumAddress], tellor_360: bool, stake: float, + account_str: str, + signature_account: str, check_rewards: bool, use_random_feeds: bool, ) -> None: """Report values to Tellor oracle""" - name = ctx.obj["ACCOUNT_NAME"] - sig_acct_name = ctx.obj["SIGNATURE_ACCOUNT_NAME"] + ctx.obj["ACCOUNT_NAME"] = account_str + ctx.obj["SIGNATURE_ACCOUNT_NAME"] = signature_account + + accounts = get_accounts_from_name(account_str) + if not accounts: + return + + ctx.obj["CHAIN_ID"] = accounts[0].chains[0] # used in reporter_cli_core - if sig_acct_name is not None: + if signature_account is not None: try: if not signature_password: - signature_password = getpass.getpass(f"Enter password for {sig_acct_name} keyfile: ") + signature_password = getpass.getpass(f"Enter password for {signature_account} keyfile: ") except ValueError: click.echo("Invalid Password") # Initialize telliot core app using CLI context async with reporter_cli_core(ctx) as core: - core._config, account = setup_config(core.config, account_name=name) + core._config, account = setup_config(core.config, account_name=account_str) endpoint = check_endpoint(core._config) @@ -323,13 +350,13 @@ async def report( if not account.is_unlocked: account.unlock(password) - if sig_acct_name is not None: - sig_account = find_accounts(name=sig_acct_name)[0] + if signature_account is not None: + sig_account = find_accounts(name=signature_account)[0] if not sig_account.is_unlocked: sig_account.unlock(password) sig_acct_addr = to_checksum_address(sig_account.address) else: - sig_acct_addr = "" # type: ignore + sig_acct_addr = "" # If we need to build a datafeed if build_feed: diff --git a/src/telliot_feeds/cli/commands/settle.py b/src/telliot_feeds/cli/commands/settle.py index 74066c8b..4cb5eb9c 100644 --- a/src/telliot_feeds/cli/commands/settle.py +++ b/src/telliot_feeds/cli/commands/settle.py @@ -4,6 +4,7 @@ from click.core import Context from telliot_core.cli.utils import async_run +from telliot_feeds.cli.utils import get_accounts_from_name from telliot_feeds.cli.utils import reporter_cli_core from telliot_feeds.cli.utils import valid_diva_chain from telliot_feeds.integrations.diva_protocol.contract import DivaOracleTellorContract @@ -38,21 +39,34 @@ def diva() -> None: required=False, default=100, ) [email protected]( + "--account", + "-a", + "account_str", + help="Name of account used for reporting, staking, etc. 
More info: run `telliot account --help`", + required=True, + nargs=1, + type=str, +) @click.option("-pswd", "--password", type=str) @click.pass_context @async_run async def settle( ctx: Context, + account_str: str, pool_id: int, password: str, legacy_gas_price: int = 100, ) -> None: """Settle a derivative pool in DIVA Protocol.""" + ctx.obj["ACCOUNT_NAME"] = account_str + accounts = get_accounts_from_name(account_str) + if not accounts: + return - name = ctx.obj["ACCOUNT_NAME"] try: if not password: - password = getpass.getpass(f"Enter password for {name} keyfile: ") + password = getpass.getpass(f"Enter password for {account_str} keyfile: ") except ValueError: click.echo("Invalid Password") diff --git a/src/telliot_feeds/cli/main.py b/src/telliot_feeds/cli/main.py index 3eda84a9..3c5de0ad 100644 --- a/src/telliot_feeds/cli/main.py +++ b/src/telliot_feeds/cli/main.py @@ -5,7 +5,6 @@ or in the configuration file. """ import click -from chained_accounts import find_accounts from click.core import Context from telliot_feeds.cli.commands.account import account @@ -20,28 +19,8 @@ logger = get_logger(__name__) -# from telliot_feeds.cli.commands.tip import tip - @click.group() [email protected]( - "--account", - "-a", - "account", - help="Name of account used for reporting.", - required=False, - nargs=1, - type=str, -) [email protected]( - "--signature-account", - "-sa", - "signature_account", - help="Name of signature account used for reporting with Flashbots.", - required=False, - nargs=1, - type=str, -) @click.option( "--test-config", is_flag=True, @@ -50,30 +29,14 @@ @click.pass_context def main( ctx: Context, - account: str, - signature_account: str, test_config: bool, ) -> None: """Telliot command line interface""" ctx.ensure_object(dict) - ctx.obj["ACCOUNT_NAME"] = account - ctx.obj["SIGNATURE_ACCOUNT_NAME"] = signature_account ctx.obj["TEST_CONFIG"] = test_config - # Pull chain from account - # Note: this is not be reliable because accounts can be associated with - # multiple chains. - accounts = find_accounts(name=account) if account else find_accounts() - if len(accounts) == 0: - click.echo( - "No accounts found. Add one with the account subcommand. For more info run: telliot account add --help" - ) - else: - ctx.obj["CHAIN_ID"] = accounts[0].chains[0] - main.add_command(report) -# main.add_command(tip) main.add_command(query) main.add_command(catalog) main.add_command(settle) diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index 9ffcd634..1f227ffa 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -261,3 +261,14 @@ def valid_transaction_type(ctx: click.Context, param: Any, value: str) -> int: raise click.BadParameter(f"Transaction type given ({value}) is not supported ({supported}).") except ValueError: raise click.BadParameter("Transaction type must be an integer.") + + +def get_accounts_from_name(name: Optional[str]) -> list[ChainedAccount]: + """Get account from name or return any account if no name is given.""" + accounts: list[ChainedAccount] = find_accounts(name=name) if name else find_accounts() + if not accounts: + click.echo( + f'No account found named: "{name}".\nAdd one with the account subcommand.' + "\nFor more info run: `telliot account add --help`" + ) + return accounts
Move account and signature account flags under the report command

Currently they're under the telliot main command, so they don't show up when you do `telliot report --help`.
Add docs for the `-a` option.
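A toy, self-contained sketch of the click change (assuming nothing beyond click itself): an option declared on the group must be typed before the subcommand name and is invisible in `telliot report --help`, while the same decorator on the subcommand gives the `telliot report -a myacct` form shown in the updated docs:

```python
import click

@click.group()
def main() -> None:
    """Telliot command line interface"""

@main.command()
@click.option(
    "--account", "-a", "account_str",
    required=True, type=str,
    help="Name of account used for reporting, staking, etc.",
)
def report(account_str: str) -> None:
    """Report values to Tellor oracle"""
    click.echo(f"Reporting with account: {account_str}")

if __name__ == "__main__":
    main()  # now `telliot report -a acct` works and -a shows in report --help
```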
2023-01-10T03:40:06
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-529
8807a348afdd9844e8318ddd07be56efa00d6cfe
diff --git a/src/telliot_feeds/reporters/tips/__init__.py b/src/telliot_feeds/reporters/tips/__init__.py index 0298ab92..959dcd91 100644 --- a/src/telliot_feeds/reporters/tips/__init__.py +++ b/src/telliot_feeds/reporters/tips/__init__.py @@ -1,16 +1,50 @@ +from typing import Optional + from multicall.constants import MULTICALL2_ADDRESSES from multicall.constants import MULTICALL3_ADDRESSES from multicall.constants import Network +from multicall.constants import NO_STATE_OVERRIDE from telliot_feeds.queries.query_catalog import query_catalog +from telliot_feeds.utils.log import get_logger + +logger = get_logger(__name__) # add testnet support for multicall that aren't avaialable in the package -Network.PulsechainTestnet = 941 -MULTICALL2_ADDRESSES[Network.PulsechainTestnet] = "0x959a437F1444DaDaC8aF997E71EAF0479c810267" -Network.Chiado = 10200 -MULTICALL3_ADDRESSES[Network.Chiado] = "0x08e08170712c7751b45b38865B97A50855c8ab13" +def add_multicall_support( + network: str, + network_id: int, + state_override: bool = True, + multicall2_address: Optional[str] = None, + multicall3_address: Optional[str] = None, +) -> None: + """Add support for a network that doesn't have multicall support in the package""" + if not hasattr(Network, network): + setattr(Network, network, network_id) + attr = getattr(Network, network) + if not state_override: + # Gnosis chain doesn't have state override so we need to add it + # to the list of chains that don't have state override in the package + # to avoid errors + NO_STATE_OVERRIDE.append(attr) + if multicall2_address: + MULTICALL2_ADDRESSES[attr] = multicall2_address + else: + MULTICALL3_ADDRESSES[attr] = multicall3_address + else: + logger.warning(f"Network {network} already exists in multicall package") + +add_multicall_support( + network="PulsechainTestnet", network_id=941, multicall2_address="0x959a437F1444DaDaC8aF997E71EAF0479c810267" +) +add_multicall_support( + network="Chiado", + network_id=10200, + state_override=False, + multicall3_address="0x08e08170712c7751b45b38865B97A50855c8ab13", +) CATALOG_QUERY_IDS = {query_catalog._entries[tag].query.query_id: tag for tag in query_catalog._entries} CATALOG_QUERY_DATA = {query_catalog._entries[tag].query.query_data: tag for tag in query_catalog._entries} diff --git a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py index cbd0749c..8861e559 100644 --- a/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py +++ b/src/telliot_feeds/reporters/tips/listener/funded_feeds_filter.py @@ -85,17 +85,17 @@ async def price_change(self, query_id: bytes, value_before: float) -> Optional[f else: logger.info(f"No Api source found for {query_tag} to check priceThreshold") return None + if query_id not in self.prices: + value_now = await datafeed.source.fetch_new_datapoint() # type: ignore - value_now = await datafeed.source.fetch_new_datapoint() # type: ignore + if not value_now: + note = f"Unable to fetch {datafeed} price for tip calculation" + _ = error_status(note=note, log=logger.warning) + return None - if not value_now: - note = f"Unable to fetch {datafeed} price for tip calculation" - _ = error_status(note=note, log=logger.warning) - return None - - value_now = value_now[0] + self.prices[query_id] = value_now[0] - return _get_price_change(previous_val=value_before, current_val=value_now) + return _get_price_change(previous_val=value_before, current_val=self.prices[query_id]) def api_support_check(self, feeds: 
list[QueryIdandFeedDetails]) -> list[QueryIdandFeedDetails]: """Filter funded feeds where threshold is gt zero and no telliot catalog feeds support""" @@ -124,6 +124,10 @@ def filter_historical_submissions(self, feeds: list[QueryIdandFeedDetails]) -> l for current, previous in zip( feed.queryid_timestamps_values_list[::-1], feed.queryid_timestamps_values_list[-2::-1] ): + # if current timestamp is before feed start then no need to check + if feed.params.startTime > current.timestamp: + feed.queryid_timestamps_values_list.remove(current) + continue in_eligibile_window = self.is_timestamp_first_in_window( timestamp_before=previous.timestamp, timestamp_to_check=current.timestamp, @@ -183,6 +187,7 @@ async def window_and_priceThreshold_unmet_filter( Returns: list of feeds that could possibly reward a tip """ + self.prices: dict[bytes, float] = {} for feed in list(feeds): # check if your timestamp will be first in window for # this feed if not discard feed_details diff --git a/src/telliot_feeds/reporters/tips/multicall_functions/multicall_autopay.py b/src/telliot_feeds/reporters/tips/multicall_functions/multicall_autopay.py index 22e95e5a..34cc4494 100644 --- a/src/telliot_feeds/reporters/tips/multicall_functions/multicall_autopay.py +++ b/src/telliot_feeds/reporters/tips/multicall_functions/multicall_autopay.py @@ -24,11 +24,14 @@ async def month_of_timestamps_and_values( Return: a list of QueryIdandFeedDetails to be used in the next batch call that fetches a list of timestamps for each queryId """ + + unique_ids = {feed.query_id for feed in feeds} + calls = [ self.get_multiple_values_before( - query_id=feed.query_id, now_timestamp=now_timestamp, max_age=max_age, max_count=max_count + query_id=qid, now_timestamp=now_timestamp, max_age=max_age, max_count=max_count ) - for feed in feeds + for qid in unique_ids ] if not len(calls): return None, error_status("Unable to assemble getMultipleValues Call object")
Make fewer API calls for the same asset/currency pair

When listening for tips, since a query id could have multiple feed ids, it's calling APIs as many times as the length of the feed ids list instead of just once for the same currency.

- [ ] cache/store the result from API hits for asset/currency pairs, so they can be reused (see the sketch after the logs below)

You can produce an example of this behavior w/ the command `telliot -a mymumbaiacct report -p YOLO`:
```console
Press [ENTER] to confirm settings.
INFO | telliot_feeds.reporters.tellor_flex | Reporting with account: 0xd5f1Cc896542C111c7Aa7D7fae2C3D654f34b927
INFO | telliot_feeds.reporters.tellor_360 | Current Oracle stakeAmount: 10.0
INFO | telliot_feeds.reporters.tellor_360 |

STAKER INFO
start date: 1670340607
stake_balance: 2000.0
locked_balance: 0
last report: 1671815607
reports count: 23

{'code': -32000, 'message': 'out of gas'}
Multicall batch size reduced from 10000 to 8. The failed batch had 9 calls.
INFO | telliot_feeds.sources.price_aggregator | Running median on [0.04745761, 0.04750754389083932, 0.04764]
INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.04750754389083932 reported at time 2022-12-23 19:35:10.656465+00:00
INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 3
INFO | telliot_feeds.sources.price_aggregator | Running median on [0.04745761, 0.04750754389083932, 0.04764]
INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.04750754389083932 reported at time 2022-12-23 19:35:11.140805+00:00
INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 3
INFO | telliot_feeds.sources.price_aggregator | Running median on [0.04745761, 0.04750754389083932, 0.04764]
INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.04750754389083932 reported at time 2022-12-23 19:35:11.541615+00:00
INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 3
INFO | telliot_feeds.sources.price_aggregator | Running median on [0.04745761, 0.04750754389083932, 0.04764]
INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.04750754389083932 reported at time 2022-12-23 19:35:11.997618+00:00
INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 3
INFO | telliot_feeds.sources.price_aggregator | Running median on [0.04745761, 0.04750754389083932, 0.04764]
INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.04750754389083932 reported at time 2022-12-23 19:35:12.366965+00:00
INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 3
```

Versions used:
```
telliot-core==0.1.7
-e git+https://github.com/tellor-io/telliot-feed-examples.git@86c1e3487c1cd93044477f0f3eee5c2d3adb3859#egg=telliot_feeds
```
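A minimal sketch of the checklist item above, assuming nothing about telliot's internals: one in-memory dict keyed by query id caches the first fetched price for the duration of a filtering pass, so feed ids that share a query id reuse a single API fetch. `fetch_price` is a hypothetical stand-in for the real datafeed source call:

```python
from typing import Callable, Dict

def make_cached_price_getter(fetch_price: Callable[[bytes], float]) -> Callable[[bytes], float]:
    """Wrap an API call so each query id is fetched at most once per pass."""
    prices: Dict[bytes, float] = {}

    def get_price(query_id: bytes) -> float:
        if query_id not in prices:
            prices[query_id] = fetch_price(query_id)  # single API hit per id
        return prices[query_id]

    return get_price

api_calls = []
get_price = make_cached_price_getter(lambda qid: api_calls.append(qid) or 0.0475)
for _ in range(3):      # three feed ids sharing one query id
    get_price(b"ric-usd-id")
print(len(api_calls))   # 1, not 3
```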
2022-12-30T04:53:48
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-510
3dd579b3e47381357247d4eb97ab60d99ce5287a
diff --git a/src/telliot_feeds/feeds/snapshot_feed.py b/src/telliot_feeds/feeds/snapshot_feed.py index 1cb4d7ea..19265b1f 100644 --- a/src/telliot_feeds/feeds/snapshot_feed.py +++ b/src/telliot_feeds/feeds/snapshot_feed.py @@ -7,7 +7,7 @@ """ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.snapshot import Snapshot -from telliot_feeds.sources.manual_input_source import ManualSnapshotInputSource +from telliot_feeds.sources.manual.snapshot import ManualSnapshotInputSource proposalId = None diff --git a/src/telliot_feeds/sources/blockhash_aggregator.py b/src/telliot_feeds/sources/blockhash_aggregator.py index 5d98ca7f..1836aa06 100644 --- a/src/telliot_feeds/sources/blockhash_aggregator.py +++ b/src/telliot_feeds/sources/blockhash_aggregator.py @@ -15,8 +15,11 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger + logger = get_logger(__name__) @@ -162,16 +165,16 @@ def parse_user_val(self) -> int: data = None while data is None: - inpt = input() + inpt = input_timeout() try: - inpt = int(inpt) # type: ignore + inpt = int(inpt) except ValueError: print("Invalid input. Enter decimal value (int).") continue print(f"Generating random number from timestamp: {inpt}\nPress [ENTER] to confirm.") - _ = input() + _ = input_timeout() data = inpt self.timestamp = data @@ -185,7 +188,11 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[bytes]: """ if self.timestamp == 0: - timestamp = self.parse_user_val() + try: + timestamp = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None else: timestamp = self.timestamp @@ -211,12 +218,7 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[bytes]: return datapoint -async def main() -> None: - """Runs the data source.""" - source = TellorRNGManualSource() - await source.fetch_new_datapoint() - - if __name__ == "__main__": - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) + s = TellorRNGManualSource() + v, t = asyncio.run(s.fetch_new_datapoint()) + print("datapoint:", v, t) diff --git a/src/telliot_feeds/sources/manual_input_source.py b/src/telliot_feeds/sources/manual/snapshot.py similarity index 61% rename from src/telliot_feeds/sources/manual_input_source.py rename to src/telliot_feeds/sources/manual/snapshot.py index 4e09f64e..303d4a40 100644 --- a/src/telliot_feeds/sources/manual_input_source.py +++ b/src/telliot_feeds/sources/manual/snapshot.py @@ -1,8 +1,11 @@ +import asyncio from dataclasses import dataclass from telliot_feeds.datasource import DataSource -from telliot_feeds.dtypes.datapoint import DataPoint from telliot_feeds.dtypes.datapoint import datetime_now_utc +from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -20,7 +23,7 @@ def parse_user_val(self) -> float: err_msg = "Invalid input. 
" + msg val = None while val is None: - inpt = input().lower() + inpt = input_timeout().lower() if inpt == "y": val = True @@ -31,18 +34,28 @@ def parse_user_val(self) -> float: continue print(f"Submitting result: {inpt}\nPress [ENTER] to confirm.") - _ = input() + _ = input_timeout() return val - async def fetch_new_datapoint(self) -> DataPoint[float]: + async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: """Update current value with time-stamped value fetched from user input. Returns: Current time-stamped value """ - vote = self.parse_user_val() + try: + vote = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (vote, datetime_now_utc()) self.store_datapoint(datapoint) return datapoint + + +if __name__ == "__main__": + s = ManualSnapshotInputSource() + v, t = asyncio.run(s.fetch_new_datapoint()) + print("datapoint:", v, t)
Blockhash aggregator doesn't use input w/ timeout

`blockhash_aggregator.py::parse_user_val` needs to use `telliot_feeds.utils.input_timeout.input_timeout` instead of the built-in `input`.
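A sketch of the requested swap, assuming only the two names the other manual sources already import from `telliot_feeds.utils.input_timeout` (an `input_timeout()` that raises `TimeoutOccurred` on inactivity, as seen in the diffs):

```python
from typing import Optional

from telliot_feeds.utils.input_timeout import input_timeout, TimeoutOccurred

def read_int_or_give_up() -> Optional[int]:
    """The parse_user_val loop, but it can no longer hang forever."""
    while True:
        try:
            raw = input_timeout()  # instead of the builtin input()
        except TimeoutOccurred:
            return None  # caller treats this as "no datapoint this round"
        try:
            return int(raw)
        except ValueError:
            print("Invalid input. Enter decimal value (int).")
```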
2022-12-22T22:08:45
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-498
f6c8757f11edcb74dee1f65adab2d84de1d9c148
diff --git a/src/telliot_feeds/feeds/daily_volatility_manual_feed.py b/src/telliot_feeds/feeds/daily_volatility_manual_feed.py index 24d661c7..0c5b1b9c 100644 --- a/src/telliot_feeds/feeds/daily_volatility_manual_feed.py +++ b/src/telliot_feeds/feeds/daily_volatility_manual_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.daily_volatility import DailyVolatility -from telliot_feeds.sources.manual_sources.daily_volatility_manual_source import DailyVolatilityManualSource +from telliot_feeds.sources.manual.daily_volatility_manual_source import DailyVolatilityManualSource asset = "" diff --git a/src/telliot_feeds/feeds/diva_manual_feed.py b/src/telliot_feeds/feeds/diva_manual_feed.py index 6a3c9a92..8ec437ea 100644 --- a/src/telliot_feeds/feeds/diva_manual_feed.py +++ b/src/telliot_feeds/feeds/diva_manual_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.diva_protocol import DIVAProtocol -from telliot_feeds.sources.manual_sources.diva_manual_source import DivaManualSource +from telliot_feeds.sources.manual.diva_manual_source import DivaManualSource pool_id = None diff --git a/src/telliot_feeds/feeds/numeric_api_response_manual_feed.py b/src/telliot_feeds/feeds/numeric_api_response_manual_feed.py index b1ddf06c..dfb411d4 100644 --- a/src/telliot_feeds/feeds/numeric_api_response_manual_feed.py +++ b/src/telliot_feeds/feeds/numeric_api_response_manual_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.numeric_api_response_query import NumericApiResponse -from telliot_feeds.sources.manual_sources.numeric_api_manual_response import NumericApiManualResponse +from telliot_feeds.sources.manual.numeric_api_manual_response import NumericApiManualResponse url = None diff --git a/src/telliot_feeds/feeds/spot_price_manual_feed.py b/src/telliot_feeds/feeds/spot_price_manual_feed.py index ee7c3589..06b5e18c 100644 --- a/src/telliot_feeds/feeds/spot_price_manual_feed.py +++ b/src/telliot_feeds/feeds/spot_price_manual_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.price.spot_price import SpotPrice -from telliot_feeds.sources.manual_sources.spot_price_input_source import SpotPriceManualSource +from telliot_feeds.sources.manual.spot_price_input_source import SpotPriceManualSource # Using defaults to bypass an error handle for feeds w/out non-manual sources asset: str = "eth" diff --git a/src/telliot_feeds/feeds/string_query_feed.py b/src/telliot_feeds/feeds/string_query_feed.py index 4055c0e4..06e48b90 100644 --- a/src/telliot_feeds/feeds/string_query_feed.py +++ b/src/telliot_feeds/feeds/string_query_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.string_query import StringQuery -from telliot_feeds.sources.manual_sources.string_query_manual_source import StringQueryManualSource +from telliot_feeds.sources.manual.string_query_manual_source import StringQueryManualSource text = None diff --git a/src/telliot_feeds/feeds/tellor_rng_manual_feed.py b/src/telliot_feeds/feeds/tellor_rng_manual_feed.py index 3d9715d7..76dacec9 100644 --- a/src/telliot_feeds/feeds/tellor_rng_manual_feed.py +++ b/src/telliot_feeds/feeds/tellor_rng_manual_feed.py @@ -1,6 +1,6 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.tellor_rng import TellorRNG -from telliot_feeds.sources.manual_sources.tellor_rng_manual_source import TellorRNGManualInputSource +from 
telliot_feeds.sources.manual.tellor_rng_manual_source import TellorRNGManualInputSource timestamp = None diff --git a/src/telliot_feeds/feeds/twap_manual_feed.py b/src/telliot_feeds/feeds/twap_manual_feed.py index a8b7e957..6553df03 100644 --- a/src/telliot_feeds/feeds/twap_manual_feed.py +++ b/src/telliot_feeds/feeds/twap_manual_feed.py @@ -2,7 +2,7 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.price.twap import TWAP -from telliot_feeds.sources.manual_sources.twap_manual_input_source import TWAPManualSource +from telliot_feeds.sources.manual.twap_manual_input_source import TWAPManualSource # Using defaults to bypass an error handle for auto feeds and unsupported currencies diff --git a/src/telliot_feeds/feeds/uspce_feed.py b/src/telliot_feeds/feeds/uspce_feed.py index 16f3c636..84d5b698 100644 --- a/src/telliot_feeds/feeds/uspce_feed.py +++ b/src/telliot_feeds/feeds/uspce_feed.py @@ -1,7 +1,7 @@ """Example datafeed used by USPCEReporter.""" from telliot_feeds.datafeed import DataFeed from telliot_feeds.queries.ampleforth.uspce import AmpleforthUSPCE -from telliot_feeds.sources.uspce import USPCESource +from telliot_feeds.sources.manual.uspce import USPCESource uspce_feed = DataFeed(query=AmpleforthUSPCE(), source=USPCESource()) diff --git a/src/telliot_feeds/sources/manual_sources/__init__.py b/src/telliot_feeds/sources/manual/__init__.py similarity index 100% rename from src/telliot_feeds/sources/manual_sources/__init__.py rename to src/telliot_feeds/sources/manual/__init__.py diff --git a/src/telliot_feeds/sources/manual_sources/daily_volatility_manual_source.py b/src/telliot_feeds/sources/manual/daily_volatility_manual_source.py similarity index 80% rename from src/telliot_feeds/sources/manual_sources/daily_volatility_manual_source.py rename to src/telliot_feeds/sources/manual/daily_volatility_manual_source.py index 57e22100..67d4e0b9 100644 --- a/src/telliot_feeds/sources/manual_sources/daily_volatility_manual_source.py +++ b/src/telliot_feeds/sources/manual/daily_volatility_manual_source.py @@ -3,6 +3,8 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import datetime_now_utc from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -21,7 +23,7 @@ def parse_user_val(self) -> float: num = None while num is None: - usr_inpt = input() + usr_inpt = input_timeout() try: inpt = float(usr_inpt) @@ -35,7 +37,7 @@ def parse_user_val(self) -> float: print(f"\nVolatility index (with 18 decimals of precision) to be submitted on chain: {inpt*10**18:.0f}") print("Press [ENTER] to confirm.") - _ = input() + _ = input_timeout() num = inpt @@ -47,7 +49,11 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: Returns: Current time-stamped value """ - response = self.parse_user_val() + try: + response = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (response, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/manual_sources/diva_manual_source.py b/src/telliot_feeds/sources/manual/diva_manual_source.py similarity index 85% rename from src/telliot_feeds/sources/manual_sources/diva_manual_source.py rename to src/telliot_feeds/sources/manual/diva_manual_source.py index 63024356..544fb854 100644 --- 
a/src/telliot_feeds/sources/manual_sources/diva_manual_source.py +++ b/src/telliot_feeds/sources/manual/diva_manual_source.py @@ -4,6 +4,8 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import datetime_now_utc from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -16,7 +18,7 @@ def get_price_from_user(param: str) -> float: print(f"Type price of {param} and press [ENTER]") while param_price is None: - inpt = input() + inpt = input_timeout() try: price = float(inpt) except ValueError: @@ -52,7 +54,7 @@ def parse_user_val(self) -> list[float]: ) print("Press [ENTER] to confirm.") - _ = input() + _ = input_timeout() return prices @@ -62,7 +64,11 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[list[float]]: Returns: Current time-stamped value """ - response = self.parse_user_val() + try: + response = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (response, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/manual_sources/numeric_api_manual_response.py b/src/telliot_feeds/sources/manual/numeric_api_manual_response.py similarity index 71% rename from src/telliot_feeds/sources/manual_sources/numeric_api_manual_response.py rename to src/telliot_feeds/sources/manual/numeric_api_manual_response.py index 47fef80e..f2ddf426 100644 --- a/src/telliot_feeds/sources/manual_sources/numeric_api_manual_response.py +++ b/src/telliot_feeds/sources/manual/numeric_api_manual_response.py @@ -1,8 +1,10 @@ from dataclasses import dataclass from telliot_feeds.datasource import DataSource -from telliot_feeds.dtypes.datapoint import DataPoint from telliot_feeds.dtypes.datapoint import datetime_now_utc +from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -21,10 +23,10 @@ def parse_user_val(self) -> float: val = None while val is None: - usr_inpt = input() + usr_inpt = input_timeout() try: - usr_inpt = float(usr_inpt) # type: ignore + usr_inpt = float(usr_inpt) except ValueError: print("Invalid input. Enter a numerical value (float).") continue @@ -35,19 +37,23 @@ def parse_user_val(self) -> float: ) print("Press [ENTER] to confirm.") - _ = input() + _ = input_timeout() val = usr_inpt return val - async def fetch_new_datapoint(self) -> DataPoint[float]: + async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: """Update current value with time-stamped value fetched from user input. 
Returns: Current time-stamped value """ - response = self.parse_user_val() + try: + response = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (response, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/manual_sources/spot_price_input_source.py b/src/telliot_feeds/sources/manual/spot_price_input_source.py similarity index 64% rename from src/telliot_feeds/sources/manual_sources/spot_price_input_source.py rename to src/telliot_feeds/sources/manual/spot_price_input_source.py index 13f7b108..ce550c97 100644 --- a/src/telliot_feeds/sources/manual_sources/spot_price_input_source.py +++ b/src/telliot_feeds/sources/manual/spot_price_input_source.py @@ -1,8 +1,11 @@ +import asyncio from dataclasses import dataclass from telliot_feeds.datasource import DataSource -from telliot_feeds.dtypes.datapoint import DataPoint from telliot_feeds.dtypes.datapoint import datetime_now_utc +from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -21,10 +24,10 @@ def parse_user_val(self) -> float: spot = None while spot is None: - usr_inpt = input() + usr_inpt = input_timeout() try: - usr_inpt = float(usr_inpt) # type: ignore + usr_inpt = float(usr_inpt) except ValueError: print("Invalid input. Enter decimal value (float).") continue @@ -32,19 +35,23 @@ def parse_user_val(self) -> float: print(f"\nSpot price (with 18 decimals of precision) to be submitted on chain: {usr_inpt*10**18:.0f}") print("Press [ENTER] to confirm.") - _ = input() + _ = input_timeout() spot = usr_inpt return spot - async def fetch_new_datapoint(self) -> DataPoint[float]: + async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: """Update current value with time-stamped value fetched from user input. 
Returns: Current time-stamped value """ - price = self.parse_user_val() + try: + price = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (price, datetime_now_utc()) self.store_datapoint(datapoint) @@ -52,3 +59,9 @@ async def fetch_new_datapoint(self) -> DataPoint[float]: logger.info(f"Spot price {datapoint[0]} retrieved at time {datapoint[1]}") return datapoint + + +if __name__ == "__main__": + spot = SpotPriceManualSource() + v, t = asyncio.run(spot.fetch_new_datapoint()) + print("datapoint:", v, t) diff --git a/src/telliot_feeds/sources/manual_sources/string_query_manual_source.py b/src/telliot_feeds/sources/manual/string_query_manual_source.py similarity index 52% rename from src/telliot_feeds/sources/manual_sources/string_query_manual_source.py rename to src/telliot_feeds/sources/manual/string_query_manual_source.py index 57208e8d..bd9ad26c 100644 --- a/src/telliot_feeds/sources/manual_sources/string_query_manual_source.py +++ b/src/telliot_feeds/sources/manual/string_query_manual_source.py @@ -3,6 +3,12 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import datetime_now_utc from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred +from telliot_feeds.utils.log import get_logger + + +logger = get_logger(__name__) class StringQueryManualSource(DataSource[Optional[str]]): @@ -10,11 +16,19 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[str]: print("Type your string query response:\n") - usr_inpt = input() + try: + usr_inpt = input_timeout() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None print(f"\nString query response to be submitted to oracle->: {usr_inpt}") print("Press [ENTER] to confirm.") - _ = input() + try: + _ = input_timeout() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user to confirm") + return None, None datapoint = (usr_inpt, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/manual_sources/tellor_rng_manual_source.py b/src/telliot_feeds/sources/manual/tellor_rng_manual_source.py similarity index 84% rename from src/telliot_feeds/sources/manual_sources/tellor_rng_manual_source.py rename to src/telliot_feeds/sources/manual/tellor_rng_manual_source.py index dcc93653..bece6c46 100644 --- a/src/telliot_feeds/sources/manual_sources/tellor_rng_manual_source.py +++ b/src/telliot_feeds/sources/manual/tellor_rng_manual_source.py @@ -6,8 +6,11 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import datetime_now_utc from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger + logger = get_logger(__name__) @@ -21,7 +24,7 @@ def parse_user_input(self) -> bytes: response = None while response is None: - user_input = input() + user_input = input_timeout() if len(user_input) < 2: print( "Invalid input! 
Not enough characters, " @@ -41,7 +44,7 @@ def parse_user_input(self) -> bytes: continue print(f"\nTellorRNG value to be submitted on chain: {user_input}") print("Press [ENTER] to continue") - _ = input() + _ = input_timeout() response = val return response @@ -52,7 +55,11 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[bytes]: Returns: Current time-stamped value """ - response = self.parse_user_input() + try: + response = self.parse_user_input() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (response, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/manual_sources/twap_manual_input_source.py b/src/telliot_feeds/sources/manual/twap_manual_input_source.py similarity index 80% rename from src/telliot_feeds/sources/manual_sources/twap_manual_input_source.py rename to src/telliot_feeds/sources/manual/twap_manual_input_source.py index 541d8cb0..be153e9b 100644 --- a/src/telliot_feeds/sources/manual_sources/twap_manual_input_source.py +++ b/src/telliot_feeds/sources/manual/twap_manual_input_source.py @@ -3,8 +3,11 @@ from telliot_feeds.datasource import DataSource from telliot_feeds.dtypes.datapoint import datetime_now_utc from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger + logger = get_logger(__name__) @@ -20,7 +23,7 @@ def parse_user_val(self) -> float: num = None while num is None: - usr_inpt = input() + usr_inpt = input_timeout() try: inpt = float(usr_inpt) @@ -34,7 +37,7 @@ def parse_user_val(self) -> float: print(f"\nTWAP value (with 18 decimals of precision) to be submitted on chain: {inpt*10**18:.0f}") print("Press [ENTER] to confirm.") - _ = input() + _ = input_timeout() num = inpt @@ -46,7 +49,11 @@ async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: Returns: Current time-stamped value """ - response = self.parse_user_val() + try: + response = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (response, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/sources/uspce.py b/src/telliot_feeds/sources/manual/uspce.py similarity index 68% rename from src/telliot_feeds/sources/uspce.py rename to src/telliot_feeds/sources/manual/uspce.py index a0709c98..504fca3c 100644 --- a/src/telliot_feeds/sources/uspce.py +++ b/src/telliot_feeds/sources/manual/uspce.py @@ -1,8 +1,10 @@ from dataclasses import dataclass from telliot_feeds.datasource import DataSource -from telliot_feeds.dtypes.datapoint import DataPoint from telliot_feeds.dtypes.datapoint import datetime_now_utc +from telliot_feeds.dtypes.datapoint import OptionalDataPoint +from telliot_feeds.utils.input_timeout import input_timeout +from telliot_feeds.utils.input_timeout import TimeoutOccurred from telliot_feeds.utils.log import get_logger @@ -16,7 +18,7 @@ class USPCESource(DataSource[float]): def parse_user_val(test_input: str) -> float: """Parse USPCE value from user input.""" # This arg is to avoid a TypeError when the default - # input() method is overriden in test_source.py. + # input_timeout() method is overriden in test_source.py. # The error says this method expects no params, # but is passed one. 
TODO: fix _ = test_input @@ -26,28 +28,32 @@ def parse_user_val(test_input: str) -> float: uspce = None while uspce is None: - inpt = input() + inpt = input_timeout() try: - inpt = float(inpt) # type: ignore + inpt = float(inpt) except ValueError: print("Invalid input. Enter decimal value (float).") continue print(f"Submitting value: {inpt}\nPress [ENTER] to confirm.") - _ = input() + _ = input_timeout() uspce = inpt return uspce - async def fetch_new_datapoint(self) -> DataPoint[float]: + async def fetch_new_datapoint(self) -> OptionalDataPoint[float]: """Update current value with time-stamped value fetched from user input. Returns: Current time-stamped value """ - uspce = self.parse_user_val() + try: + uspce = self.parse_user_val() + except TimeoutOccurred: + logger.info("Timeout occurred while waiting for user input") + return None, None datapoint = (uspce, datetime_now_utc()) self.store_datapoint(datapoint) diff --git a/src/telliot_feeds/utils/input_timeout.py b/src/telliot_feeds/utils/input_timeout.py new file mode 100644 index 00000000..c2db8b94 --- /dev/null +++ b/src/telliot_feeds/utils/input_timeout.py @@ -0,0 +1,96 @@ +"""" +Original code taken from unmaintained package: +https://github.com/johejo/inputimeout/blob/master/inputimeout/inputimeout.py +""" +import sys +from typing import Any + +DEFAULT_TIMEOUT = 600.0 # 10 mins +# DEFAULT_TIMEOUT = 10.0 # 10 secs +INTERVAL = 0.05 + +SP = " " +CR = "\r" +LF = "\n" +CRLF = CR + LF + + +class TimeoutOccurred(Exception): + pass + + +def echo(string: str) -> None: + sys.stdout.write(string) + sys.stdout.flush() + + +def posix_inputimeout(prompt: str = "", timeout: float = DEFAULT_TIMEOUT) -> Any: + echo(prompt) + sel = selectors.DefaultSelector() + sel.register(sys.stdin, selectors.EVENT_READ) + events = sel.select(timeout) + + if events: + key, _ = events[0] + return key.fileobj.readline().rstrip(LF) # type: ignore + else: + echo(LF) + termios.tcflush(sys.stdin, termios.TCIFLUSH) + raise TimeoutOccurred + + +def win_inputimeout(prompt: str = "", timeout: float = DEFAULT_TIMEOUT) -> str: + echo(prompt) + begin = time.monotonic() + end = begin + timeout + line = "" + + while time.monotonic() < end: + if msvcrt.kbhit(): # type: ignore + c = msvcrt.getwche() # type: ignore + if c in (CR, LF): + echo(CRLF) + return line + if c == "\003": + raise KeyboardInterrupt + if c == "\b": + line = line[:-1] + cover = SP * len(prompt + line + SP) + echo("".join([CR, cover, CR, prompt, line])) + else: + line += c + time.sleep(INTERVAL) + + echo(CRLF) + raise TimeoutOccurred + + +try: + import msvcrt + +except ImportError: + import selectors + import termios + + input_timeout_func = posix_inputimeout + +else: + import time + + input_timeout_func = win_inputimeout + + +class InputTimeout: + def __call__(self, prompt: str = "", timeout: float = DEFAULT_TIMEOUT) -> Any: + return input_timeout_func(prompt, timeout) + + +input_timeout = InputTimeout() + + +if __name__ == "__main__": + try: + user_input = input_timeout("Type something: ", 5.0) + print(f"You typed: {user_input}") + except TimeoutOccurred: + print("Timeout occurred")
Handle tips that require manual input, skip manual data sources after 10 min Query IDs like `[uspce-legacy|gas-price-oracle-example|snapshot-proposal-example|diva-protocol-example|string-query-example]` may require manual inputs from the reporter. Currently, when these queries are tipped, telliot will pick them up and want to submit for them. This could be an issue for reporters who are not actively watching their reporters: if they are sleeping or away from their computer, telliot gets stuck on a prompt for manual input, and opportunities to automatically submit for other tips could be lost. Should there be a flag in telliot to ignore tips/requests for QueryTypes that require manual inputs?
Thanks for this @spuddyminer. I think it should default to running continuously, so adding a flag to allow halts for manual input sounds good. Fix: wait for manual input for 10 minutes and, if there is no answer, skip (a sketch of the timeout helper follows).
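For reference, a minimal sketch of the input-with-timeout pattern the patch introduces in `utils/input_timeout.py` (POSIX branch only; the shipped helper also covers Windows via `msvcrt`). The 600-second default mirrors the 10-minute skip described above:

```python
import selectors
import sys

DEFAULT_TIMEOUT = 600.0  # 10 minutes, matching the skip window described above


class TimeoutOccurred(Exception):
    """Raised when no input arrives before the timeout elapses."""


def input_timeout(prompt: str = "", timeout: float = DEFAULT_TIMEOUT) -> str:
    # Echo the prompt, then wait until stdin is readable or the timeout expires.
    sys.stdout.write(prompt)
    sys.stdout.flush()
    sel = selectors.DefaultSelector()
    sel.register(sys.stdin, selectors.EVENT_READ)
    try:
        events = sel.select(timeout)
    finally:
        sel.close()
    if not events:
        raise TimeoutOccurred
    return sys.stdin.readline().rstrip("\n")
```

Each manual source then wraps its `parse_user_val()` call in `try/except TimeoutOccurred` and returns `(None, None)`, so the reporter loop moves on instead of blocking forever.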
2022-12-21T15:25:32
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-467
24e038483f2679e537af6ae5a80d6a33b4ead60f
diff --git a/setup.cfg b/setup.cfg index b25ebe72..3ca90529 100644 --- a/setup.cfg +++ b/setup.cfg @@ -50,4 +50,4 @@ where = src [options.entry_points] console_scripts = - telliot-feeds = telliot_feeds.cli.main:main + telliot = telliot_feeds.cli.main:main diff --git a/src/telliot_feeds/cli/commands/config.py b/src/telliot_feeds/cli/commands/config.py new file mode 100644 index 00000000..7a60e2c9 --- /dev/null +++ b/src/telliot_feeds/cli/commands/config.py @@ -0,0 +1,24 @@ +import click +import yaml +from telliot_core.apps.telliot_config import TelliotConfig + + [email protected]() +def config() -> None: + """Manage Telliot configuration.""" + pass + + [email protected]() +def init() -> None: + """Create initial configuration files.""" + _ = TelliotConfig() + + [email protected]() +def show() -> None: + """Show current configuration.""" + cfg = TelliotConfig() + state = cfg.get_state() + + print(yaml.dump(state, sort_keys=False)) diff --git a/src/telliot_feeds/cli/main.py b/src/telliot_feeds/cli/main.py index 1cb774e7..460e1990 100644 --- a/src/telliot_feeds/cli/main.py +++ b/src/telliot_feeds/cli/main.py @@ -9,6 +9,7 @@ from click.core import Context from telliot_feeds.cli.commands.catalog import catalog +from telliot_feeds.cli.commands.config import config from telliot_feeds.cli.commands.integrations import integrations from telliot_feeds.cli.commands.query import query from telliot_feeds.cli.commands.report import report @@ -67,6 +68,7 @@ def main( main.add_command(catalog) main.add_command(settle) main.add_command(integrations) +main.add_command(config) if __name__ == "__main__": main() diff --git a/tox.ini b/tox.ini index a4404648..b4c6bc87 100644 --- a/tox.ini +++ b/tox.ini @@ -23,7 +23,7 @@ commands = pip install PyYAML==6.0 pip install multicall pip install simple-term-menu - telliot --version + telliot-core --version - pytest --cov --cov-report xml [testenv:style]
Make setup easier - [ ] change repo/pkg name to telliot - [x] change this package's CLI entry point to telliot and telliot-core's entry point to telliot-core - [x] move config cmd to telliot/telliot-feeds - [x] have the CLI prompt the user on install for private key and RPC endpoint - [x] prompt the user to set up chained-accounts as well - [x] all setup through the CLI, reducing the need to navigate the file system to the /telliot folder
Some of this is handled in #377. The CLI prompt for whether you want to change the config has a confusing default (Y/N, default is N).
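For context, the new `telliot config show` command boils down to dumping the config state as YAML; a minimal standalone equivalent, assuming telliot-core's `TelliotConfig` API as used in the diff:

```python
import yaml
from telliot_core.apps.telliot_config import TelliotConfig

# Instantiating TelliotConfig creates the default config files if they
# are missing -- which is all the new `telliot config init` command does.
cfg = TelliotConfig()

# `telliot config show` then prints the current state as YAML.
print(yaml.dump(cfg.get_state(), sort_keys=False))
```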
2022-12-01T14:33:35
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-455
a6630aa345ef0a0d4eefe156a2298af73aeb6e20
diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index 45c087aa..0a982792 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -26,7 +26,7 @@ from telliot_feeds.reporters.flashbot import FlashbotsReporter from telliot_feeds.reporters.rng_interval import RNGReporter from telliot_feeds.reporters.tellor_360 import Tellor360Reporter -from telliot_feeds.reporters.tellorflex import TellorFlexReporter +from telliot_feeds.reporters.tellor_flex import TellorFlexReporter from telliot_feeds.utils.cfg import check_endpoint from telliot_feeds.utils.cfg import setup_config from telliot_feeds.utils.log import get_logger @@ -36,7 +36,6 @@ logger = get_logger(__name__) -TELLOR_X_CHAINS = (1, 4, 5) STAKE_MESSAGE = ( "\U00002757Telliot will automatically stake more TRB " diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index 9ba6fa08..19655d8b 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -16,13 +16,12 @@ from telliot_core.apps.core import TelliotCore from telliot_core.cli.utils import cli_core +from telliot_feeds.constants import DIVA_PROTOCOL_CHAINS from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import DATAFEED_BUILDER_MAPPING from telliot_feeds.queries.abi_query import AbiQuery -DIVA_PROTOCOL_CHAINS = (137, 80001, 3, 5) - load_dotenv() diff --git a/src/telliot_feeds/constants.py b/src/telliot_feeds/constants.py new file mode 100644 index 00000000..f30e7299 --- /dev/null +++ b/src/telliot_feeds/constants.py @@ -0,0 +1,5 @@ +DIVA_PROTOCOL_CHAINS = (137, 80001, 3, 5) + +POLYGON_CHAINS = {137, 80001} + +ETHEREUM_CHAINS = {1, 3, 5} diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py index c2caa3d1..9909258c 100644 --- a/src/telliot_feeds/reporters/tellor_360.py +++ b/src/telliot_feeds/reporters/tellor_360.py @@ -13,7 +13,7 @@ from telliot_feeds.feeds import CATALOG_FEEDS from telliot_feeds.feeds import DataFeed from telliot_feeds.reporters.rewards.time_based_rewards import get_time_based_rewards -from telliot_feeds.reporters.tellorflex import TellorFlexReporter +from telliot_feeds.reporters.tellor_flex import TellorFlexReporter from telliot_feeds.reporters.tips.suggest_datafeed import get_feed_and_tip from telliot_feeds.reporters.tips.tip_amount import fetch_feed_tip from telliot_feeds.utils.log import get_logger diff --git a/src/telliot_feeds/reporters/tellorflex.py b/src/telliot_feeds/reporters/tellor_flex.py similarity index 96% rename from src/telliot_feeds/reporters/tellorflex.py rename to src/telliot_feeds/reporters/tellor_flex.py index d6755a98..e36cad7f 100644 --- a/src/telliot_feeds/reporters/tellorflex.py +++ b/src/telliot_feeds/reporters/tellor_flex.py @@ -17,13 +17,13 @@ from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import CATALOG_FEEDS -from telliot_feeds.feeds.matic_usd_feed import matic_usd_median_feed from telliot_feeds.feeds.trb_usd_feed import trb_usd_median_feed from telliot_feeds.reporters.interval import IntervalReporter from telliot_feeds.reporters.reporter_autopay_utils import autopay_suggested_report from telliot_feeds.reporters.reporter_autopay_utils import CATALOG_QUERY_IDS from telliot_feeds.reporters.reporter_autopay_utils import get_feed_tip from telliot_feeds.utils.log import get_logger +from telliot_feeds.utils.reporter_utils import get_native_token_feed from telliot_feeds.utils.reporter_utils import tellor_suggested_report @@ 
-82,7 +82,9 @@ def __init__( assert self.acct_addr == to_checksum_address(self.account.address) async def fetch_gas_price(self, speed: Optional[str] = None) -> Optional[int]: - """Fetch estimated gas prices for Polygon mainnet.""" + """Fetch estimated gas prices. + + Expected to return gas price in gwei.""" return await legacy_gas_station(chain_id=self.chain_id, speed=speed) # type: ignore async def in_dispute(self, new_stake_amount: Any) -> bool: @@ -268,11 +270,12 @@ async def ensure_profitable( status = ResponseStatus() tip = self.autopaytip # Fetch token prices in USD - price_feeds = [matic_usd_median_feed, trb_usd_median_feed] + native_token_feed = get_native_token_feed(self.chain_id) + price_feeds = [native_token_feed, trb_usd_median_feed] _ = await asyncio.gather(*[feed.source.fetch_new_datapoint() for feed in price_feeds]) - price_matic_usd = matic_usd_median_feed.source.latest[0] + price_native_token = native_token_feed.source.latest[0] price_trb_usd = trb_usd_median_feed.source.latest[0] - if price_matic_usd is None or price_trb_usd is None: + if price_native_token is None or price_trb_usd is None: logger.warning("Unable to fetch token price") return None @@ -305,7 +308,7 @@ async def ensure_profitable( """ ) - costs = self.gas_limit * self.max_fee + costs = self.gas_limit * self.max_fee # in gwei # Using transaction type 0 (legacy) else: @@ -329,7 +332,7 @@ async def ensure_profitable( # Calculate profit rev_usd = tip / 1e18 * price_trb_usd - costs_usd = costs / 1e9 * price_matic_usd + costs_usd = costs / 1e9 * price_native_token # convert gwei costs to eth, then to usd profit_usd = rev_usd - costs_usd logger.info(f"Estimated profit: ${round(profit_usd, 2)}") logger.info(f"tip price: {round(rev_usd, 2)}, gas costs: {costs_usd}") diff --git a/src/telliot_feeds/sources/etherscan_gas.py b/src/telliot_feeds/sources/etherscan_gas.py index fde63d61..76d962fb 100644 --- a/src/telliot_feeds/sources/etherscan_gas.py +++ b/src/telliot_feeds/sources/etherscan_gas.py @@ -40,7 +40,9 @@ class EtherscanGasPriceSource(DataSource[EtherscanGasPrice]): api_key: str = "" async def fetch_new_datapoint(self) -> OptionalDataPoint[EtherscanGasPrice]: - """Fetch new value and store it for later retrieval""" + """Fetch new value and store it for later retrieval + + Gase prices are returned in gwei.""" headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0"} msg = "https://api.etherscan.io/api?module=gastracker&action=gasoracle" diff --git a/src/telliot_feeds/utils/reporter_utils.py b/src/telliot_feeds/utils/reporter_utils.py index 6be70315..872abfe1 100644 --- a/src/telliot_feeds/utils/reporter_utils.py +++ b/src/telliot_feeds/utils/reporter_utils.py @@ -16,6 +16,11 @@ from telliot_core.tellor.tellorx.oracle import TellorxOracleContract from web3 import Web3 +from telliot_feeds.constants import ETHEREUM_CHAINS +from telliot_feeds.constants import POLYGON_CHAINS +from telliot_feeds.datafeed import DataFeed +from telliot_feeds.feeds.eth_usd_feed import eth_usd_median_feed +from telliot_feeds.feeds.matic_usd_feed import matic_usd_median_feed from telliot_feeds.queries.query_catalog import query_catalog from telliot_feeds.utils.log import get_logger @@ -142,3 +147,13 @@ def prompt_for_abi() -> Any: abi = json.load(f) return abi return None + + +def get_native_token_feed(chain_id: int) -> DataFeed[float]: + """Return native token feed for a given chain ID.""" + if chain_id in ETHEREUM_CHAINS: + return eth_usd_median_feed + elif chain_id in POLYGON_CHAINS: + return 
matic_usd_median_feed + else: + raise ValueError(f"Cannot fetch native token feed. Invalid chain ID: {chain_id}")
Fix Profitability Calculation? Problem encountered when trying to listen for Autopay tips on mainnet. Expected behavior: telliot listens for tips and submits when it's profitable net of gas costs. Observed behavior: telliot submits immediately for the small time-based reward. Start command: `telliot-feeds -a main report -p 100 -wp 600` Versions: telliot-core commit hash #311909100be6110418bd08a5a1dacd340fbc145d telliot-feeds commit hash #02647ca5b2c2db2e6e2142bc89ee9c4c49c62bc1 log: ``` INFO | telliot_feeds.sources.price_aggregator | Number of Sources used for this report are: 2 INFO | telliot_feeds.reporters.tellorflex | tips: 0.23333333333333334 TRB gas limit: 350000 legacy gas price: 53 INFO | telliot_feeds.reporters.tellorflex | Estimated profit: $2.71 INFO | telliot_feeds.reporters.tellorflex | tip price: 2.73, gas costs: 0.01749265 INFO | telliot_feeds.reporters.tellorflex | Estimated percent profit: 15513.22% INFO | telliot_feeds.sources.price_aggregator | Running median on [16797.66, 16788.64, 16773.02, 16776.14, 16777.3] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 16777.3 reported at time 2022-11-15 19:52:21.212517+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of Sources used for this report are: 5 DEBUG | telliot_feeds.reporters.interval | Sending submitValue transaction INFO | telliot_feeds.reporters.interval | View reported data: https://etherscan.io/tx/NNNHHIJIJKJOJKHLKH ```
reproduced: ```console $ telliot-feeds -a personal-2 report -p 100 -wp 600 INFO | telliot_core | telliot-core 0.1.7dev0 INFO | telliot_core | Connected to eth-mainnet [default account: personal-2], time: 2022-11-23 06:56:18.783310 Your current settings... Your chain id: 1 Your mainnet endpoint: - provider: Infura - RPC url: wss://mainnet.infura.io/ws/v3/**** - explorer url: https://etherscan.io Your account: personal-2 at address **** Proceed with current settings (y) or update (n)? [Y/n]: Enter password for personal-2 account: Reporting with synchronized queries Current chain ID: 1 Expected percent profit: 100.0% Transaction type: 0 Gas Limit: 350000 Legacy gas price (gwei): None Max fee (gwei): None Priority fee (gwei): None Gas price speed: fast Desired stake amount: 10.0 Minimum native token balance: 0.25 ETH Press [ENTER] to confirm settings. INFO | telliot_feeds.reporters.tellorflex | Reporting with account: **** INFO | telliot_feeds.reporters.tellor_360 | Current Oracle stakeAmount: 130.15184381778744 INFO | telliot_feeds.reporters.tellor_360 | STAKER INFO start date: 0 stake_balance: 0.0 locked_balance: 0 last report: 0 reports count: 0 INFO | telliot_feeds.reporters.tellor_360 | Approving and depositing stake... INFO | telliot_feeds.reporters.tellor_360 | Current wallet TRB balance: **** Enter encryption password for personal-2: INFO | telliot_core.contract.contract | approve transaction succeeded. (https://etherscan.io/tx/****) INFO | telliot_core.contract.contract | depositStake transaction succeeded. (https://etherscan.io/tx/****) INFO | telliot_feeds.reporters.tips.listener.funded_feeds | No funded feeds returned by autopay function call INFO | telliot_feeds.reporters.interval | Current query: {"type":"SpotPrice","asset":"sushi","currency":"usd"} ERROR | telliot_feeds.sources.price.spot.bittrex | Unable to decode Bittrex JSON INFO | telliot_feeds.sources.price_aggregator | Running median on [0.858546, 0.8536, 0.8524, 0.85154, 0.8518] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 0.8524 reported at time 2022-11-23 12:57:13.676513+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 5 INFO | telliot_feeds.sources.price_aggregator | Running median on [11.25, 11.2] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 11.225 reported at time 2022-11-23 12:57:13.676733+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of sources used in aggregate: 2 INFO | telliot_feeds.reporters.tellorflex | tips: 1.0 TRB gas limit: 350000 legacy gas price: 53 INFO | telliot_feeds.reporters.tellorflex | Estimated profit: $11.21 INFO | telliot_feeds.reporters.tellorflex | tip price: 11.22, gas costs: 0.015812020000000003 INFO | telliot_feeds.reporters.tellorflex | Estimated percent profit: 70890.3% INFO | telliot_feeds.reporters.interval | TESTING... INFO | telliot_feeds.reporters.interval | Sleeping for 600 seconds ``` I think I found the problem: - Tellor360 reporter inherits from TellorFlex reporter - Tellor360 is using the profit calculation of TellorFlex reporter - TellorFlex reporter's `ensure_profitable` method is using the price of MATIC, even though we're trying to report on ethereum mainnet: ```python # Fetch token prices in USD price_feeds = [matic_usd_median_feed, trb_usd_median_feed] _ = await asyncio.gather(*[feed.source.fetch_new_datapoint() for feed in price_feeds]) price_matic_usd = matic_usd_median_feed.source.latest[0] ... costs_usd = costs / 1e9 * price_matic_usd ... ```
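The patch above resolves this by keying the native-token feed on chain ID rather than hard-coding MATIC. A condensed sketch mirroring the `get_native_token_feed` helper the patch adds (the feed imports and chain-ID sets come straight from the diff):

```python
from telliot_feeds.feeds.eth_usd_feed import eth_usd_median_feed
from telliot_feeds.feeds.matic_usd_feed import matic_usd_median_feed

ETHEREUM_CHAINS = {1, 3, 5}
POLYGON_CHAINS = {137, 80001}


def get_native_token_feed(chain_id: int):
    """Return the USD price feed for the chain's native gas token."""
    if chain_id in ETHEREUM_CHAINS:
        return eth_usd_median_feed
    if chain_id in POLYGON_CHAINS:
        return matic_usd_median_feed
    raise ValueError(f"Cannot fetch native token feed. Invalid chain ID: {chain_id}")
```

`ensure_profitable` then computes `costs_usd = costs / 1e9 * price_native_token`, so gas spent on Ethereum mainnet is priced in ETH rather than MATIC.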
2022-11-23T13:40:56
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-443
02647ca5b2c2db2e6e2142bc89ee9c4c49c62bc1
diff --git a/src/telliot_feeds/integrations/diva_protocol/report.py b/src/telliot_feeds/integrations/diva_protocol/report.py index db01c596..da5db366 100644 --- a/src/telliot_feeds/integrations/diva_protocol/report.py +++ b/src/telliot_feeds/integrations/diva_protocol/report.py @@ -28,6 +28,7 @@ from telliot_feeds.queries.diva_protocol import DIVAProtocol from telliot_feeds.reporters.tellor_360 import Tellor360Reporter from telliot_feeds.utils.log import get_logger +from telliot_feeds.utils.reporter_utils import has_native_token_funds logger = get_logger(__name__) @@ -352,9 +353,10 @@ async def report(self, report_count: Optional[int] = None) -> None: if not online: logger.warning("Unable to connect to the internet!") else: - _, _ = await self.report_once() - await asyncio.sleep(self.wait_before_settle) - _ = await self.settle_pools() + if has_native_token_funds(self.acct_addr, self.endpoint._web3): + _, _ = await self.report_once() + await asyncio.sleep(self.wait_before_settle) + _ = await self.settle_pools() logger.info(f"Sleeping for {self.wait_period} seconds") await asyncio.sleep(self.wait_period) diff --git a/src/telliot_feeds/reporters/interval.py b/src/telliot_feeds/reporters/interval.py index a4613cc5..3fddc50b 100644 --- a/src/telliot_feeds/reporters/interval.py +++ b/src/telliot_feeds/reporters/interval.py @@ -25,6 +25,7 @@ from telliot_feeds.feeds.trb_usd_feed import trb_usd_median_feed from telliot_feeds.sources.etherscan_gas import EtherscanGasPriceSource from telliot_feeds.utils.log import get_logger +from telliot_feeds.utils.reporter_utils import has_native_token_funds from telliot_feeds.utils.reporter_utils import is_online from telliot_feeds.utils.reporter_utils import tellor_suggested_report @@ -51,6 +52,8 @@ def __init__( priority_fee: int = 5, legacy_gas_price: Optional[int] = None, gas_price_speed: str = "fast", + wait_period: int = 10, + min_native_token_balance: int = 10**18, ) -> None: self.endpoint = endpoint @@ -70,6 +73,8 @@ def __init__( self.gas_price_speed = gas_price_speed self.trb_usd_median_feed = trb_usd_median_feed self.eth_usd_median_feed = eth_usd_median_feed + self.wait_period = wait_period + self.min_native_token_balance = min_native_token_balance logger.info(f"Reporting with account: {self.acct_addr}") @@ -437,14 +442,21 @@ async def report_once( return tx_receipt, status - async def report(self) -> None: - """Submit latest values to the TellorX oracle every 12 hours.""" + async def report(self, report_count: Optional[int] = None) -> None: + """Submit values to Tellor oracles on an interval.""" - while True: + while report_count is None or report_count > 0: online = await self.is_online() if online: - _, _ = await self.report_once() + if has_native_token_funds( + self.acct_addr, self.endpoint._web3, min_balance=self.min_native_token_balance + ): + _, _ = await self.report_once() else: logger.warning("Unable to connect to the internet!") - await asyncio.sleep(7) + logger.info(f"Sleeping for {self.wait_period} seconds") + await asyncio.sleep(self.wait_period) + + if report_count is not None: + report_count -= 1 diff --git a/src/telliot_feeds/reporters/rng_interval.py b/src/telliot_feeds/reporters/rng_interval.py index a52a2a10..00110b7b 100644 --- a/src/telliot_feeds/reporters/rng_interval.py +++ b/src/telliot_feeds/reporters/rng_interval.py @@ -306,15 +306,3 @@ async def report_once( logger.error(status) return tx_receipt, status - - async def report(self) -> None: - """Submit latest values to the TellorFlex oracle.""" - logger.info(f"RNG reporting 
interval: {INTERVAL} seconds") - while True: - online = await self.is_online() - if online: - _, _ = await self.report_once() - else: - logger.warning("Unable to connect to the internet!") - - await asyncio.sleep(self.wait_period) diff --git a/src/telliot_feeds/reporters/tellorflex.py b/src/telliot_feeds/reporters/tellorflex.py index 525e7878..d6755a98 100644 --- a/src/telliot_feeds/reporters/tellorflex.py +++ b/src/telliot_feeds/reporters/tellorflex.py @@ -51,6 +51,7 @@ def __init__( legacy_gas_price: Optional[int] = None, gas_price_speed: str = "safeLow", wait_period: int = 7, + min_native_token_balance: int = 10**18, ) -> None: self.endpoint = endpoint @@ -73,6 +74,7 @@ def __init__( self.autopaytip = 0 self.staked_amount: Optional[float] = None self.qtag_selected = False if self.datafeed is None else True + self.min_native_token_balance = min_native_token_balance logger.info(f"Reporting with account: {self.acct_addr}") @@ -351,15 +353,3 @@ async def ensure_profitable( self.datafeed = None return status - - async def report(self) -> None: - """Submit latest values to the TellorFlex oracle.""" - - while True: - online = await self.is_online() - if online: - _, _ = await self.report_once() - else: - logger.warning("Unable to connect to the internet!") - - await asyncio.sleep(self.wait_period) diff --git a/src/telliot_feeds/utils/cfg.py b/src/telliot_feeds/utils/cfg.py index 49302335..80244543 100644 --- a/src/telliot_feeds/utils/cfg.py +++ b/src/telliot_feeds/utils/cfg.py @@ -19,7 +19,6 @@ def mainnet_config() -> Optional[TelliotConfig]: cfg = TelliotConfig() cfg.main.chain_id = 1 - logger.info("Using Mainnet Config for RNG Reporter") endpoint = cfg.get_endpoint() if "INFURA_API_KEY" in endpoint.url: diff --git a/src/telliot_feeds/utils/reporter_utils.py b/src/telliot_feeds/utils/reporter_utils.py index 4f173e1a..d2966825 100644 --- a/src/telliot_feeds/utils/reporter_utils.py +++ b/src/telliot_feeds/utils/reporter_utils.py @@ -65,7 +65,7 @@ def has_native_token_funds( account: ChecksumAddress, web3: Web3, alert: Callable[[str], None] = alert_placeholder, - min_balance: int = 1 * 10**18, + min_balance: int = 10**18, ) -> bool: """Check if an account has native token funds.""" try:
Notify or alert when getting close to insufficient funds to call function Telliot ran out of funds and other things broke. Maybe it should not attempt to call other functions when this is the case, and alert, text, or otherwise notify the reporter that funds are getting low.
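The patch gates each reporting cycle on a balance check. A rough sketch of that guard, condensed from the diff (the real helper also accepts an `alert` callback; the balance call assumes web3.py's snake_case `eth.get_balance` API):

```python
from web3 import Web3


def has_native_token_funds(account: str, web3: Web3, min_balance: int = 10**18) -> bool:
    # Skip reporting (and fire an alert) when the account's native token
    # balance drops below min_balance wei (default: 1 ETH/MATIC).
    try:
        balance = web3.eth.get_balance(account)
    except Exception as e:
        print(f"Error fetching balance for {account}: {e}")
        return False
    if balance < min_balance:
        print(f"Balance {balance / 1e18:.4f} is below the minimum; top up funds.")
        return False
    return True
```

In `report()`, the loop calls `report_once()` only when this returns `True`, then sleeps for `wait_period`, so a drained account no longer triggers a string of failed transactions.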
2022-11-16T14:45:00
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-439
3625a24d1388aa87dabf63b9ad9652158c6b9676
diff --git a/src/telliot_feeds/cli/commands/integrations.py b/src/telliot_feeds/cli/commands/integrations.py new file mode 100644 index 00000000..8b02ee12 --- /dev/null +++ b/src/telliot_feeds/cli/commands/integrations.py @@ -0,0 +1,12 @@ +import click + +from telliot_feeds.integrations.diva_protocol.cli import diva + + [email protected]() +def integrations() -> None: + """Commands for Tellor Protocol integrations.""" + pass + + +integrations.add_command(diva) diff --git a/src/telliot_feeds/cli/main.py b/src/telliot_feeds/cli/main.py index 3cfa4c46..1cb774e7 100644 --- a/src/telliot_feeds/cli/main.py +++ b/src/telliot_feeds/cli/main.py @@ -9,6 +9,7 @@ from click.core import Context from telliot_feeds.cli.commands.catalog import catalog +from telliot_feeds.cli.commands.integrations import integrations from telliot_feeds.cli.commands.query import query from telliot_feeds.cli.commands.report import report from telliot_feeds.cli.commands.settle import settle @@ -65,6 +66,7 @@ def main( main.add_command(query) main.add_command(catalog) main.add_command(settle) +main.add_command(integrations) if __name__ == "__main__": main() diff --git a/src/telliot_feeds/integrations/diva_protocol/cli.py b/src/telliot_feeds/integrations/diva_protocol/cli.py new file mode 100644 index 00000000..6eb2ccc1 --- /dev/null +++ b/src/telliot_feeds/integrations/diva_protocol/cli.py @@ -0,0 +1,41 @@ +import click + +from telliot_feeds.integrations.diva_protocol.utils import get_reported_pools +from telliot_feeds.integrations.diva_protocol.utils import update_reported_pools + + [email protected]() +def diva() -> None: + """Commands for Diva Protocol integration.""" + pass + + [email protected]() +def cache() -> None: + """Commands for interacting with reported/settled pools cache (pickle file).""" + pass + + [email protected]() +def view() -> None: + """View reported/settled pools cache.""" + cache = get_reported_pools() + if cache: + click.echo("Reported/Settled Pools Cache:") + else: + click.echo("Reported/Settled Pools Cache is empty") + return + for pool_id in cache: + click.echo(f"Pool ID: {pool_id}") + expiry, status = cache[pool_id] + click.echo(f"\tExpiry: {expiry}") + click.echo(f"\tSettle status: {status}") + + [email protected]() +def clear() -> None: + """Clear reported/settled pools cache. + + Creates and saves an empty dict as a pickle file in the telliot default dir.""" + update_reported_pools({}) + click.echo("Cleared reported/settled pools cache")
Doesn't attempt to settle pool after failing to previously For the DIVA integration: the reporter ran out of gas so it couldn't call the settle function; then, after restarting, it didn't try to settle again, though it should have.
Maybe just use the graph instead of the local db. I think it should still avoid attempting to settle multiple times, so repeated transaction reverts don't drain the reporter's funds. Instead, make a subcommand to clear the local report/settle db (pickle file), as sketched below.
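A sketch of the pickle-backed cache that the new `telliot integrations diva cache` subcommands read and clear. The helper names and the `{pool_id: (expiry, settle_status)}` schema follow the patch's `view` command; the file location is an assumption here (the real helpers in `diva_protocol.utils` resolve telliot's default directory):

```python
import pickle
from pathlib import Path

# Assumed location; the real helpers use telliot's default home dir.
CACHE_FILE = Path.home() / "telliot" / "reported_pools.pickle"


def get_reported_pools() -> dict:
    """Load the {pool_id: (expiry, settle_status)} cache, empty if absent."""
    if not CACHE_FILE.exists():
        return {}
    with CACHE_FILE.open("rb") as f:
        return pickle.load(f)


def update_reported_pools(pools: dict) -> None:
    """Overwrite the cache; update_reported_pools({}) implements `cache clear`."""
    CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
    with CACHE_FILE.open("wb") as f:
        pickle.dump(pools, f)
```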
2022-11-15T04:16:16
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-438
ec687d70df8584eb4eaf3c75ffeeaab90de3ce97
diff --git a/src/telliot_feeds/integrations/diva_protocol/sources.py b/src/telliot_feeds/integrations/diva_protocol/sources.py index bad453f6..ed1156c4 100644 --- a/src/telliot_feeds/integrations/diva_protocol/sources.py +++ b/src/telliot_feeds/integrations/diva_protocol/sources.py @@ -11,9 +11,6 @@ from telliot_feeds.sources.price.historical.kraken import ( KrakenHistoricalPriceSource, ) -from telliot_feeds.sources.price.historical.poloniex import ( - PoloniexHistoricalPriceSource, -) from telliot_feeds.sources.price_aggregator import PriceAggregator from telliot_feeds.utils.log import get_logger @@ -83,10 +80,9 @@ def get_historical_price_source(asset: str, currency: str, timestamp: int) -> Pr sources=[ CryptowatchHistoricalPriceSource(asset=asset, currency=currency, ts=timestamp), KrakenHistoricalPriceSource(asset="xbt" if asset == "btc" else asset, currency=currency, ts=timestamp), - PoloniexHistoricalPriceSource(asset=asset, currency="dai" if currency == "usd" else currency, ts=timestamp), - PoloniexHistoricalPriceSource( - asset=asset, currency="tusd" if currency == "usd" else currency, ts=timestamp - ), + # PoloniexHistoricalPriceSource( + # asset=asset, currency="tusd" if currency == "usd" else currency, ts=timestamp + # ), ], ) return source
Maybe drop third source for BTC/USD historical price It seems inaccurate (note the outlier among the median inputs and the Poloniex 500 error in the log below): ```console INFO | telliot_feeds.integrations.diva_protocol.report | Reporting pool expiry time: 1667927880 INFO | telliot_feeds.integrations.diva_protocol.report | Current time: 1667927910 INFO | telliot_feeds.integrations.diva_protocol.report | Current query: {"type":"DIVAProtocol","poolId":71,"divaDiamond":"0x2d941518E0876Fb6042bfCdB403427DC5620b2EC","chainId":5} ERROR | telliot_feeds.sources.price.historical.poloniex | {'response': {'code': 500, 'message': 'System error'}} INFO | telliot_feeds.sources.price_aggregator | Running median on [20086.36, 20087.7, 20553.76] INFO | telliot_feeds.sources.price_aggregator | Feed Price: 20087.7 reported at time 2022-11-08 17:18:31.771537+00:00 INFO | telliot_feeds.sources.price_aggregator | Number of Sources used for this report are: 3 INFO | telliot_feeds.integrations.diva_protocol.sources | Stored fake price for DIVA USD at 2022-11-08 17:18:31.771654+00:00: 1.0 INFO | telliot_feeds.integrations.diva_protocol.sources | Stored DIVAProtocol query response at 2022-11-08 17:18:31.771707+00:00: [20087.7, 1.0] ```
2022-11-15T02:54:16
0.0
[]
[]
tellor-io/telliot-feeds
tellor-io__telliot-feeds-433
2a9d889126bb76e3e458f045d892de02bb32d1f8
diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index 1ea93e64..3e852191 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -1,19 +1,20 @@ import getpass from typing import Any from typing import Optional -from typing import Union import click from chained_accounts import find_accounts from click.core import Context +from eth_typing import ChecksumAddress from eth_utils import to_checksum_address from telliot_core.cli.utils import async_run -from telliot_core.contract.contract import Contract -from telliot_core.directory import ContractInfo from telliot_feeds.cli.utils import build_feed_from_input +from telliot_feeds.cli.utils import parse_profit_input +from telliot_feeds.cli.utils import print_reporter_settings from telliot_feeds.cli.utils import reporter_cli_core from telliot_feeds.cli.utils import valid_diva_chain +from telliot_feeds.cli.utils import valid_transaction_type from telliot_feeds.cli.utils import validate_address from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import CATALOG_FEEDS @@ -22,16 +23,15 @@ from telliot_feeds.integrations.diva_protocol import DIVA_TELLOR_MIDDLEWARE_ADDRESS from telliot_feeds.integrations.diva_protocol.report import DIVAProtocolReporter from telliot_feeds.queries.query_catalog import query_catalog -from telliot_feeds.reporters.custom_flex_reporter import CustomFlexReporter -from telliot_feeds.reporters.custom_reporter import CustomXReporter from telliot_feeds.reporters.flashbot import FlashbotsReporter -from telliot_feeds.reporters.interval import IntervalReporter from telliot_feeds.reporters.rng_interval import RNGReporter from telliot_feeds.reporters.tellor_360 import Tellor360Reporter from telliot_feeds.reporters.tellorflex import TellorFlexReporter from telliot_feeds.utils.cfg import check_endpoint from telliot_feeds.utils.cfg import setup_config from telliot_feeds.utils.log import get_logger +from telliot_feeds.utils.reporter_utils import create_custom_contract +from telliot_feeds.utils.reporter_utils import prompt_for_abi logger = get_logger(__name__) @@ -39,70 +39,13 @@ TELLOR_X_CHAINS = (1, 4, 5) STAKE_MESSAGE = ( - "\n\U00002757Telliot will automatically stake more TRB " + "\U00002757Telliot will automatically stake more TRB " "if your stake is below or falls below the stake amount required to report.\n" "If you would like to stake more than required, enter the TOTAL stake amount you wish to be staked.\n" "For example, if you wish to stake 1000 TRB, enter 1000.\n" ) -def parse_profit_input(expected_profit: str) -> Optional[Union[str, float]]: - """Parses user input expected profit and ensures - the input is either a float or the string 'YOLO'.""" - if expected_profit == "YOLO": - return expected_profit - else: - try: - return float(expected_profit) - except ValueError: - click.echo("Not a valid profit input. 
Enter float or the string, 'YOLO'") - return None - - -def print_reporter_settings( - signature_address: str, - query_tag: str, - gas_limit: int, - priority_fee: Optional[int], - expected_profit: str, - chain_id: int, - max_fee: Optional[int], - transaction_type: int, - legacy_gas_price: Optional[int], - gas_price_speed: str, - reporting_diva_protocol: bool, - stake_amount: float, -) -> None: - """Print user settings to console.""" - click.echo("") - - if signature_address != "": - click.echo("⚡🤖⚡ Reporting through Flashbots relay ⚡🤖⚡") - click.echo(f"Signature account: {signature_address}") - - if query_tag: - click.echo(f"Reporting query tag: {query_tag}") - elif reporting_diva_protocol: - click.echo("Reporting & settling DIVA Protocol pools") - else: - click.echo("Reporting with synchronized queries") - - click.echo(f"Current chain ID: {chain_id}") - - if expected_profit == "YOLO": - click.echo("🍜🍜🍜 Reporter not enforcing profit threshold! 🍜🍜🍜") - else: - click.echo(f"Expected percent profit: {expected_profit}%") - - click.echo(f"Transaction type: {transaction_type}") - click.echo(f"Gas Limit: {gas_limit}") - click.echo(f"Legacy gas price (gwei): {legacy_gas_price}") - click.echo(f"Max fee (gwei): {max_fee}") - click.echo(f"Priority fee (gwei): {priority_fee}") - click.echo(f"Gas price speed: {gas_price_speed}\n") - click.echo(f"Desired stake amount: {stake_amount}") - - @click.group() def reporter() -> None: """Report data on-chain.""" @@ -169,7 +112,9 @@ def reporter() -> None: help="lower threshold (inclusive) for expected percent profit", nargs=1, # User can omit profitability checks by specifying "YOLO" - type=str, + type=click.UNPROCESSED, + required=False, + callback=parse_profit_input, default="100.0", ) @click.option( @@ -177,7 +122,9 @@ def reporter() -> None: "-tx", "tx_type", help="choose transaction type (0 for legacy txs, 2 for EIP-1559)", - type=int, + type=click.UNPROCESSED, + required=False, + callback=valid_transaction_type, default=0, ) @click.option( @@ -208,24 +155,6 @@ def reporter() -> None: nargs=1, type=int, ) [email protected]( - "--oracle-address", - "-oracle", - "oracle_address", - help="oracle contract address to interact with", - nargs=1, - type=str, - default=None, -) [email protected]( - "--autopay-address", - "-autopay", - "autopay_address", - help="autopay contract address to interact with", - nargs=1, - type=str, - default=None, -) @click.option( "--diva-protocol", "-dpt", @@ -256,13 +185,44 @@ def reporter() -> None: prompt=False, ) @click.option( - "--custom-contract", - "-custom", - "custom_contract_reporter", - help="Use a custom contract to report to oracle", + "--custom-token-contract", + "-custom-token", + "custom_token_contract", + help="Address of custom token contract", + nargs=1, + default=None, + type=click.UNPROCESSED, + callback=validate_address, + prompt=False, +) [email protected]( + "--custom-oracle-contract", + "-custom-oracle", + "custom_oracle_contract", + help="Address of custom oracle contract", nargs=1, default=None, - type=str, + type=click.UNPROCESSED, + callback=validate_address, + prompt=False, +) [email protected]( + "--custom-autopay-contract", + "-custom-autopay", + "custom_autopay_contract", + help="Address of custom autopay contract", + nargs=1, + default=None, + type=click.UNPROCESSED, + callback=validate_address, + prompt=False, +) [email protected]( + "--tellor-360/--tellor-flex", + "-360/-flex", + "tellor_360", + default=True, + help="Choose between Tellor 360 or Flex contracts", ) @click.option( "--stake", @@ -273,8 
+233,6 @@ def reporter() -> None: type=float, default=10.0, ) [email protected]("--flex-360/--old-flex", default=True, help="Choose between tellor360 reporter or old flex") [email protected]("--binary-interface", "-abi", "abi", nargs=1, default=None, type=str) @click.option("--rng-auto/--rng-auto-off", default=False) @click.option("--submit-once/--submit-continuous", default=False) @click.option("-pwd", "--password", type=str) @@ -301,35 +259,13 @@ async def report( password: str, signature_password: str, rng_auto: bool, - oracle_address: str, - autopay_address: str, - custom_contract_reporter: Optional[str], - abi: Optional[str], - flex_360: bool, + custom_token_contract: Optional[ChecksumAddress], + custom_oracle_contract: Optional[ChecksumAddress], + custom_autopay_contract: Optional[ChecksumAddress], + tellor_360: bool, stake: float, ) -> None: """Report values to Tellor oracle""" - # Ensure valid user input for expected profit - expected_profit = parse_profit_input(expected_profit) # type: ignore - if expected_profit is None: - return - - if oracle_address: - try: - oracle_address = to_checksum_address(oracle_address) - except ValueError: - click.echo(f"contract address must be a hex string. Got: {oracle_address}") - return - - if autopay_address: - try: - autopay_address = to_checksum_address(autopay_address) - except ValueError: - click.echo(f"contract address must be a hex string. Got: {autopay_address}") - return - - assert tx_type in (0, 2) - name = ctx.obj["ACCOUNT_NAME"] sig_acct_name = ctx.obj["SIGNATURE_ACCOUNT_NAME"] @@ -365,25 +301,6 @@ async def report( else: sig_acct_addr = "" # type: ignore - cid = core.config.main.chain_id - - if custom_contract_reporter: - try: - custom_contract_reporter = to_checksum_address(custom_contract_reporter) - except ValueError: - click.echo(f"Contract address must be a hex string. 
Got: {custom_contract_reporter}") - return - if abi is None: - try: - abi = ContractInfo( - name=None, org=None, address={int(core.config.main.chain_id): custom_contract_reporter} - ).get_abi(chain_id=core.config.main.chain_id) - except Exception: - print("Unable to fetch contract abi, consider adding abi using -abi flag!") - return - custom_contract = Contract(custom_contract_reporter, abi, core.endpoint, account) - custom_contract.connect() - # If we need to build a datafeed if build_feed: chosen_feed = build_feed_from_input() @@ -400,7 +317,7 @@ async def report( click.echo(f"No corresponding datafeed found for query tag: {query_tag}\n") return elif reporting_diva_protocol: - if not valid_diva_chain(chain_id=cid): + if not valid_diva_chain(chain_id=core.config.main.chain_id): click.echo("Diva Protocol not supported for this chain") return chosen_feed = None @@ -418,7 +335,7 @@ async def report( priority_fee=priority_fee, legacy_gas_price=legacy_gas_price, expected_profit=expected_profit, - chain_id=cid, + chain_id=core.config.main.chain_id, gas_price_speed=gas_price_speed, reporting_diva_protocol=reporting_diva_protocol, stake_amount=stake, @@ -426,98 +343,91 @@ async def report( _ = input("Press [ENTER] to confirm settings.") + contracts = core.get_tellor360_contracts() if tellor_360 else core.get_tellorflex_contracts() + + if custom_oracle_contract: + contracts.oracle.connect() # set telliot_core.contract.Contract.contract attribute + custom_oracle_abi = prompt_for_abi() + contracts.oracle = create_custom_contract( + original_contract=contracts.oracle, + custom_contract_addr=custom_oracle_contract, + custom_abi=custom_oracle_abi, + endpoint=core.endpoint, + account=account, + ) + contracts.oracle.connect() + + if custom_autopay_contract: + contracts.autopay.connect() # set telliot_core.contract.Contract.contract attribute + custom_autopay_abi = prompt_for_abi() + contracts.autopay = create_custom_contract( + original_contract=contracts.autopay, + custom_contract_addr=custom_autopay_contract, + custom_abi=custom_autopay_abi, + endpoint=core.endpoint, + account=account, + ) + contracts.autopay.connect() + + if custom_token_contract: + contracts.token.connect() # set telliot_core.contract.Contract.contract attribute + custom_token_abi = prompt_for_abi() + contracts.token = create_custom_contract( + original_contract=contracts.token, + custom_contract_addr=custom_token_contract, + custom_abi=custom_token_abi, + endpoint=core.endpoint, + account=account, + ) + contracts.token.connect() + common_reporter_kwargs = { "endpoint": core.endpoint, "account": account, "datafeed": chosen_feed, - "transaction_type": tx_type, "gas_limit": gas_limit, "max_fee": max_fee, "priority_fee": priority_fee, "legacy_gas_price": legacy_gas_price, "gas_price_speed": gas_price_speed, - "chain_id": cid, + "chain_id": core.config.main.chain_id, + "wait_period": wait_period, + "oracle": contracts.oracle, + "autopay": contracts.autopay, + "token": contracts.token, + "expected_profit": expected_profit, + "stake": stake, + "transaction_type": tx_type, } - # Report to Polygon TellorFlex - if core.config.main.chain_id in TELLOR_X_CHAINS and not flex_360: - # Report to TellorX - tellorx = core.get_tellorx_contracts() - if oracle_address: - tellorx.oracle.address = oracle_address - tellorx.oracle.connect() - - tellorx_reporter_kwargs = { - "master": tellorx.master, - "oracle": tellorx.oracle, - "expected_profit": expected_profit, + if sig_acct_addr: + reporter = FlashbotsReporter( + signature_account=sig_account, 
**common_reporter_kwargs, - } - - if custom_contract_reporter: - reporter = CustomXReporter( - custom_contract=custom_contract, - **tellorx_reporter_kwargs, - ) - else: - reporter = IntervalReporter(**tellorx_reporter_kwargs) # type: ignore - + ) + elif rng_auto: + reporter = RNGReporter( # type: ignore + wait_period=120 if wait_period < 120 else wait_period, + **common_reporter_kwargs, + ) + elif reporting_diva_protocol: + diva_reporter_kwargs = {} + if diva_diamond_address is not None: + diva_reporter_kwargs["diva_diamond_address"] = diva_diamond_address + if diva_middleware_address is not None: + diva_reporter_kwargs["middleware_address"] = diva_middleware_address + reporter = DIVAProtocolReporter( + **common_reporter_kwargs, + **diva_reporter_kwargs, # type: ignore + ) + elif tellor_360: + reporter = Tellor360Reporter( + **common_reporter_kwargs, + ) # type: ignore else: - contracts = core.get_tellor360_contracts() if flex_360 else core.get_tellorflex_contracts() - - if oracle_address: - contracts.oracle.address = oracle_address - contracts.oracle.connect() - - if autopay_address: - contracts.autopay.address = autopay_address - contracts.autopay.connect() - - # set additional common kwargs to shorten code - common_reporter_kwargs["oracle"] = contracts.oracle - common_reporter_kwargs["autopay"] = contracts.autopay - common_reporter_kwargs["token"] = contracts.token - common_reporter_kwargs["stake"] = stake - common_reporter_kwargs["expected_profit"] = expected_profit - # selecting the right reporter will be changed after the switch - if flex_360: - if sig_acct_addr != "": - reporter = FlashbotsReporter( # type: ignore - signature_account=sig_account, - **common_reporter_kwargs, - ) - elif rng_auto: - reporter = RNGReporter( # type: ignore - wait_period=120 if wait_period < 120 else wait_period, - **common_reporter_kwargs, - ) - elif reporting_diva_protocol: - diva_reporter_kwargs = {} - if diva_diamond_address is not None: - diva_reporter_kwargs["diva_diamond_address"] = diva_diamond_address - if diva_middleware_address is not None: - diva_reporter_kwargs["middleware_address"] = diva_middleware_address - reporter = DIVAProtocolReporter( - wait_period=wait_period, - **common_reporter_kwargs, - **diva_reporter_kwargs, # type: ignore - ) - elif custom_contract_reporter: - reporter = CustomFlexReporter( - custom_contract=custom_contract, - wait_period=wait_period, - **common_reporter_kwargs, - ) # type: ignore - else: - reporter = Tellor360Reporter( - wait_period=wait_period, - **common_reporter_kwargs, - ) # type: ignore - else: - reporter = TellorFlexReporter( - wait_period=wait_period, - **common_reporter_kwargs, - ) # type: ignore + reporter = TellorFlexReporter( + **common_reporter_kwargs, + ) # type: ignore if submit_once: _, _ = await reporter.report_once() diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index 13e90d81..bb7a0c31 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -4,11 +4,13 @@ from typing import get_args from typing import get_type_hints from typing import Optional +from typing import Union import click from chained_accounts import ChainedAccount from chained_accounts import find_accounts from dotenv import load_dotenv +from eth_typing import ChecksumAddress from eth_utils import to_checksum_address from simple_term_menu import TerminalMenu from telliot_core.apps.core import TelliotCore @@ -24,6 +26,63 @@ load_dotenv() +def print_reporter_settings( + signature_address: str, + query_tag: str, + gas_limit: 
int, + priority_fee: Optional[int], + expected_profit: str, + chain_id: int, + max_fee: Optional[int], + transaction_type: int, + legacy_gas_price: Optional[int], + gas_price_speed: str, + reporting_diva_protocol: bool, + stake_amount: float, +) -> None: + """Print user settings to console.""" + click.echo("") + + if signature_address != "": + click.echo("⚡🤖⚡ Reporting through Flashbots relay ⚡🤖⚡") + click.echo(f"Signature account: {signature_address}") + + if query_tag: + click.echo(f"Reporting query tag: {query_tag}") + elif reporting_diva_protocol: + click.echo("Reporting & settling DIVA Protocol pools") + else: + click.echo("Reporting with synchronized queries") + + click.echo(f"Current chain ID: {chain_id}") + + if expected_profit == "YOLO": + click.echo("🍜🍜🍜 Reporter not enforcing profit threshold! 🍜🍜🍜") + else: + click.echo(f"Expected percent profit: {expected_profit}%") + + click.echo(f"Transaction type: {transaction_type}") + click.echo(f"Gas Limit: {gas_limit}") + click.echo(f"Legacy gas price (gwei): {legacy_gas_price}") + click.echo(f"Max fee (gwei): {max_fee}") + click.echo(f"Priority fee (gwei): {priority_fee}") + click.echo(f"Gas price speed: {gas_price_speed}") + click.echo(f"Desired stake amount: {stake_amount}") + click.echo("\n") + + +def parse_profit_input(ctx: click.Context, param: Any, value: str) -> Optional[Union[str, float]]: + """Parses user input expected profit and ensures + the input is either a float or the string 'YOLO'.""" + if value == "YOLO": + return value + else: + try: + return float(value) + except ValueError: + raise click.BadParameter("Not a valid profit input. Enter float or the string, 'YOLO'") + + def reporter_cli_core(ctx: click.Context) -> TelliotCore: """Get telliot core configured in reporter CLI context""" # Delegate to main cli core getter @@ -180,13 +239,24 @@ def build_query(log: Optional[Callable[[str], None]] = click.echo) -> Any: return query -def validate_address(ctx: click.Context, param: Any, value: str) -> str: +def validate_address(ctx: click.Context, param: Any, value: str) -> Optional[ChecksumAddress]: """Ensure input is a valid checksum address""" # Sets default to None if no value is provided if not value: - return value + return None try: - return str(to_checksum_address(value)) + return to_checksum_address(value) except Exception as e: raise click.BadParameter(f"Address must be a valid hex string. 
Error: {e}") + + +def valid_transaction_type(ctx: click.Context, param: Any, value: str) -> int: + """Ensure input is a valid transaction type""" + supported = (0, 2) + try: + if int(value) in supported: + return int(value) + raise click.BadParameter(f"Transaction type given ({value}) is not supported ({supported}).") + except ValueError: + raise click.BadParameter("Transaction type must be an integer.") diff --git a/src/telliot_feeds/integrations/diva_protocol/__init__.py b/src/telliot_feeds/integrations/diva_protocol/__init__.py index 0348ca98..c376f280 100644 --- a/src/telliot_feeds/integrations/diva_protocol/__init__.py +++ b/src/telliot_feeds/integrations/diva_protocol/__init__.py @@ -3,5 +3,5 @@ "ETH/USD", } SUPPORTED_COLLATERAL_TOKEN_SYMBOLS = {"dUSD"} -DIVA_DIAMOND_ADDRESS = "0x36a10DDa1d71fE161e8856dA1db271e75190c727" -DIVA_TELLOR_MIDDLEWARE_ADDRESS = "0xF4Bb0B9b078B0210eBEA704be2CB3c66B83cdFEF" +DIVA_DIAMOND_ADDRESS = "0x2d941518E0876Fb6042bfCdB403427DC5620b2EC" +DIVA_TELLOR_MIDDLEWARE_ADDRESS = "0x9F6Cd21bF0f18cf7bcd1bd9AF75476537d8295fB" diff --git a/src/telliot_feeds/integrations/diva_protocol/contract.py b/src/telliot_feeds/integrations/diva_protocol/contract.py index 757977fb..64f02a6e 100644 --- a/src/telliot_feeds/integrations/diva_protocol/contract.py +++ b/src/telliot_feeds/integrations/diva_protocol/contract.py @@ -9,8 +9,6 @@ from telliot_core.model.endpoints import RPCEndpoint from telliot_core.utils.response import ResponseStatus -from telliot_feeds.integrations.diva_protocol import DIVA_DIAMOND_ADDRESS - logger = logging.getLogger(__name__) @@ -93,11 +91,12 @@ def __init__( self, node: RPCEndpoint, account: Optional[ChainedAccount] = None, - diva_diamond: Optional[str] = DIVA_DIAMOND_ADDRESS, ): - self.diva_diamond = diva_diamond chain_id = node.chain_id - contract_info = contract_directory.find(chain_id=chain_id, name="diva-oracle-tellor")[0] + try: + contract_info = contract_directory.find(chain_id=chain_id, name="diva-oracle-tellor")[0] + except IndexError: + raise Exception(f"DIVA Tellor middleware contract not found on chain_id {chain_id}") if not contract_info: raise Exception(f"diva-oracle-tellor contract info not found on chain_id {chain_id}") @@ -135,15 +134,10 @@ async def set_final_reference_value( """Settle a pool. 
Must be called after the the minimum period undisputed has elapsed.""" - if self.diva_diamond is None: - diva_protocol_info = contract_directory.find(chain_id=self.node.chain_id, name="diva-protocol")[0] - diva_protocol_addr = diva_protocol_info.address[self.node.chain_id] - self.diva_diamond = diva_protocol_addr print(f"setfinalref middleware address: {self.address}") _, status = await self.write( "setFinalReferenceValue", - _divaDiamond=self.diva_diamond, _poolId=pool_id, gas_limit=gas_limit, legacy_gas_price=legacy_gas_price, diff --git a/src/telliot_feeds/integrations/diva_protocol/feed.py b/src/telliot_feeds/integrations/diva_protocol/feed.py index 7b6ad936..92cd0188 100644 --- a/src/telliot_feeds/integrations/diva_protocol/feed.py +++ b/src/telliot_feeds/integrations/diva_protocol/feed.py @@ -51,7 +51,6 @@ def assemble_diva_datafeed( source.collat_token_source = collat_token_source feed = DataFeed( - # query=DIVAProtocol(pool.pool_id, divaDiamond="0xebBAA31B1Ebd727A1a42e71dC15E304aD8905211", chainId=3), query=DIVAProtocol(pool.pool_id, divaDiamond=diva_diamond, chainId=chain_id), source=source, ) diff --git a/src/telliot_feeds/integrations/diva_protocol/report.py b/src/telliot_feeds/integrations/diva_protocol/report.py index da5db366..42552322 100644 --- a/src/telliot_feeds/integrations/diva_protocol/report.py +++ b/src/telliot_feeds/integrations/diva_protocol/report.py @@ -58,7 +58,6 @@ def __init__( # type: ignore self.middleware_contract = DivaOracleTellorContract( node=self.endpoint, account=self.account, - diva_diamond=diva_diamond_address, ) self.middleware_contract.address = middleware_address self.middleware_contract.connect() diff --git a/src/telliot_feeds/reporters/custom_flex_reporter.py b/src/telliot_feeds/reporters/custom_flex_reporter.py deleted file mode 100644 index 7a9e6774..00000000 --- a/src/telliot_feeds/reporters/custom_flex_reporter.py +++ /dev/null @@ -1,278 +0,0 @@ -"""TellorFlex compatible reporters""" -from typing import Any -from typing import Optional -from typing import Tuple - -from eth_utils import to_checksum_address -from telliot_core.contract.contract import Contract -from telliot_core.utils.key_helpers import lazy_unlock_account -from telliot_core.utils.response import error_status -from telliot_core.utils.response import ResponseStatus -from web3 import Web3 -from web3.datastructures import AttributeDict - -from telliot_feeds.reporters.tellor_360 import StakerInfo -from telliot_feeds.reporters.tellor_360 import Tellor360Reporter -from telliot_feeds.utils.log import get_logger - -logger = get_logger(__name__) - - -class CustomFlexReporter(Tellor360Reporter): - """Use custom contract to report through to tellorflex.""" - - def __init__(self, custom_contract: Contract, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.custom_contract = custom_contract - - async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: - """Compares stakeAmount and stakerInfo every loop to monitor changes to the stakeAmount or stakerInfo - and deposits stake if needed for continuous reporting - - Return: - - (bool, ResponseStatus) - """ - # get oracle required stake amount - stake_amount: int - stake_amount, status = await self.oracle.read("getStakeAmount") - logger.info(f"Current Oracle stakeAmount: {stake_amount / 1e18!r}") - - if (not status.ok) or (stake_amount is None): - msg = "Unable to read current stake amount" - return False, error_status(msg, log=logger.info) - - # get accounts current stake total - staker_info, status = await 
self.oracle.read( - "getStakerInfo", - _stakerAddress=self.acct_addr, - ) - if (not status.ok) or (staker_info is None): - msg = "Unable to read reporters staker info" - return False, error_status(msg, log=logger.info) - - # first when reporter start set stakerInfo - if self.staker_info is None: - self.staker_info = StakerInfo(*staker_info) - - # on subsequent loops keeps checking if staked balance in oracle contract decreased - # if it decreased account is probably dispute barring withdrawal - if self.staker_info.stake_balance > staker_info[1]: - # update balance in script - self.staker_info.stake_balance = staker_info[1] - logger.info("your staked balance has decreased and account might be in dispute") - - # after the first loop keep track of the last report's timestamp to calculate reporter lock - self.staker_info.last_report = staker_info[4] - self.staker_info.reports_count = staker_info[5] - - logger.info( - f""" - STAKER INFO - start date: {staker_info[0]} - stake_balance: {staker_info[1] / 1e18!r} - locked_balance: {staker_info[2]} - last report: {staker_info[4]} - reports count: {staker_info[5]} - """ - ) - - account_staked_bal = self.staker_info.stake_balance - - # after the first loop, logs if stakeAmount has increased or decreased - if self.stake_amount is not None: - if self.stake_amount < stake_amount: - logger.info("Stake amount has increased possibly due to TRB price change.") - elif self.stake_amount > stake_amount: - logger.info("Stake amount has decreased possibly due to TRB price change.") - - self.stake_amount = stake_amount - - # deposit stake if stakeAmount in oracle is greater than account stake or - # a stake in cli is selected thats greater than account stake - if self.stake_amount > account_staked_bal or (self.stake * 1e18) > account_staked_bal: - logger.info("Approving and depositing stake...") - - gas_price_gwei = await self.fetch_gas_price() - if gas_price_gwei is None: - return False, error_status("Unable to fetch gas price for staking", log=logger.info) - - # amount to deposit whichever largest difference either chosen stake or stakeAmount to keep reporting - stake_diff = max(int(self.stake_amount - account_staked_bal), int((self.stake * 1e18) - account_staked_bal)) - - # check TRB wallet balance! 
- wallet_balance, wallet_balance_status = await self.token.read("balanceOf", account=self.acct_addr) - - if not wallet_balance_status.ok: - msg = "unable to read account TRB balance" - return False, error_status(msg, log=logger.info) - - logger.info(f"Current wallet TRB balance: {wallet_balance / 1e18!r}") - - if stake_diff > wallet_balance: - msg = "Not enough TRB in the account to cover the stake" - return False, error_status(msg, log=logger.warning) - - txn_kwargs = {"gas_limit": 350000, "legacy_gas_price": gas_price_gwei} - - # approve token spending - _, approve_status = await self.custom_contract.write(func_name="approve", _amount=stake_diff, **txn_kwargs) - if not approve_status.ok: - msg = "Unable to approve staking" - return False, error_status(msg, log=logger.error) - # deposit stake - _, deposit_status = await self.custom_contract.write("depositStake", _amount=stake_diff, **txn_kwargs) - - if not deposit_status.ok: - msg = ( - "Unable to stake deposit: " - + deposit_status.error - + f"Make sure {self.acct_addr} has enough of the current chain's " - + "currency and the oracle's currency (TRB)" - ) - return False, error_status(msg, log=logger.error) - # add staked balance after successful stake deposit - self.staker_info.stake_balance += stake_diff - - return True, ResponseStatus() - - async def report_once( - self, - ) -> Tuple[Optional[AttributeDict[Any, Any]], ResponseStatus]: - """Report query value once - This method checks to see if a user is able to submit - values to the TellorX oracle, given their staker status - and last submission time. Also, this method does not - submit values if doing so won't make a profit.""" - # Check staker status - staked, status = await self.ensure_staked() - if not staked or not status.ok: - logger.warning(status.error) - return None, status - - status = await self.check_reporter_lock() - if not status.ok: - return None, status - - # Get suggested datafeed if none provided - datafeed = await self.fetch_datafeed() - if not datafeed: - msg = "Unable to suggest datafeed" - return None, error_status(note=msg, log=logger.info) - - logger.info(f"Current query: {datafeed.query.descriptor}") - - status = await self.ensure_profitable(datafeed) - if not status.ok: - return None, status - - status = ResponseStatus() - - address = to_checksum_address(self.account.address) - - # Update datafeed value - await datafeed.source.fetch_new_datapoint() - latest_data = datafeed.source.latest - if latest_data[0] is None: - msg = "Unable to retrieve updated datafeed value." 
- return None, error_status(msg, log=logger.info) - - # Get query info & encode value to bytes - query = datafeed.query - query_id = query.query_id - query_data = query.query_data - try: - value = query.value_type.encode(latest_data[0]) - except Exception as e: - msg = f"Error encoding response value {latest_data[0]}" - return None, error_status(msg, e=e, log=logger.error) - - # Get nonce - report_count, read_status = await self.get_num_reports_by_id(query_id) - - if not read_status.ok: - status.error = "Unable to retrieve report count: " + read_status.error # error won't be none # noqa: E501 - logger.error(status.error) - status.e = read_status.e - return None, status - - # Start transaction build - submit_val_func = self.custom_contract.contract.get_function_by_name("submitValue") - submit_val_tx = submit_val_func( - _queryId=query_id, - _value=value, - _nonce=report_count, - _queryData=query_data, - ) - acc_nonce = self.endpoint._web3.eth.get_transaction_count(address) - - # Add transaction type 2 (EIP-1559) data - if self.transaction_type == 2: - logger.info(f"maxFeePerGas: {self.max_fee}") - logger.info(f"maxPriorityFeePerGas: {self.priority_fee}") - - built_submit_val_tx = submit_val_tx.buildTransaction( - { - "nonce": acc_nonce, - "gas": self.gas_limit, - "maxFeePerGas": Web3.toWei(self.max_fee, "gwei"), # type: ignore - # TODO: Investigate more why etherscan txs using Flashbots have - # the same maxFeePerGas and maxPriorityFeePerGas. Example: - # https://etherscan.io/tx/0x0bd2c8b986be4f183c0a2667ef48ab1d8863c59510f3226ef056e46658541288 # noqa: E501 - "maxPriorityFeePerGas": Web3.toWei(self.priority_fee, "gwei"), # noqa: E501 - "chainId": self.chain_id, - } - ) - # Add transaction type 0 (legacy) data - else: - # Fetch legacy gas price if not provided by user - if not self.legacy_gas_price: - gas_price = await self.fetch_gas_price(self.gas_price_speed) - if not gas_price: - note = "Unable to fetch gas price for tx type 0" - return None, error_status(note, log=logger.warning) - else: - gas_price = self.legacy_gas_price - - built_submit_val_tx = submit_val_tx.buildTransaction( - { - "nonce": acc_nonce, - "gas": self.gas_limit, - "gasPrice": Web3.toWei(gas_price, "gwei"), - "chainId": self.chain_id, - } - ) - - lazy_unlock_account(self.account) - local_account = self.account.local_account - tx_signed = local_account.sign_transaction(built_submit_val_tx) - - try: - logger.debug("Sending submitValue transaction") - tx_hash = self.endpoint._web3.eth.send_raw_transaction(tx_signed.rawTransaction) - except Exception as e: - note = "Send transaction failed" - return None, error_status(note, log=logger.error, e=e) - - try: - # Confirm transaction - tx_receipt = self.endpoint._web3.eth.wait_for_transaction_receipt(tx_hash, timeout=360) - - tx_url = f"{self.endpoint.explorer}/tx/{tx_hash.hex()}" - - if tx_receipt["status"] == 0: - msg = f"Transaction reverted. 
({tx_url})" - return tx_receipt, error_status(msg, log=logger.error) - - except Exception as e: - note = "Failed to confirm transaction" - return None, error_status(note, log=logger.error, e=e) - - if status.ok and not status.error: - # Reset previous submission timestamp - self.last_submission_timestamp = 0 - # Point to relevant explorer - logger.info(f"View reported data: \n{tx_url}") - else: - logger.error(status) - - return tx_receipt, status diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py index 6ffb3a90..c2caa3d1 100644 --- a/src/telliot_feeds/reporters/tellor_360.py +++ b/src/telliot_feeds/reporters/tellor_360.py @@ -67,12 +67,13 @@ async def ensure_staked(self) -> Tuple[bool, ResponseStatus]: # get oracle required stake amount stake_amount: int stake_amount, status = await self.oracle.read("getStakeAmount") - logger.info(f"Current Oracle stakeAmount: {stake_amount / 1e18!r}") if (not status.ok) or (stake_amount is None): msg = "Unable to read current stake amount" return False, error_status(msg, log=logger.info) + logger.info(f"Current Oracle stakeAmount: {stake_amount / 1e18!r}") + # get accounts current stake total staker_info, status = await self.oracle.read( "getStakerInfo", diff --git a/src/telliot_feeds/utils/cfg.py b/src/telliot_feeds/utils/cfg.py index 80244543..a1ddb469 100644 --- a/src/telliot_feeds/utils/cfg.py +++ b/src/telliot_feeds/utils/cfg.py @@ -72,11 +72,9 @@ def setup_config(cfg: TelliotConfig, account_name: str) -> Tuple[TelliotConfig, else: click.echo("No accounts set.") - want_to_update_settings = click.confirm( - "Would you like to update settings? If no accounts or endpoints are set, select yes" - ) + no_update = click.confirm("Proceed with current settings (y) or update (n)?", default=True) - if not want_to_update_settings: + if no_update: if not accounts or not endpoint: return cfg, None return cfg, accounts[0] diff --git a/src/telliot_feeds/utils/reporter_utils.py b/src/telliot_feeds/utils/reporter_utils.py index d2966825..5adec4b3 100644 --- a/src/telliot_feeds/utils/reporter_utils.py +++ b/src/telliot_feeds/utils/reporter_utils.py @@ -1,10 +1,17 @@ +import json +from typing import Any from typing import Callable from typing import List from typing import Optional from typing import Union +import click import requests +from chained_accounts import ChainedAccount from eth_typing import ChecksumAddress +from telliot_core.contract.contract import Contract +from telliot_core.directory import ContractInfo +from telliot_core.model.endpoints import RPCEndpoint from telliot_core.tellor.tellorflex.oracle import TellorFlexOracleContract from telliot_core.tellor.tellorx.oracle import TellorxOracleContract from web3 import Web3 @@ -80,3 +87,55 @@ def has_native_token_funds( return False return True + + +def create_custom_contract( + original_contract: Contract, + custom_contract_addr: ChecksumAddress, + endpoint: RPCEndpoint, + account: ChainedAccount, + custom_abi: Any = None, +) -> Contract: + """Verify custom contract ABI is compatible with the original contract ABI. Return custom contract instance. + + Reports to user if custom contract ABI differs from original contract ABI. 
+ Confirms if user wants to continue with custom contract ABI.""" + original_functions = sorted(list(original_contract.contract.functions)) + + if not custom_abi: + # fetch ABI from block explorer + try: + custom_abi = ContractInfo(name=None, org=None, address={endpoint.chain_id: custom_contract_addr}).get_abi( + chain_id=endpoint.chain_id + ) + except Exception as e: + raise click.ClickException(f"Error fetching custom contract ABI from block explorer: {e}") + + custom_contract = Contract(custom_contract_addr, custom_abi, endpoint, account) + custom_contract.connect() + custom_functions = sorted(list(custom_contract.contract.functions)) + + missing_functions = [f for f in original_functions if f not in custom_functions] + if missing_functions: + warning_msg = f"WARNING: Custom contract ABI is missing {len(missing_functions)} functions:" + click.echo(warning_msg) + numbered_missing_functions = "\n".join([f"{i+1:03d}. {f}" for i, f in enumerate(missing_functions)]) + click.echo(numbered_missing_functions) + click.confirm("Continue?", default=True, abort=True) + + return custom_contract + + +def prompt_for_abi() -> Any: + """Prompt user to provide custom contract ABI as a JSON file.""" + if click.confirm( + "Do you want to provide a custom contract ABI? If no, will attempt to fetch from block explorer.", default=False + ): + file_path = click.prompt( + "Provide path to custom contract ABI JSON file (e.g. /Users/foo/custom_reporter_abi.json)", + type=click.Path(exists=True), + ) + with open(file_path, "r") as f: + abi = json.load(f) + return abi + return None
Custom oracle contract flag not working I am using the custom contract flag in order to report to the Goerli playground, but it's submitting to... TellorFlex (?!) Command used: ```console telliot-feeds -a dev-acct-1-goerli report -dpt true -p YOLO -custom 0x7B8AC044ebce66aCdF14197E8De38C1Cc802dB4A -wp 30 ``` `submitValue` transaction link [here](https://goerli.etherscan.io/tx/0x2da04d244eac9f9c55de4e257545fb10490069c3c3a56b9cacb9c43ca1622f7a)
2022-11-08T13:37:13
0.0
[]
[]
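In the record above, the fix wires the user-supplied address directly into `contracts.oracle`, `contracts.autopay`, or `contracts.token` via the new `create_custom_contract` helper, so the reporter actually submits to the custom contract instead of falling through to TellorFlex. The helper also warns when the custom ABI is missing functions the original contract has. The sketch below reduces that check to its core: the sample function lists are invented, and the `Contract` plumbing, block-explorer ABI fetch, and `click.confirm` gate from the real helper are omitted.

```python
# Reduced sketch of the ABI-compatibility warning in create_custom_contract()
# (src/telliot_feeds/utils/reporter_utils.py in the patch above).

def missing_functions(original_abi_funcs, custom_abi_funcs):
    """Functions present on the original contract but absent from the custom one."""
    return [f for f in sorted(original_abi_funcs) if f not in custom_abi_funcs]


# Invented sample data: a custom oracle that only implements submitValue.
original = ["submitValue", "depositStake", "getStakeAmount", "getStakerInfo"]
custom = ["submitValue"]

missing = missing_functions(original, custom)
if missing:
    print(f"WARNING: Custom contract ABI is missing {len(missing)} functions:")
    for i, name in enumerate(missing):
        print(f"{i + 1:03d}. {name}")
```

Warning and asking for confirmation, rather than hard-failing, keeps partial-interface custom contracts usable while still surfacing a mismatch before any transaction is sent.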
tellor-io/telliot-feeds
tellor-io__telliot-feeds-431
fd7cb284b25c857196d1da9ff5eb9dffa669b765
diff --git a/src/telliot_feeds/cli/commands/report.py b/src/telliot_feeds/cli/commands/report.py index 07fe0b9f..1ea93e64 100644 --- a/src/telliot_feeds/cli/commands/report.py +++ b/src/telliot_feeds/cli/commands/report.py @@ -38,30 +38,12 @@ TELLOR_X_CHAINS = (1, 4, 5) - -def get_stake_amount() -> float: - """Retrieve desired stake amount from user - - Each stake is 10 TRB on TellorFlex Polygon. If an address - is not staked for any reason, the TellorFlexReporter will attempt - to stake. Number of stakes determines the reporter lock: - - reporter_lock = 12hrs / N * stakes - - Retrieves desidred stake amount from user input.""" - - warn = ( - "\n\U00002757Telliot will automatically stake more TRB " - "if your stake is below or falls below the stake amount required to report.\n" - "If you would like to stake more than required enter the TOTAL stake amount you wish to be staked.\n" - ) - click.echo(warn) - msg = "Enter amount TRB to stake if unstaked" - stake = click.prompt(msg, type=float, default=10.0, show_default=True) - assert isinstance(stake, float) - assert stake >= 10.0 - - return stake +STAKE_MESSAGE = ( + "\n\U00002757Telliot will automatically stake more TRB " + "if your stake is below or falls below the stake amount required to report.\n" + "If you would like to stake more than required, enter the TOTAL stake amount you wish to be staked.\n" + "For example, if you wish to stake 1000 TRB, enter 1000.\n" +) def parse_profit_input(expected_profit: str) -> Optional[Union[str, float]]: @@ -89,6 +71,7 @@ def print_reporter_settings( legacy_gas_price: Optional[int], gas_price_speed: str, reporting_diva_protocol: bool, + stake_amount: float, ) -> None: """Print user settings to console.""" click.echo("") @@ -117,6 +100,7 @@ def print_reporter_settings( click.echo(f"Max fee (gwei): {max_fee}") click.echo(f"Priority fee (gwei): {priority_fee}") click.echo(f"Gas price speed: {gas_price_speed}\n") + click.echo(f"Desired stake amount: {stake_amount}") @click.group() @@ -280,6 +264,15 @@ def reporter() -> None: default=None, type=str, ) [email protected]( + "--stake", + "-s", + "stake", + help=STAKE_MESSAGE, + nargs=1, + type=float, + default=10.0, +) @click.option("--flex-360/--old-flex", default=True, help="Choose between tellor360 reporter or old flex") @click.option("--binary-interface", "-abi", "abi", nargs=1, default=None, type=str) @click.option("--rng-auto/--rng-auto-off", default=False) @@ -313,6 +306,7 @@ async def report( custom_contract_reporter: Optional[str], abi: Optional[str], flex_360: bool, + stake: float, ) -> None: """Report values to Tellor oracle""" # Ensure valid user input for expected profit @@ -427,6 +421,7 @@ async def report( chain_id=cid, gas_price_speed=gas_price_speed, reporting_diva_protocol=reporting_diva_protocol, + stake_amount=stake, ) _ = input("Press [ENTER] to confirm settings.") @@ -468,8 +463,6 @@ async def report( reporter = IntervalReporter(**tellorx_reporter_kwargs) # type: ignore else: - - stake = get_stake_amount() contracts = core.get_tellor360_contracts() if flex_360 else core.get_tellorflex_contracts() if oracle_address:
Make TRB stake amount user input into an optional flag
It asks the user to enter the desired stake amount every time. The user doesn't want this. Make it an optional flag with a default value instead.
2022-11-07T14:28:41
0.0
[]
[]
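The patch above turns the interactive stake prompt into a `--stake/-s` option with a `10.0` default. A self-contained sketch of the same pattern; the command and help text here are simplified stand-ins, not the repo's full `report` command:

```python
# Minimal sketch: an optional click flag with a default replaces an
# interactive prompt, so non-interactive runs never block on input.
import click


@click.command()
@click.option(
    "--stake", "-s", type=float, default=10.0, show_default=True,
    help="TOTAL TRB amount to keep staked while reporting.",
)
def report(stake: float) -> None:
    click.echo(f"Desired stake amount: {stake}")


if __name__ == "__main__":
    report()  # e.g. `python report.py -s 1000`
```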
tellor-io/telliot-feeds
tellor-io__telliot-feeds-410
a8346621fab49a59c120ea41413437fae634b928
diff --git a/src/telliot_feeds/reporters/tellor_360.py b/src/telliot_feeds/reporters/tellor_360.py index 31998215..690ec76d 100644 --- a/src/telliot_feeds/reporters/tellor_360.py +++ b/src/telliot_feeds/reporters/tellor_360.py @@ -48,12 +48,12 @@ class StakerInfo: class Tellor360Reporter(TellorFlexReporter): - def __init__(self, *args: Any, **kwargs: Any) -> None: - kwargs["stake"]: float = 0 + def __init__(self, stake: float = 0, *args: Any, **kwargs: Any) -> None: self.stake_amount: Optional[int] = None self.staker_info: Optional[StakerInfo] = None self.allowed_stake_amount = 0 super().__init__(*args, **kwargs) + self.stake: float = stake logger.info(f"Reporting with account: {self.acct_addr}")
Unable to increase stake_balance via Telliot prompts
Telliot prompts the user for input if the user wishes to increase their stake amount on tellor360 networks.
Expected Behavior: Telliot makes a transaction to `depositStake` for the input amount.
Current Behavior: Telliot does not make a `depositStake` transaction for any amount other than the minimum, even if the input is a larger stake. (It simply begins reporting / waiting 12 hours to submit)
versions:
telliot-core 5cace483b6926d59ea575936f0e9f464f97aa2d8
telliot-feeds b92f7e44a3cb537a39fe784fa421f15c99ad315b
2022-10-21T07:22:53
0.0
[]
[]
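The one-line bug the patch above removes is easy to miss: `kwargs["stake"]: float = 0` reads like a type annotation, but a PEP 526 annotated assignment on a subscript target still executes the assignment, so the caller's stake was silently zeroed before `super().__init__()` ran. A standalone demonstration (the function names are illustrative, not from the repo):

```python
# PEP 526 gotcha behind the bug: an annotated subscript assignment assigns.

def before_fix(**kwargs):
    kwargs["stake"]: float = 0  # looks like an annotation, but stores 0
    return kwargs["stake"]


def after_fix(stake: float = 0, **kwargs):
    return stake  # explicit parameter; the caller's value survives


print(before_fix(stake=1000))  # 0 -> deposits never exceed the minimum
print(after_fix(stake=1000))   # 1000
```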
tellor-io/telliot-feeds
tellor-io__telliot-feeds-399
c40927d9f65dde7a77e20498890872e0f40f27db
diff --git a/src/telliot_feeds/cli/commands/query.py b/src/telliot_feeds/cli/commands/query.py index 8f59e0d5..0c5e1cc8 100644 --- a/src/telliot_feeds/cli/commands/query.py +++ b/src/telliot_feeds/cli/commands/query.py @@ -1,5 +1,6 @@ import click +from telliot_feeds.cli.utils import build_query from telliot_feeds.queries.utils import choose_query_type from telliot_feeds.utils.decode import decode_query_data from telliot_feeds.utils.decode import decode_submit_value_bytes @@ -34,6 +35,12 @@ def decode(query_data: str, submit_value_bytes: str) -> None: _, _ = decode_submit_value_bytes(query, submit_value_bytes, log=click.echo) [email protected]() +def new() -> None: + """Build a new custom query.""" + _ = build_query() + + # @query.command() # @click.pass_context # @click.argument("query_tag", type=str) diff --git a/src/telliot_feeds/cli/utils.py b/src/telliot_feeds/cli/utils.py index e775d7a0..42d99e60 100644 --- a/src/telliot_feeds/cli/utils.py +++ b/src/telliot_feeds/cli/utils.py @@ -1,5 +1,6 @@ import os from typing import Any +from typing import Callable from typing import get_args from typing import get_type_hints from typing import Optional @@ -9,11 +10,13 @@ from chained_accounts import find_accounts from dotenv import load_dotenv from eth_utils import to_checksum_address +from simple_term_menu import TerminalMenu from telliot_core.apps.core import TelliotCore from telliot_core.cli.utils import cli_core from telliot_feeds.datafeed import DataFeed from telliot_feeds.feeds import DATAFEED_BUILDER_MAPPING +from telliot_feeds.queries.abi_query import AbiQuery DIVA_PROTOCOL_CHAINS = (137, 80001, 3, 5) @@ -137,6 +140,46 @@ def build_feed_from_input() -> Optional[DataFeed[Any]]: return feed +def build_query(log: Optional[Callable[[str], None]] = click.echo) -> Any: + """Build a query from CLI input""" + title = "Select a query type:" + queries = [q for q in AbiQuery.__subclasses__() if q.__name__ not in ("LegacyRequest")] + options = [q.__name__ for q in queries] + # Sort options and queries by alphabetical order + options, queries = zip(*sorted(zip(options, queries))) + + menu = TerminalMenu(options, title=title) + selected_index = menu.show() + q = queries[selected_index] + + if not q: + log("No query selected") + return None + + # Get query parameters + query_params = {} + for name, field in q.__dataclass_fields__.items(): + try: + val = click.prompt(name, type=field.type) + except AttributeError: + val = click.prompt(name, type=get_args(field.type)[0]) + + query_params[name] = val + + try: + query = q(**query_params) + log("Query built successfully") + except Exception as e: + log(f"Error building query: {e}") + return None + + log(query) + log(f"Query ID: 0x{query.query_id.hex()}") + log(f"Query data: 0x{query.query_data.hex()}") + + return query + + def validate_address(ctx: click.Context, param: Any, value: str) -> str: """Ensure input is a valid checksum address""" # Sets default to None if no value is provided
Create any supported query instance via CLI Something like: ```console telliot-feeds query new Choose query type: 1 - SpotPrice 2 - StringQuery 3 - NumericApiResponse ... .. user enters 1 .. Enter asset parameter value: eth Enter currency parameter value: USD Your query: SpotPrice(asset='eth', currency='usd') queryId: 0xas;ldfjka;lsdfa;lskd queryData: 0x;alsdkj;askdjf;asdkjf;l ```
2022-10-12T02:36:56
0.0
[]
[]
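`build_query()` in the patch above works by introspecting the chosen query dataclass and prompting for each field. A condensed sketch of that loop: the `SpotPrice` stand-in is hypothetical (the real queries subclass `AbiQuery`), and plain `input()` replaces `click.prompt`, which in the patch also coerces each value to the field's annotated type:

```python
# Sketch: prompt for every field of a query dataclass, then instantiate it.
from dataclasses import dataclass, fields


@dataclass
class SpotPrice:  # hypothetical stand-in for a telliot AbiQuery subclass
    asset: str
    currency: str


def build_query(cls):
    params = {field.name: input(f"{field.name}: ") for field in fields(cls)}
    return cls(**params)


# Entering "eth" then "usd" yields: SpotPrice(asset='eth', currency='usd')
if __name__ == "__main__":
    print(build_query(SpotPrice))
```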
tuttle-dev/tuttle
tuttle-dev__tuttle-186
f5f55cf6897b3724d11b4c693ae1977f430b872e
diff --git a/app/auth/intent.py b/app/auth/intent.py index 24f1f880..20890af9 100644 --- a/app/auth/intent.py +++ b/app/auth/intent.py @@ -94,7 +94,7 @@ def get_user_if_exists(self) -> IntentResult[Optional[User]]: """ result = self._data_source.get_user_() if not result.was_intent_successful: - result.error_msg = "Checking auth status failed! Please restart the app" + result.error_msg = "No user data found." result.log_message_if_any() return result diff --git a/app/core/database_storage_impl.py b/app/core/database_storage_impl.py index 09501abb..6b5ac30b 100644 --- a/app/core/database_storage_impl.py +++ b/app/core/database_storage_impl.py @@ -1,9 +1,13 @@ +from typing import Callable + import re from pathlib import Path -from loguru import logger -from typing import Callable -import demo + import sqlmodel +from loguru import logger + +from tuttle import demo + from .abstractions import DatabaseStorage diff --git a/app/invoicing/data_source.py b/app/invoicing/data_source.py index c2e1eaca..f42f69dd 100644 --- a/app/invoicing/data_source.py +++ b/app/invoicing/data_source.py @@ -5,7 +5,7 @@ from core.abstractions import SQLModelDataSourceMixin from core.intent_result import IntentResult -from tuttle.model import Invoice, Project +from tuttle.model import Invoice, Project, Timesheet class InvoicingDataSource(SQLModelDataSourceMixin): @@ -74,6 +74,10 @@ def save_invoice( """Creates or updates an invoice with given invoice and project info""" self.store(invoice) + def save_timesheet(self, timesheet: Timesheet): + """Creates or updates a timesheet""" + self.store(timesheet) + def get_last_invoice(self) -> IntentResult[Invoice]: """Get the last invoice. @@ -100,3 +104,23 @@ def get_last_invoice(self) -> IntentResult[Invoice]: log_message=f"Exception raised @InvoicingDataSource.get_last_invoice_number {e.__class__.__name__}", exception=e, ) + + def get_timesheet_for_invoice(self, invoice: Invoice) -> Timesheet: + """Get the timesheet associated with an invoice + + Args: + invoice (Invoice): the invoice to get the timesheet for + + Returns: + Optional[Timesheet]: the timesheet associated with the invoice + """ + if not len(invoice.timesheets) > 0: + raise ValueError( + f"invoice {invoice.id} has no timesheets associated with it" + ) + if len(invoice.timesheets) > 1: + raise ValueError( + f"invoice {invoice.id} has more than one timesheet associated with it: {invoice.timesheets}" + ) + timesheet = invoice.timesheets[0] + return timesheet diff --git a/app/invoicing/intent.py b/app/invoicing/intent.py index 7b66776b..4f8500ad 100644 --- a/app/invoicing/intent.py +++ b/app/invoicing/intent.py @@ -45,9 +45,9 @@ def __init__(self, client_storage: ClientStorage): self._user_data_source = UserDataSource() self._auth_intent = AuthIntent() - def get_user(self) -> Optional[User]: - """Get the current user.""" - return self._auth_intent.get_user_if_exists() + def get_user(self) -> IntentResult[User]: + user = self._user_data_source.get_user() + return IntentResult(was_intent_successful=True, data=user) def get_active_projects_as_map(self) -> Mapping[int, Project]: return self._projects_intent.get_active_projects_as_map() @@ -95,11 +95,13 @@ def create_invoice( render: bool = True, ) -> IntentResult[Invoice]: """Create a new invoice from time tracking data.""" - + logger.info(f"⚙️ Creating invoice for {project.title}...") + user = self._user_data_source.get_user() try: # get the time tracking data timetracking_data = self._timetracking_data_source.get_data_frame() - timesheet: Timesheet = 
timetracking.create_timesheet( + # generate timesheet + timesheet: Timesheet = timetracking.generate_timesheet( timetracking_data, project, from_date, @@ -116,10 +118,24 @@ def create_invoice( ) if render: - # TODO: render timesheet + # render timesheet + try: + logger.info(f"⚙️ Rendering timesheet for {project.title}...") + rendering.render_timesheet( + user=user, + timesheet=timesheet, + out_dir=Path.home() / ".tuttle" / "Timesheets", + only_final=True, + ) + logger.info(f"✅ rendered timesheet for {project.title}") + except Exception as ex: + logger.error( + f"❌ Error rendering timesheet for {project.title}: {ex}" + ) + logger.exception(ex) # render invoice try: - user = self._user_data_source.get_user() + logger.info(f"⚙️ Rendering invoice for {project.title}...") rendering.render_invoice( user=user, invoice=invoice, @@ -130,7 +146,12 @@ def create_invoice( except Exception as ex: logger.error(f"❌ Error rendering invoice for {project.title}: {ex}") logger.exception(ex) - # save invoice + + # save invoice and timesheet + timesheet.invoice = invoice + assert timesheet.invoice is not None + assert len(invoice.timesheets) == 1 + # self._invoicing_data_source.save_timesheet(timesheet) self._invoicing_data_source.save_invoice(invoice) return IntentResult( was_intent_successful=True, @@ -319,6 +340,29 @@ def view_invoice(self, invoice: Invoice) -> IntentResult[None]: error_msg=error_message, ) + def view_timesheet_for_invoice(self, invoice: Invoice) -> IntentResult[None]: + """Attempts to open the timesheet for the invoice in the default pdf viewer""" + try: + timesheet = self._invoicing_data_source.get_timesheet_for_invoice(invoice) + timesheet_path = ( + Path().home() / ".tuttle" / "Timesheets" / f"{timesheet.prefix}.pdf" + ) + preview_pdf(timesheet_path) + return IntentResult(was_intent_successful=True) + except ValueError as ve: + logger.error(f"❌ Error getting timesheet for invoice: {ve}") + logger.exception(ve) + return IntentResult(was_intent_successful=False, error_msg=str(ve)) + except Exception as ex: + # display the execption name in the error message + error_message = f"❌ Failed to open the timesheet: {ex.__class__.__name__}" + logger.error(error_message) + logger.exception(ex) + return IntentResult( + was_intent_successful=False, + error_msg=error_message, + ) + def generate_invoice_number( self, invoice_date: Optional[date] = None, diff --git a/app/invoicing/view.py b/app/invoicing/view.py index a6bf99dc..95db9dec 100644 --- a/app/invoicing/view.py +++ b/app/invoicing/view.py @@ -216,6 +216,7 @@ def refresh_invoices(self): on_delete_clicked=self.on_delete_invoice_clicked, on_mail_invoice=self.on_mail_invoice, on_view_invoice=self.on_view_invoice, + on_view_timesheet=self.on_view_timesheet, toggle_paid_status=self.toggle_paid_status, toggle_cancelled_status=self.toggle_cancelled_status, toggle_sent_status=self.toggle_sent_status, @@ -241,6 +242,12 @@ def on_view_invoice(self, invoice: Invoice): if not result.was_intent_successful: self.show_snack(result.error_msg, is_error=True) + def on_view_timesheet(self, invoice: Invoice): + """Called when the user clicks view in the context menu of an invoice""" + result = self.intent.view_timesheet_for_invoice(invoice) + if not result.was_intent_successful: + self.show_snack(result.error_msg, is_error=True) + def on_delete_invoice_clicked(self, invoice: Invoice): """Called when the user clicks delete in the context menu of an invoice""" if self.editor is not None: @@ -425,6 +432,7 @@ def __init__( on_delete_clicked, on_mail_invoice, 
on_view_invoice, + on_view_timesheet, toggle_paid_status, toggle_sent_status, toggle_cancelled_status, @@ -433,6 +441,7 @@ def __init__( self.invoice = invoice self.on_delete_clicked = on_delete_clicked self.on_view_invoice = on_view_invoice + self.on_view_timesheet = on_view_timesheet self.on_mail_invoice = on_mail_invoice self.toggle_paid_status = toggle_paid_status self.toggle_sent_status = toggle_sent_status @@ -504,6 +513,11 @@ def build(self): txt="View", on_click=lambda e: self.on_view_invoice(self.invoice), ), + views.TPopUpMenuItem( + icon=icons.VISIBILITY_OUTLINED, + txt="View Timesheet ", + on_click=lambda e: self.on_view_timesheet(self.invoice), + ), views.TPopUpMenuItem( icon=icons.OUTGOING_MAIL, txt="Send", diff --git a/tuttle/calendar.py b/tuttle/calendar.py index a634e6a8..f01bb5ce 100644 --- a/tuttle/calendar.py +++ b/tuttle/calendar.py @@ -4,6 +4,7 @@ from pathlib import Path import io import re +import calendar from loguru import logger import ics @@ -180,3 +181,17 @@ class GoogleCalendar(CloudCalendar): def to_data(self) -> DataFrame: raise NotImplementedError("TODO") + + +def get_month_start_end(month_str): + # Parse the string into a datetime object + dt = datetime.datetime.strptime(month_str, "%B %Y") + + # Get the date information from the datetime object + year, month = dt.date().year, dt.date().month + + # Get the start and end dates of the month + start_date = datetime.date(year, month, 1) + end_date = datetime.date(year, month, calendar.monthrange(year, month)[1]) + + return start_date, end_date diff --git a/app/demo.py b/tuttle/demo.py similarity index 67% rename from app/demo.py rename to tuttle/demo.py index 8ca61e89..dedfaffa 100644 --- a/app/demo.py +++ b/tuttle/demo.py @@ -24,24 +24,52 @@ Cycle, Invoice, InvoiceItem, + Timesheet, + TimeTrackingItem, Project, TimeUnit, User, ) +def create_fake_user( + fake: faker.Faker, +) -> User: + """ + Create a fake user. + """ + user = User( + name=fake.name(), + email=fake.email(), + subtitle=fake.job(), + VAT_number=fake.ean8(), + ) + return user + + def create_fake_contact( fake: faker.Faker, -): +) -> Contact: split_address_lines = fake.address().splitlines() street_line = split_address_lines[0] city_line = split_address_lines[1] + try: + # TODO: This has a German bias + street = street_line.split(" ", 1)[0] + number = street_line.split(" ", 1)[1] + city = city_line.split(" ")[1] + postal_code = city_line.split(" ")[0] + except IndexError: + street = street_line + number = "" + city = city_line + postal_code = "" a = Address( - street=street_line, - number=city_line, - city=city_line.split(" ")[1], - postal_code=city_line.split(" ")[0], + street=street, + number=number, + city=city, + postal_code=postal_code, country=fake.country(), ) first_name, last_name = fake.name().split(" ", 1) @@ -57,9 +85,11 @@ def create_fake_contact( def create_fake_client( - invoicing_contact: Contact, fake: faker.Faker, -): + invoicing_contact: Optional[Contact] = None, +) -> Client: + if invoicing_contact is None: + invoicing_contact = create_fake_contact(fake) client = Client( name=fake.company(), invoicing_contact=invoicing_contact, @@ -69,12 +99,14 @@ def create_fake_client( def create_fake_contract( - client: Client, fake: faker.Faker, + client: Optional[Client] = None, ) -> Contract: """ Create a fake contract for the given client. 
""" + if client is None: + client = create_fake_client(fake) unit = random.choice(list(TimeUnit)) if unit == TimeUnit.day: rate = fake.random_int(200, 1000) # realistic distribution for day rates @@ -99,10 +131,13 @@ def create_fake_contract( def create_fake_project( - contract: Contract, fake: faker.Faker, -): - project_title = fake.bs() + contract: Optional[Contract] = None, +) -> Project: + if contract is None: + contract = create_fake_contract(fake) + + project_title = fake.bs().replace("/", "-") project_tag = f"#{'-'.join(project_title.split(' ')[:2]).lower()}" project = Project( @@ -127,10 +162,55 @@ def invoice_number_counting(): invoice_number_counter = invoice_number_counting() +def create_fake_timesheet( + fake: faker.Faker, + project: Optional[Project] = None, +) -> Timesheet: + """ + Create a fake timesheet object with random values. + + Args: + project (Project): The project associated with the timesheet. + fake (faker.Faker): An instance of the Faker class to generate random values. + + Returns: + Timesheet: A fake timesheet object. + """ + if project is None: + project = create_fake_project(fake) + timesheet = Timesheet( + title=fake.bs().replace("/", "-"), + comment=fake.paragraph(nb_sentences=2), + date=datetime.date.today(), + period_start=datetime.date.today() - datetime.timedelta(days=30), + period_end=datetime.date.today(), + project=project, + ) + number_of_items = fake.random_int(min=1, max=5) + for _ in range(number_of_items): + unit = fake.random_element(elements=("hours", "days")) + if unit == "hours": + unit_price = abs(round(numpy.random.normal(50, 20), 2)) + elif unit == "days": + unit_price = abs(round(numpy.random.normal(400, 200), 2)) + time_tracking_item = TimeTrackingItem( + timesheet=timesheet, + begin=fake.date_time_this_year(before_now=True, after_now=False), + end=fake.date_time_this_year(before_now=True, after_now=False), + duration=datetime.timedelta(hours=fake.random_int(min=1, max=8)), + title=f"{fake.bs()} for #{project.tag}", + tag=project.tag, + description=fake.paragraph(nb_sentences=2), + ) + timesheet.items.append(time_tracking_item) + return timesheet + + def create_fake_invoice( - project: Project, - user: User, fake: faker.Faker, + project: Optional[Project] = None, + user: Optional[User] = None, + render: bool = True, ) -> Invoice: """ Create a fake invoice object with random values. @@ -142,6 +222,12 @@ def create_fake_invoice( Returns: Invoice: A fake invoice object. """ + if project is None: + project = create_fake_project(fake) + + if user is None: + user = create_fake_user(fake) + invoice_number = next(invoice_number_counter) invoice = Invoice( number=str(invoice_number), @@ -173,6 +259,14 @@ def create_fake_invoice( invoice=invoice, ) + # an invoice is created together with a timesheet. For the sake of simplicity, timesheet and invoice items are not linked. 
+ timesheet = create_fake_timesheet(fake, project) + # attach timesheet to invoice + timesheet.invoice = invoice + assert len(invoice.timesheets) == 1 + + if render: + # render invoice try: rendering.render_invoice( user=user, @@ -184,6 +278,18 @@ def create_fake_invoice( except Exception as ex: logger.error(f"❌ Error rendering invoice for {project.title}: {ex}") logger.exception(ex) + # render timesheet + try: + rendering.render_timesheet( + user=user, + timesheet=timesheet, + out_dir=Path.home() / ".tuttle" / "Timesheets", + only_final=True, + ) + logger.info(f"✅ rendered timesheet for {project.title}") + except Exception as ex: + logger.error(f"❌ Error rendering timesheet for {project.title}: {ex}") + logger.exception(ex) return invoice @@ -210,11 +316,15 @@ def create_fake_data( fake = faker.Faker(locale=locales) contacts = [create_fake_contact(fake) for _ in range(n)] - clients = [create_fake_client(contact, fake) for contact in contacts] - contracts = [create_fake_contract(client, fake) for client in clients] - projects = [create_fake_project(contract, fake) for contract in contracts] + clients = [ + create_fake_client(fake, invoicing_contact=contact) for contact in contacts + ] + contracts = [create_fake_contract(fake, client=client) for client in clients] + projects = [create_fake_project(fake, contract=contract) for contract in contracts] - invoices = [create_fake_invoice(project, user, fake) for project in projects] + invoices = [ + create_fake_invoice(fake, project=project, user=user) for project in projects + ] return projects, invoices diff --git a/tuttle/invoicing.py b/tuttle/invoicing.py index d33edfb4..98baa455 100644 --- a/tuttle/invoicing.py +++ b/tuttle/invoicing.py @@ -37,6 +37,7 @@ def generate_invoice( VAT_rate=contract.VAT_rate, description=timesheet.title, ) + # TODO: replace with auto-incrementing numbers invoice.generate_number(counter=counter) return invoice diff --git a/tuttle/model.py b/tuttle/model.py index 462ed7c8..ad08a474 100644 --- a/tuttle/model.py +++ b/tuttle/model.py @@ -1,6 +1,10 @@ """Object model.""" -from typing import Dict, List, Optional, Type +from typing import Optional, List, Dict, Type +from pydantic import constr, BaseModel, condecimal +from enum import Enum +import datetime +import textwrap import re import datetime @@ -48,9 +52,10 @@ def to_dataframe(items: List[Type[BaseModel]]) -> pandas.DataFrame: def OneToOneRelationship(back_populates): + """Define a relationship as one-to-one.""" return Relationship( back_populates=back_populates, - sa_relationship_kwargs={"uselist": False}, + sa_relationship_kwargs={"uselist": False, "lazy": "subquery"}, ) @@ -404,6 +409,9 @@ class Project(SQLModel, table=True): sa_relationship_kwargs={"lazy": "subquery"}, ) + def __repr__(self): + return f"Project(id={self.id}, title={self.title}, tag={self.tag})" + # PROPERTIES @property def client(self) -> Optional[Client]: @@ -462,9 +470,7 @@ class TimeTrackingItem(SQLModel, table=True): timesheet: Optional["Timesheet"] = Relationship(back_populates="items") # begin: datetime.datetime = Field(description="Start time of the time interval.") - end: Optional[datetime.datetime] = Field( - description="End time of the time interval." 
- ) + end: datetime.datetime = Field(description="End time of the time interval.") duration: datetime.timedelta = Field(description="Duration of the time interval.") title: str = Field(description="A short description of the time interval.") tag: str = Field( @@ -479,10 +485,13 @@ class Timesheet(SQLModel, table=True): id: Optional[int] = Field(default=None, primary_key=True) title: str date: datetime.date = Field(description="The date of creation of the timesheet") - # period: str - # table: pandas.DataFrame - # TODO: store dataframe as dict - # table: Dict = Field(default={}, sa_column=sqlalchemy.Column(sqlalchemy.JSON)) + period_start: datetime.date = Field( + description="The start date of the period covered by the timesheet." + ) + period_end: datetime.date = Field( + description="The end date of the period covered by the timesheet." + ) + # Timesheet n:1 Project project_id: Optional[int] = Field(default=None, foreign_key="project.id") project: Project = Relationship( @@ -494,9 +503,28 @@ class Timesheet(SQLModel, table=True): comment: Optional[str] = Field(description="A comment on the timesheet.") items: List[TimeTrackingItem] = Relationship(back_populates="timesheet") + rendered: bool = Field( + default=False, + description="Whether the Timesheet has been rendered as a PDF.", + ) + + # Timesheet n:1 Invoice + invoice_id: Optional[int] = Field(default=None, foreign_key="invoice.id") + invoice: Optional["Invoice"] = Relationship( + back_populates="timesheets", + sa_relationship_kwargs={"lazy": "subquery"}, + ) + # class Config: # arbitrary_types_allowed = True + def __repr__(self): + return f"Timesheet(id={self.id}, tag={self.project.tag}, period_start={self.period_start}, period_end={self.period_end})" + + @property + def prefix(self) -> str: + return f"{self.project.tag[1:]}-{self.period_start.strftime('%Y-%m-%d')}-{self.period_end.strftime('%Y-%m-%d')}" + @property def total(self) -> datetime.timedelta: """Sum of time in timesheet.""" @@ -517,16 +545,14 @@ class Invoice(SQLModel, table=True): """An invoice is a bill for a client.""" id: Optional[int] = Field(default=None, primary_key=True) - number: str + number: Optional[str] = Field(description="The invoice number. Auto-generated.") # date and time date: datetime.date = Field( description="The date of the invoice", ) - # due_date: datetime.date - # sent_date: datetime.date - # Invoice 1:n Timesheet ? - # timesheet_id: Optional[int] = Field(default=None, foreign_key="timesheet.id") - # timesheet: Timesheet = Relationship(back_populates="invoice") + + # RELATIONSHIPTS + # Invoice n:1 Contract ? 
contract_id: Optional[int] = Field(default=None, foreign_key="contract.id") contract: Contract = Relationship( @@ -539,6 +565,12 @@ class Invoice(SQLModel, table=True): back_populates="invoices", sa_relationship_kwargs={"lazy": "subquery"}, ) + # Invoice 1:n Timesheet + timesheets: List[Timesheet] = Relationship( + back_populates="invoice", + sa_relationship_kwargs={"lazy": "subquery"}, + ) + # status -- corresponds to InvoiceStatus enum above sent: Optional[bool] = Field(default=False) paid: Optional[bool] = Field(default=False) @@ -554,7 +586,7 @@ class Invoice(SQLModel, table=True): ) rendered: bool = Field( default=False, - description="If the invoice has been rendered as a PDF.", + description="Whether the invoice has been rendered as a PDF.", ) # @@ -573,7 +605,7 @@ def total(self) -> Decimal: """Total invoiced amount.""" return self.sum + self.VAT_total - def generate_number(self, pattern=None, counter=None): + def generate_number(self, pattern=None, counter=None) -> str: """Generate an invoice number""" date_prefix = self.date.strftime("%Y-%m-%d") # suffix = hashlib.shake_256(str(uuid.uuid4()).encode("utf-8")).hexdigest(2) @@ -584,9 +616,12 @@ def generate_number(self, pattern=None, counter=None): self.number = f"{date_prefix}-{suffix}" @property - def due_date(self): + def due_date(self) -> Optional[datetime.date]: """Date until which payment is due.""" - return self.date + datetime.timedelta(days=self.contract.term_of_payment) + if self.contract.term_of_payment: + return self.date + datetime.timedelta(days=self.contract.term_of_payment) + else: + return None @property def client(self): @@ -597,7 +632,7 @@ def prefix(self): """A string that can be used as the prefix of a file name, or a folder name.""" client_suffix = "" if self.client: - client_suffix = self.client.name.lower().split()[0] + client_suffix = "-".join(self.client.name.lower().split()) prefix = f"{self.number}-{client_suffix}" return prefix diff --git a/tuttle/rendering.py b/tuttle/rendering.py index e28df137..342def2b 100644 --- a/tuttle/rendering.py +++ b/tuttle/rendering.py @@ -117,8 +117,8 @@ def emit_pdf(finished): def render_invoice( user: User, invoice: Invoice, + out_dir, document_format: str = "pdf", - out_dir=None, style: str = "anvil", only_final: bool = False, ): @@ -203,8 +203,6 @@ def as_percentage(number): invoice_dir / Path(f"{invoice.prefix}.html"), final_output_path ) shutil.rmtree(invoice_dir) - # finally set the rendered flag - invoice.rendered = True # finally set the rendered flag invoice.rendered = True @@ -212,10 +210,11 @@ def as_percentage(number): def render_timesheet( user: User, timesheet: Timesheet, - document_format: str = "html", - out_dir: str = None, + out_dir, + document_format: str = "pdf", style: str = "anvil", -) -> str: + only_final: bool = False, +): """Render a Timeseheet using an HTML template. 
Args: @@ -239,7 +238,7 @@ def render_timesheet( return html else: # write invoice html - prefix = f"Timesheet-{timesheet.title}" + prefix = timesheet.prefix timesheet_dir = Path(out_dir) / Path(prefix) timesheet_dir.mkdir(parents=True, exist_ok=True) timesheet_path = timesheet_dir / Path(f"{prefix}.html") @@ -273,6 +272,15 @@ def render_timesheet( css_paths=css_paths, out_path=timesheet_dir / Path(f"{prefix}.pdf"), ) + if only_final: + final_output_path = out_dir / Path(f"{prefix}.{document_format}") + if document_format == "pdf": + shutil.move(timesheet_dir / Path(f"{prefix}.pdf"), final_output_path) + else: + shutil.move(timesheet_dir / Path(f"{prefix}.html"), final_output_path) + shutil.rmtree(timesheet_dir) + # finally set the rendered flag + timesheet.rendered = True def generate_document_thumbnail(pdf_path: str, thumbnail_width: int) -> str: diff --git a/tuttle/timetracking.py b/tuttle/timetracking.py index 6f9db47a..787eb554 100644 --- a/tuttle/timetracking.py +++ b/tuttle/timetracking.py @@ -15,7 +15,7 @@ from .model import Project, Timesheet, TimeTrackingItem, User -def create_timesheet( +def generate_timesheet( timetracking_data: DataFrame, project: Project, period_start: datetime.date, @@ -52,84 +52,8 @@ def create_timesheet( period_str = f"{period_start} - {period_end}" ts = Timesheet( title=f"{project.title} - {period_str}", - # period=period, - project=project, - comment=comment, - date=date, - ) - for record in ts_table.reset_index().to_dict("records"): - ts.items.append(TimeTrackingItem(**record)) - - return ts - - -@deprecated -def generate_timesheet( - source, - project: Project, - period_start: str, - period_end: str = None, - date: datetime.date = datetime.date.today(), - comment: str = "", - group_by: str = None, - item_description: str = None, - as_dataframe: bool = False, -) -> Timesheet: - if period_end: - period = (period_start, period_end) - period_str = f"{period_start} - {period_end}" - else: - period = period_start - period_str = f"{period_start}" - # convert cal to data - timetracking_data = None - if issubclass(type(source), Calendar): - cal = source - timetracking_data = cal.to_data() - elif isinstance(source, pandas.DataFrame): - timetracking_data = source - schema.time_tracking.validate(timetracking_data) - else: - raise ValueError(f"unknown source: {source}") - tag_query = f"tag == '{project.tag}'" - if period_end: - ts_table = ( - timetracking_data.loc[period_start:period_end].query(tag_query).sort_index() - ) - else: - ts_table = timetracking_data.loc[period_start].query(tag_query).sort_index() - # convert all-day entries - ts_table.loc[ts_table["all_day"], "duration"] = ( - project.contract.unit.to_timedelta() * project.contract.units_per_workday - ) - if item_description: - # TODO: extract item description from calendar - ts_table["description"] = item_description - # assert not ts_table.empty - if as_dataframe: - return ts_table - - # TODO: grouping - if group_by is None: - pass - elif group_by == "day": - ts_table = ts_table.reset_index() - ts_table = ts_table.groupby(by=ts_table["begin"].dt.date).agg( - { - "title": "first", - "tag": "first", - "description": "first", - "duration": "sum", - } - ) - elif group_by == "week": - raise NotImplementedError("TODO") - else: - raise ValueError(f"unknown group_by argument: {group_by}") - - ts = Timesheet( - title=f"{project.title} - {period_str}", - period=period, + period_start=period_start, + period_end=period_end, project=project, comment=comment, date=date,
View: Timesheet
For time-based contracts, invoices are generated together with timesheets. The timesheets are rendered to PDF. They should be visible and manageable in a similar way to invoices. Possibly attached to their invoices in the UI?
2023-01-23T20:10:16
0.0
[]
[]
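The core schema change in the tuttle patch above is the new `Invoice` 1:n `Timesheet` link, which is what lets the UI resolve a timesheet PDF from an invoice row (`view_timesheet_for_invoice`). A condensed SQLModel sketch with the unrelated columns stripped out; field names follow `tuttle/model.py`, and the `lazy="subquery"` relationship kwargs are omitted:

```python
# Condensed sketch of the Invoice 1:n Timesheet relationship from the patch.
from typing import List, Optional

from sqlmodel import Field, Relationship, SQLModel


class Timesheet(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    # Timesheet n:1 Invoice
    invoice_id: Optional[int] = Field(default=None, foreign_key="invoice.id")
    invoice: Optional["Invoice"] = Relationship(back_populates="timesheets")


class Invoice(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    # Invoice 1:n Timesheet
    timesheets: List["Timesheet"] = Relationship(back_populates="invoice")
```

With this link in place, `get_timesheet_for_invoice` can assert that exactly one timesheet is attached and build its PDF path from `timesheet.prefix`.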
MaxDude132/drf-serializer-prefetch
MaxDude132__drf-serializer-prefetch-16
9c0f69fd2043e4dc46cd34c824aec439c4472a51
diff --git a/serializer_prefetch/base.py b/serializer_prefetch/base.py index d4cbb54..cc76cf4 100644 --- a/serializer_prefetch/base.py +++ b/serializer_prefetch/base.py @@ -1,7 +1,8 @@ from contextlib import suppress from collections.abc import Iterable +import copy -from django.db.models import Model, QuerySet, prefetch_related_objects +from django.db.models import Model, QuerySet, prefetch_related_objects, Prefetch from django.utils.translation import gettext as _ from rest_framework.fields import empty from rest_framework import serializers @@ -21,9 +22,13 @@ def get_select_related_data(self, serializer): def get_prefetch_related_data(self, serializer): if hasattr(serializer, "get_prefetch_related"): - return serializer.get_prefetch_related() + return self._transform_iterable_to_prefetches( + serializer.get_prefetch_related() + ) - return getattr(serializer, "prefetch_related", []) + return self._transform_iterable_to_prefetches( + getattr(serializer, "prefetch_related", []) + ) def get_additional_serializers_data(self, serializer): if hasattr(serializer, "get_additional_serializers"): @@ -69,7 +74,12 @@ def call_other_prefetching_methods(self): for method in self._other_prefetching_methods: method() - def get_prefetch(self, serializer, current_relation="", should_prefetch=False): + def get_prefetch( + self, + serializer: serializers.Serializer, + current_relation: Prefetch = None, + should_prefetch: bool = False, + ): if hasattr(serializer, "child"): serializer = serializer.child @@ -96,10 +106,28 @@ def get_prefetch(self, serializer, current_relation="", should_prefetch=False): return select_items, prefetch_items - def _extend_relation_items(self, select_items, prefetch_items, return_values): - return select_items.extend(return_values[0]), prefetch_items.extend( - return_values[1] - ) + def _extend_relation_items( + self, + select_items: Iterable[str], + prefetch_items: Iterable[Prefetch], + return_values: Iterable[Iterable[Prefetch | str]], + ): + select_items.extend(return_values[0]) + + simple_prefetch = [ + item.prefetch_through if isinstance(item, Prefetch) else item + for item in prefetch_items + ] + for value in return_values[1]: + prefetch_through = ( + value.prefetch_through if isinstance(value, Prefetch) else value + ) + if prefetch_through in simple_prefetch: + continue + + prefetch_items.append(value) + + return select_items, prefetch_items def _get_custom_relations(self, serializer, current_relation): select_related_attr = self.get_select_related_data(serializer) @@ -120,8 +148,8 @@ def _get_additional_serializers_relations(self, serializer, current_relation): prefetch_items = [] for additional_serializer_data in additional_serializers: - custom_current_relation = additional_serializer_data.get( - "relation_and_field", "" + custom_current_relation = self._transform_str_to_prefetch( + additional_serializer_data.get("relation_and_field", "") ) if current_relation: @@ -136,7 +164,8 @@ def _get_additional_serializers_relations(self, serializer, current_relation): if additional_serializer is None: raise ValueError( _( - "The additional_serializer value is missing the key `serializer`." + "The additional_serializer value is " + "missing the key `serializer`." 
) ) @@ -152,6 +181,18 @@ def _get_additional_serializers_relations(self, serializer, current_relation): return select_items, prefetch_items + def _get_all_prefetch(self, serializer): + yield from ( + p + for p in self.get_prefetch_related_data(serializer) + if isinstance(p, Prefetch) + ) + yield from ( + p["relation_and_field"] + for p in self.get_additional_serializers_data(serializer) + if isinstance(["relation_and_field"], Prefetch) + ) + def _get_serializer_field_relations( self, serializer, current_relation, should_prefetch ): @@ -174,13 +215,21 @@ def _get_serializer_field_relations( if not (relation := info.relations.get(source)) or getattr( field, "method_name", None ): - continue + for prefetch in self._get_all_prefetch(serializer): + if prefetch.prefetch_to == source: + relation = info.relations.get(prefetch.prefetch_through) + break + else: + continue if relation.to_many: future_should_prefetch = True append_to = prefetch_items if future_should_prefetch else select_items + if should_prefetch: + source = Prefetch(source) + if current_relation: source = self._get_joined_prefetch(current_relation, source) @@ -197,7 +246,7 @@ def _get_serializer_field_relations( return select_items, prefetch_items - def _get_custom_related(self, related_attr, current_relation=""): + def _get_custom_related(self, related_attr, current_relation=None): if current_relation: computed_related = self._build_computed_related( related_attr, current_relation @@ -215,14 +264,51 @@ def _get_model_from_serializer(self, serializer): return serializer.child.Meta.model @staticmethod - def _get_joined_prefetch(current_relation, item): - return "__".join([current_relation, item]) + def _get_joined_prefetch(current_relation: Prefetch | str, item: Prefetch | str): + if isinstance(item, str): + return "__".join( + ( + current_relation + if isinstance(current_relation, str) + else current_relation.prefetch_through, + item, + ) + ) + + current_relation_through = ( + current_relation + if isinstance(current_relation, str) + else current_relation.prefetch_through + ) + current_relation_to = ( + current_relation + if isinstance(current_relation, str) + else current_relation.prefetch_to + ) + + new_prefetch = copy.deepcopy(item) + + new_prefetch.prefetch_through = "__".join( + [current_relation_through, item.prefetch_through] + ) + new_prefetch.prefetch_to = "__".join([current_relation_to, item.prefetch_to]) + + return new_prefetch def _build_computed_related(self, related_attr, current_relation): return [ self._get_joined_prefetch(current_relation, item) for item in related_attr ] + def _transform_str_to_prefetch(self, item): + if isinstance(item, str): + return Prefetch(item) + + return item + + def _transform_iterable_to_prefetches(self, iterable_items): + return [self._transform_str_to_prefetch(item) for item in iterable_items] + class List(list): _serializer_prefetch_done = False diff --git a/setup.py b/setup.py index ba0da1f..23a0ec8 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages -VERSION = "1.0.9" +VERSION = "1.1.0" DESCRIPTION = "An automatic prefetcher for django-rest-framework." this_directory = Path(__file__).parent LONG_DESCRIPTION = (this_directory / "README.md").read_text()
13 - Fix traversing relationships when using Prefetch objects
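For context on the patch above: it normalises plain relation strings into Django `Prefetch` objects so that nesting can be tracked on both of a prefetch's path attributes. A minimal sketch of that joining logic, using only the public `django.db.models.Prefetch` API; the `join_prefetch` helper is hypothetical, written here for illustration:

```python
# Sketch: why the patch keeps prefetch_through and prefetch_to in sync.
# They diverge whenever to_attr is used, so joining must update both.
from copy import deepcopy

from django.db.models import Prefetch


def join_prefetch(current: Prefetch, item: Prefetch) -> Prefetch:
    """Nest `item` under `current`, mirroring _get_joined_prefetch above."""
    joined = deepcopy(item)
    joined.prefetch_through = "__".join([current.prefetch_through, item.prefetch_through])
    joined.prefetch_to = "__".join([current.prefetch_to, item.prefetch_to])
    return joined


outer = Prefetch("author")
inner = Prefetch("books", to_attr="cached_books")
nested = join_prefetch(outer, inner)
print(nested.prefetch_through)  # author__books       (DB traversal path)
print(nested.prefetch_to)       # author__cached_books (attribute populated)
```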
2023-08-22T14:56:12
0.0
[]
[]
saroad2/statue
saroad2__statue-133
242b18de0622db860c760b2b6f1d0996b2b4ab56
diff --git a/setup.cfg b/setup.cfg index 9f204dca..afda4b85 100644 --- a/setup.cfg +++ b/setup.cfg @@ -41,6 +41,7 @@ install_requires = importlib_resources>=5.4.0;python_version<'3.9' toml >= 0.10.2 click >= 8.0.1 + tqdm >= 4.63.0 GitPython >= 3.1.27 types-toml >= 0.10.4 types-setuptools >= 57.4.10 diff --git a/src/statue/cli/run.py b/src/statue/cli/run.py index f90d8cb7..a7fca170 100644 --- a/src/statue/cli/run.py +++ b/src/statue/cli/run.py @@ -15,12 +15,7 @@ verbosity_option, ) from statue.cli.string_util import boxed_string, evaluation_string -from statue.cli.styled_strings import ( - failure_style, - name_style, - source_style, - success_style, -) +from statue.cli.styled_strings import failure_style, name_style, source_style from statue.commands_filter import CommandsFilter from statue.config.configuration import Configuration from statue.evaluation import Evaluation @@ -153,13 +148,7 @@ def run_cli( # pylint: disable=too-many-arguments if is_verbose(verbosity): click.echo(f"Running evaluation in {mode.lower()} mode") runner = build_runner(mode) - with click.progressbar( - length=commands_map.total_commands_count, show_pos=True, show_eta=False - ) as bar: - evaluation = runner.evaluate( - commands_map=commands_map, - update_func=lambda command: __bar_update_func(bar, command), - ) + evaluation = runner.evaluate(commands_map) if not is_silent(verbosity): click.echo(boxed_string("Evaluation")) click.echo(evaluation_string(evaluation, verbosity=verbosity)) @@ -251,13 +240,3 @@ def __handle_missing_commands(ctx, missing_commands, install, verbosity): "commands before running" ) ctx.exit(1) - - -def __bar_update_func(bar, partial_evaluation: Evaluation): - bar.update(1) - - failures = failure_style(f"{partial_evaluation.failed_commands_number} failed") - success = success_style( - f"{partial_evaluation.successful_commands_number} succeeded" - ) - bar.label = f"{failures}, {success}" diff --git a/src/statue/constants.py b/src/statue/constants.py index 846f34ff..8332222f 100644 --- a/src/statue/constants.py +++ b/src/statue/constants.py @@ -25,3 +25,6 @@ MODE = "mode" DATETIME_FORMAT = "%m/%d/%Y, %H:%M:%S" +BAR_FORMAT = "{l_bar}{bar}| {n_fmt}/{total_fmt}" +MAIN_BAR_COLOR = "blue" +SECONDARY_BAR_COLOR = "yellow" diff --git a/src/statue/runner.py b/src/statue/runner.py index 19fe935a..cf9051aa 100644 --- a/src/statue/runner.py +++ b/src/statue/runner.py @@ -3,10 +3,13 @@ import asyncio import time from enum import Enum, auto -from typing import Callable, List, Optional +from typing import List + +import tqdm from statue.command import Command from statue.commands_map import CommandsMap +from statue.constants import BAR_FORMAT, MAIN_BAR_COLOR, SECONDARY_BAR_COLOR from statue.evaluation import Evaluation, SourceEvaluation @@ -24,7 +27,6 @@ class EvaluationRunner: # pylint: disable=too-few-public-methods def evaluate( self, commands_map: CommandsMap, - update_func: Optional[Callable[[Evaluation], None]] = None, ) -> Evaluation: """ Abstract evaluation method. @@ -33,9 +35,6 @@ def evaluate( :param commands_map: map from source file to list of commands to run on it :type commands_map: CommandsMap - :param update_func: Function to be called before every command is - executed. Skip if None - :type update_func: Optional[Callable[[Command], None]] :return: Total evaluation after running all commands. 
:rtype: Evaluation """ @@ -50,32 +49,38 @@ class SynchronousEvaluationRunner( # pylint: disable=too-few-public-methods def evaluate( self, commands_map: CommandsMap, - update_func: Optional[Callable[[Evaluation], None]] = None, ) -> Evaluation: """ Run commands map and return evaluation report. :param commands_map: map from source file to list of commands to run on it :type commands_map: CommandsMap - :param update_func: Function to be called before every command is - executed. Skip if None - :type update_func: Optional[Callable[[Command], None]] :return: Total evaluation after running all commands. :rtype: Evaluation """ evaluation = Evaluation() total_start_time = time.time() - for source, commands in commands_map.items(): - source_start_time = time.time() - evaluation[source] = SourceEvaluation() - for command in commands: - evaluation[source].append(command.execute(source)) - if update_func is not None: - update_func(evaluation) - source_end_time = time.time() - evaluation[source].source_execution_duration = ( - source_end_time - source_start_time - ) + with tqdm.trange( + commands_map.total_commands_count, + bar_format=BAR_FORMAT, + colour=MAIN_BAR_COLOR, + ) as main_bar: + for source, commands in commands_map.items(): + source_start_time = time.time() + evaluation[source] = SourceEvaluation() + for command in tqdm.tqdm( + commands, + bar_format=BAR_FORMAT, + colour=SECONDARY_BAR_COLOR, + leave=False, + desc=source, + ): + evaluation[source].append(command.execute(source)) + main_bar.update(1) + source_end_time = time.time() + evaluation[source].source_execution_duration = ( + source_end_time - source_start_time + ) total_end_time = time.time() evaluation.total_execution_duration = total_end_time - total_start_time return evaluation @@ -91,97 +96,110 @@ def __init__(self): def evaluate( self, commands_map: CommandsMap, - update_func: Optional[Callable[[Evaluation], None]] = None, ) -> Evaluation: """ Run commands map asynchronously and return evaluation report. :param commands_map: map from source file to list of commands to run on it :type commands_map: CommandsMap - :param update_func: Function to be called before every command is - executed. Skip if None - :type update_func: Optional[Callable[[Command], None]] :return: Total evaluation after running all commands. :rtype: Evaluation """ - return asyncio.run( - self.evaluate_commands_map( - commands_map=commands_map, update_func=update_func - ) - ) + return asyncio.run(self.evaluate_commands_map(commands_map)) async def evaluate_commands_map( self, commands_map: CommandsMap, - update_func: Optional[Callable[[Evaluation], None]] = None, ): """ Main async function to run commands map and return evaluation report. :param commands_map: map from source file to list of commands to run on it :type commands_map: CommandsMap - :param update_func: Function to be called before every command is - executed. 
Skip if None - :type update_func: Optional[Callable[[Command], None]] :return: Evaluation """ evaluation = Evaluation() start_time = time.time() - coros = [ - self.evaluate_source( - source=source, - commands=commands, - evaluation=evaluation, - update_func=update_func, - ) - for source, commands in commands_map.items() - ] - await asyncio.gather(*coros) + max_source_name_length = max([len(source) for source in commands_map.keys()]) + with tqdm.trange( + commands_map.total_commands_count, + bar_format=BAR_FORMAT, + colour=MAIN_BAR_COLOR, + ) as main_bar: + coros = [ + self.evaluate_source( + source_bar_pos=pos, + source=source, + commands=commands, + evaluation=evaluation, + main_bar=main_bar, + max_source_name_length=max_source_name_length, + ) + for pos, (source, commands) in enumerate(commands_map.items(), start=1) + ] + await asyncio.gather(*coros) end_time = time.time() evaluation.total_execution_duration = end_time - start_time return evaluation - async def evaluate_source( + async def evaluate_source( # pylint: disable=too-many-arguments self, source: str, commands: List[Command], evaluation: Evaluation, - update_func: Optional[Callable[[Evaluation], None]] = None, + main_bar: tqdm.tqdm, + source_bar_pos: int, + max_source_name_length: int, ): """ Evaluate commands on source and return source evaluation report. + :param source_bar_pos: Position of the source bar to print + :type source_bar_pos: int :param source: Path of the desired source. :type source: str :param commands: List of commands to run on the source. :type commands: List[Command] :param evaluation: Evaluation instance to be updated after commands are running. :type evaluation: Evaluation - :param update_func: Function to be called before every command is - executed. Skip if None - :type update_func: Optional[Callable[[Command], None]] + :param main_bar: progress bar that shows how far are we in evaluating the source + :type main_bar: tqdm.tqdm + :param max_source_name_length: Maximum source name length + :type max_source_name_length: int """ evaluation[source] = SourceEvaluation() start_time = time.time() - coros = [ - self.evaluate_command( - command=command, - source=source, - evaluation=evaluation, - update_func=update_func, - ) - for command in commands - ] - await asyncio.gather(*coros) + with tqdm.trange( + len(commands), + bar_format=BAR_FORMAT, + position=source_bar_pos, + leave=False, + colour=SECONDARY_BAR_COLOR, + desc=f"{source:{max_source_name_length}}", + ) as source_bar: + coros = [ + self.evaluate_command( + command=command, + source=source, + evaluation=evaluation, + source_bar=source_bar, + main_bar=main_bar, + ) + for command in commands + ] + await asyncio.gather(*coros) end_time = time.time() evaluation[source].source_execution_duration = end_time - start_time + await self.update_lock.acquire() + self.update_lock.release() - async def evaluate_command( + async def evaluate_command( # pylint: disable=too-many-arguments self, command: Command, source: str, evaluation: Evaluation, - update_func: Optional[Callable[[Evaluation], None]] = None, + source_bar: tqdm.tqdm, + main_bar: tqdm.tqdm, ): """ Evaluate command on source and return command evaluation report. @@ -192,15 +210,17 @@ async def evaluate_command( :type command: Command :param evaluation: Evaluation instance to be updated after commands are running. :type evaluation: Evaluation - :param update_func: Function to be called before every command is - executed. 
Skip if None - :type update_func: Optional[Callable[[Command], None]] + :param source_bar: tqdm progress bar to show the progress + of evaluating this specific source. + :type source_bar: tqdm.tqdm + :param main_bar: tqdm progress bar to show total progress + :type main_bar: tqdm.tqdm """ command_evaluation = await command.execute_async(source) await self.update_lock.acquire() evaluation[source].append(command_evaluation) - if update_func is not None: - update_func(evaluation) + source_bar.update(1) + main_bar.update(1) self.update_lock.release()
[Feature] Replace `click.progressbar` with `tqdm` Description ======== *Click* will no longer support new features for progressbar, as mentioned [here](https://github.com/pallets/click/issues/2186#issuecomment-1030859560). Therefore, we should use [tqdm](https://github.com/tqdm/tqdm) instead. This will also help us make the progress bars much more elaborate and informative.
2022-03-24T19:47:50
0.0
[]
[]
saroad2/statue
saroad2__statue-100
00e18fa473a9644f0740b89ac6139783a30cdad2
diff --git a/src/statue/configuration.py b/src/statue/configuration.py index 2f7cb80c..58fc927a 100644 --- a/src/statue/configuration.py +++ b/src/statue/configuration.py @@ -15,6 +15,7 @@ DEFAULT_CONFIGURATION_FILE, HELP, OVERRIDE, + REQUIRED_CONTEXTS, SOURCES, STANDARD, STATUE, @@ -321,6 +322,17 @@ def read_command( raise UnknownCommand(command_name) if contexts is None or len(contexts) == 0: contexts = [STANDARD] + required_contexts = command_configuration.get(REQUIRED_CONTEXTS, None) + if required_contexts is not None: + missing_required_contexts = [ + context for context in required_contexts if context not in contexts + ] + if len(missing_required_contexts) != 0: + raise InvalidCommand( + f"Command `{command_name}`" + "requires the following contexts, which are missing: " + f"{', '.join(missing_required_contexts)}" + ) context_objects = [cls.get_context(context_name) for context_name in contexts] for context in context_objects: context_obj = context.search_context(command_configuration) diff --git a/src/statue/constants.py b/src/statue/constants.py index ff0b0f38..0f182f47 100644 --- a/src/statue/constants.py +++ b/src/statue/constants.py @@ -15,6 +15,7 @@ STANDARD = "standard" ALLOW_LIST = "allow_list" DENY_LIST = "deny_list" +REQUIRED_CONTEXTS = "required_contexts" ALIASES = "aliases" PARENT = "parent" IS_DEFAULT = "is_default" diff --git a/src/statue/resources/defaults.toml b/src/statue/resources/defaults.toml index b6d707df..b8ba36f6 100644 --- a/src/statue/resources/defaults.toml +++ b/src/statue/resources/defaults.toml @@ -44,9 +44,10 @@ args = [ "--remove-all-unused-imports" ] help = "Remove unused imports and variables" +required_contexts = ['format'] format = true fast = true -standard = false +test = true [commands.isort] args = [
[Bug] Autoflake is running only on setup.py Description ======== When running `statue run -c format`, the command *Autoflake* runs only on *setup.py*. It does not run on *src/statue* or *tests*. Reproduction ========= 1. Run `statue run -c format` 2. After it finishes, run `statue history show` 3. See that *autoflake* ran on *setup.py* but not on *src/statue* or *tests*
2022-02-11T07:03:48
0.0
[]
[]
mindflayer/python-mocket
mindflayer__python-mocket-239
501088ee1755a222c473b74cfb457ae534478bb0
diff --git a/mocket/mocket.py b/mocket/mocket.py index e3f025df..cca0a4cd 100644 --- a/mocket/mocket.py +++ b/mocket/mocket.py @@ -10,8 +10,8 @@ import socket import ssl from datetime import datetime, timedelta -from io import BytesIO from json.decoder import JSONDecodeError +from typing import Optional, Tuple import urllib3 from urllib3.connection import match_hostname as urllib3_match_hostname @@ -27,6 +27,7 @@ from .utils import ( SSL_PROTOCOL, MocketMode, + MocketSocketCore, get_mocketize, hexdump, hexload, @@ -73,7 +74,7 @@ class SuperFakeSSLContext: - """For Python 3.6""" + """For Python 3.6 and newer.""" class FakeSetter(int): def __set__(self, *args): @@ -81,7 +82,7 @@ def __set__(self, *args): minimum_version = FakeSetter() options = FakeSetter() - verify_mode = FakeSetter(ssl.CERT_NONE) + verify_mode = FakeSetter() class FakeSSLContext(SuperFakeSSLContext): @@ -177,6 +178,7 @@ class MocketSocket: _secure_socket = False _did_handshake = False _sent_non_empty_bytes = False + _io = None def __init__( self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, **kwargs @@ -200,10 +202,18 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close() @property - def fd(self): - if self._fd is None: - self._fd = BytesIO() - return self._fd + def io(self): + if self._io is None: + self._io = MocketSocketCore((self._host, self._port)) + return self._io + + def fileno(self): + address = (self._host, self._port) + r_fd, _ = Mocket.get_pair(address) + if not r_fd: + r_fd, w_fd = os.pipe() + Mocket.set_pair(address, (r_fd, w_fd)) + return r_fd def gettimeout(self): return self.timeout @@ -264,11 +274,6 @@ def unwrap(self): def write(self, data): return self.send(encode_to_bytes(data)) - def fileno(self): - if self.true_socket: - return self.true_socket.fileno() - return self.fd.fileno() - def connect(self, address): self._address = self._host, self._port = address Mocket._address = address @@ -276,7 +281,7 @@ def connect(self, address): def makefile(self, mode="r", bufsize=-1): self._mode = mode self._bufsize = bufsize - return self.fd + return self.io def get_entry(self, data): return Mocket.get_entry(self._host, self._port, data) @@ -292,13 +297,13 @@ def sendall(self, data, entry=None, *args, **kwargs): response = self.true_sendall(data, *args, **kwargs) if response is not None: - self.fd.seek(0) - self.fd.write(response) - self.fd.truncate() - self.fd.seek(0) + self.io.seek(0) + self.io.write(response) + self.io.truncate() + self.io.seek(0) def read(self, buffersize): - rv = self.fd.read(buffersize) + rv = self.io.read(buffersize) if rv: self._sent_non_empty_bytes = True if self._did_handshake and not self._sent_non_empty_bytes: @@ -315,6 +320,9 @@ def recv_into(self, buffer, buffersize=None, flags=None): return len(data) def recv(self, buffersize, flags=None): + r_fd, _ = Mocket.get_pair((self._host, self._port)) + if r_fd: + return os.read(r_fd, buffersize) data = self.read(buffersize) if data: return data @@ -416,8 +424,8 @@ def true_sendall(self, data, *args, **kwargs): def send(self, data, *args, **kwargs): # pragma: no cover entry = self.get_entry(data) - kwargs["entry"] = entry if not entry or (entry and self._entry != entry): + kwargs["entry"] = entry self.sendall(data, *args, **kwargs) else: req = Mocket.last_request() @@ -441,12 +449,29 @@ def do_nothing(*args, **kwargs): class Mocket: + _socket_pairs = {} _address = (None, None) _entries = collections.defaultdict(list) _requests = [] _namespace = text_type(id(_entries)) _truesocket_recording_dir = None + @classmethod + def 
get_pair(cls, address: tuple) -> Tuple[Optional[int], Optional[int]]: + """ + Given the id() of the caller, return a pair of file descriptors + as a tuple of two integers: (<read_fd>, <write_fd>) + """ + return cls._socket_pairs.get(address, (None, None)) + + @classmethod + def set_pair(cls, address: tuple, pair: Tuple[int, int]) -> None: + """ + Store a pair of file descriptors under the key `id_` + as a tuple of two integers: (<read_fd>, <write_fd>) + """ + cls._socket_pairs[address] = pair + @classmethod def register(cls, *entries): for entry in entries: @@ -467,6 +492,10 @@ def collect(cls, data): @classmethod def reset(cls): + for r_fd, w_fd in cls._socket_pairs.values(): + os.close(r_fd) + os.close(w_fd) + cls._socket_pairs = {} cls._entries = collections.defaultdict(list) cls._requests = [] diff --git a/mocket/mockhttp.py b/mocket/mockhttp.py index 1a05b7f9..25540915 100644 --- a/mocket/mockhttp.py +++ b/mocket/mockhttp.py @@ -201,7 +201,7 @@ def can_handle(self, data): """ try: requestline, _ = decode_from_bytes(data).split(CRLF, 1) - method, path, version = self._parse_requestline(requestline) + method, path, _ = self._parse_requestline(requestline) except ValueError: return self is getattr(Mocket, "_last_entry", None) diff --git a/mocket/utils.py b/mocket/utils.py index 29b2528c..9efd6ad9 100644 --- a/mocket/utils.py +++ b/mocket/utils.py @@ -1,6 +1,8 @@ from __future__ import annotations import binascii +import io +import os import ssl from typing import TYPE_CHECKING, Any, Callable, ClassVar @@ -14,6 +16,21 @@ SSL_PROTOCOL = ssl.PROTOCOL_TLSv1_2 +class MocketSocketCore(io.BytesIO): + def __init__(self, address) -> None: + self._address = address + super().__init__() + + def write(self, content): + from mocket import Mocket + + super().write(content) + + _, w_fd = Mocket.get_pair(self._address) + if w_fd: + os.write(w_fd, content) + + def hexdump(binary_string: bytes) -> str: r""" >>> hexdump(b"bar foobar foo") == decode_from_bytes(encode_to_bytes("62 61 72 20 66 6F 6F 62 61 72 20 66 6F 6F"))
Regression in 3.12.7 Hi Giorgio. Thank you for your work on this library! It looks like there was a breaking change in https://github.com/mindflayer/python-mocket/compare/3.12.6...3.12.7: - https://github.com/maxmind/GeoIP2-python/actions/runs/9148591197/job/25152243949#step:5:1083 - https://github.com/maxmind/minfraud-api-python/actions/runs/9225347716/job/25382643724
Hi @marselester, thanks for opening this issue. There is something going on with `aiohttp` I still don't fully understand, but I am aware of it. So far my investigation brought me to see very strong similarities with https://github.com/aio-libs/aiohttp/issues/5582. The latest version of Mocket was a big refactoring, mostly meant to fix https://github.com/mindflayer/python-mocket/pull/234/, and I strongly believe there is nothing wrong with Mocket itself, but of course I am open to help with this. I see from your CI that the same code works for the most recent versions of Python, which coincides with my findings. Thank you for looking into this issue ❤️ Hey @marselester, if you manage to write a few lines able to replicate the issue I'll be more than happy to help with that, even if I suspect it's not related to Mocket itself. Maybe this would help. I know it doesn't narrow down the problem greatly, but it could be a starting point. ```sh $ pyenv local 3.10.13 $ virtualenv venv $ . venv/bin/activate (venv) $ pip install -e git+https://github.com/maxmind/GeoIP2-python.git@559f145915c811c2ea93201195ad2139debce760#egg=geoip2 (venv) $ pip install mocket==3.12.7 (venv) $ pip freeze aiohttp==3.9.5 aiosignal==1.3.1 async-timeout==4.0.3 attrs==23.2.0 certifi==2024.2.2 charset-normalizer==3.3.2 decorator==5.1.1 frozenlist==1.4.1 -e git+https://github.com/maxmind/GeoIP2-python.git@559f145915c811c2ea93201195ad2139debce760#egg=geoip2 httptools==0.6.1 idna==3.7 maxminddb==2.6.1 mocket==3.12.7 multidict==6.0.5 python-magic==0.4.27 requests==2.32.3 urllib3==2.2.1 yarl==1.9.4 (venv) $ python mytest.py ``` <details> <summary>Trace</summary> ``` /blah/.pyenv/versions/3.10.13/lib/python3.10/asyncio/selector_events.py:746: ResourceWarning: unclosed <socket.socket fd=7, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('0.0.0.0', 0)> self._protocol = None ResourceWarning: Enable tracemalloc to get the object allocation traceback E ====================================================================== ERROR: test_200_error (__main__.TestAsyncClient) ---------------------------------------------------------------------- Traceback (most recent call last): File "/blah/blah/venv/lib/python3.10/site-packages/decorator.py", line 232, in fun return caller(func, *(extras + args), **kw) File "/blah/blah/venv/lib/python3.10/site-packages/mocket/mocket.py", line 736, in wrapper return test(*args, **kwargs) File "/blah/blah/mytest.py", line 33, in test_200_error self.run_client(self.client.country("1.1.1.1")) File "/blah/blah/mytest.py", line 19, in run_client return self._loop.run_until_complete(v) File "/blah/.pyenv/versions/3.10.13/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete return future.result() File "/blah/blah/venv/src/geoip2/geoip2/webservice.py", line 307, in country await self._response_for("country", geoip2.models.Country, ip_address), File "/blah/blah/venv/src/geoip2/geoip2/webservice.py", line 346, in _response_for async with await session.get(uri, proxy=self._proxy) as response: File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/client.py", line 608, in _request await resp.start(conn) File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/client_reqrep.py", line 971, in start with self._timer: File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/helpers.py", line 735, in __exit__ raise asyncio.TimeoutError from None asyncio.exceptions.TimeoutError ---------------------------------------------------------------------- Ran 1 test in 60.079s 
FAILED (errors=1) ``` </details> <details> <summary>Test</summary> ```python import asyncio import unittest from mocket.plugins.httpretty import httpretty, httprettified from geoip2.errors import GeoIP2Error from geoip2.webservice import AsyncClient class TestAsyncClient(unittest.TestCase): def setUp(self): self._loop = asyncio.new_event_loop() self.client = AsyncClient(42, "abcdef123456") def tearDown(self): self._loop.run_until_complete(self.client.close()) self._loop.close() def run_client(self, v): return self._loop.run_until_complete(v) @httprettified def test_200_error(self): httpretty.register_uri( httpretty.GET, "https://geoip.maxmind.com/geoip/v2.1/country/1.1.1.1", body="", status=200, content_type="application/vnd.maxmind.com-country+json; charset=UTF-8; version=1.0", ) with self.assertRaisesRegex( GeoIP2Error, "could not decode the response as JSON" ): self.run_client(self.client.country("1.1.1.1")) if __name__ == "__main__": unittest.main() ``` </details> I took an example from your README and it also failed with `asyncio.exceptions.TimeoutError`. ```sh (venv) $ python t.py ``` <details> <summary>Test</summary> ```python import json import aiohttp import asyncio import unittest from mocket.plugins.httpretty import httpretty, httprettified class AioHttpEntryTestCase(unittest.TestCase): @httprettified def test_https_session(self): url = 'https://httpbin.org/ip' httpretty.register_uri( httpretty.GET, url, body=json.dumps(dict(origin='127.0.0.1')), ) async def main(l): async with aiohttp.ClientSession( loop=l, timeout=aiohttp.ClientTimeout(total=3) ) as session: async with session.get(url) as get_response: assert get_response.status == 200 assert await get_response.text() == '{"origin": "127.0.0.1"}' loop = asyncio.new_event_loop() loop.set_debug(True) loop.run_until_complete(main(loop)) if __name__ == '__main__': unittest.main() ``` </details> <details> <summary>Trace</summary> ``` /blah/.pyenv/versions/3.10.13/lib/python3.10/asyncio/selector_events.py:746: ResourceWarning: unclosed <socket.socket fd=7, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('0.0.0.0', 0)> self._protocol = None ResourceWarning: Enable tracemalloc to get the object allocation traceback E ====================================================================== ERROR: test_https_session (__main__.AioHttpEntryTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "/blah/blah/venv/lib/python3.10/site-packages/decorator.py", line 232, in fun return caller(func, *(extras + args), **kw) File "/blah/blah/venv/lib/python3.10/site-packages/mocket/mocket.py", line 736, in wrapper return test(*args, **kwargs) File "/blah/blah/t.py", line 30, in test_https_session loop.run_until_complete(main(loop)) File "/blah/.pyenv/versions/3.10.13/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete return future.result() File "/blah/blah/t.py", line 24, in main async with session.get(url) as get_response: File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/client.py", line 1197, in __aenter__ self._resp = await self._coro File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/client.py", line 608, in _request await resp.start(conn) File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/client_reqrep.py", line 971, in start with self._timer: File "/blah/blah/venv/lib/python3.10/site-packages/aiohttp/helpers.py", line 735, in __exit__ raise asyncio.TimeoutError from None asyncio.exceptions.TimeoutError 
---------------------------------------------------------------------- Ran 1 test in 3.011s FAILED (errors=1) ``` </details> The strange thing is that it works well with Python 3.11+.
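For anyone digging into the regression: the fix direction visible in the patch above replaces the `BytesIO`-backed `fileno()` with a real `os.pipe()`, because selector-based event loops (which aiohttp sits on) poll genuine file descriptors for readiness. A minimal standalone sketch of that mechanism, independent of mocket (POSIX-only, since `select` on pipes does not work on Windows):

```python
import os
import select

r_fd, w_fd = os.pipe()
os.write(w_fd, b"HTTP/1.1 200 OK\r\n\r\n")      # mock "response" bytes arrive
ready, _, _ = select.select([r_fd], [], [], 0)  # the event loop sees readiness
assert ready == [r_fd]
print(os.read(r_fd, 1024))

os.close(r_fd)
os.close(w_fd)
```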
2024-05-31T08:48:55
0.0
[]
[]
a-r-j/graphein
a-r-j__graphein-357
3d7af1fd07dc16707068b2d63a2f3668fc02c632
diff --git a/CHANGELOG.md b/CHANGELOG.md index ebe2dbc5b..f16d9f38a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ +### 1.7.6 - UNRELEASED + +* Fixes bug in sidechain torsion angle computation for structures containing `PYL`/other non-standard amino acids ([#357](https://github.com/a-r-j/graphein/pull/357)). Fixes [#356](https://github.com/a-r-j/graphein/issues/356). + ### 1.7.5 - 27/10/2024 * Improves the tensor->PDB writer (`graphein.protein.tensor.io.to_pdb`) by automatically unravelling residue-level b-factor predictions/annotations ([#352](https://github.com/a-r-j/pull/352)). + ### 1.7.4 - 26/10/2023 * Adds support for PyG 2.4+ ([#350](https://www.github.com/a-r-j/graphein/pull/339)) diff --git a/graphein/protein/tensor/angles.py b/graphein/protein/tensor/angles.py index b91c93013..499d1bade 100644 --- a/graphein/protein/tensor/angles.py +++ b/graphein/protein/tensor/angles.py @@ -77,8 +77,14 @@ def _extract_torsion_coords( for i, res in enumerate(res_types): res_coords = [] - angle_groups = CHI_ANGLES_ATOMS[res] - if not selenium and res == "SEC": + try: + angle_groups = CHI_ANGLES_ATOMS[res] + except KeyError: + log.warning( + f"Can't determine chi angle groups for non-standard residue: {res}. These will be set to 0" + ) + angle_groups = [] + if (not selenium and res == "SEC") or res == "PYL": angle_groups = [] for angle_coord_set in angle_groups:
Sidechain torsion angle computation fails on examples containing Pyrrolysine **Describe the bug** Hi Arian, great package! The sidechain angle computation in the tensor submodule fails on non-standard residues. You've already handled this for `SEC`, it just needs to be extended to account for `PYL`. Thanks and have a great day!
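A condensed sketch of the defensive lookup the patch above introduces: unknown residues fall back to an empty chi-angle group instead of raising `KeyError`. The `CHI_ANGLES_ATOMS` table is abridged to two residues here for illustration:

```python
CHI_ANGLES_ATOMS = {"ALA": [], "ARG": [["N", "CA", "CB", "CG"]]}  # abridged


def chi_angle_groups(res: str, selenium: bool = True) -> list:
    try:
        groups = CHI_ANGLES_ATOMS[res]
    except KeyError:
        print(f"Can't determine chi angle groups for non-standard residue: {res}. These will be set to 0")
        return []
    if (not selenium and res == "SEC") or res == "PYL":
        return []
    return groups


print(chi_angle_groups("PYL"))  # [] rather than a KeyError
```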
2023-11-06T08:47:39
0.0
[]
[]
a-r-j/graphein
a-r-j__graphein-268
87985a157623a92f01e7942e048fbdae32e26f14
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4e556c5f..c8146bc5 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -13,7 +13,7 @@ on: jobs: build_cpu: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest strategy: matrix: python-version: [3.8, 3.9] diff --git a/.github/workflows/code-style.yaml b/.github/workflows/code-style.yaml index 87d38f2b..400fec73 100644 --- a/.github/workflows/code-style.yaml +++ b/.github/workflows/code-style.yaml @@ -5,7 +5,7 @@ on: [push] jobs: black: name: "Ensure black compliance" - runs-on: "ubuntu-18.04" + runs-on: "ubuntu-latest" steps: - name: Checkout repository diff --git a/.github/workflows/minimal__install.yaml b/.github/workflows/minimal__install.yaml index f18a6346..f05e0092 100644 --- a/.github/workflows/minimal__install.yaml +++ b/.github/workflows/minimal__install.yaml @@ -17,7 +17,7 @@ on: jobs: build_cpu: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest strategy: matrix: python-version: [3.7, 3.8, 3.9, 3.11] diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d840fa5..2a94c133 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,8 @@ * [Bugfix] - [#220](https://github.com/a-r-j/graphein/pull/220) Fixes edge metadata conversion to PyG. Contribution by @manonreau. * [Bugfix] - [#220](https://github.com/a-r-j/graphein/pull/220) Fixes centroid atom grouping & avoids unnecessary edge computation where none are found. Contribution by @manonreau. +* [Bugfix] - [#268](https://github.com/a-r-j/graphein/pull/268) Fixes 'sequence' metadata feature for atomistic graphs, removing duplicate residues. Contribution by @kamurani. + #### ML * [Bugfix] - [#234](https://github.com/a-r-j/graphein/pull/234) - Fixes bugs and improves `conversion.convert_nx_to_pyg` and `visualisation.plot_pyg_data`. Removes distance matrix (`dist_mat`) from defualt set of features converted to tensor. diff --git a/graphein/protein/graphs.py b/graphein/protein/graphs.py index ccacee8e..c74d4a43 100644 --- a/graphein/protein/graphs.py +++ b/graphein/protein/graphs.py @@ -493,6 +493,15 @@ def initialise_graph_with_metadata( sequence = protein_df.loc[protein_df["chain_id"] == c][ "residue_name" ].str.cat() + elif granularity == "atom": + sequence = ( + protein_df.loc[ + (protein_df["chain_id"] == c) + & (protein_df["atom_name"] == "CA") + ]["residue_name"] + .apply(three_to_one_with_mods) + .str.cat() + ) else: sequence = ( protein_df.loc[protein_df["chain_id"] == c]["residue_name"] diff --git a/graphein/protein/visualisation.py b/graphein/protein/visualisation.py index e188ca82..ab2eb741 100644 --- a/graphein/protein/visualisation.py +++ b/graphein/protein/visualisation.py @@ -449,7 +449,8 @@ def plot_protein_structure_graph( # 3D network plot with plt.style.context(plot_style): fig = plt.figure(figsize=figsize) - ax = Axes3D(fig, auto_add_to_figure=True) + ax = Axes3D(fig) + fig.add_axes(ax) # Loop on the pos dictionary to extract the x,y,z coordinates of each # node
graph feature `sequence_{chain_id}` contains duplicate residues for atomistic graphs I'm trying to extract sequences from graphs loaded from PDB files. When constructing a residue-granularity graph from the protein, the sequence (stored at `g.graph[f'sequence_{chain_id}']`) is as expected. However, when using `atom` granularity, the graph's sequence attribute contains repeated residue letters (I am guessing one for each atom in the amino acid). ```python # Construct graphs, one atomistic and one residue level g_atom = construct_graph( pdb_code="6HD6", config=ProteinGraphConfig( pdb_dir=pdb_dir, granularity="atom", # atomistic ) ) g_res = construct_graph( pdb_code="6HD6", config=ProteinGraphConfig( pdb_dir=pdb_dir, granularity="CA", # residue ) ) ``` ```python c = 'A' g_res.graph[f'sequence_{c}'] ``` ``` 'AMDPSSPNYDKWEMERTDITMKHKLGGGQYGEVYEG .... ``` ```python g_atom.graph[f'sequence_{c}'] ``` ``` 'AAAAAMMMMMMMMDDDDDDDDPPPPPPPSSSSSSSSSSS .... ``` Dev merge dev features into master
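For context, a toy sketch of the fix applied in the patch above: for atom-granularity graphs, the per-chain sequence is derived from CA atoms only, so each residue contributes exactly one letter. The dataframe and the `three_to_one` mapping below are illustrative stand-ins for the real PDB dataframe and graphein's `three_to_one_with_mods`:

```python
import pandas as pd

protein_df = pd.DataFrame({
    "chain_id":     ["A", "A", "A", "A"],
    "atom_name":    ["N", "CA", "C", "CA"],
    "residue_name": ["ALA", "ALA", "ALA", "MET"],
})
three_to_one = {"ALA": "A", "MET": "M"}

# Keep one row per residue by filtering to the alpha-carbon atoms.
ca_rows = protein_df[(protein_df["chain_id"] == "A") & (protein_df["atom_name"] == "CA")]
sequence = ca_rows["residue_name"].map(three_to_one).str.cat()
print(sequence)  # "AM", not "AAAM"
```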
2023-02-19T22:54:50
0.0
[]
[]
a-r-j/graphein
a-r-j__graphein-73
8cad9c75c83db7236ed262178b06331464402695
diff --git a/graphein/protein/graphs.py b/graphein/protein/graphs.py index 2f44a7f0..03de1338 100644 --- a/graphein/protein/graphs.py +++ b/graphein/protein/graphs.py @@ -8,7 +8,10 @@ from __future__ import annotations import logging -from typing import Callable, List, Optional +import multiprocessing +import traceback +from functools import partial +from typing import Callable, Dict, List, Optional, Tuple, Union import networkx as nx import numpy as np @@ -627,6 +630,106 @@ def construct_graph( return g +def _mp_graph_constructor( + args: Tuple[str, str], use_pdb_code: bool, config: ProteinGraphConfig +) -> nx.Graph: + """ + Protein graph constructor for use in multiprocessing several protein structure graphs. + + :param args: Tuple of pdb code/path and the chain selection for that PDB + :type args: Tuple[str, str] + :param use_pdb_code: Whether or not we are using pdb codes or paths + :type use_pdb_code: bool + :param config: Protein structure graph construction config + :type config: ProteinGraphConfig + :return: Protein structure graph + :rtype: nx.Graph + """ + log.info(f"Constructing graph for: {args[0]}. Chain selection: {args[1]}") + func = partial(construct_graph, config=config) + if use_pdb_code: + try: + result = func(pdb_code=args[0], chain_selection=args[1]) + return result + except Exception as ex: + log.info( + f"Graph construction error (PDB={args[0]})! {traceback.format_exc()}" + ) + log.info(ex) + return None + elif not use_pdb_code: + try: + result = func(pdb_path=args[0], chain_selection=args[1]) + return result + except Exception as ex: + log.info( + f"Graph construction error (PDB={args[0]})! {traceback.format_exc()}" + ) + log.info(ex) + return None + + +def construct_graphs_mp( + pdb_code_it: Optional[List[str]] = None, + pdb_path_it: Optional[List[str]] = None, + chain_selections: Optional[list[str]] = None, + config: ProteinGraphConfig = ProteinGraphConfig(), + num_cores: int = 16, + return_dict: bool = True, +) -> Union[List[nx.Graph], Dict[str, nx.Graph]]: + """ + Constructs protein graphs for a list of pdb codes or pdb paths using multiprocessing. + + :param pdb_code_it: List of pdb codes to use for protein graph construction + :type pdb_code_it: Optional[List[str]], defaults to None + :param pdb_path_it: List of paths to PDB files to use for protein graph construction + :type pdb_path_it: Optional[List[str]], defaults to None + :param chain_selections: List of chains to select from the protein structures (e.g. ["ABC", "A", "L", "CD"...]) + :type chain_selections: Optional[List[str]], defaults to None + :param config: ProteinGraphConfig to use. + :type config: graphein.protein.config.ProteinGraphConfig, defaults to default config params + :param num_cores: Number of cores to use for multiprocessing. The more the merrier + :type num_cores: int, defaults to 16 + :param return_dict: Whether or not to return a dictionary (indexed by pdb codes/paths) or a list of graphs. + :type return_dict: bool, default to True + :return: Iterable of protein graphs. None values indicate there was a problem in constructing the graph for this particular pdb + :rtype: Union[List[nx.Graph], Dict[str, nx.Graph]] + """ + assert ( + pdb_code_it is not None or pdb_path_it is not None + ), "Iterable of pdb codes OR pdb paths required." 
+ + if pdb_code_it is not None: + pdbs = pdb_code_it + use_pdb_code = True + + if pdb_path_it is not None: + pdbs = pdb_path_it + use_pdb_code = False + + if chain_selections is None: + chain_selections = ["all"] * len(pdbs) + + constructor = partial( + _mp_graph_constructor, use_pdb_code=use_pdb_code, config=config + ) + + pool = multiprocessing.Pool(num_cores) + graphs = list( + pool.map( + constructor, + [(pdb, chain_selections[i]) for i, pdb in enumerate(pdbs)], + ) + ) + pool.close() + pool.join() + + if return_dict: + graphs = {pdb: graphs[i] for i, pdb in enumerate(pdbs)} + + return graphs + + if __name__ == "__main__": from functools import partial @@ -647,9 +750,29 @@ def construct_graph( partial(add_k_nn_edges, k=3, long_interaction_threshold=0) ] # Test High-level API - g = construct_graph( - config=config, - pdb_path="../examples/pdbs/3eiy.pdb", + # g = construct_graph( + # config=config, + # pdb_path="../examples/pdbs/3eiy.pdb", + # ) + + # Test multiprocessing + graph_list = [ + "2olg", + "1bjq", + "1omr", + "1a4g", + "2je9", + "3vm5", + "1el1", + "3fzo", + "1mn1", + "1ff5", + "1fic", + "3a47", + "1bir", + ] * 5 + g = construct_graphs_mp( + pdb_code_it=graph_list, config=ProteinGraphConfig(), return_dict=True ) """
Multiprocessing for construction of graph lists Described in #71
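A hedged usage sketch of the `construct_graphs_mp` API introduced by the patch above; the signature is taken from the diff, and the snippet assumes graphein is installed with its default configuration:

```python
from graphein.protein.config import ProteinGraphConfig
from graphein.protein.graphs import construct_graphs_mp

graphs = construct_graphs_mp(
    pdb_code_it=["2olg", "1bjq", "1omr"],
    config=ProteinGraphConfig(),
    num_cores=4,
    return_dict=True,  # {"2olg": <nx.Graph>, ...}; failed constructions map to None
)
```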
2021-10-03T14:42:24
0.0
[]
[]
tsv1/amqp-mock
tsv1__amqp-mock-8
dfcf50a4a455063331fa334b19db98cf59d88ea9
diff --git a/amqp_mock/_storage.py b/amqp_mock/_storage.py index 7b86231..0495a7d 100644 --- a/amqp_mock/_storage.py +++ b/amqp_mock/_storage.py @@ -1,3 +1,4 @@ +from asyncio import Queue from collections import OrderedDict from typing import AsyncGenerator, Dict, List @@ -7,7 +8,7 @@ class Storage: def __init__(self) -> None: self._exchanges: Dict[str, List[Message]] = {} - self._queues: Dict[str, List[Message]] = {} + self._queues: Dict[str, Queue[Message]] = {} self._history: Dict[str, QueuedMessage] = OrderedDict() async def clear(self) -> None: @@ -31,8 +32,8 @@ async def delete_messages_from_exchange(self, exchange: str) -> None: async def add_message_to_queue(self, queue: str, message: Message) -> None: if queue not in self._queues: - self._queues[queue] = [] - self._queues[queue].insert(0, message) + self._queues[queue] = Queue() + await self._queues[queue].put(message) self._history[message.id] = QueuedMessage(message, queue) async def get_history(self) -> List[QueuedMessage]: @@ -44,6 +45,6 @@ async def change_message_status(self, message_id: str, status: MessageStatus) -> async def get_next_message(self, queue: str) -> AsyncGenerator[Message, None]: if queue not in self._queues: return - while len(self._queues[queue]) > 0: - message = self._queues[queue].pop() - yield message + + while True: + yield await self._queues[queue].get() diff --git a/amqp_mock/amqp_server/_amqp_connection.py b/amqp_mock/amqp_server/_amqp_connection.py index e293ede..c600e36 100644 --- a/amqp_mock/amqp_server/_amqp_connection.py +++ b/amqp_mock/amqp_server/_amqp_connection.py @@ -1,6 +1,6 @@ import json import logging -from asyncio import Task, create_task +from asyncio import Task, create_task, gather from asyncio.streams import StreamReader, StreamWriter from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, List, Optional, Union @@ -60,6 +60,12 @@ def _get_delivery_tag(self) -> int: return self._delivery_tag async def close(self) -> None: + for consumer_task in self._consumers: + consumer_task.cancel() + + await gather(*self._consumers, return_exceptions=True) + del self._consumers[:] + self._stream_writer.close() await self._stream_writer.wait_closed()
Handle basic.cancel frames on the connection Previously it was not possible to cancel a consumer with basic_cancel, and attempting to do so would hang the mock server. Now basic.cancel messages are handled and responded to with basic.cancelok. The AmqpConnection._consumers attribute is now a dict mapping (channel_id, consumer_tag) to each consumer task so they can be looked up and cancelled. I tried my best to follow your coding/test styles, but I'm happy to change whatever. I have several other incoming fixes for some relatively non-trivial use cases I've found using amqp-mock on one of my projects; I hope you'll find them useful.
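For context, a standalone sketch of the shutdown pattern the patch adds to `close()`: cancel every consumer task, then `gather` them with `return_exceptions=True` so the resulting `CancelledError`s are collected instead of propagating:

```python
import asyncio


async def consumer(queue: asyncio.Queue) -> None:
    while True:
        message = await queue.get()
        print("consumed", message)


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    consumers = [asyncio.create_task(consumer(queue)) for _ in range(2)]
    await queue.put("hello")
    await asyncio.sleep(0)  # let the consumers run once
    for task in consumers:
        task.cancel()
    # return_exceptions=True swallows the CancelledError each task raises.
    await asyncio.gather(*consumers, return_exceptions=True)


asyncio.run(main())
```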
2021-10-26T15:44:54
0.0
[]
[]
tsv1/amqp-mock
tsv1__amqp-mock-3
f719f09d20ca07cc1b01935ff1475b09e3bd3980
diff --git a/amqp_mock/_mock_server.py b/amqp_mock/_mock_server.py index 4e50226..8f7c0ea 100644 --- a/amqp_mock/_mock_server.py +++ b/amqp_mock/_mock_server.py @@ -49,15 +49,18 @@ async def start(self) -> None: host=self._http_server.host, port=self._http_server.port) await http_site.start() + self._http_server.port = self._http_runner.addresses[0][1] self._amqp_runner = AmqpRunner(self._amqp_server) await self._amqp_runner.setup() amqp_site = AmqpSite(self._amqp_runner, host=self._amqp_server.host, - port=self._amqp_server.port) + port=self._amqp_server.port or None) await amqp_site.start() + self._amqp_server.port = amqp_site.port + async def stop(self) -> None: if self._http_runner: await self._http_runner.cleanup() diff --git a/amqp_mock/amqp_server/_amqp_server.py b/amqp_mock/amqp_server/_amqp_server.py index 2c58f9a..f1c0a13 100644 --- a/amqp_mock/amqp_server/_amqp_server.py +++ b/amqp_mock/amqp_server/_amqp_server.py @@ -10,7 +10,7 @@ class AmqpServer: - def __init__(self, storage: Storage, host: str = "0.0.0.0", port: int = 5672, + def __init__(self, storage: Storage, host: str = "0.0.0.0", port: Optional[int] = None, server_properties: Optional[Dict[str, Any]] = None) -> None: self._storage = storage self._host = host @@ -41,9 +41,13 @@ def host(self) -> str: return self._host @property - def port(self) -> int: + def port(self) -> Optional[int]: return self._port + @port.setter + def port(self, value: int) -> None: + self._port = value + async def _on_publish(self, message: Message) -> None: try: message.value = json.loads(message.value.decode()) diff --git a/amqp_mock/amqp_server/_amqp_site.py b/amqp_mock/amqp_server/_amqp_site.py index fd6f3aa..a2ce137 100644 --- a/amqp_mock/amqp_server/_amqp_site.py +++ b/amqp_mock/amqp_server/_amqp_site.py @@ -1,6 +1,6 @@ from asyncio import start_server from asyncio.streams import StreamReader, StreamWriter -from typing import Any, Callable, cast +from typing import Any, Callable, cast, Optional from aiohttp.web import BaseSite @@ -10,15 +10,21 @@ class AmqpSite(BaseSite): - def __init__(self, runner: AmqpRunner, *, host: str, port: int): + def __init__(self, runner: AmqpRunner, *, host: str, port: Optional[int]): super().__init__(runner) self._host = host self._port = port + @property + def port(self) -> Optional[int]: + return self._port + async def start(self) -> None: await super().start() callback = cast(Callable[[StreamReader, StreamWriter], Any], self._runner.server) self._server = await start_server(callback, host=self._host, port=self._port) + if self._server.sockets is not None and len(self._server.sockets) > 0: + self._port = self._server.sockets[0].getsockname()[1] def name(self) -> str: return "ampq://{host}:{port}".format(host=self._host, port=self._port) diff --git a/amqp_mock/http_server/_http_server.py b/amqp_mock/http_server/_http_server.py index 9601fd6..45c43e8 100644 --- a/amqp_mock/http_server/_http_server.py +++ b/amqp_mock/http_server/_http_server.py @@ -9,7 +9,7 @@ class HttpServer: - def __init__(self, storage: Storage, host: str = "0.0.0.0", port: int = 80) -> None: + def __init__(self, storage: Storage, host: str = "0.0.0.0", port: int = 0) -> None: self._storage = storage self._host = host self._port = port @@ -22,6 +22,10 @@ def host(self) -> str: def port(self) -> int: return self._port + @port.setter + def port(self, port: int) -> None: + self._port = port + @route("GET", "/healthcheck") async def healthcheck(self, request: web.Request) -> web.Response: return json_response("200 OK") diff --git a/setup.cfg 
b/setup.cfg index 8ee86c3..f5e3b82 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.2 +current_version = 0.2.0 message = bump version → {new_version} commit = True tag = True diff --git a/setup.py b/setup.py index f347dc1..b3afd3d 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ def find_dev_required(): setup( name="amqp-mock", - version="0.1.2", + version="0.2.0", description="Remote AMQP mock", long_description=open("README.md").read(), long_description_content_type="text/markdown",
Allow specifying ports for AMQP and HTTP servers Hello, Thank you for posting this project. It looks like it will be useful for writing some of my tests. The only problem I've encountered so far is that it does not make it possible to set the ports for the servers it runs, and a bit presumptuously tries to run on some default ports (including port 80, which a normal user is not permitted to bind). I figured out that I can create the `HttpServer` and `AmqpServer` instances directly and specify ports (for testing I use port=0 to let the system pick a free port). But I just thought it might be useful if the existing `create_amqp_mock` factory function accepted these as arguments. Thanks!
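A minimal sketch of the `port=0` trick the patch builds on: binding to port 0 lets the OS choose a free port, which the server can then read back and report, which is what the new `port` setters expose:

```python
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))      # 0 means "pick any free port"
host, port = sock.getsockname()  # the port the OS actually assigned
print(f"listening on {host}:{port}")
sock.close()
```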
2021-07-03T19:04:01
0.0
[]
[]
Smirkey/powerboxes
Smirkey__powerboxes-43
e190351c7e277d9f1529228287a2518c6f50bc42
diff --git a/bindings/python/powerboxes/__init__.py b/bindings/python/powerboxes/__init__.py index 78426bb..7078f43 100644 --- a/bindings/python/powerboxes/__init__.py +++ b/bindings/python/powerboxes/__init__.py @@ -20,6 +20,7 @@ from ._powerboxes import masks_to_boxes as _masks_to_boxes from ._powerboxes import rotated_giou_distance as _rotated_giou_distance from ._powerboxes import rotated_iou_distance as _rotated_iou_distance +from ._powerboxes import rotated_tiou_distance as _rotated_tiou_distance from ._tiou import _dtype_to_func_tiou_distance _BOXES_NOT_SAME_TYPE = "boxes1 and boxes2 must have the same dtype" @@ -235,7 +236,7 @@ def rotated_iou_distance( def rotated_giou_distance( - boxes1: npt.NDArray[T], boxes2: npt.NDArray[T] + boxes1: npt.NDArray[np.float64], boxes2: npt.NDArray[np.float64] ) -> npt.NDArray[np.float64]: """Compute the pairwise giou distance between rotated boxes @@ -264,6 +265,38 @@ def rotated_giou_distance( ) +def rotated_tiou_distance( + boxes1: npt.NDArray[np.float64], boxes2: npt.NDArray[np.float64] +) -> npt.NDArray[np.float64]: + """Compute pairwise box tiou (tracking iou) distances. + + see https://arxiv.org/pdf/2310.05171.pdf for tiou definition + + Boxes should be in (cx, cy, w, h, a) format + where cx and cy are center coordinates, w and h + width and height and a, the angle in degrees + + Args: + boxes1: 2d array of boxes in cxywha format + boxes2: 2d array of boxes in cxywha format + + Raises: + TypeError: if boxes1 or boxes2 are not numpy arrays + ValueError: if boxes1 and boxes2 have different dtypes + + Returns: + np.ndarray: 2d matrix of pairwise distances + """ + if not isinstance(boxes1, np.ndarray) or not isinstance(boxes2, np.ndarray): + raise TypeError(_BOXES_NOT_NP_ARRAY) + if boxes1.dtype == boxes2.dtype == np.dtype("float64"): + return _rotated_tiou_distance(boxes1, boxes2) + else: + raise TypeError( + f"Boxes dtype: {boxes1.dtype}, {boxes2.dtype} not in float64 dtype" + ) + + def remove_small_boxes(boxes: npt.NDArray[T], min_size) -> npt.NDArray[T]: """Remove boxes with area less than min_area. 
@@ -430,6 +463,7 @@ def rtree_nms( "tiou_distance", "rotated_iou_distance", "rotated_giou_distance", + "rotated_tiou_distance", "rtree_nms", "__version__", ] diff --git a/bindings/src/lib.rs b/bindings/src/lib.rs index b7c8f3b..e82f93e 100644 --- a/bindings/src/lib.rs +++ b/bindings/src/lib.rs @@ -112,6 +112,8 @@ fn _powerboxes(_py: Python, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(rotated_iou_distance, m)?)?; // Rotated GIoU m.add_function(wrap_pyfunction!(rotated_giou_distance, m)?)?; + // Rotated TIoU + m.add_function(wrap_pyfunction!(rotated_tiou_distance, m)?)?; Ok(()) } // Masks to boxes @@ -153,6 +155,21 @@ fn rotated_giou_distance( return Ok(iou_as_numpy.to_owned()); } +// Rotated box TIoU + +#[pyfunction] +fn rotated_tiou_distance( + _py: Python, + boxes1: &PyArray2<f64>, + boxes2: &PyArray2<f64>, +) -> PyResult<Py<PyArray2<f64>>> { + let boxes1 = preprocess_rotated_boxes(boxes1).unwrap(); + let boxes2 = preprocess_rotated_boxes(boxes2).unwrap(); + let iou = tiou::rotated_tiou_distance(&boxes1, &boxes2); + let iou_as_numpy = utils::array_to_numpy(_py, iou).unwrap(); + return Ok(iou_as_numpy.to_owned()); +} + // IoU fn iou_distance_generic<T>( _py: Python, diff --git a/powerboxesrs/src/tiou.rs b/powerboxesrs/src/tiou.rs index 2e96696..5505ba0 100644 --- a/powerboxesrs/src/tiou.rs +++ b/powerboxesrs/src/tiou.rs @@ -1,7 +1,11 @@ use ndarray::Array2; use num_traits::{Num, ToPrimitive}; -use crate::{boxes, utils}; +use crate::{ + boxes::{self, rotated_box_areas}, + rotation::{minimal_bounding_rect, Rect}, + utils, +}; /// Computes the Tracking Intersection over Union (TIOU) distance between two sets of bounding boxes. /// see https://arxiv.org/pdf/2310.05171.pdf /// # Arguments @@ -67,6 +71,72 @@ where tiou_matrix } +/// Calculates the rotated tracking IoU (Tiou) distance between two sets of rotated bounding boxes. +/// +/// Given two sets of rotated bounding boxes represented by `boxes1` and `boxes2`, this function +/// computes the rotated Tiou distance matrix between them. The rotated Tiou distance is a measure +/// of dissimilarity between two rotated bounding boxes, taking into account both their overlap +/// and the encompassing area. +/// +/// # Arguments +/// +/// * `boxes1` - A reference to a 2D array (Array2) containing the parameters of the first set of rotated bounding boxes. +/// Each row of `boxes1` represents a rotated bounding box with parameters [center_x, center_y, width, height, angle in degrees]. +/// +/// * `boxes2` - A reference to a 2D array (Array2) containing the parameters of the second set of rotated bounding boxes. +/// Each row of `boxes2` represents a rotated bounding box with parameters [center_x, center_y, width, height, angle in degrees]. +/// +/// # Returns +/// +/// A 2D array (Array2) representing the rotated Tiou distance matrix between the input sets of rotated bounding boxes. +/// The element at position (i, j) in the matrix represents the rotated Giou distance between the i-th box in `boxes1` and +/// the j-th box in `boxes2`. 
+/// +pub fn rotated_tiou_distance(boxes1: &Array2<f64>, boxes2: &Array2<f64>) -> Array2<f64> { + let num_boxes1 = boxes1.nrows(); + let num_boxes2 = boxes2.nrows(); + + let mut iou_matrix = Array2::<f64>::ones((num_boxes1, num_boxes2)); + let areas1 = rotated_box_areas(&boxes1); + let areas2 = rotated_box_areas(&boxes2); + + let boxes1_rects: Vec<(f64, f64, f64, f64)> = boxes1 + .rows() + .into_iter() + .map(|row| { + minimal_bounding_rect(&Rect::new(row[0], row[1], row[2], row[3], row[4]).points()) + }) + .collect(); + let boxes2_rects: Vec<(f64, f64, f64, f64)> = boxes2 + .rows() + .into_iter() + .map(|row| { + minimal_bounding_rect(&Rect::new(row[0], row[1], row[2], row[3], row[4]).points()) + }) + .collect(); + + for (i, r1) in boxes1_rects.iter().enumerate() { + let area1 = areas1[i]; + let (x1_r1, y1_r1, x2_r1, y2_r1) = r1; + + for (j, r2) in boxes2_rects.iter().enumerate() { + let area2 = areas2[j]; + let (x1_r2, y1_r2, x2_r2, y2_r2) = r2; + + // Calculate the enclosing box (C) coordinates + let c_x1 = utils::min(*x1_r1, *x1_r2); + let c_y1 = utils::min(*y1_r1, *y1_r2); + let c_x2 = utils::max(*x2_r1, *x2_r2); + let c_y2 = utils::max(*y2_r1, *y2_r2); + // Calculate the area of the enclosing box (C) + let c_area = (c_x2 - c_x1) * (c_y2 - c_y1); + let c_area = c_area.to_f64().unwrap(); + iou_matrix[[i, j]] = utils::ONE - utils::min(area1 / c_area, area2 / c_area) + } + } + return iou_matrix; +} + #[cfg(test)] mod tests { use ndarray::arr2; @@ -74,11 +144,25 @@ mod tests { use super::*; #[test] - fn test_giou() { + fn test_tiou() { let boxes1 = arr2(&[[0.0, 0.0, 3.0, 3.0], [1.0, 1.0, 4.0, 4.0]]); let boxes2 = arr2(&[[2.0, 2.0, 5.0, 5.0], [3.0, 3.0, 6.0, 6.0]]); let tiou_matrix = tiou_distance(&boxes1, &boxes2); assert_eq!(tiou_matrix, arr2(&[[0.64, 0.75], [0.4375, 0.64]])); } + #[test] + fn test_rotated_tiou() { + let boxes1 = arr2(&[[0.0, 0.0, 3.0, 3.0, 20.0], [1.0, 1.0, 4.0, 4.0, 19.0]]); + let boxes2 = arr2(&[[2.0, 2.0, 5.0, 5.0, 0.0], [3.0, 3.0, 6.0, 6.0, 20.0]]); + + let tiou_matrix = rotated_tiou_distance(&boxes1, &boxes2); + assert_eq!( + tiou_matrix, + arr2(&[ + [0.7818149787949012, 0.8829233169330242], + [0.561738213456193, 0.7725560385451797] + ]) + ); + } }
Rotated TIoU distance Compute the TIoU (tracking IoU) distance for rotated boxes
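A Python rendering (hedged, for readability) of the distance the Rust patch computes: one minus the smaller box area divided by the area of the axis-aligned rectangle enclosing both rotated boxes' minimal bounding rectangles:

```python
def rotated_tiou_distance(rect1, rect2, area1, area2):
    """rect1/rect2 are (x1, y1, x2, y2) bounds of each rotated box;
    area1/area2 are the true (rotated) box areas."""
    c_x1 = min(rect1[0], rect2[0])
    c_y1 = min(rect1[1], rect2[1])
    c_x2 = max(rect1[2], rect2[2])
    c_y2 = max(rect1[3], rect2[3])
    c_area = (c_x2 - c_x1) * (c_y2 - c_y1)  # enclosing box (C)
    return 1.0 - min(area1 / c_area, area2 / c_area)
```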
2024-01-21T19:03:38
0.0
[]
[]
contentful/contentful-management.py
contentful__contentful-management.py-117
aa2b9e4fcd7b22685e959d6b04df95d78613a8c8
diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d02b3f..0ea0b1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ ### Changed * Updated `pyyaml` version to fix a vulnerability. +* Fixed defaultValues omit instead of None for fields in content types. ## v2.14.0 * Adds support for cross-space references diff --git a/contentful_management/content_type_field.py b/contentful_management/content_type_field.py index 1151daf..118c5e6 100644 --- a/contentful_management/content_type_field.py +++ b/contentful_management/content_type_field.py @@ -31,7 +31,8 @@ def __init__(self, field_data): self.omitted = field_data.get('omitted', False) self.required = field_data.get('required', False) self.disabled = field_data.get('disabled', False) - self.default_value = field_data.get('defaultValue', None) + if field_data.get('defaultValue', None) is not None: + self.default_value = field_data.get('defaultValue') self.validations = [ContentTypeFieldValidation(v) for v in field_data.get('validations', [])] self.allowedResources = field_data.get('allowedResources') @@ -51,9 +52,11 @@ def to_json(self): 'required': self.required, 'disabled': self.disabled, 'validations': [v.to_json() for v in self.validations], - 'defaultValue': self.default_value } + if hasattr(self, 'default_value'): + result['defaultValue'] = self.default_value + if self.type == 'Array': result['items'] = self.items
Updating a content type that has fields without default values causes an error

If I run:

```python
myContentType = (
    contentful_management.Client(CONTENTFUL_MANAGEMENT_TOKEN, default_locale="en")
    .environments(CONTENTFUL_SPACE_ID)
    .find(environment)
    .content_types()
    .find("myContentType")
)
myContentType.save()
```

I get:

```
contentful_management.errors.UnprocessableEntityError: HTTP status code: 422
Message: Validation error
Details:
* Name: type - Path: '['fields', 0, 'defaultValue']' - Value: 'None'
```

I can get around this by getting the JSON value of each field, deleting the `defaultValue` key and then reapplying it.
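A hedged sketch of that workaround, continuing from the `myContentType` object in the snippet above. Only `ContentTypeField.to_json()` is taken from the patch; how the cleaned definitions are pushed back is left as the reporter describes, since the exact update call is not confirmed here.

```python
# Workaround sketch: serialize each field, drop the None-valued
# `defaultValue` key, and re-apply the cleaned definitions before saving.
cleaned_fields = []
for field in myContentType.fields:
    data = field.to_json()              # field serialization shown in the patch
    if data.get("defaultValue") is None:
        data.pop("defaultValue", None)  # delete the key the API rejects
    cleaned_fields.append(data)
# Push cleaned_fields back onto the content type, then call save().
```

The patch above makes this workaround unnecessary: `defaultValue` is only serialized when the attribute was actually set on the field.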
Hi @99littlebugs, Thanks for reporting the issue. I was able to reproduce the error but haven't identified the cause yet. I plan to investigate it within the next 2-3 weeks, assuming it's not urgent since you have a workaround. If it is urgent, please let me know, and I will prioritize this issue. Cheers
2024-07-25T21:55:58
0.0
[]
[]
contentful/contentful-management.py
contentful__contentful-management.py-100
7bad9dc4de6a188ad32a491b273c8f360f35abad
diff --git a/CHANGELOG.md b/CHANGELOG.md index f37fa90..2c41763 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ # CHANGELOG ## Unreleased - +* Preserve fields default values when updating content type [#90](https://github.com/contentful/contentful-management.py/issues/90) ## v2.13.0 ### Fixed diff --git a/contentful_management/content_type_field.py b/contentful_management/content_type_field.py index c854ef5..81eba5e 100644 --- a/contentful_management/content_type_field.py +++ b/contentful_management/content_type_field.py @@ -31,6 +31,7 @@ def __init__(self, field_data): self.omitted = field_data.get('omitted', False) self.required = field_data.get('required', False) self.disabled = field_data.get('disabled', False) + self.default_value = field_data.get('defaultValue', None) self.validations = [ContentTypeFieldValidation(v) for v in field_data.get('validations', [])] self._coercion = self._get_coercion() @@ -48,7 +49,8 @@ def to_json(self): 'omitted': self.omitted, 'required': self.required, 'disabled': self.disabled, - 'validations': [v.to_json() for v in self.validations] + 'validations': [v.to_json() for v in self.validations], + 'defaultValue': self.default_value } if self.type == 'Array': diff --git a/fixtures/content_type/update_default_value.yaml b/fixtures/content_type/update_default_value.yaml new file mode 100644 index 0000000..c04df46 --- /dev/null +++ b/fixtures/content_type/update_default_value.yaml @@ -0,0 +1,169 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - identity + Authorization: + - Bearer foobar + Connection: + - keep-alive + Content-Type: + - application/vnd.contentful.management.v1+json + User-Agent: + - python-requests/2.25.1 + X-Contentful-User-Agent: + - sdk contentful-management.py/2.13.0; platform python/3.11.4; os macOS/22.6.0; + method: GET + uri: https://api.contentful.com/spaces/facgnwwgj5fe/environments/master/content_types/test123 + response: + body: + string: '{"sys":{"space":{"sys":{"type":"Link","linkType":"Space","id":"facgnwwgj5fe"}},"id":"test123","type":"ContentType","createdAt":"2023-09-22T07:34:08.336Z","updatedAt":"2023-09-22T08:41:11.698Z","environment":{"sys":{"id":"master","type":"Link","linkType":"Environment"}},"publishedVersion":8,"publishedAt":"2023-09-22T08:34:43.737Z","firstPublishedAt":"2023-09-22T07:34:08.651Z","createdBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}},"updatedBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}},"publishedCounter":3,"version":11,"publishedBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}}},"displayField":"hello","name":"test123","description":"","fields":[{"id":"hello","name":"hello","type":"Symbol","localized":false,"required":false,"validations":[],"defaultValue":{"en-US":"bhushan"},"disabled":false,"omitted":false}]} + + ' + headers: + Accept-Ranges: + - bytes + Access-Control-Allow-Headers: + - 
Accept,Accept-Language,Authorization,Cache-Control,CF-Context,Content-Length,Content-Range,Content-Type,DNT,Destination,Expires,If-Match,If-Modified-Since,If-None-Match,Keep-Alive,Last-Modified,Origin,Pragma,Range,User-Agent,X-Http-Method-Override,X-Mx-ReqToken,X-Requested-With,X-Contentful-Version,X-Contentful-Content-Type,X-Contentful-Organization,X-Contentful-Skip-Transformation,X-Contentful-Tag-Visibility,X-Contentful-User-Agent,X-Contentful-Enable-Alpha-Feature,X-Contentful-Source-Environment,X-Contentful-Team,X-Contentful-Parent-Id,x-contentful-validate-only,X-Contentful-Skip-UI-Draft-Validation,X-Contentful-Marketplace,X-Contentful-UI-Content-Auto-Save,cf-trace,X-Contentful-Comment-Body-Format,X-Contentful-Parent-Entity-Reference + Access-Control-Allow-Methods: + - DELETE,GET,HEAD,POST,PUT,PATCH,OPTIONS + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - Etag,x-contentful-request-id + Access-Control-Max-Age: + - '1728000' + Connection: + - keep-alive + Content-Length: + - '902' + Content-Type: + - application/vnd.contentful.management.v1+json + Contentful-Api: + - cma + Contentful-Upstream: + - content-api + Date: + - Fri, 22 Sep 2023 08:42:21 GMT + Server: + - Contentful + Strict-Transport-Security: + - max-age=15768000 + X-Content-Type-Options: + - nosniff + X-Contentful-RateLimit-Hour-Limit: + - '36000' + X-Contentful-RateLimit-Hour-Remaining: + - '35999' + X-Contentful-RateLimit-Reset: + - '0' + X-Contentful-RateLimit-Second-Limit: + - '10' + X-Contentful-RateLimit-Second-Remaining: + - '9' + cf-environment-id: + - master + cf-environment-uuid: + - master + cf-space-id: + - facgnwwgj5fe + etag: + - '"3310959143573608758"' + x-contentful-request-id: + - 3466a49e-9a5f-414c-9caf-7a2ebfb7dd83 + x-contentful-route: + - /spaces/:space/environments/:environment/content_types/:id + status: + code: 200 + message: OK +- request: + body: '{"name": "test123", "description": "", "displayField": "hello", "fields": + [{"name": "hello", "id": "hello", "type": "Symbol", "localized": false, "omitted": + false, "required": false, "disabled": false, "validations": [], "defaultValue": + {"en-US": "bhushan"}}]}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - identity + Authorization: + - Bearer foobar + Connection: + - keep-alive + Content-Length: + - '260' + Content-Type: + - application/vnd.contentful.management.v1+json + User-Agent: + - python-requests/2.25.1 + X-Contentful-User-Agent: + - sdk contentful-management.py/2.13.0; platform python/3.11.4; os macOS/22.6.0; + x-contentful-version: + - '11' + method: PUT + uri: https://api.contentful.com/spaces/facgnwwgj5fe/environments/master/content_types/test123 + response: + body: + string: 
'{"sys":{"space":{"sys":{"type":"Link","linkType":"Space","id":"facgnwwgj5fe"}},"id":"test123","type":"ContentType","createdAt":"2023-09-22T07:34:08.336Z","updatedAt":"2023-09-22T08:42:22.146Z","environment":{"sys":{"id":"master","type":"Link","linkType":"Environment"}},"publishedVersion":8,"publishedAt":"2023-09-22T08:34:43.737Z","firstPublishedAt":"2023-09-22T07:34:08.651Z","createdBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}},"updatedBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}},"publishedCounter":3,"version":12,"publishedBy":{"sys":{"type":"Link","linkType":"User","id":"59Erm8D1JuuD273aXNb65T"}}},"displayField":"hello","name":"test123","description":"","fields":[{"id":"hello","name":"hello","type":"Symbol","localized":false,"required":false,"validations":[],"defaultValue":{"en-US":"bhushan"},"disabled":false,"omitted":false}]} + + ' + headers: + Accept-Ranges: + - bytes + Access-Control-Allow-Headers: + - Accept,Accept-Language,Authorization,Cache-Control,CF-Context,Content-Length,Content-Range,Content-Type,DNT,Destination,Expires,If-Match,If-Modified-Since,If-None-Match,Keep-Alive,Last-Modified,Origin,Pragma,Range,User-Agent,X-Http-Method-Override,X-Mx-ReqToken,X-Requested-With,X-Contentful-Version,X-Contentful-Content-Type,X-Contentful-Organization,X-Contentful-Skip-Transformation,X-Contentful-Tag-Visibility,X-Contentful-User-Agent,X-Contentful-Enable-Alpha-Feature,X-Contentful-Source-Environment,X-Contentful-Team,X-Contentful-Parent-Id,x-contentful-validate-only,X-Contentful-Skip-UI-Draft-Validation,X-Contentful-Marketplace,X-Contentful-UI-Content-Auto-Save,cf-trace,X-Contentful-Comment-Body-Format,X-Contentful-Parent-Entity-Reference + Access-Control-Allow-Methods: + - DELETE,GET,HEAD,POST,PUT,PATCH,OPTIONS + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - Etag,x-contentful-request-id + Access-Control-Max-Age: + - '1728000' + Connection: + - keep-alive + Content-Length: + - '902' + Content-Type: + - application/vnd.contentful.management.v1+json + Contentful-Api: + - cma + Contentful-Upstream: + - content-api + Date: + - Fri, 22 Sep 2023 08:42:22 GMT + Server: + - Contentful + Strict-Transport-Security: + - max-age=15768000 + X-Content-Type-Options: + - nosniff + X-Contentful-RateLimit-Hour-Limit: + - '36000' + X-Contentful-RateLimit-Hour-Remaining: + - '35998' + X-Contentful-RateLimit-Reset: + - '0' + X-Contentful-RateLimit-Second-Limit: + - '10' + X-Contentful-RateLimit-Second-Remaining: + - '8' + cf-environment-id: + - master + cf-environment-uuid: + - master + cf-space-id: + - facgnwwgj5fe + etag: + - '"6211379953865625632"' + x-contentful-request-id: + - ebb6d9ac-01f3-4f66-bd43-d22138497fbe + x-contentful-route: + - /spaces/:space/environments/:environment/content_types/:id + status: + code: 200 + message: OK +version: 1
Bug: Calling content_type.save() appears to wipe out all default values on all fields of the content type

Repro steps:
1. Create a content type with a field with a default value
2. Write a Python script that retrieves this content type, then calls `content_type.save`
3. Notice that after saving, the default value entered in step 1 is empty

Maybe I'm doing something wrong? This seems like a pretty huge bug. I was following exactly the steps outlined in `Updating a content type:` in the README file when I encountered this issue.
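A minimal repro sketch following those steps (the token is a placeholder; the space, environment, and content-type IDs come from the fixture recorded in the patch, and the exact proxy chain is an assumption about the SDK surface):

```python
import contentful_management

# Steps 2-3 of the repro: fetch the content type and save it unchanged.
client = contentful_management.Client("CMA_TOKEN")  # placeholder token
ct = client.content_types("facgnwwgj5fe", "master").find("test123")
ct.save()
# Before the patch above, this round trip dropped `defaultValue` from
# every field, because ContentTypeField.to_json() never serialized it.
```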
Hi! Thanks for reporting the issue; it is indeed a bug and we are taking a look at it. Will keep you posted on any progress we make in resolving it. Cheers
2023-09-27T09:05:28
0.0
[]
[]
unascribed/FlexVer
unascribed__FlexVer-10
722a2f82ef164ce20f05da108ddf5290ef29434e
diff --git a/java/build.gradle b/java/build.gradle index 4bb98f8..78c112b 100644 --- a/java/build.gradle +++ b/java/build.gradle @@ -8,6 +8,20 @@ sourceCompatibility = targetCompatibility = 8 archivesBaseName = 'FlexVer' version = '1.0.2' +repositories { + mavenCentral() +} + +dependencies { + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.9.2' + testImplementation 'org.junit.jupiter:junit-jupiter-engine:5.9.2' + testImplementation 'org.junit.jupiter:junit-jupiter-params:5.9.2' +} + +test { + useJUnitPlatform() +} + compileJava { options.release = 8 } diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 8b093a9..22a0887 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -161,67 +161,78 @@ impl Ord for FlexVer<'_> { #[cfg(test)] mod tests { + use std::path::PathBuf; + use std::fs; + use super::*; - fn test(left: &str, right: &str, result: Ordering) { - assert_eq!(compare(&left, &right), result); - assert_eq!( - compare(&right, &left), - match result { - Less => Greater, - Greater => Less, - Equal => Equal, - } - ); + const ENABLED_TESTS: &'static [&str] = &[ "test_vectors.txt" ]; + + fn test(left: &str, right: &str, expected: Ordering) -> Result<(), String> { + if compare(left, right) != expected { + return Err(format!("Expected {:?} but found {:?}", expected, compare(left, right))); + } + + // Assert commutativity, if right > left than left < right and vice versa + let inverse = match expected { + Less => Greater, + Greater => Less, + Equal => Equal, + }; + if compare(right, left) != inverse { + return Err(format!("Comparison method violates its general contract!")); + } + + Ok(()) } #[test] - fn test_compare() { - test("b1.7.3", "a1.2.6", Greater); - test("b1.2.6", "a1.7.3", Greater); - test("a1.1.2", "a1.1.2_01", Less); - test("1.16.5-0.00.5", "1.14.2-1.3.7", Greater); - test("1.0.0", "1.0.0-2", Less); - test("1.0.0", "1.0.0_01", Less); - test("1.0.1", "1.0.0_01", Greater); - test("1.0.0_01", "1.0.1", Less); - test("0.17.1-beta.1", "0.17.1", Less); - test("0.17.1-beta.1", "0.17.1-beta.2", Less); - test("1.4.5_01", "1.4.5_01+fabric-1.17", Equal); - test("1.4.5_01", "1.4.5_01+fabric-1.17+ohno", Equal); - test("14w16a", "18w40b", Less); - test("18w40a", "18w40b", Less); - test("1.4.5_01+fabric-1.17", "18w40b", Less); - test("13w02a", "c0.3.0_01", Less); - test("0.6.0-1.18.x", "0.9.beta-1.18.x", Less); + fn standardized_tests() { + let test_folder = PathBuf::from("../test"); + let errors = ENABLED_TESTS.iter().flat_map(|test_file_name| { + let test_file = test_folder.join(test_file_name); + fs::read_to_string(test_file).unwrap() + .lines() + .enumerate() + .filter(|(_, line)| !line.starts_with("#")) + .filter(|(_, line)| !line.is_empty()) + .map(|(num, line)| { + let split: Vec<&str> = line.split(" ").collect(); + if split.len() != 3 { panic!("{}:{} Line formatted incorrectly, expected 2 spaces: {}", test_file_name, num, line) } + let ord = match split[1] { + "<" => Less, + "=" => Equal, + ">" => Greater, + _ => panic!("{} is not a valid ordering", split[1]) + }; + test(split[0], split[2], ord).map_err(|message| (line.to_owned(), message)) + }).collect::<Vec<_>>() + }) + .filter_map(|res| res.err()) + .collect::<Vec<_>>(); + + if !errors.is_empty() { + errors.iter().for_each(|(line, message)| println!("{}: {}", line, message)); + panic!() + } } #[test] - fn test_ord() { - assert!(FlexVer("b1.7.3") > FlexVer("a1.2.6")); - assert!(FlexVer("b1.2.6") > FlexVer("a1.7.3")); - assert!(FlexVer("a1.1.2") < FlexVer("a1.1.2_01")); - assert!(FlexVer("1.16.5-0.00.5") > 
FlexVer("1.14.2-1.3.7")); - assert!(FlexVer("1.0.0") < FlexVer("1.0.0-2")); - assert!(FlexVer("1.0.0") < FlexVer("1.0.0_01")); - assert!(FlexVer("1.0.1") > FlexVer("1.0.0_01")); - assert!(FlexVer("1.0.0_01") < FlexVer("1.0.1")); - assert!(FlexVer("0.17.1-beta.1") < FlexVer("0.17.1")); - assert!(FlexVer("0.17.1-beta.1") < FlexVer("0.17.1-beta.2")); - assert!(FlexVer("1.4.5_01") == FlexVer("1.4.5_01+fabric-1.17")); - assert!(FlexVer("1.4.5_01") == FlexVer("1.4.5_01+fabric-1.17+ohno")); - assert!(FlexVer("14w16a") < FlexVer("18w40b")); - assert!(FlexVer("18w40a") < FlexVer("18w40b")); - assert!(FlexVer("1.4.5_01+fabric-1.17") < FlexVer("18w40b")); - assert!(FlexVer("13w02a") < FlexVer("c0.3.0_01")); - assert!(FlexVer("0.6.0-1.18.x") < FlexVer("0.9.beta-1.18.x")); + fn test_min() { + assert_eq!(FlexVer("1.0.0"), FlexVer("1.0.0").min(FlexVer("1.0.0"))); + assert_eq!(FlexVer("a1.2.6"), FlexVer("b1.7.3").min(FlexVer("a1.2.6"))); + assert_eq!(FlexVer("a1.7.3"), FlexVer("b1.2.6").min(FlexVer("a1.7.3"))); + } + #[test] + fn test_max() { assert_eq!(FlexVer("b1.7.3"), FlexVer("b1.7.3").max(FlexVer("a1.2.6"))); assert_eq!(FlexVer("b1.2.6"), FlexVer("b1.2.6").max(FlexVer("a1.7.3"))); - assert_eq!(FlexVer("a1.2.6"), FlexVer("b1.7.3").min(FlexVer("a1.2.6"))); - assert_eq!(FlexVer("a1.7.3"), FlexVer("b1.2.6").min(FlexVer("a1.7.3"))); assert_eq!(FlexVer("1.0.0"), FlexVer("1.0.0").max(FlexVer("1.0.0"))); - assert_eq!(FlexVer("1.0.0"), FlexVer("1.0.0").min(FlexVer("1.0.0"))); + } + + #[test] + fn test_clamp() { assert_eq!( FlexVer("1.1.0"), FlexVer("1.1.0").clamp(FlexVer("1.0.0"), FlexVer("1.2.0"))
Test vectors It would be useful for testing new implementations and verifying consistency between implementations to have a large series of test vectors, that any implementation can read and verify. For this purpose, I suggest a trivial plain text format, like the following, called something like `test_vectors.txt` in the root of the repo:

```sql
# Basic numeric ordering (lexical string sort fails these)
10 > 2
100 > 10

# Trivial common numerics
1.0 < 1.1
1.0 < 1.0.1
1.1 > 1.0.1

# SemVer compatibility
1.5 > 1.5-pre1
1.5 = 1.5+foobar

# SemVer incompatibility
1.5 < 1.5-2
1.5-pre10 > 1.5-pre2

# Optional features
## Too large for a 64-bit integer or double, checks if codepoint-wise or integer-parse is being used
36893488147419103232 <? 36893488147419103233

# ...and so on
```

We would also want weird Unicode edge cases in here, and tests for things like the leading-zero pitfall with integer-parse. (Open question: Does there exist a Unicode codepoint that sorts differently as UTF-16 and as UTF-32, when compared as outlined in the spec?) It would likely be good to start this by moving the current test vectors out of being hardcoded in each language implementation. A series of test vectors for decompositions may also be useful, such as:

```sql
1.0.1-pre2+foobar5
n1 t. n0 t. n1 p-pre n2 a+foobar n5
```
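A small Python sketch of a reader for the proposed format, mirroring what the Rust test in the patch does; the handling of the optional `<?` marker is an assumption based on the proposal above, not something the patch implements.

```python
from typing import Iterator, Tuple

def parse_test_vectors(text: str) -> Iterator[Tuple[str, str, str]]:
    """Yield (left, op, right) triples from the proposed plain-text format.

    Lines starting with '#' are comments and blank lines are skipped;
    an op ending in '?' (e.g. '<?') marks an optional-feature vector.
    """
    for num, line in enumerate(text.splitlines(), 1):
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        parts = line.split(" ")
        if len(parts) != 3:
            raise ValueError(f"line {num}: expected 'left op right', got {line!r}")
        left, op, right = parts
        if op.rstrip("?") not in ("<", "=", ">"):
            raise ValueError(f"line {num}: unknown ordering {op!r}")
        yield left, op, right

# Usage example on a few vectors from the proposal:
for left, op, right in parse_test_vectors("10 > 2\n1.0 < 1.1\n# comment\n1.5 = 1.5+foobar"):
    print(left, op, right)
```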
2023-01-15T20:18:32
0.0
[]
[]
Lazarus-org/api-response-shaper
Lazarus-org__api-response-shaper-25
67d8b859345aa13ae970c1c43806c4dd02fe74db
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 238a79a..283723f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,11 +10,11 @@ jobs: strategy: matrix: python-version: - - '3.8' - '3.9' - '3.10' - '3.11' - '3.12' + - '3.13' steps: - uses: actions/checkout@v4 diff --git a/README.md b/README.md index 5a3234a..2105fa4 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ ## Project Detail -- Language: Python >= 3.8 +- Language: Python >= 3.9 - Framework: Django >= 4.2 - Django REST Framework: >= 3.14 diff --git a/packages/requirements-dev.txt b/packages/requirements-dev.txt index 3b87817..6696070 100644 --- a/packages/requirements-dev.txt +++ b/packages/requirements-dev.txt @@ -1,102 +1,100 @@ -alabaster==0.7.13 ; python_version >= "3.8" and python_version < "4.0" -annotated-types==0.7.0 ; python_version >= "3.8" and python_version < "4.0" -argcomplete==3.5.0 ; python_version >= "3.8" and python_version < "4.0" -asgiref==3.8.1 ; python_version >= "3.8" and python_version < "4.0" -astroid==3.2.4 ; python_version >= "3.8" and python_version < "4.0" -babel==2.16.0 ; python_version >= "3.8" and python_version < "4.0" -backports-zoneinfo==0.2.1 ; python_version >= "3.8" and python_version < "3.9" -bandit[toml]==1.7.10 ; python_version >= "3.8" and python_version < "4.0" -black==24.8.0 ; python_version >= "3.8" and python_version < "4.0" -cachetools==5.5.0 ; python_version >= "3.8" and python_version < "4.0" -certifi==2024.8.30 ; python_version >= "3.8" and python_version < "4.0" -cfgv==3.4.0 ; python_version >= "3.8" and python_version < "4.0" -chardet==5.2.0 ; python_version >= "3.8" and python_version < "4.0" -charset-normalizer==3.3.2 ; python_version >= "3.8" and python_version < "4.0" -click-option-group==0.5.6 ; python_version >= "3.8" and python_version < "4" -click==8.1.7 ; python_version >= "3.8" and python_version < "4.0" -codecov==2.1.13 ; python_version >= "3.8" and python_version < "4.0" -colorama==0.4.6 ; python_version >= "3.8" and python_version < "4.0" -commitizen==3.29.0 ; python_version >= "3.8" and python_version < "4.0" -coverage==7.6.1 ; python_version >= "3.8" and python_version < "4.0" -coverage[toml]==7.6.1 ; python_version >= "3.8" and python_version < "4.0" -decli==0.6.2 ; python_version >= "3.8" and python_version < "4.0" -dill==0.3.8 ; python_version >= "3.8" and python_version < "4.0" -distlib==0.3.8 ; python_version >= "3.8" and python_version < "4.0" -django-stubs-ext==5.1.0 ; python_version >= "3.8" and python_version < "4.0" -django-stubs==5.1.0 ; python_version >= "3.8" and python_version < "4.0" -django==4.2.16 ; python_version >= "3.8" and python_version < "3.10" -django==5.1.1 ; python_version >= "3.10" and python_version < "4.0" -djangorestframework==3.15.2 ; python_version >= "3.8" and python_version < "4.0" -docformatter==1.7.5 ; python_version >= "3.8" and python_version < "4.0" -docutils==0.19 ; python_version >= "3.8" and python_version < "4.0" -dotty-dict==1.3.1 ; python_version >= "3.8" and python_version < "4.0" -exceptiongroup==1.2.2 ; python_version >= "3.8" and python_version < "3.11" -filelock==3.16.1 ; python_version >= "3.8" and python_version < "4.0" -gitdb==4.0.11 ; python_version >= "3.8" and python_version < "4.0" -gitpython==3.1.43 ; python_version >= "3.8" and python_version < "4.0" -identify==2.6.1 ; python_version >= "3.8" and python_version < "4.0" -idna==3.10 ; python_version >= "3.8" and python_version < "4.0" -imagesize==1.4.1 ; python_version >= "3.8" and python_version < "4.0" 
-importlib-metadata==8.5.0 ; python_version >= "3.8" and python_version < "3.10" -importlib-resources==6.4.5 ; python_version >= "3.8" and python_version < "4.0" -iniconfig==2.0.0 ; python_version >= "3.8" and python_version < "4.0" -isort==5.13.2 ; python_version >= "3.8" and python_version < "4.0" -jinja2==3.1.4 ; python_version >= "3.8" and python_version < "4.0" -markdown-it-py==3.0.0 ; python_version >= "3.8" and python_version < "4.0" -markupsafe==2.1.5 ; python_version >= "3.8" and python_version < "4.0" -mccabe==0.7.0 ; python_version >= "3.8" and python_version < "4.0" -mdurl==0.1.2 ; python_version >= "3.8" and python_version < "4.0" -mypy-extensions==1.0.0 ; python_version >= "3.8" and python_version < "4.0" -mypy==1.11.2 ; python_version >= "3.8" and python_version < "4.0" -nodeenv==1.9.1 ; python_version >= "3.8" and python_version < "4.0" -packaging==24.1 ; python_version >= "3.8" and python_version < "4.0" -pathspec==0.12.1 ; python_version >= "3.8" and python_version < "4.0" -pbr==6.1.0 ; python_version >= "3.8" and python_version < "4.0" -platformdirs==4.3.6 ; python_version >= "3.8" and python_version < "4.0" -pluggy==1.5.0 ; python_version >= "3.8" and python_version < "4.0" -pre-commit==3.5.0 ; python_version >= "3.8" and python_version < "4.0" -prompt-toolkit==3.0.36 ; python_version >= "3.8" and python_version < "4.0" -pydantic-core==2.23.4 ; python_version >= "3.8" and python_version < "4.0" -pydantic==2.9.2 ; python_version >= "3.8" and python_version < "4.0" -pygments==2.18.0 ; python_version >= "3.8" and python_version < "4.0" -pylint-django==2.5.5 ; python_version >= "3.8" and python_version < "4.0" -pylint-plugin-utils==0.8.2 ; python_version >= "3.8" and python_version < "4.0" -pylint==3.2.7 ; python_version >= "3.8" and python_version < "4.0" -pyproject-api==1.8.0 ; python_version >= "3.8" and python_version < "4.0" -pytest-cov==5.0.0 ; python_version >= "3.8" and python_version < "4.0" -pytest-django==4.9.0 ; python_version >= "3.8" and python_version < "4.0" -pytest==8.3.3 ; python_version >= "3.8" and python_version < "4.0" -python-gitlab==4.11.1 ; python_version >= "3.8" and python_version < "4.0" -python-semantic-release==9.8.8 ; python_version >= "3.8" and python_version < "4.0" -pytz==2024.2 ; python_version >= "3.8" and python_version < "3.9" -pyyaml==6.0.2 ; python_version >= "3.8" and python_version < "4.0" -questionary==2.0.1 ; python_version >= "3.8" and python_version < "4.0" -requests-toolbelt==1.0.0 ; python_version >= "3.8" and python_version < "4.0" -requests==2.32.3 ; python_version >= "3.8" and python_version < "4.0" -rich==13.8.1 ; python_version >= "3.8" and python_version < "4.0" -shellingham==1.5.4 ; python_version >= "3.8" and python_version < "4.0" -smmap==5.0.1 ; python_version >= "3.8" and python_version < "4.0" -snowballstemmer==2.2.0 ; python_version >= "3.8" and python_version < "4.0" -sphinx-rtd-theme==2.0.0 ; python_version >= "3.8" and python_version < "4.0" -sphinx==6.2.1 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-applehelp==1.0.4 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-devhelp==1.0.2 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-htmlhelp==2.0.1 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-jquery==4.1 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-jsmath==1.0.1 ; python_version >= "3.8" and python_version < "4.0" -sphinxcontrib-qthelp==1.0.3 ; python_version >= "3.8" and python_version < "4.0" 
-sphinxcontrib-serializinghtml==1.1.5 ; python_version >= "3.8" and python_version < "4.0" -sqlparse==0.5.1 ; python_version >= "3.8" and python_version < "4.0" -stevedore==5.3.0 ; python_version >= "3.8" and python_version < "4.0" -termcolor==2.4.0 ; python_version >= "3.8" and python_version < "4.0" -tomli==2.0.1 ; python_version >= "3.8" and python_full_version <= "3.11.0a6" -tomlkit==0.13.2 ; python_version >= "3.8" and python_version < "4.0" -tox==4.20.0 ; python_version >= "3.8" and python_version < "4.0" -types-pyyaml==6.0.12.20240917 ; python_version >= "3.8" and python_version < "4.0" -typing-extensions==4.12.2 ; python_version >= "3.8" and python_version < "4.0" -tzdata==2024.2 ; python_version >= "3.8" and python_version < "4.0" and sys_platform == "win32" -untokenize==0.1.1 ; python_version >= "3.8" and python_version < "4.0" -urllib3==2.2.3 ; python_version >= "3.8" and python_version < "4.0" -virtualenv==20.26.5 ; python_version >= "3.8" and python_version < "4.0" -wcwidth==0.2.13 ; python_version >= "3.8" and python_version < "4.0" -zipp==3.20.2 ; python_version >= "3.8" and python_version < "3.10" +alabaster==0.7.16 ; python_version >= "3.9" and python_version < "4.0" +annotated-types==0.7.0 ; python_version >= "3.9" and python_version < "4.0" +argcomplete==3.5.1 ; python_version >= "3.9" and python_version < "4.0" +asgiref==3.8.1 ; python_version >= "3.9" and python_version < "4.0" +astroid==3.3.5 ; python_version >= "3.9" and python_version < "4.0" +babel==2.16.0 ; python_version >= "3.9" and python_version < "4.0" +bandit[toml]==1.7.10 ; python_version >= "3.9" and python_version < "4.0" +black==24.10.0 ; python_version >= "3.9" and python_version < "4.0" +cachetools==5.5.0 ; python_version >= "3.9" and python_version < "4.0" +certifi==2024.8.30 ; python_version >= "3.9" and python_version < "4.0" +cfgv==3.4.0 ; python_version >= "3.9" and python_version < "4.0" +chardet==5.2.0 ; python_version >= "3.9" and python_version < "4.0" +charset-normalizer==3.4.0 ; python_version >= "3.9" and python_version < "4.0" +click-option-group==0.5.6 ; python_version >= "3.9" and python_version < "4" +click==8.1.7 ; python_version >= "3.9" and python_version < "4.0" +codecov==2.1.13 ; python_version >= "3.9" and python_version < "4.0" +colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0" +commitizen==3.29.1 ; python_version >= "3.9" and python_version < "4.0" +coverage==7.6.2 ; python_version >= "3.9" and python_version < "4.0" +coverage[toml]==7.6.2 ; python_version >= "3.9" and python_version < "4.0" +decli==0.6.2 ; python_version >= "3.9" and python_version < "4.0" +dill==0.3.9 ; python_version >= "3.9" and python_version < "4.0" +distlib==0.3.8 ; python_version >= "3.9" and python_version < "4.0" +django-stubs-ext==5.1.0 ; python_version >= "3.9" and python_version < "4.0" +django-stubs==5.1.0 ; python_version >= "3.9" and python_version < "4.0" +django==4.2.16 ; python_version >= "3.9" and python_version < "3.10" +django==5.1.2 ; python_version >= "3.10" and python_version < "4.0" +djangorestframework==3.15.2 ; python_version >= "3.9" and python_version < "4.0" +docformatter==1.7.5 ; python_version >= "3.9" and python_version < "4.0" +docutils==0.19 ; python_version >= "3.9" and python_version < "4.0" +dotty-dict==1.3.1 ; python_version >= "3.9" and python_version < "4.0" +exceptiongroup==1.2.2 ; python_version >= "3.9" and python_version < "3.11" +filelock==3.16.1 ; python_version >= "3.9" and python_version < "4.0" +gitdb==4.0.11 ; python_version >= "3.9" and 
python_version < "4.0" +gitpython==3.1.43 ; python_version >= "3.9" and python_version < "4.0" +identify==2.6.1 ; python_version >= "3.9" and python_version < "4.0" +idna==3.10 ; python_version >= "3.9" and python_version < "4.0" +imagesize==1.4.1 ; python_version >= "3.9" and python_version < "4.0" +importlib-metadata==8.5.0 ; python_version >= "3.9" and python_version < "3.10" +importlib-resources==6.4.5 ; python_version >= "3.9" and python_version < "4.0" +iniconfig==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +isort==5.13.2 ; python_version >= "3.9" and python_version < "4.0" +jinja2==3.1.4 ; python_version >= "3.9" and python_version < "4.0" +markdown-it-py==3.0.0 ; python_version >= "3.9" and python_version < "4.0" +markupsafe==3.0.1 ; python_version >= "3.9" and python_version < "4.0" +mccabe==0.7.0 ; python_version >= "3.9" and python_version < "4.0" +mdurl==0.1.2 ; python_version >= "3.9" and python_version < "4.0" +mypy-extensions==1.0.0 ; python_version >= "3.9" and python_version < "4.0" +mypy==1.11.2 ; python_version >= "3.9" and python_version < "4.0" +nodeenv==1.9.1 ; python_version >= "3.9" and python_version < "4.0" +packaging==24.1 ; python_version >= "3.9" and python_version < "4.0" +pathspec==0.12.1 ; python_version >= "3.9" and python_version < "4.0" +pbr==6.1.0 ; python_version >= "3.9" and python_version < "4.0" +platformdirs==4.3.6 ; python_version >= "3.9" and python_version < "4.0" +pluggy==1.5.0 ; python_version >= "3.9" and python_version < "4.0" +pre-commit==3.8.0 ; python_version >= "3.9" and python_version < "4.0" +prompt-toolkit==3.0.36 ; python_version >= "3.9" and python_version < "4.0" +pydantic-core==2.23.4 ; python_version >= "3.9" and python_version < "4.0" +pydantic==2.9.2 ; python_version >= "3.9" and python_version < "4.0" +pygments==2.18.0 ; python_version >= "3.9" and python_version < "4.0" +pylint-django==2.5.5 ; python_version >= "3.9" and python_version < "4.0" +pylint-plugin-utils==0.8.2 ; python_version >= "3.9" and python_version < "4.0" +pylint==3.3.1 ; python_version >= "3.9" and python_version < "4.0" +pyproject-api==1.8.0 ; python_version >= "3.9" and python_version < "4.0" +pytest-cov==5.0.0 ; python_version >= "3.9" and python_version < "4.0" +pytest-django==4.9.0 ; python_version >= "3.9" and python_version < "4.0" +pytest==8.3.3 ; python_version >= "3.9" and python_version < "4.0" +python-gitlab==4.13.0 ; python_version >= "3.9" and python_version < "4.0" +python-semantic-release==9.10.0 ; python_version >= "3.9" and python_version < "4.0" +pyyaml==6.0.2 ; python_version >= "3.9" and python_version < "4.0" +questionary==2.0.1 ; python_version >= "3.9" and python_version < "4.0" +requests-toolbelt==1.0.0 ; python_version >= "3.9" and python_version < "4.0" +requests==2.32.3 ; python_version >= "3.9" and python_version < "4.0" +rich==13.9.2 ; python_version >= "3.9" and python_version < "4.0" +shellingham==1.5.4 ; python_version >= "3.9" and python_version < "4.0" +smmap==5.0.1 ; python_version >= "3.9" and python_version < "4.0" +snowballstemmer==2.2.0 ; python_version >= "3.9" and python_version < "4.0" +sphinx-rtd-theme==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +sphinx==6.2.1 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-applehelp==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-devhelp==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-htmlhelp==2.1.0 ; python_version >= "3.9" and python_version < "4.0" 
+sphinxcontrib-jquery==4.1 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-jsmath==1.0.1 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-qthelp==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +sphinxcontrib-serializinghtml==2.0.0 ; python_version >= "3.9" and python_version < "4.0" +sqlparse==0.5.1 ; python_version >= "3.9" and python_version < "4.0" +stevedore==5.3.0 ; python_version >= "3.9" and python_version < "4.0" +termcolor==2.5.0 ; python_version >= "3.9" and python_version < "4.0" +tomli==2.0.2 ; python_version >= "3.9" and python_full_version <= "3.11.0a6" +tomlkit==0.13.2 ; python_version >= "3.9" and python_version < "4.0" +tox==4.21.2 ; python_version >= "3.9" and python_version < "4.0" +types-pyyaml==6.0.12.20240917 ; python_version >= "3.9" and python_version < "4.0" +typing-extensions==4.12.2 ; python_version >= "3.9" and python_version < "4.0" +tzdata==2024.2 ; python_version >= "3.9" and python_version < "4.0" and sys_platform == "win32" +untokenize==0.1.1 ; python_version >= "3.9" and python_version < "4.0" +urllib3==2.2.3 ; python_version >= "3.9" and python_version < "4.0" +virtualenv==20.26.6 ; python_version >= "3.9" and python_version < "4.0" +wcwidth==0.2.13 ; python_version >= "3.9" and python_version < "4.0" +zipp==3.20.2 ; python_version >= "3.9" and python_version < "3.10" diff --git a/packages/requirements.txt b/packages/requirements.txt index e59b151..481d556 100644 --- a/packages/requirements.txt +++ b/packages/requirements.txt @@ -1,8 +1,7 @@ -asgiref==3.8.1 ; python_version >= "3.8" and python_version < "4.0" -backports-zoneinfo==0.2.1 ; python_version >= "3.8" and python_version < "3.9" -django==4.2.16 ; python_version >= "3.8" and python_version < "3.10" -django==5.1.1 ; python_version >= "3.10" and python_version < "4.0" -djangorestframework==3.15.2 ; python_version >= "3.8" and python_version < "4.0" -sqlparse==0.5.1 ; python_version >= "3.8" and python_version < "4.0" -typing-extensions==4.12.2 ; python_version >= "3.8" and python_version < "3.11" -tzdata==2024.2 ; python_version >= "3.8" and python_version < "4.0" and sys_platform == "win32" +asgiref==3.8.1 ; python_version >= "3.9" and python_version < "4.0" +django==4.2.16 ; python_version >= "3.9" and python_version < "3.10" +django==5.1.2 ; python_version >= "3.10" and python_version < "4.0" +djangorestframework==3.15.2 ; python_version >= "3.9" and python_version < "4.0" +sqlparse==0.5.1 ; python_version >= "3.9" and python_version < "4.0" +typing-extensions==4.12.2 ; python_version >= "3.9" and python_version < "3.11" +tzdata==2024.2 ; python_version >= "3.9" and python_version < "4.0" and sys_platform == "win32" diff --git a/poetry.lock b/poetry.lock index b2dfffa..6a39bd1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "alabaster" -version = "0.7.13" -description = "A configurable sidebar-enabled Sphinx theme" +version = "0.7.16" +description = "A light, configurable Sphinx theme" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" files = [ - {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, - {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = 
"sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, ] [[package]] @@ -22,18 +22,15 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "argcomplete" -version = "3.5.0" +version = "3.5.1" description = "Bash tab completion for argparse" optional = false python-versions = ">=3.8" files = [ - {file = "argcomplete-3.5.0-py3-none-any.whl", hash = "sha256:d4bcf3ff544f51e16e54228a7ac7f486ed70ebf2ecfe49a63a91171c76bf029b"}, - {file = "argcomplete-3.5.0.tar.gz", hash = "sha256:4349400469dccfb7950bb60334a680c58d88699bff6159df61251878dc6bf74b"}, + {file = "argcomplete-3.5.1-py3-none-any.whl", hash = "sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363"}, + {file = "argcomplete-3.5.1.tar.gz", hash = "sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4"}, ] [package.extras] @@ -58,13 +55,13 @@ tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] [[package]] name = "astroid" -version = "3.2.4" +version = "3.3.5" description = "An abstract syntax tree for Python with inference support." optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.9.0" files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, + {file = "astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8"}, + {file = "astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d"}, ] [package.dependencies] @@ -81,40 +78,9 @@ files = [ {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, ] -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} - [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] -[[package]] -name = "backports-zoneinfo" -version = "0.2.1" -description = "Backport of the standard library zoneinfo module" -optional = false -python-versions = ">=3.6" -files = [ - {file = "backports.zoneinfo-0.2.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc"}, - {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722"}, - {file = "backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546"}, - {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win32.whl", hash = "sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08"}, - {file = "backports.zoneinfo-0.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7"}, - {file = "backports.zoneinfo-0.2.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac"}, - {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf"}, - {file = "backports.zoneinfo-0.2.1-cp37-cp37m-manylinux1_x86_64.whl", hash = 
"sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570"}, - {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win32.whl", hash = "sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b"}, - {file = "backports.zoneinfo-0.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582"}, - {file = "backports.zoneinfo-0.2.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987"}, - {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1"}, - {file = "backports.zoneinfo-0.2.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9"}, - {file = "backports.zoneinfo-0.2.1-cp38-cp38-win32.whl", hash = "sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328"}, - {file = "backports.zoneinfo-0.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6"}, - {file = "backports.zoneinfo-0.2.1.tar.gz", hash = "sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2"}, -] - -[package.extras] -tzdata = ["tzdata"] - [[package]] name = "bandit" version = "1.7.10" @@ -142,33 +108,33 @@ yaml = ["PyYAML"] [[package]] name = "black" -version = "24.8.0" +version = "24.10.0" description = "The uncompromising code formatter." optional = false -python-versions = ">=3.8" -files = [ - {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, - {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, - {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, - {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, - {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, - {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, - {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, - {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, - {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, - {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, - {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, - {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, - {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, - {file = 
"black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, - {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, - {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, - {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, - {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, - {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, - {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, - {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, - {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, +python-versions = ">=3.9" +files = [ + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = 
"black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, ] [package.dependencies] @@ -182,7 +148,7 @@ typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] @@ -232,101 +198,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = 
"charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - 
{file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = 
"charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = 
"charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = 
"sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -390,13 +371,13 @@ files = [ [[package]] name = "commitizen" -version = "3.29.0" +version = "3.29.1" description = "Python commitizen client tool" optional = false python-versions = ">=3.8" files = [ - {file = "commitizen-3.29.0-py3-none-any.whl", hash = "sha256:0c6c479dbee6d19292315c6fca3782cf5c1f7f1638bc4bb5ab4cfb67f4e11894"}, - {file = "commitizen-3.29.0.tar.gz", hash = "sha256:586b30c1976850d244b836cd4730771097ba362c9c1684d1f8c379176c2ea532"}, + {file = "commitizen-3.29.1-py3-none-any.whl", hash = "sha256:83f6563fae6a6262238e4424c55db5743eaa9827d2044dc23719466e4e78a0ca"}, + {file = "commitizen-3.29.1.tar.gz", hash = "sha256:b9a56190f4f3b20c73600e5ba448c7b81e0e6f87be3092aec1db4de75bf0fa91"}, ] [package.dependencies] @@ -414,83 +395,73 @@ tomlkit = ">=0.5.3,<1.0.0" [[package]] name = "coverage" -version = "7.6.1" +version = "7.6.2" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.8" -files = [ - {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, - {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, - {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, - {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, - {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, - {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = 
"coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, - {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, - {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, - {file = 
"coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, - {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, - {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, - {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, - {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, - {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, 
- {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, - {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, - {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, +python-versions = ">=3.9" +files = [ + {file = "coverage-7.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9df1950fb92d49970cce38100d7e7293c84ed3606eaa16ea0b6bc27175bb667"}, + {file = "coverage-7.6.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:24500f4b0e03aab60ce575c85365beab64b44d4db837021e08339f61d1fbfe52"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a663b180b6669c400b4630a24cc776f23a992d38ce7ae72ede2a397ce6b0f170"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfde025e2793a22efe8c21f807d276bd1d6a4bcc5ba6f19dbdfc4e7a12160909"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:087932079c065d7b8ebadd3a0160656c55954144af6439886c8bcf78bbbcde7f"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9c6b0c1cafd96213a0327cf680acb39f70e452caf8e9a25aeb05316db9c07f89"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e85830eed5b5263ffa0c62428e43cb844296f3b4461f09e4bdb0d44ec190bc2"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62ab4231c01e156ece1b3a187c87173f31cbeee83a5e1f6dff17f288dca93345"}, + {file = "coverage-7.6.2-cp310-cp310-win32.whl", hash = "sha256:7b80fbb0da3aebde102a37ef0138aeedff45997e22f8962e5f16ae1742852676"}, + {file = "coverage-7.6.2-cp310-cp310-win_amd64.whl", hash = "sha256:d20c3d1f31f14d6962a4e2f549c21d31e670b90f777ef4171be540fb7fb70f02"}, + {file = "coverage-7.6.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb21bac7783c1bf6f4bbe68b1e0ff0d20e7e7732cfb7995bc8d96e23aa90fc7b"}, + {file = "coverage-7.6.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b2e437fbd8fae5bc7716b9c7ff97aecc95f0b4d56e4ca08b3c8d8adcaadb84"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f77f2bf5797983652d1d55f1a7272a29afcc89e3ae51caa99b2db4e89d658"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f361296ca7054f0936b02525646b2731b32c8074ba6defab524b79b2b7eeac72"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7926d8d034e06b479797c199747dd774d5e86179f2ce44294423327a88d66ca7"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0bbae11c138585c89fb4e991faefb174a80112e1a7557d507aaa07675c62e66b"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fcad7d5d2bbfeae1026b395036a8aa5abf67e8038ae7e6a25c7d0f88b10a8e6a"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f01e53575f27097d75d42de33b1b289c74b16891ce576d767ad8c48d17aeb5e0"}, + {file = "coverage-7.6.2-cp311-cp311-win32.whl", hash = "sha256:7781f4f70c9b0b39e1b129b10c7d43a4e0c91f90c60435e6da8288efc2b73438"}, + {file = "coverage-7.6.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:9bcd51eeca35a80e76dc5794a9dd7cb04b97f0e8af620d54711793bfc1fbba4b"}, + {file = "coverage-7.6.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebc94fadbd4a3f4215993326a6a00e47d79889391f5659bf310f55fe5d9f581c"}, + {file = "coverage-7.6.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9681516288e3dcf0aa7c26231178cc0be6cac9705cac06709f2353c5b406cfea"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d9c5d13927d77af4fbe453953810db766f75401e764727e73a6ee4f82527b3e"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92f9ca04b3e719d69b02dc4a69debb795af84cb7afd09c5eb5d54b4a1ae2191"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ff2ef83d6d0b527b5c9dad73819b24a2f76fdddcfd6c4e7a4d7e73ecb0656b4"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47ccb6e99a3031ffbbd6e7cc041e70770b4fe405370c66a54dbf26a500ded80b"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a867d26f06bcd047ef716175b2696b315cb7571ccb951006d61ca80bbc356e9e"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cdfcf2e914e2ba653101157458afd0ad92a16731eeba9a611b5cbb3e7124e74b"}, + {file = "coverage-7.6.2-cp312-cp312-win32.whl", hash = "sha256:f9035695dadfb397bee9eeaf1dc7fbeda483bf7664a7397a629846800ce6e276"}, + {file = "coverage-7.6.2-cp312-cp312-win_amd64.whl", hash = "sha256:5ed69befa9a9fc796fe015a7040c9398722d6b97df73a6b608e9e275fa0932b0"}, + {file = "coverage-7.6.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eea60c79d36a8f39475b1af887663bc3ae4f31289cd216f514ce18d5938df40"}, + {file = "coverage-7.6.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa68a6cdbe1bc6793a9dbfc38302c11599bbe1837392ae9b1d238b9ef3dafcf1"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ec528ae69f0a139690fad6deac8a7d33629fa61ccce693fdd07ddf7e9931fba"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed5ac02126f74d190fa2cc14a9eb2a5d9837d5863920fa472b02eb1595cdc925"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21c0ea0d4db8a36b275cb6fb2437a3715697a4ba3cb7b918d3525cc75f726304"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35a51598f29b2a19e26d0908bd196f771a9b1c5d9a07bf20be0adf28f1ad4f77"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c9192925acc33e146864b8cf037e2ed32a91fdf7644ae875f5d46cd2ef086a5f"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf4eeecc9e10f5403ec06138978235af79c9a79af494eb6b1d60a50b49ed2869"}, + {file = "coverage-7.6.2-cp313-cp313-win32.whl", hash = "sha256:e4ee15b267d2dad3e8759ca441ad450c334f3733304c55210c2a44516e8d5530"}, + {file = "coverage-7.6.2-cp313-cp313-win_amd64.whl", hash = "sha256:c71965d1ced48bf97aab79fad56df82c566b4c498ffc09c2094605727c4b7e36"}, + {file = "coverage-7.6.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7571e8bbecc6ac066256f9de40365ff833553e2e0c0c004f4482facb131820ef"}, + {file = "coverage-7.6.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:078a87519057dacb5d77e333f740708ec2a8f768655f1db07f8dfd28d7a005f0"}, + {file = 
"coverage-7.6.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e5e92e3e84a8718d2de36cd8387459cba9a4508337b8c5f450ce42b87a9e760"}, + {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebabdf1c76593a09ee18c1a06cd3022919861365219ea3aca0247ededf6facd6"}, + {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12179eb0575b8900912711688e45474f04ab3934aaa7b624dea7b3c511ecc90f"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:39d3b964abfe1519b9d313ab28abf1d02faea26cd14b27f5283849bf59479ff5"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:84c4315577f7cd511d6250ffd0f695c825efe729f4205c0340f7004eda51191f"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff797320dcbff57caa6b2301c3913784a010e13b1f6cf4ab3f563f3c5e7919db"}, + {file = "coverage-7.6.2-cp313-cp313t-win32.whl", hash = "sha256:2b636a301e53964550e2f3094484fa5a96e699db318d65398cfba438c5c92171"}, + {file = "coverage-7.6.2-cp313-cp313t-win_amd64.whl", hash = "sha256:d03a060ac1a08e10589c27d509bbdb35b65f2d7f3f8d81cf2fa199877c7bc58a"}, + {file = "coverage-7.6.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c37faddc8acd826cfc5e2392531aba734b229741d3daec7f4c777a8f0d4993e5"}, + {file = "coverage-7.6.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab31fdd643f162c467cfe6a86e9cb5f1965b632e5e65c072d90854ff486d02cf"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97df87e1a20deb75ac7d920c812e9326096aa00a9a4b6d07679b4f1f14b06c90"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343056c5e0737487a5291f5691f4dfeb25b3e3c8699b4d36b92bb0e586219d14"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4ef1c56b47b6b9024b939d503ab487231df1f722065a48f4fc61832130b90e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fca4a92c8a7a73dee6946471bce6d1443d94155694b893b79e19ca2a540d86e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69f251804e052fc46d29d0e7348cdc5fcbfc4861dc4a1ebedef7e78d241ad39e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e8ea055b3ea046c0f66217af65bc193bbbeca1c8661dc5fd42698db5795d2627"}, + {file = "coverage-7.6.2-cp39-cp39-win32.whl", hash = "sha256:6c2ba1e0c24d8fae8f2cf0aeb2fc0a2a7f69b6d20bd8d3749fd6b36ecef5edf0"}, + {file = "coverage-7.6.2-cp39-cp39-win_amd64.whl", hash = "sha256:2186369a654a15628e9c1c9921409a6b3eda833e4b91f3ca2a7d9f77abb4987c"}, + {file = "coverage-7.6.2-pp39.pp310-none-any.whl", hash = "sha256:667952739daafe9616db19fbedbdb87917eee253ac4f31d70c7587f7ab531b4e"}, + {file = "coverage-7.6.2.tar.gz", hash = "sha256:a5f81e68aa62bc0cfca04f7b19eaa8f9c826b53fc82ab9e2121976dc74f131f3"}, ] [package.dependencies] @@ -512,13 +483,13 @@ files = [ [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = 
"sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -549,7 +520,6 @@ files = [ [package.dependencies] asgiref = ">=3.6.0,<4" -"backports.zoneinfo" = {version = "*", markers = "python_version < \"3.9\""} sqlparse = ">=0.3.1" tzdata = {version = "*", markers = "sys_platform == \"win32\""} @@ -559,13 +529,13 @@ bcrypt = ["bcrypt"] [[package]] name = "django" -version = "5.1.1" +version = "5.1.2" description = "A high-level Python web framework that encourages rapid development and clean, pragmatic design." optional = false python-versions = ">=3.10" files = [ - {file = "Django-5.1.1-py3-none-any.whl", hash = "sha256:71603f27dac22a6533fb38d83072eea9ddb4017fead6f67f2562a40402d61c3f"}, - {file = "Django-5.1.1.tar.gz", hash = "sha256:021ffb7fdab3d2d388bc8c7c2434eb9c1f6f4d09e6119010bbb1694dda286bc2"}, + {file = "Django-5.1.2-py3-none-any.whl", hash = "sha256:f11aa87ad8d5617171e3f77e1d5d16f004b79a2cf5d2e1d2b97a6a1f8e9ba5ed"}, + {file = "Django-5.1.2.tar.gz", hash = "sha256:bd7376f90c99f96b643722eee676498706c9fd7dc759f55ebfaf2c08ebcdf4f0"}, ] [package.dependencies] @@ -628,7 +598,6 @@ files = [ ] [package.dependencies] -"backports.zoneinfo" = {version = "*", markers = "python_version < \"3.9\""} django = ">=4.2" [[package]] @@ -885,71 +854,72 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.5" +version = "3.0.1" description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = 
"MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = 
"MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, + {file = 
"MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, + {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, ] [[package]] @@ -1109,13 +1079,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" -version = "3.5.0" +version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, - {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, + {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, + {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, ] [package.dependencies] @@ -1279,17 +1249,17 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pylint" -version = "3.2.7" +version = "3.3.1" description = "python code static checker" optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.9.0" files = [ - {file = "pylint-3.2.7-py3-none-any.whl", hash = "sha256:02f4aedeac91be69fb3b4bea997ce580a4ac68ce58b89eaefeaf06749df73f4b"}, - {file = "pylint-3.2.7.tar.gz", hash = "sha256:1b7a721b575eaeaa7d39db076b6e7743c993ea44f57979127c517c6c572c803e"}, + {file = "pylint-3.3.1-py3-none-any.whl", hash = "sha256:2f846a466dd023513240bc140ad2dd73bfc080a5d85a710afdb728c420a5a2b9"}, + {file = "pylint-3.3.1.tar.gz", hash = "sha256:9f3dcc87b1203e612b78d91a896407787e708b3f189b5fa0b307712d49ff0c6e"}, ] [package.dependencies] -astroid = ">=3.2.4,<=3.3.0-dev0" +astroid = ">=3.3.4,<=3.4.0-dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, @@ -1418,13 +1388,13 @@ testing = ["Django", "django-configurations (>=2.0)"] [[package]] name = "python-gitlab" -version = "4.11.1" +version = "4.13.0" description = "A python wrapper for the GitLab API" optional = false python-versions = ">=3.8.0" files = [ - {file = "python_gitlab-4.11.1-py3-none-any.whl", hash = "sha256:f15fe4f4cbaa58c04e422ad30a2d4fc7976294dfdf1f90f81ab927e5b2238f33"}, - {file = "python_gitlab-4.11.1.tar.gz", hash = "sha256:7afa2f9c30618bc3daee95d2186a06e1cf18ca2fa97890eb55fd8ddc4e339812"}, + {file = "python_gitlab-4.13.0-py3-none-any.whl", hash = "sha256:8299a054fb571da16e1a8c1868fff01f34ac41ea1410c713a4647b3bbb2aa279"}, + {file = "python_gitlab-4.13.0.tar.gz", hash = "sha256:576bfb0901faca0c6b2d1ff2592e02944a6ec3e086c3129fb43c2a0df56a1c67"}, ] [package.dependencies] @@ -1438,13 +1408,13 @@ yaml = ["PyYaml (>=6.0.1)"] [[package]] name = "python-semantic-release" -version = "9.8.8" +version = "9.10.0" description = "Automatic Semantic Versioning for Python projects" optional = false python-versions = ">=3.8" files = [ - {file = "python_semantic_release-9.8.8-py3-none-any.whl", hash = "sha256:df43d02234ce4be3802bceca804c3a357aebf833a97624dc74563ec69da8a3ef"}, - {file = "python_semantic_release-9.8.8.tar.gz", hash = "sha256:19ac268bc417b2b8ea869caede307c50bc1fc2c6c70ee42e88403e321d13f494"}, + {file = "python_semantic_release-9.10.0-py3-none-any.whl", hash = "sha256:05a29ce7441e486cfd09d455cfb32abeb64b9c94eb4d2d148ff280578fd95558"}, + {file = "python_semantic_release-9.10.0.tar.gz", hash = "sha256:45a0199f7805dfe76bb57b932be4a9a5a3f7ff03a897c375eb5a1fa5dcfe5608"}, ] [package.dependencies] @@ -1463,21 +1433,10 @@ tomlkit = ">=0.11,<1.0" [package.extras] build = ["build (>=1.2,<2.0)"] -dev = ["pre-commit (>=3.5,<4.0)", "ruff (==0.5.0)", "tox (>=4.11,<5.0)"] +dev = ["pre-commit (>=3.5,<4.0)", "ruff (==0.6.1)", "tox (>=4.11,<5.0)"] docs = ["Sphinx (>=6.0,<7.0)", "furo (>=2024.1,<2025.0)", "sphinx-autobuild 
(==2024.2.4)", "sphinxcontrib-apidoc (==0.5.0)"] -mypy = ["mypy (==1.10.1)", "types-requests (>=2.32.0,<2.33.0)"] -test = ["coverage[toml] (>=7.0,<8.0)", "pytest (>=8.3,<9.0)", "pytest-clarity (>=1.0,<2.0)", "pytest-cov (>=5.0,<6.0)", "pytest-env (>=1.0,<2.0)", "pytest-lazy-fixtures (>=1.1.1,<1.2.0)", "pytest-mock (>=3.0,<4.0)", "pytest-pretty (>=1.2,<2.0)", "pytest-xdist (>=3.0,<4.0)", "requests-mock (>=1.10,<2.0)", "responses (>=0.25.0,<0.26.0)"] - -[[package]] -name = "pytz" -version = "2024.2" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, - {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, -] +mypy = ["mypy (==1.11.2)", "types-requests (>=2.32.0,<2.33.0)"] +test = ["coverage[toml] (>=7.0,<8.0)", "pytest (>=8.3,<9.0)", "pytest-clarity (>=1.0,<2.0)", "pytest-cov (>=5.0,<6.0)", "pytest-env (>=1.0,<2.0)", "pytest-lazy-fixtures (>=1.1.1,<1.2.0)", "pytest-mock (>=3.0,<4.0)", "pytest-pretty (>=1.2,<2.0)", "pytest-xdist (>=3.0,<4.0)", "pyyaml (>=6.0,<7.0)", "requests-mock (>=1.10,<2.0)", "responses (>=0.25.0,<0.26.0)"] [[package]] name = "pyyaml" @@ -1592,19 +1551,19 @@ requests = ">=2.0.1,<3.0.0" [[package]] name = "rich" -version = "13.8.1" +version = "13.9.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, - {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, + {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, + {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -1698,47 +1657,50 @@ dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"] [[package]] name = "sphinxcontrib-applehelp" -version = "1.0.4" +version = "2.0.0" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, - {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, + {file = "sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5"}, + {file = "sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-devhelp" 
-version = "1.0.2" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." +version = "2.0.0" +description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" files = [ - {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, + {file = "sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2"}, + {file = "sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-htmlhelp" -version = "2.0.1" +version = "2.1.0" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, - {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, + {file = "sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8"}, + {file = "sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["html5lib", "pytest"] [[package]] @@ -1771,32 +1733,34 @@ test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" -version = "1.0.3" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." +version = "2.0.0" +description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" files = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, + {file = "sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb"}, + {file = "sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] +test = ["defusedxml (>=0.7.1)", "pytest"] [[package]] name = "sphinxcontrib-serializinghtml" -version = "1.1.5" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
+version = "2.0.0" +description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" files = [ - {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, - {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, + {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, + {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, ] [package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] +lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] @@ -1830,13 +1794,13 @@ pbr = ">=2.0.0" [[package]] name = "termcolor" -version = "2.4.0" +version = "2.5.0" description = "ANSI color formatting for output in terminal" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"}, - {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"}, + {file = "termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8"}, + {file = "termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f"}, ] [package.extras] @@ -1844,13 +1808,13 @@ tests = ["pytest", "pytest-cov"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] @@ -1866,30 +1830,31 @@ files = [ [[package]] name = "tox" -version = "4.20.0" +version = "4.21.2" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" files = [ - {file = "tox-4.20.0-py3-none-any.whl", hash = "sha256:21a8005e3d3fe5658a8e36b8ca3ed13a4230429063c5cc2a2fdac6ee5aa0de34"}, - {file = "tox-4.20.0.tar.gz", hash = "sha256:5b78a49b6eaaeab3ae4186415e7c97d524f762ae967c63562687c3e5f0ec23d5"}, + {file = "tox-4.21.2-py3-none-any.whl", hash = "sha256:13d996adcd792e7c82994b0e116d85efd84f0c6d185254d83d156f73f86b2038"}, + {file = "tox-4.21.2.tar.gz", hash = "sha256:49381ff102296753e378fa5ff30e42a35e695f149b4dbf8a2c49d15fdb5797b2"}, ] [package.dependencies] cachetools = ">=5.5" chardet = ">=5.2" colorama = ">=0.4.6" -filelock = ">=3.15.4" +filelock = ">=3.16.1" packaging = ">=24.1" -platformdirs = ">=4.2.2" +platformdirs = ">=4.3.6" pluggy = ">=1.5" -pyproject-api = ">=1.7.1" +pyproject-api = ">=1.8" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} -virtualenv = 
">=20.26.3" +typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\""} +virtualenv = ">=20.26.6" [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-argparse-cli (>=1.17)", "sphinx-autodoc-typehints (>=2.4)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=24.8)"] -testing = ["build[virtualenv] (>=1.2.2)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1)", "diff-cover (>=9.1.1)", "distlib (>=0.3.8)", "flaky (>=3.8.1)", "hatch-vcs (>=0.4)", "hatchling (>=1.25)", "psutil (>=6)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-xdist (>=3.6.1)", "re-assert (>=1.1)", "setuptools (>=74.1.2)", "time-machine (>=2.15)", "wheel (>=0.44)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-argparse-cli (>=1.18.2)", "sphinx-autodoc-typehints (>=2.4.4)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=24.8)"] +testing = ["build[virtualenv] (>=1.2.2)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1.0.2)", "diff-cover (>=9.2)", "distlib (>=0.3.8)", "flaky (>=3.8.1)", "hatch-vcs (>=0.4)", "hatchling (>=1.25)", "psutil (>=6)", "pytest (>=8.3.3)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-xdist (>=3.6.1)", "re-assert (>=1.1)", "setuptools (>=75.1)", "time-machine (>=2.15)", "wheel (>=0.44)"] [[package]] name = "types-pyyaml" @@ -1953,13 +1918,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.26.5" +version = "20.26.6" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.5-py3-none-any.whl", hash = "sha256:4f3ac17b81fba3ce3bd6f4ead2749a72da5929c01774948e243db9ba41df4ff6"}, - {file = "virtualenv-20.26.5.tar.gz", hash = "sha256:ce489cac131aa58f4b25e321d6d186171f78e6cb13fafbf32a840cee67733ff4"}, + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, ] [package.dependencies] @@ -2003,5 +1968,5 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" -python-versions = ">=3.8,<4.0" -content-hash = "8fdfca3b3f0cd7f0064fa8e82f75e9a46dd44decd97036b4e6c224a435162866" +python-versions = ">=3.9,<4.0" +content-hash = "bec98985ee4d92c112d747fb59ac420d053a05af7259725124f299f365321a6f" diff --git a/pyproject.toml b/pyproject.toml index 1b72295..eb90942 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ requires = [ "poetry-core" ] name = "api-response-shaper" version = "1.0.0" description = "An essential package for structuring and optimizing your API responses." 
-authors = [ "ARYAN-NIKNEZHAD <[email protected]>" ] +authors = [ "ARYAN-NIKNEZHAD <[email protected]>", "MEHRSHAD-MIRSHEKARY <[email protected]>" ] license = "MIT" readme = "README.md" keywords = [ "django-api-shaper", "django", "shaper", "response-shaper" ] @@ -24,11 +24,11 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Software Development :: Libraries :: Python Modules", ] @@ -42,9 +42,9 @@ packages = [ "Issues" = "https://github.com/lazarus-org/api-response-shaper/issues" [tool.poetry.dependencies] -python = ">=3.8,<4.0" +python = ">=3.9,<4.0" django = [ - { version = ">=4.2,<5.0", python = ">=3.8,<3.10" }, + { version = ">=4.2,<5.0", python = ">=3.9,<3.10" }, { version = ">=4.2,<5.3", python = ">=3.10" }, # Django 4.2 and 5.x for Python 3.10+ ] djangorestframework = "^3.14.0" diff --git a/tox.ini b/tox.ini index 6e31310..8f7773e 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,9 @@ requires = tox>=4.2 env_list = + py313-django40-drf{315, 314} + py313-django50-drf{315, 314} + py313-django51-drf{315, 314} py312-django40-drf{315, 314} py312-django50-drf{315, 314} py312-django51-drf{315, 314} @@ -12,7 +15,6 @@ env_list = py310-django50-drf{315, 314} py310-django51-drf{315, 314} py39-django40-drf{315, 314} - py38-django40-drf{315, 314} [testenv] description = Run Pytest tests with multiple django and drf versions @@ -47,8 +49,8 @@ commands = [gh-actions] python = - 3.8: py38 3.9: py39 3.10: py310 3.11: py311 3.12: py312 + 3.13: py313
🔧 📦 Support Python 3.13
2024-10-09T16:09:47
0.0
[]
[]
Lazarus-org/api-response-shaper
Lazarus-org__api-response-shaper-22
384ed43bf6d50a58ccfd51b49f19abd148c0e0e2
diff --git a/response_shaper/apps.py b/response_shaper/apps.py index cb72b85..1ff1a1c 100644 --- a/response_shaper/apps.py +++ b/response_shaper/apps.py @@ -6,3 +6,14 @@ class ResponseShaperConfig(AppConfig): default_auto_field = "django.db.models.BigAutoField" name = "response_shaper" verbose_name = _("Response Shaper") + + def ready(self) -> None: + """Import and register system checks for the Response Shaper app. + + This method is called when the app is ready and ensures that the settings + checks from `response_shaper.settings.check` are imported. This allows + Django's system check framework to validate the Response Shaper configuration during + startup. + + """ + from response_shaper.settings import check diff --git a/response_shaper/settings/check.py b/response_shaper/settings/check.py index 0a82978..4ef9f5a 100644 --- a/response_shaper/settings/check.py +++ b/response_shaper/settings/check.py @@ -2,56 +2,12 @@ from django.core.checks import Error, register - -def validate_boolean_setting(setting_value: Any, setting_name: str) -> List[Error]: - """Helper function to validate boolean settings. - - Args: - setting_value: The value of the setting to validate. - setting_name: The name of the setting being validated. - - Returns: - List[Error]: A list of errors if the validation fails, or an empty list if valid. - - """ - errors: List[Error] = [] - if setting_value is None or not isinstance(setting_value, bool): - errors.append( - Error( - f"{setting_name} should be a boolean value.", - hint=f"Set {setting_name} to either True or False.", - id=f"response_shaper.E001.{setting_name}", - ) - ) - return errors - - -def validate_class_setting(setting_value: Any, setting_name: str) -> List[Error]: - """Helper function to validate settings that are class paths (strings). - - Args: - setting_value: The value of the setting to validate. - setting_name: The name of the setting being validated. - - Returns: - List[Error]: A list of errors if the validation fails, or an empty list if valid. 
- - """ - errors: List[Error] = [] - # Only validate if the setting is not empty (since an empty string is allowed) - if ( - setting_value is not None - and setting_value != "" - and not isinstance(setting_value, str) - ): - errors.append( - Error( - f"{setting_name} should be a valid Python class path string.", - hint=f"Set {setting_name} to a valid import path for a class.", - id=f"response_shaper.E002.{setting_name}", - ) - ) - return errors +from response_shaper.settings.conf import response_shaper_config +from response_shaper.validators.config_validators import ( + validate_boolean_setting, + validate_class_setting, + validate_paths_list_setting, +) @register() @@ -70,27 +26,32 @@ def check_response_shaper_settings(app_configs: Any, **kwargs: Any) -> List[Erro """ errors: List[Error] = [] - from django.conf import settings - # Validate boolean settings errors.extend( validate_boolean_setting( - getattr(settings, "CUSTOM_RESPONSE_DEBUG", None), "CUSTOM_RESPONSE_DEBUG" + response_shaper_config.debug, "RESPONSE_SHAPER_DEBUG_MODE" + ) + ) + + # Validate optional excluded path settings + errors.extend( + validate_paths_list_setting( + response_shaper_config.excluded_paths, "RESPONSE_SHAPER_EXCLUDED_PATHS" ) ) # Validate optional class settings for custom handlers (skip validation if the setting is None or empty) errors.extend( validate_class_setting( - getattr(settings, "CUSTOM_RESPONSE_SUCCESS_HANDLER", None), - "CUSTOM_RESPONSE_SUCCESS_HANDLER", + response_shaper_config.success_handler, + "RESPONSE_SHAPER_SUCCESS_HANDLER", ) ) errors.extend( validate_class_setting( - getattr(settings, "CUSTOM_RESPONSE_ERROR_HANDLER", None), - "CUSTOM_RESPONSE_ERROR_HANDLER", + response_shaper_config.error_handler, + "RESPONSE_SHAPER_ERROR_HANDLER", ) ) diff --git a/response_shaper/settings/conf.py b/response_shaper/settings/conf.py index a6f4ed2..543344e 100644 --- a/response_shaper/settings/conf.py +++ b/response_shaper/settings/conf.py @@ -1,26 +1,31 @@ -from typing import Union +from typing import Any, Union from django.conf import settings class ResponseShaperConfig: - """A configuration handler.allowing dynamic settings loading from the - Django settings, with default fallbacks.""" + """A configuration handler. + + allowing dynamic settings loading from the Django settings, with + default fallbacks. 
+ + """ def __init__(self) -> None: - self.debug = self.get_setting("CUSTOM_RESPONSE_DEBUG", False) + self.config_prefix = "RESPONSE_SHAPER_" + self.debug = self.get_setting(f"{self.config_prefix}DEBUG_MODE", False) self.excluded_paths = self.get_setting( - "CUSTOM_RESPONSE_EXCLUDED_PATHS", + f"{self.config_prefix}EXCLUDED_PATHS", ["/admin/", "/schema/swagger-ui/", "/schema/redoc/", "/schema/"], ) self.success_handler = self.get_setting( - "CUSTOM_RESPONSE_SUCCESS_HANDLER", "default_success_handler" + f"{self.config_prefix}SUCCESS_HANDLER", "default_success_handler" ) self.error_handler = self.get_setting( - "CUSTOM_RESPONSE_ERROR_HANDLER", "default_error_handler" + f"{self.config_prefix}ERROR_HANDLER", "default_error_handler" ) - def get_setting(self, setting_name: str, default_value: str) -> Union[str, bool]: + def get_setting(self, setting_name: str, default_value: Any) -> Union[str, bool]: """Retrieve a setting from Django settings with a default fallback.""" return getattr(settings, setting_name, default_value) diff --git a/response_shaper/validators/__init__.py b/response_shaper/validators/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/response_shaper/validators/config_validators.py b/response_shaper/validators/config_validators.py new file mode 100644 index 0000000..d423c10 --- /dev/null +++ b/response_shaper/validators/config_validators.py @@ -0,0 +1,105 @@ +from typing import Any, List + +from django.core.checks import Error + + +def validate_boolean_setting(setting_value: Any, setting_name: str) -> List[Error]: + """Helper function to validate boolean settings. + + Args: + setting_value: The value of the setting to validate. + setting_name: The name of the setting being validated. + + Returns: + List[Error]: A list of errors if the validation fails, or an empty list if valid. + + """ + errors: List[Error] = [] + if setting_value is None or not isinstance(setting_value, bool): + errors.append( + Error( + f"{setting_name} should be a boolean value.", + hint=f"Set {setting_name} to either True or False.", + id=f"response_shaper.E001.{setting_name}", + ) + ) + return errors + + +def validate_class_setting(setting_value: Any, setting_name: str) -> List[Error]: + """Helper function to validate settings that are class paths (strings). + + Args: + setting_value: The value of the setting to validate. + setting_name: The name of the setting being validated. + + Returns: + List[Error]: A list of errors if the validation fails, or an empty list if valid. + + """ + errors: List[Error] = [] + # Only validate if the setting is not empty (since an empty string is allowed) + if ( + setting_value is not None + and setting_value != "" + and not isinstance(setting_value, str) + ): + errors.append( + Error( + f"{setting_name} should be a valid Python class path string.", + hint=f"Set {setting_name} to a valid import path for a class.", + id=f"response_shaper.E002.{setting_name}", + ) + ) + return errors + + +def validate_paths_list_setting(setting_value: Any, setting_name: str) -> List[Error]: + """Validates that the given setting is a list of paths, ensuring each path + is a string and starts and ends with a forward slash ('/'). + + Args: + setting_value (Any): The value of the setting to validate, expected to be a list of strings. + setting_name (str): The name of the setting being validated. + + Returns: + List[Error]: A list of errors if the validation fails, or an empty list if valid. + + Validation Criteria: + - The setting must be a list. 
+ - Each item in the list must be a string. + - Each path must start and end with a forward slash ('/'). + + Example: + A valid setting: ['/admin/', '/schema/swagger-ui/'] + An invalid setting: ['admin', '/schema/swagger-ui', '/schema'] + + """ + errors: List[Error] = [] + if not isinstance(setting_value, list): + errors.append( + Error( + f"{setting_name} should be a list.", + hint=f"Set {setting_name} to a list of strings, e.g., ['/admin/', '/schema/swagger-ui/']", + id=f"response_shaper.E003.{setting_name}", + ) + ) + elif not all(isinstance(path, str) for path in setting_value): + errors.append( + Error( + f"All items in {setting_name} should be strings.", + hint="Ensure each path is a valid string.", + id=f"response_shaper.E004.{setting_name}", + ) + ) + else: + for path in setting_value: + if not (path.startswith("/") and path.endswith("/")): + errors.append( + Error( + f"The path '{path}' in {setting_name} should start and end with a '/'.", + hint="Ensure each path in the list starts and ends with '/'.", + id=f"response_shaper.E005.{setting_name}", + ) + ) + return errors
Missing Validation for Excluded Paths List

The current implementation lacks proper validation for the `CUSTOM_RESPONSE_EXCLUDED_PATHS` setting, which is expected to be a list of URL paths. This can lead to potential misconfigurations if the setting is not provided as a list or contains invalid path formats.

#### Problem:
- The `CUSTOM_RESPONSE_EXCLUDED_PATHS` is not validated to ensure it is a list of valid URL strings.
- If a user accidentally provides a string or any other non-list type, the middleware might not function as expected.
- There is also no check to confirm that the list contains valid and properly formatted URL paths.

#### Expected Behavior:
- The `CUSTOM_RESPONSE_EXCLUDED_PATHS` should be validated to ensure that it is a list.
- Each item in the list should be validated to confirm that it represents a valid URL path (e.g., starts with a forward slash `/`).

#### Suggested Solution:
- Implement validation to check whether `CUSTOM_RESPONSE_EXCLUDED_PATHS` is a list.
- Ensure each path in the list conforms to valid URL formats.

This will prevent potential issues from misconfigured paths, making the application more robust.
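The gold patch above addresses this by introducing a `validate_paths_list_setting` helper registered through Django's system-check framework. As a standalone illustration, here is a minimal sketch of that check; the error ids and hint strings follow the patch, while the `django.core.checks.Error` signature is Django's standard one:

```python
from typing import Any, List

from django.core.checks import Error


def validate_paths_list_setting(setting_value: Any, setting_name: str) -> List[Error]:
    """Check that a setting is a list of strings shaped like '/path/'."""
    errors: List[Error] = []
    if not isinstance(setting_value, list):
        # Reject non-list values (e.g. a bare string) up front.
        errors.append(
            Error(
                f"{setting_name} should be a list.",
                hint=f"Set {setting_name} to a list of strings, e.g., ['/admin/']",
                id=f"response_shaper.E003.{setting_name}",
            )
        )
    elif not all(isinstance(path, str) for path in setting_value):
        # Every entry must be a string before path-shape checks run.
        errors.append(
            Error(
                f"All items in {setting_name} should be strings.",
                hint="Ensure each path is a valid string.",
                id=f"response_shaper.E004.{setting_name}",
            )
        )
    else:
        # Each path must start and end with '/' to match URL prefixes cleanly.
        for path in setting_value:
            if not (path.startswith("/") and path.endswith("/")):
                errors.append(
                    Error(
                        f"The path '{path}' in {setting_name} should start and end with a '/'.",
                        hint="Ensure each path in the list starts and ends with '/'.",
                        id=f"response_shaper.E005.{setting_name}",
                    )
                )
    return errors
```

In the patch, this helper is invoked from a `@register()`-decorated check function at startup, so a misconfigured `['admin', '/schema']` surfaces as explicit system-check errors rather than silently breaking the middleware's path matching.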
2024-10-05T19:08:09
0.0
[]
[]
ckan/ckanext-showcase
ckan__ckanext-showcase-95
78366bd94571766285a18f220e700305ceaebbca
diff --git a/.gitignore b/.gitignore index c21b782c..14608b91 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,8 @@ sdist/ *.egg-info/ .installed.cfg *.egg +.eggs/ +.venv # PyInstaller # Usually these files are written by a python script from a template @@ -39,4 +41,4 @@ nosetests.xml coverage.xml # Sphinx documentation -docs/_build/ \ No newline at end of file +docs/_build/ diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 02ea5f19..00000000 --- a/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: python - -# the new trusty images of Travis cause build errors with psycopg2, see https://github.com/travis-ci/travis-ci/issues/8897 -dist: trusty -group: deprecated-2017Q4 - -python: - - "2.7" -env: - - CKANVERSION=master - - CKANVERSION=2.8 - - CKANVERSION=2.7 - - CKANVERSION=2.6 - - CKANVERSION=2.5 - - CKANVERSION=2.4 - - CKANVERSION=2.3 -install: - - bash bin/travis-build.bash -services: - -postgresql -script: sh bin/travis-run.sh -after_success: coveralls -sudo: required diff --git a/README.rst b/README.rst index 54e3c9f9..d23a44a4 100644 --- a/README.rst +++ b/README.rst @@ -31,7 +31,7 @@ Requirements ------------ -Compatible with CKAN 2.3+. +Compatible with CKAN 2.9. N.B. The ``migrate`` command, detailed below, requires the Related Item models and actions, which have been removed in CKAN 2.6. If you wish to migrate your diff --git a/bin/travis-build.bash b/bin/travis-build.bash deleted file mode 100644 index 2e088487..00000000 --- a/bin/travis-build.bash +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -set -e - -echo "This is travis-build.bash..." - -echo "Installing the packages that CKAN requires..." -sudo apt-get update -qq -sudo apt-get install solr-jetty libcommons-fileupload-java - -echo "Installing CKAN and its Python dependencies..." -git clone https://github.com/ckan/ckan -cd ckan -if [ $CKANVERSION == 'master' ] -then - echo "CKAN version: master" -else - CKAN_TAG=$(git tag | grep ^ckan-$CKANVERSION | sort --version-sort | tail -n 1) - git checkout $CKAN_TAG - echo "CKAN version: ${CKAN_TAG#ckan-}" -fi -python setup.py develop -if [ -f requirements-py2.txt ] -then - pip install -r requirements-py2.txt -else - pip install -r requirements.txt -fi -pip install -r dev-requirements.txt --allow-all-external -cd - - -echo "Creating the PostgreSQL user and database..." -sudo -u postgres psql -c "CREATE USER ckan_default WITH PASSWORD 'pass';" -sudo -u postgres psql -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;' - -echo "Initialising the database..." -cd ckan -paster db init -c test-core.ini -cd - - -echo "Installing ckanext-showcase and its requirements..." -python setup.py develop -pip install -r dev-requirements.txt - -echo "Moving test.ini into a subdir..." -mkdir subdir -mv test-travis.ini subdir - -echo "travis-build.bash is done." 
diff --git a/bin/travis-run.sh b/bin/travis-run.sh deleted file mode 100644 index d3f36649..00000000 --- a/bin/travis-run.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -e - -echo "NO_START=0\nJETTY_HOST=127.0.0.1\nJETTY_PORT=8983\nJAVA_HOME=$JAVA_HOME" | sudo tee /etc/default/jetty -sudo cp ckan/ckan/config/solr/schema.xml /etc/solr/conf/schema.xml -sudo service jetty restart -nosetests --ckan --nologcapture --with-pylons=subdir/test-travis.ini --with-coverage --cover-package=ckanext.showcase --cover-inclusive --cover-erase --cover-tests diff --git a/ckanext/showcase/commands/cli.py b/ckanext/showcase/commands/cli.py new file mode 100644 index 00000000..4df2423f --- /dev/null +++ b/ckanext/showcase/commands/cli.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import click + +from ckanext.showcase import utils + +# Click commands for CKAN 2.9 and above + + [email protected]() +def showcase(): + '''showcase commands + ''' + pass + + [email protected]() [email protected]('--allow-duplicates', + default=False, + help='Allow related items with duplicate titles to be migrated. Duplicate showcases will be created as "duplicate_<related-name>_<related-id>".') +def migrate(allow_duplicates): + """ + showcase migrate [options] + """ + utils.migrate(allow_duplicates) + + [email protected]() +def markdown_to_html(): + ''' + showcase markdown-to-html + ''' + utils.markdown_to_html() + + +def get_commands(): + return [showcase] \ No newline at end of file diff --git a/ckanext/showcase/commands/migrate.py b/ckanext/showcase/commands/migrate.py deleted file mode 100644 index 569c8de2..00000000 --- a/ckanext/showcase/commands/migrate.py +++ /dev/null @@ -1,183 +0,0 @@ -from ckan import model -from ckan.lib.cli import CkanCommand -from ckan.lib.munge import munge_title_to_name, substitute_ascii_equivalents -from ckan.logic import get_action -from ckan.lib.helpers import render_markdown -from ckan.plugins import toolkit - - -import logging -log = logging.getLogger(__name__) - - -class MigrationCommand(CkanCommand): - ''' - CKAN 'Related Items' to 'Showcase' migration command. - - Usage:: - - paster showcase migrate -c <path to config file> - - Migrate Related Items to Showcases - - paster showcase migrate -c <path to config file> [--allow-duplicates] - - Migrate Related Items to Showcases and allow duplicates - - paster showcase markdown-to-html -c <path to config file> - - Migrate the notes of all showcases from markdown to html. - - Must be run from the ckanext-showcase directory. - ''' - summary = __doc__.split('\n')[0] - usage = __doc__ - - def __init__(self,name): - super(CkanCommand, self).__init__(name) - - self.parser.add_option('--allow-duplicates', dest='allow_duplicates', - default=False, help='''Use this option to allow - related items with duplicate titles to be migrated. - Duplicate showcases will be created as - 'duplicate_<related-name>_<related-id>'.''', action='store_true') - - def command(self): - ''' - Parse command line arguments and call appropriate method. 
- ''' - if not self.args or self.args[0] in ['--help', '-h', 'help']: - print(self.__doc__) - return - - cmd = self.args[0] - self._load_config() - - if cmd == 'migrate': - self.migrate() - elif cmd == 'markdown-to-html': - self.markdown_to_html() - else: - print('Command "{0}" not recognized'.format(cmd)) - - def migrate(self): - ''' - - ''' - # determine whether migration should allow duplicates - allow_duplicates = self.options.allow_duplicates - - related_items = get_action('related_list')(data_dict={}) - - # preflight: - # related items must have unique titles before migration - related_titles = [i['title'] for i in related_items] - # make a list of duplicate titles - duplicate_titles = self._find_duplicates(related_titles) - if duplicate_titles and allow_duplicates == False: - print( - """All Related Items must have unique titles before migration. The following -Related Item titles are used more than once and need to be corrected before -migration can continue. Please correct and try again:""" - ) - for i in duplicate_titles: - print(i) - return - - for related in related_items: - existing_showcase = get_action('package_search')( - data_dict={'fq': '+dataset_type:showcase original_related_item_id:{0}'.format(related['id'])}) - normalized_title = substitute_ascii_equivalents(related['title']) - if existing_showcase['count'] > 0: - print('Showcase for Related Item "{0}" already exists.'.format( - normalized_title)) - else: - showcase_title = self._gen_new_title(related.get('title'), related['id']) - data_dict = { - 'original_related_item_id': related.get('id'), - 'title': showcase_title, - 'name': munge_title_to_name(showcase_title), - 'notes': related.get('description'), - 'image_url': related.get('image_url'), - 'url': related.get('url'), - 'tags': [{"name": related.get('type').lower()}] - } - # make the showcase - try: - new_showcase = get_action('ckanext_showcase_create')( - data_dict=data_dict) - except Exception as e: - print('There was a problem migrating "{0}": {1}'.format( - normalized_title, e)) - else: - print('Created Showcase from the Related Item "{0}"'.format(normalized_title)) - - # make the showcase_package_association, if needed - try: - related_pkg_id = self._get_related_dataset( - related['id']) - if related_pkg_id: - get_action('ckanext_showcase_package_association_create')( - data_dict={'showcase_id': new_showcase['id'], - 'package_id': related_pkg_id}) - except Exception as e: - print('There was a problem creating the showcase_package_association for "{0}": {1}'.format( - normalized_title, e)) - - def _get_related_dataset(self, related_id): - '''Get the id of a package from related_dataset, if one exists.''' - related_dataset = model.Session.query(model.RelatedDataset).filter_by( - related_id=related_id).first() - if related_dataset: - return related_dataset.dataset_id - - def _find_duplicates(self, lst): - '''From a list, return a set of duplicates. 
- - >>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 5]) - [] - - >>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 3, 1, 1]) - [1, 3] - - >>> MigrationCommand('cmd')._find_duplicates(['one', 'two', 'three', 'four', 'two', 'three']) - ['two', 'three'] - ''' - return list(set(x for x in lst if lst.count(x) >= 2)) - - def _gen_new_title(self, title, related_id): - name = munge_title_to_name(title) - pkg_obj = model.Session.query(model.Package).filter_by(name=name).first() - if pkg_obj: - title.replace('duplicate_', '') - return 'duplicate_' + title + '_' + related_id - else: - return title - - def markdown_to_html(self): - ''' Migrates the notes of all showcases from markdown to html. - - When using CKEditor, notes on showcases are stored in html instead of - markdown, this command will migrate all nothes using CKAN's - render_markdown core helper. - ''' - showcases = toolkit.get_action('ckanext_showcase_list')(data_dict={}) - - site_user = toolkit.get_action('get_site_user')({ - 'model': model, - 'ignore_auth': True}, - {} - ) - context = { - 'model': model, - 'session': model.Session, - 'ignore_auth': True, - 'user': site_user['name'], - } - - for showcase in showcases: - toolkit.get_action('package_patch')( - context, - { - 'id': showcase['id'], - 'notes': render_markdown(showcase['notes']) - } - ) - print('All notes were migrated successfully.') diff --git a/ckanext/showcase/commands/paster.py b/ckanext/showcase/commands/paster.py new file mode 100644 index 00000000..c58a63fe --- /dev/null +++ b/ckanext/showcase/commands/paster.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function + +from ckan.lib.cli import CkanCommand + +from ckanext.showcase import utils + +# Paster commands for CKAN 2.8 and below + + +class MigrationCommand(CkanCommand): + ''' + CKAN 'Related Items' to 'Showcase' migration command. + + Usage:: + + paster showcase migrate -c <path to config file> + - Migrate Related Items to Showcases + + paster showcase migrate -c <path to config file> [--allow-duplicates] + - Migrate Related Items to Showcases and allow duplicates + + paster showcase markdown-to-html -c <path to config file> + - Migrate the notes of all showcases from markdown to html. + + Must be run from the ckanext-showcase directory. + ''' + summary = __doc__.split('\n')[0] + usage = __doc__ + + def __init__(self, name): + super(CkanCommand, self).__init__(name) + + self.parser.add_option('--allow-duplicates', + dest='allow_duplicates', + default=False, + help='''Use this option to allow + related items with duplicate titles to be migrated. + Duplicate showcases will be created as + 'duplicate_<related-name>_<related-id>'.''', + action='store_true') + + def command(self): + ''' + Parse command line arguments and call appropriate method. 
+ ''' + if not self.args or self.args[0] in ['--help', '-h', 'help']: + print(self.__doc__) + return + + cmd = self.args[0] + self._load_config() + + if cmd == 'migrate': + self.migrate() + elif cmd == 'markdown-to-html': + self.markdown_to_html() + else: + print('Command "{0}" not recognized'.format(cmd)) + + def migrate(self): + utils.migrate(self.options.allow_duplicates) + + def markdown_to_html(self): + utils.markdown_to_html() diff --git a/ckanext/showcase/controller.py b/ckanext/showcase/controller.py index 8340b655..d0ad1272 100644 --- a/ckanext/showcase/controller.py +++ b/ckanext/showcase/controller.py @@ -1,22 +1,16 @@ -from urllib import urlencode import logging import json -from pylons import config from ckan.plugins import toolkit as tk -import ckan.model as model import ckan.lib.helpers as h import ckan.lib.navl.dictization_functions as dict_fns import ckan.logic as logic -import ckan.plugins as p -from ckan.common import OrderedDict, ungettext -from ckan.controllers.package import (PackageController, - url_with_params, - _encode_params) +from ckan.controllers.package import (PackageController) -from ckanext.showcase.model import ShowcasePackageAssociation -from ckanext.showcase.plugin import DATASET_TYPE_NAME + +from ckanext.showcase import utils +from ckanext.showcase.utils import DATASET_TYPE_NAME _ = tk._ c = tk.c @@ -33,46 +27,24 @@ parse_params = logic.parse_params NotAuthorized = tk.NotAuthorized - log = logging.getLogger(__name__) class ShowcaseController(PackageController): - def new(self, data=None, errors=None, error_summary=None): - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'auth_user_obj': c.userobj, - 'save': 'save' in request.params} - - # Check access here, then continue with PackageController.new() - # PackageController.new will also check access for package_create. - # This is okay for now, while only sysadmins can create Showcases, but - # may not work if we allow other users to create Showcases, who don't - # have access to create dataset package types. Same for edit below. - try: - check_access('ckanext_showcase_create', context) - except NotAuthorized: - abort(401, _('Unauthorized to create a package')) - - return super(ShowcaseController, self).new(data=data, errors=errors, + utils.check_new_view_auth() + return super(ShowcaseController, self).new(data=data, + errors=errors, error_summary=error_summary) def edit(self, id, data=None, errors=None, error_summary=None): - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'auth_user_obj': c.userobj, - 'save': 'save' in request.params, - 'moderated': config.get('moderated'), - 'pending': True} - - try: - check_access('ckanext_showcase_update', context) - except NotAuthorized: - abort(401, _('User not authorized to edit {showcase_id}').format( - showcase_id=id)) - - return super(ShowcaseController, self).edit( - id, data=data, errors=errors, error_summary=error_summary) + utils.check_edit_view_auth(id) + return super(ShowcaseController, + self).edit(id, + data=data, + errors=errors, + error_summary=error_summary) def _guess_package_type(self, expecting_name=False): """Showcase packages are always DATASET_TYPE_NAME.""" @@ -85,8 +57,8 @@ def _save_new(self, context, package_type=None): associated packages with the new showcase. 
''' - data_dict = clean_dict(dict_fns.unflatten( - tuplize_dict(parse_params(request.POST)))) + data_dict = clean_dict( + dict_fns.unflatten(tuplize_dict(parse_params(request.POST)))) data_dict['type'] = package_type context['message'] = data_dict.get('log_message', '') @@ -101,9 +73,7 @@ def _save_new(self, context, package_type=None): return self.new(data_dict, errors, error_summary) # redirect to manage datasets - url = h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_datasets', id=pkg_dict['name']) + url = h.url_for('showcase_manage_datasets', id=pkg_dict['name']) redirect(url) def _save_edit(self, name_or_id, context, package_type=None): @@ -111,8 +81,8 @@ def _save_edit(self, name_or_id, context, package_type=None): Edit a showcase's details, then redirect to the showcase read page. ''' - data_dict = clean_dict(dict_fns.unflatten( - tuplize_dict(parse_params(request.POST)))) + data_dict = clean_dict( + dict_fns.unflatten(tuplize_dict(parse_params(request.POST)))) data_dict['id'] = name_or_id try: @@ -125,9 +95,7 @@ def _save_edit(self, name_or_id, context, package_type=None): c.pkg_dict = pkg # redirect to showcase details page - url = h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='read', id=pkg['name']) + url = h.url_for('showcase_read', id=pkg['name']) redirect(url) def read(self, id, format='html'): @@ -135,485 +103,29 @@ def read(self, id, format='html'): Detail view for a single showcase, listing its associated datasets. ''' - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True, - 'auth_user_obj': c.userobj} - data_dict = {'id': id} - - # check if showcase exists - try: - c.pkg_dict = get_action('package_show')(context, data_dict) - except NotFound: - abort(404, _('Showcase not found')) - except NotAuthorized: - abort(401, _('Unauthorized to read showcase')) - - # get showcase packages - c.showcase_pkgs = get_action('ckanext_showcase_package_list')( - context, {'showcase_id': c.pkg_dict['id']}) - - package_type = DATASET_TYPE_NAME - return render(self._read_template(package_type), - extra_vars={'dataset_type': package_type}) + return utils.read_view(id) def delete(self, id): - if 'cancel' in request.params: - tk.redirect_to( - controller='ckanext.showcase.controller:ShowcaseController', - action='edit', id=id) - - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'auth_user_obj': c.userobj} - - try: - check_access('ckanext_showcase_delete', context, {'id': id}) - except NotAuthorized: - abort(401, _('Unauthorized to delete showcase')) - - try: - if request.method == 'POST': - get_action('ckanext_showcase_delete')(context, {'id': id}) - h.flash_notice(_('Showcase has been deleted.')) - tk.redirect_to( - controller='ckanext.showcase.controller:ShowcaseController', - action='search') - c.pkg_dict = get_action('package_show')(context, {'id': id}) - except NotAuthorized: - abort(401, _('Unauthorized to delete showcase')) - except NotFound: - abort(404, _('Showcase not found')) - return render('showcase/confirm_delete.html', - extra_vars={'dataset_type': DATASET_TYPE_NAME}) + return utils.delete_view(id) def dataset_showcase_list(self, id): ''' Display a list of showcases a dataset is associated with, with an option to add to showcase from a list. 
''' - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True, - 'auth_user_obj': c.userobj} - data_dict = {'id': id} - - try: - check_access('package_show', context, data_dict) - except NotFound: - abort(404, _('Dataset not found')) - except NotAuthorized: - abort(401, _('Not authorized to see this page')) - - try: - c.pkg_dict = get_action('package_show')(context, data_dict) - c.showcase_list = get_action('ckanext_package_showcase_list')( - context, {'package_id': c.pkg_dict['id']}) - except NotFound: - abort(404, _('Dataset not found')) - except logic.NotAuthorized: - abort(401, _('Unauthorized to read package')) - - if request.method == 'POST': - # Are we adding the dataset to a showcase? - new_showcase = request.POST.get('showcase_added') - if new_showcase: - data_dict = {"showcase_id": new_showcase, - "package_id": c.pkg_dict['id']} - try: - get_action('ckanext_showcase_package_association_create')( - context, data_dict) - except NotFound: - abort(404, _('Showcase not found')) - else: - h.flash_success( - _("The dataset has been added to the showcase.")) - - # Are we removing a dataset from a showcase? - showcase_to_remove = request.POST.get('remove_showcase_id') - if showcase_to_remove: - data_dict = {"showcase_id": showcase_to_remove, - "package_id": c.pkg_dict['id']} - try: - get_action('ckanext_showcase_package_association_delete')( - context, data_dict) - except NotFound: - abort(404, _('Showcase not found')) - else: - h.flash_success( - _("The dataset has been removed from the showcase.")) - redirect(h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='dataset_showcase_list', id=c.pkg_dict['name'])) - - pkg_showcase_ids = [showcase['id'] for showcase in c.showcase_list] - site_showcases = get_action('ckanext_showcase_list')(context, {}) - - c.showcase_dropdown = [[showcase['id'], showcase['title']] - for showcase in site_showcases - if showcase['id'] not in pkg_showcase_ids] - - return render("package/dataset_showcase_list.html") + return utils.dataset_showcase_list(id) def manage_datasets(self, id): ''' List datasets associated with the given showcase id. ''' - - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author} - data_dict = {'id': id} - - try: - check_access('ckanext_showcase_update', context) - except NotAuthorized: - abort(401, _('User not authorized to edit {showcase_id}').format( - showcase_id=id)) - - # check if showcase exists - try: - c.pkg_dict = get_action('package_show')(context, data_dict) - except NotFound: - abort(404, _('Showcase not found')) - except NotAuthorized: - abort(401, _('Unauthorized to read showcase')) - - # Are we removing a showcase/dataset association? 
- if (request.method == 'POST' - and 'bulk_action.showcase_remove' in request.params): - # Find the datasets to perform the action on, they are prefixed by - # dataset_ in the form data - dataset_ids = [] - for param in request.params: - if param.startswith('dataset_'): - dataset_ids.append(param[8:]) - if dataset_ids: - for dataset_id in dataset_ids: - get_action('ckanext_showcase_package_association_delete')( - context, - {'showcase_id': c.pkg_dict['id'], - 'package_id': dataset_id}) - h.flash_success( - ungettext( - "The dataset has been removed from the showcase.", - "The datasets have been removed from the showcase.", - len(dataset_ids))) - url = h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_datasets', id=id) - redirect(url) - - # Are we creating a showcase/dataset association? - elif (request.method == 'POST' - and 'bulk_action.showcase_add' in request.params): - # Find the datasets to perform the action on, they are prefixed by - # dataset_ in the form data - dataset_ids = [] - for param in request.params: - if param.startswith('dataset_'): - dataset_ids.append(param[8:]) - if dataset_ids: - successful_adds = [] - for dataset_id in dataset_ids: - try: - get_action( - 'ckanext_showcase_package_association_create')( - context, {'showcase_id': c.pkg_dict['id'], - 'package_id': dataset_id}) - except ValidationError as e: - h.flash_notice(e.error_summary) - else: - successful_adds.append(dataset_id) - if successful_adds: - h.flash_success( - ungettext( - "The dataset has been added to the showcase.", - "The datasets have been added to the showcase.", - len(successful_adds))) - url = h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_datasets', id=id) - redirect(url) - - self._add_dataset_search(c.pkg_dict['id'], c.pkg_dict['name']) - - # get showcase packages - c.showcase_pkgs = get_action('ckanext_showcase_package_list')( - context, {'showcase_id': c.pkg_dict['id']}) - - return render('showcase/manage_datasets.html') - - def _search_url(self, params, name): - url = h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_datasets', id=name) - return url_with_params(url, params) - - def _add_dataset_search(self, showcase_id, showcase_name): - ''' - Search logic for discovering datasets to add to a showcase. - ''' - - from ckan.lib.search import SearchError - - package_type = 'dataset' - - # unicode format (decoded from utf8) - q = c.q = request.params.get('q', u'') - c.query_error = False - try: - page = self._get_page_number(request.params) - except AttributeError: - # in CKAN >= 2.5 _get_page_number has been moved - page = h.get_page_number(request.params) - - limit = int(config.get('ckan.datasets_per_page', 20)) - - # most search operations should reset the page counter: - params_nopage = [(k, v) for k, v in request.params.items() - if k != 'page'] - - def drill_down_url(alternative_url=None, **by): - return h.add_url_param(alternative_url=alternative_url, - controller='package', action='search', - new_params=by) - - c.drill_down_url = drill_down_url - - def remove_field(key, value=None, replace=None): - return h.remove_url_param(key, value=value, replace=replace, - controller='package', action='search') - - c.remove_field = remove_field - - sort_by = request.params.get('sort', None) - params_nosort = [(k, v) for k, v in params_nopage if k != 'sort'] - - def _sort_by(fields): - """ - Sort by the given list of fields. 
- - Each entry in the list is a 2-tuple: (fieldname, sort_order) - - eg - [('metadata_modified', 'desc'), ('name', 'asc')] - - If fields is empty, then the default ordering is used. - """ - params = params_nosort[:] - - if fields: - sort_string = ', '.join('%s %s' % f for f in fields) - params.append(('sort', sort_string)) - return self._search_url(params, showcase_name) - - c.sort_by = _sort_by - if sort_by is None: - c.sort_by_fields = [] - else: - c.sort_by_fields = [field.split()[0] - for field in sort_by.split(',')] - - def pager_url(q=None, page=None): - params = list(params_nopage) - params.append(('page', page)) - return self._search_url(params, showcase_name) - - c.search_url_params = urlencode(_encode_params(params_nopage)) - - try: - c.fields = [] - # c.fields_grouped will contain a dict of params containing - # a list of values eg {'tags':['tag1', 'tag2']} - c.fields_grouped = {} - search_extras = {} - fq = '' - for (param, value) in request.params.items(): - if param not in ['q', 'page', 'sort'] \ - and len(value) and not param.startswith('_'): - if not param.startswith('ext_'): - c.fields.append((param, value)) - fq += ' %s:"%s"' % (param, value) - if param not in c.fields_grouped: - c.fields_grouped[param] = [value] - else: - c.fields_grouped[param].append(value) - else: - search_extras[param] = value - - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author, 'for_view': True, - 'auth_user_obj': c.userobj} - - if package_type and package_type != 'dataset': - # Only show datasets of this particular type - fq += ' +dataset_type:{type}'.format(type=package_type) - else: - # Unless changed via config options, don't show non standard - # dataset types on the default search page - if not tk.asbool(config.get('ckan.search.show_all_types', - 'False')): - fq += ' +dataset_type:dataset' - - # Only search for packages that aren't already associated with the - # Showcase - associated_package_ids = ShowcasePackageAssociation.get_package_ids_for_showcase(showcase_id) - # flatten resulting list to space separated string - if associated_package_ids: - associated_package_ids_str = \ - ' OR '.join([id[0] for id in associated_package_ids]) - fq += ' !id:({0})'.format(associated_package_ids_str) - - facets = OrderedDict() - - default_facet_titles = { - 'organization': _('Organizations'), - 'groups': _('Groups'), - 'tags': _('Tags'), - 'res_format': _('Formats'), - 'license_id': _('Licenses'), - } - - # for CKAN-Versions that do not provide the facets-method from - # helper-context, import facets from ckan.common - if hasattr(h, 'facets'): - current_facets = h.facets() - else: - from ckan.common import g - current_facets = g.facets - - for facet in current_facets: - if facet in default_facet_titles: - facets[facet] = default_facet_titles[facet] - else: - facets[facet] = facet - - # Facet titles - for plugin in p.PluginImplementations(p.IFacets): - facets = plugin.dataset_facets(facets, package_type) - - c.facet_titles = facets - - data_dict = { - 'q': q, - 'fq': fq.strip(), - 'facet.field': facets.keys(), - 'rows': limit, - 'start': (page - 1) * limit, - 'sort': sort_by, - 'extras': search_extras - } - - query = get_action('package_search')(context, data_dict) - c.sort_by_selected = query['sort'] - - c.page = h.Page( - collection=query['results'], - page=page, - url=pager_url, - item_count=query['count'], - items_per_page=limit - ) - c.facets = query['facets'] - c.search_facets = query['search_facets'] - c.page.items = query['results'] - except SearchError, se: - 
log.error('Dataset search error: %r', se.args) - c.query_error = True - c.facets = {} - c.search_facets = {} - c.page = h.Page(collection=[]) - c.search_facets_limits = {} - for facet in c.search_facets.keys(): - try: - limit = int(request.params.get('_%s_limit' % facet, - int(config.get('search.facets.default', 10)))) - except ValueError: - abort(400, _("Parameter '{parameter_name}' is not an integer").format( - parameter_name='_%s_limit' % facet - )) - c.search_facets_limits[facet] = limit + return utils.manage_datasets_view(id) def manage_showcase_admins(self): - ''' - A ckan-admin page to list and add showcase admin users. - ''' - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author} - - try: - check_access('sysadmin', context, {}) - except NotAuthorized: - abort(401, _('User not authorized to view page')) - - # We're trying to add a user to the showcase admins list. - if request.method == 'POST' and request.params['username']: - username = request.params['username'] - try: - get_action('ckanext_showcase_admin_add')( - data_dict={'username': username}) - except NotAuthorized: - abort(401, _('Unauthorized to perform that action')) - except NotFound: - h.flash_error(_("User '{user_name}' not found.").format( - user_name=username)) - except ValidationError as e: - h.flash_notice(e.error_summary) - else: - h.flash_success(_("The user is now a Showcase Admin")) - - return redirect(h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_showcase_admins')) - - c.showcase_admins = get_action('ckanext_showcase_admin_list')() - - return render('admin/manage_showcase_admins.html') + return utils.manage_showcase_admins() def remove_showcase_admin(self): - ''' - Remove a user from the Showcase Admin list. 
- ''' - context = {'model': model, 'session': model.Session, - 'user': c.user or c.author} - - try: - check_access('sysadmin', context, {}) - except NotAuthorized: - abort(401, _('User not authorized to view page')) - - if 'cancel' in request.params: - tk.redirect_to( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_showcase_admins') - - user_id = request.params['user'] - if request.method == 'POST' and user_id: - user_id = request.params['user'] - try: - get_action('ckanext_showcase_admin_remove')( - data_dict={'username': user_id}) - except NotAuthorized: - abort(401, _('Unauthorized to perform that action')) - except NotFound: - h.flash_error(_('The user is not a Showcase Admin')) - else: - h.flash_success(_('The user is no longer a Showcase Admin')) - - return redirect(h.url_for( - controller='ckanext.showcase.controller:ShowcaseController', - action='manage_showcase_admins')) - - c.user_dict = get_action('user_show')(data_dict={'id': user_id}) - c.user_id = user_id - return render('admin/confirm_remove_showcase_admin.html') + return utils.remove_showcase_admin() def showcase_upload(self): - if not tk.request.method == 'POST': - tk.abort(409, _('Only Posting is availiable')) - - try: - url = tk.get_action('ckanext_showcase_upload')( - None, - dict(tk.request.POST) - ) - except tk.NotAuthorized: - tk.abort(401, _('Unauthorized to upload file %s') % id) - - return json.dumps(url) + return utils.upload() diff --git a/ckanext/showcase/fanstatic/.gitignore b/ckanext/showcase/fanstatic/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/ckanext/showcase/fanstatic/resource.config b/ckanext/showcase/fanstatic/resource.config new file mode 100644 index 00000000..07a8ea2a --- /dev/null +++ b/ckanext/showcase/fanstatic/resource.config @@ -0,0 +1,8 @@ +[depends] + +ckeditor = base/main + +[groups] +ckeditor = + dist/ckeditor.js + js/showcase-ckeditor.js diff --git a/ckanext/showcase/fanstatic/webassets.yml b/ckanext/showcase/fanstatic/webassets.yml new file mode 100644 index 00000000..d4ce090b --- /dev/null +++ b/ckanext/showcase/fanstatic/webassets.yml @@ -0,0 +1,21 @@ +ckeditor: + filter: rjsmin + output: ckanext-showcase/%(version)s_ckeditor.json + extra: + preload: + - base/main + contents: + - dist/ckeditor.js + - js/showcase-ckeditor.js + +ckanext-showcase-css: + contents: + - ckanext_showcase.css + output: showcase/%(version)s_ckanext_showcase.css + filter: cssrewrite + +ckeditor-content-css: + contents: + - ckeditor-content-style.css + output: showcase/%(version)s_ckeditor-content-style.css + filter: cssrewrite diff --git a/ckanext/showcase/logic/action/__init__.py b/ckanext/showcase/logic/action/__init__.py index e69de29b..43ab2d14 100644 --- a/ckanext/showcase/logic/action/__init__.py +++ b/ckanext/showcase/logic/action/__init__.py @@ -0,0 +1,36 @@ +import ckanext.showcase.logic.action.create +import ckanext.showcase.logic.action.delete +import ckanext.showcase.logic.action.update +import ckanext.showcase.logic.action.get + + +def get_actions(): + action_functions = { + 'ckanext_showcase_create': + ckanext.showcase.logic.action.create.showcase_create, + 'ckanext_showcase_update': + ckanext.showcase.logic.action.update.showcase_update, + 'ckanext_showcase_delete': + ckanext.showcase.logic.action.delete.showcase_delete, + 'ckanext_showcase_show': + ckanext.showcase.logic.action.get.showcase_show, + 'ckanext_showcase_list': + ckanext.showcase.logic.action.get.showcase_list, + 'ckanext_showcase_package_association_create': + 
ckanext.showcase.logic.action.create.showcase_package_association_create,
+        'ckanext_showcase_package_association_delete':
+        ckanext.showcase.logic.action.delete.showcase_package_association_delete,
+        'ckanext_showcase_package_list':
+        ckanext.showcase.logic.action.get.showcase_package_list,
+        'ckanext_package_showcase_list':
+        ckanext.showcase.logic.action.get.package_showcase_list,
+        'ckanext_showcase_admin_add':
+        ckanext.showcase.logic.action.create.showcase_admin_add,
+        'ckanext_showcase_admin_remove':
+        ckanext.showcase.logic.action.delete.showcase_admin_remove,
+        'ckanext_showcase_admin_list':
+        ckanext.showcase.logic.action.get.showcase_admin_list,
+        'ckanext_showcase_upload':
+        ckanext.showcase.logic.action.create.showcase_upload,
+    }
+    return action_functions
diff --git a/ckanext/showcase/logic/action/create.py b/ckanext/showcase/logic/action/create.py
index a952df4e..f58dafec 100644
--- a/ckanext/showcase/logic/action/create.py
+++ b/ckanext/showcase/logic/action/create.py
@@ -24,7 +24,6 @@ def showcase_create(context, data_dict):
     # force type to 'showcase'
     data_dict['type'] = 'showcase'
 
-
     # If get_uploader is available (introduced for IUploader in CKAN 2.5), use
     # it, otherwise use the default uploader.
     # https://github.com/ckan/ckan/pull/2510
diff --git a/ckanext/showcase/logic/auth.py b/ckanext/showcase/logic/auth.py
index 09f2c714..2d8f08e1 100644
--- a/ckanext/showcase/logic/auth.py
+++ b/ckanext/showcase/logic/auth.py
@@ -7,6 +7,24 @@
 log = logging.getLogger(__name__)
 
 
+def get_auth_functions():
+    return {
+        'ckanext_showcase_create': create,
+        'ckanext_showcase_update': update,
+        'ckanext_showcase_delete': delete,
+        'ckanext_showcase_show': show,
+        'ckanext_showcase_list': showcase_list,
+        'ckanext_showcase_package_association_create': package_association_create,
+        'ckanext_showcase_package_association_delete': package_association_delete,
+        'ckanext_showcase_package_list': showcase_package_list,
+        'ckanext_package_showcase_list': package_showcase_list,
+        'ckanext_showcase_admin_add': add_showcase_admin,
+        'ckanext_showcase_admin_remove': remove_showcase_admin,
+        'ckanext_showcase_admin_list': showcase_admin_list,
+        'ckanext_showcase_upload': showcase_upload,
+    }
+
+
 def _is_showcase_admin(context):
     '''
     Determines whether user in context is in the showcase admin list.
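CKAN discovers both registries through the IAuthFunctions and IActions plugin interfaces, so the dictionary keys above double as the names passed to check_access() and get_action(). A minimal sketch of a caller exercising one of the registered auth functions (illustrative only; the can_create_showcase helper is an assumption, not part of this patch):

import ckan.plugins.toolkit as toolkit

def can_create_showcase(context):
    # check_access() resolves 'ckanext_showcase_create' to the function
    # registered under that key by get_auth_functions() above and raises
    # NotAuthorized when that auth function denies the user.
    try:
        toolkit.check_access('ckanext_showcase_create', context, {})
        return True
    except toolkit.NotAuthorized:
        return False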
@@ -47,7 +65,7 @@ def show(context, data_dict): @toolkit.auth_allow_anonymous_access -def list(context, data_dict): +def showcase_list(context, data_dict): '''All users can access a showcase list''' return {'success': True} diff --git a/ckanext/showcase/logic/helpers.py b/ckanext/showcase/logic/helpers.py index 717e8e86..be6b87f8 100644 --- a/ckanext/showcase/logic/helpers.py +++ b/ckanext/showcase/logic/helpers.py @@ -9,8 +9,7 @@ def facet_remove_field(key, value=None, replace=None): ''' return h.remove_url_param( key, value=value, replace=replace, - controller='ckanext.showcase.controller:ShowcaseController', - action='search') + alternative_url=h.url_for('showcase_index')) def get_site_statistics(): diff --git a/ckanext/showcase/logic/schema.py b/ckanext/showcase/logic/schema.py index bcce6aa1..27f9f516 100644 --- a/ckanext/showcase/logic/schema.py +++ b/ckanext/showcase/logic/schema.py @@ -1,3 +1,6 @@ +# -*- coding: utf-8 -*- + +import six import ckan.plugins.toolkit as toolkit from ckan.lib.navl.validators import (not_empty, empty, @@ -25,15 +28,15 @@ def showcase_base_schema(): schema = { 'id': [empty], 'revision_id': [ignore], - 'name': [not_empty, unicode, name_validator, package_name_validator], - 'title': [if_empty_same_as("name"), unicode], - 'author': [ignore_missing, unicode], - 'author_email': [ignore_missing, unicode], - 'notes': [ignore_missing, unicode], - 'url': [ignore_missing, unicode], + 'name': [not_empty, six.text_type, name_validator, package_name_validator], + 'title': [if_empty_same_as("name"), six.text_type], + 'author': [ignore_missing, six.text_type], + 'author_email': [ignore_missing, six.text_type], + 'notes': [ignore_missing, six.text_type], + 'url': [ignore_missing, six.text_type], 'state': [ignore_not_package_admin, ignore_missing], - 'type': [ignore_missing, unicode], - 'log_message': [ignore_missing, unicode, no_http], + 'type': [ignore_missing, six.text_type], + 'log_message': [ignore_missing, six.text_type, no_http], '__extras': [ignore], '__junk': [empty], 'resources': default_resource_schema(), @@ -65,11 +68,11 @@ def showcase_update_schema(): # Supplying the package name when updating a package is optional (you can # supply the id to identify the package instead). schema['name'] = [ignore_missing, name_validator, - package_name_validator, unicode] + package_name_validator, six.text_type] # Supplying the package title when updating a package is optional, if it's # not supplied the title will not be changed. 
- schema['title'] = [ignore_missing, unicode] + schema['title'] = [ignore_missing, six.text_type] return schema @@ -100,8 +103,8 @@ def showcase_show_schema(): schema['metadata_modified'] = [] schema['creator_user_id'] = [] schema['num_tags'] = [] - schema['revision_id'] = [] - schema['tracking_summary'] = [] + schema['revision_id'] = [ignore_missing] + schema['tracking_summary'] = [ignore_missing] schema.update({ 'image_url': [toolkit.get_converter('convert_from_extras'), @@ -116,9 +119,9 @@ def showcase_show_schema(): def showcase_package_association_create_schema(): schema = { - 'package_id': [not_empty, unicode, + 'package_id': [not_empty, six.text_type, convert_package_name_or_id_to_id_for_type_dataset], - 'showcase_id': [not_empty, unicode, + 'showcase_id': [not_empty, six.text_type, convert_package_name_or_id_to_id_for_type_showcase] } return schema @@ -130,7 +133,7 @@ def showcase_package_association_delete_schema(): def showcase_package_list_schema(): schema = { - 'showcase_id': [not_empty, unicode, + 'showcase_id': [not_empty, six.text_type, convert_package_name_or_id_to_id_for_type_showcase] } return schema @@ -138,7 +141,7 @@ def showcase_package_list_schema(): def package_showcase_list_schema(): schema = { - 'package_id': [not_empty, unicode, + 'package_id': [not_empty, six.text_type, convert_package_name_or_id_to_id_for_type_dataset] } return schema @@ -146,7 +149,7 @@ def package_showcase_list_schema(): def showcase_admin_add_schema(): schema = { - 'username': [not_empty, user_id_or_name_exists, unicode], + 'username': [not_empty, user_id_or_name_exists, six.text_type], } return schema diff --git a/ckanext/showcase/plugin.py b/ckanext/showcase/plugin/__init__.py similarity index 54% rename from ckanext/showcase/plugin.py rename to ckanext/showcase/plugin/__init__.py index 5fdbe35f..3aeaef50 100644 --- a/ckanext/showcase/plugin.py +++ b/ckanext/showcase/plugin/__init__.py @@ -1,39 +1,47 @@ +# -*- coding: utf-8 -*- + import os import sys +import json import logging +from collections import OrderedDict + +from six import string_types import ckan.plugins as plugins import ckan.lib.plugins as lib_plugins import ckan.lib.helpers as h -from ckan.plugins import toolkit as tk -from ckan.common import OrderedDict from ckan import model as ckan_model -from routes.mapper import SubMapper +import ckantoolkit as tk + + +import ckanext.showcase.utils as utils +from ckanext.showcase.logic import auth, action -import ckanext.showcase.logic.auth -import ckanext.showcase.logic.action.create -import ckanext.showcase.logic.action.delete -import ckanext.showcase.logic.action.update -import ckanext.showcase.logic.action.get import ckanext.showcase.logic.schema as showcase_schema import ckanext.showcase.logic.helpers as showcase_helpers from ckanext.showcase.model import setup as model_setup +if tk.check_ckan_version(u'2.9'): + from ckanext.showcase.plugin.flask_plugin import MixinPlugin +else: + from ckanext.showcase.plugin.pylons_plugin import MixinPlugin + c = tk.c _ = tk._ log = logging.getLogger(__name__) -DATASET_TYPE_NAME = 'showcase' +DATASET_TYPE_NAME = utils.DATASET_TYPE_NAME -class ShowcasePlugin(plugins.SingletonPlugin, lib_plugins.DefaultDatasetForm): +class ShowcasePlugin( + MixinPlugin, plugins.SingletonPlugin, lib_plugins.DefaultDatasetForm): plugins.implements(plugins.IConfigurable) plugins.implements(plugins.IConfigurer) plugins.implements(plugins.IDatasetForm) plugins.implements(plugins.IFacets, inherit=True) - plugins.implements(plugins.IRoutes, inherit=True) 
plugins.implements(plugins.IAuthFunctions) plugins.implements(plugins.IActions) plugins.implements(plugins.IPackageController, inherit=True) @@ -48,13 +56,30 @@ class ShowcasePlugin(plugins.SingletonPlugin, lib_plugins.DefaultDatasetForm): # IConfigurer def update_config(self, config): - tk.add_template_directory(config, 'templates') - tk.add_public_directory(config, 'public') - tk.add_resource('fanstatic', 'showcase') + tk.add_template_directory(config, '../templates') + tk.add_public_directory(config, '../public') + tk.add_resource('../fanstatic', 'showcase') if tk.check_ckan_version(min_version='2.4'): - tk.add_ckan_admin_tab(config, 'ckanext_showcase_admins', + tk.add_ckan_admin_tab(config, 'showcase_admins', 'Showcase Config') + if tk.check_ckan_version(min_version='2.9.0'): + mappings = config.get('ckan.legacy_route_mappings', {}) + if isinstance(mappings, string_types): + mappings = json.loads(mappings) + + bp_routes = [ + 'index', 'new', 'delete', + 'read', 'edit', 'manage_datasets', + 'dataset_showcase_list', 'admins', 'admin_remove' + ] + mappings.update({ + 'showcase_' + route: 'showcase_blueprint.' + route + for route in bp_routes + }) + # https://github.com/ckan/ckan/pull/4521 + config['ckan.legacy_route_mappings'] = json.dumps(mappings) + # IConfigurable def configure(self, config): @@ -112,94 +137,12 @@ def dataset_facets(self, facets_dict, package_type): # IAuthFunctions def get_auth_functions(self): - return { - 'ckanext_showcase_create': ckanext.showcase.logic.auth.create, - 'ckanext_showcase_update': ckanext.showcase.logic.auth.update, - 'ckanext_showcase_delete': ckanext.showcase.logic.auth.delete, - 'ckanext_showcase_show': ckanext.showcase.logic.auth.show, - 'ckanext_showcase_list': ckanext.showcase.logic.auth.list, - 'ckanext_showcase_package_association_create': - ckanext.showcase.logic.auth.package_association_create, - 'ckanext_showcase_package_association_delete': - ckanext.showcase.logic.auth.package_association_delete, - 'ckanext_showcase_package_list': - ckanext.showcase.logic.auth.showcase_package_list, - 'ckanext_package_showcase_list': - ckanext.showcase.logic.auth.package_showcase_list, - 'ckanext_showcase_admin_add': - ckanext.showcase.logic.auth.add_showcase_admin, - 'ckanext_showcase_admin_remove': - ckanext.showcase.logic.auth.remove_showcase_admin, - 'ckanext_showcase_admin_list': - ckanext.showcase.logic.auth.showcase_admin_list, - 'ckanext_showcase_upload': - ckanext.showcase.logic.auth.showcase_upload - } - - # IRoutes - - def before_map(self, map): - # These named routes are used for custom dataset forms which will use - # the names below based on the dataset.type ('dataset' is the default - # type) - with SubMapper(map, controller='ckanext.showcase.controller:ShowcaseController') as m: - m.connect('ckanext_showcase_index', '/showcase', action='search', - highlight_actions='index search') - m.connect('ckanext_showcase_new', '/showcase/new', action='new') - m.connect('ckanext_showcase_delete', '/showcase/delete/{id}', - action='delete') - m.connect('ckanext_showcase_read', '/showcase/{id}', action='read', - ckan_icon='picture') - m.connect('ckanext_showcase_edit', '/showcase/edit/{id}', - action='edit', ckan_icon='edit') - m.connect('ckanext_showcase_manage_datasets', - '/showcase/manage_datasets/{id}', - action="manage_datasets", ckan_icon="sitemap") - m.connect('dataset_showcase_list', '/dataset/showcases/{id}', - action='dataset_showcase_list', ckan_icon='picture') - m.connect('ckanext_showcase_admins', '/ckan-admin/showcase_admins', - 
action='manage_showcase_admins', ckan_icon='picture'), - m.connect('ckanext_showcase_admin_remove', - '/ckan-admin/showcase_admin_remove', - action='remove_showcase_admin'), - m.connect('showcase_upload', '/showcase_upload', - action='showcase_upload') - map.redirect('/showcases', '/showcase') - map.redirect('/showcases/{url:.*}', '/showcase/{url}') - return map + return auth.get_auth_functions() # IActions def get_actions(self): - action_functions = { - 'ckanext_showcase_create': - ckanext.showcase.logic.action.create.showcase_create, - 'ckanext_showcase_update': - ckanext.showcase.logic.action.update.showcase_update, - 'ckanext_showcase_delete': - ckanext.showcase.logic.action.delete.showcase_delete, - 'ckanext_showcase_show': - ckanext.showcase.logic.action.get.showcase_show, - 'ckanext_showcase_list': - ckanext.showcase.logic.action.get.showcase_list, - 'ckanext_showcase_package_association_create': - ckanext.showcase.logic.action.create.showcase_package_association_create, - 'ckanext_showcase_package_association_delete': - ckanext.showcase.logic.action.delete.showcase_package_association_delete, - 'ckanext_showcase_package_list': - ckanext.showcase.logic.action.get.showcase_package_list, - 'ckanext_package_showcase_list': - ckanext.showcase.logic.action.get.package_showcase_list, - 'ckanext_showcase_admin_add': - ckanext.showcase.logic.action.create.showcase_admin_add, - 'ckanext_showcase_admin_remove': - ckanext.showcase.logic.action.delete.showcase_admin_remove, - 'ckanext_showcase_admin_list': - ckanext.showcase.logic.action.get.showcase_admin_list, - 'ckanext_showcase_upload': - ckanext.showcase.logic.action.create.showcase_upload, - } - return action_functions + return action.get_actions() # IPackageController diff --git a/ckanext/showcase/plugin/flask_plugin.py b/ckanext/showcase/plugin/flask_plugin.py new file mode 100644 index 00000000..54cdef90 --- /dev/null +++ b/ckanext/showcase/plugin/flask_plugin.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +import ckan.plugins as p +import ckanext.showcase.views as views + +from ckanext.showcase.commands import cli + + +class MixinPlugin(p.SingletonPlugin): + p.implements(p.IBlueprint) + p.implements(p.IClick) + + # IBlueprint + + def get_blueprint(self): + return views.get_blueprints() + + # IClick + + def get_commands(self): + return cli.get_commands() diff --git a/ckanext/showcase/plugin/pylons_plugin.py b/ckanext/showcase/plugin/pylons_plugin.py new file mode 100644 index 00000000..bda3916e --- /dev/null +++ b/ckanext/showcase/plugin/pylons_plugin.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +from routes.mapper import SubMapper +import ckan.plugins as p + + +class MixinPlugin(p.SingletonPlugin): + p.implements(p.IRoutes, inherit=True) + + # IRoutes + + def before_map(self, map): + # These named routes are used for custom dataset forms which will use + # the names below based on the dataset.type ('dataset' is the default + # type) + with SubMapper( + map, controller='ckanext.showcase.controller:ShowcaseController' + ) as m: + m.connect('showcase_index', '/showcase', action='search', + highlight_actions='index search') + m.connect('showcase_new', '/showcase/new', action='new') + m.connect('showcase_delete', '/showcase/delete/{id}', + action='delete') + m.connect('showcase_read', '/showcase/{id}', action='read', + ckan_icon='picture') + m.connect('showcase_edit', '/showcase/edit/{id}', + action='edit', ckan_icon='edit') + m.connect('showcase_manage_datasets', + '/showcase/manage_datasets/{id}', + action="manage_datasets", 
ckan_icon="sitemap") + m.connect('showcase_dataset_showcase_list', '/dataset/showcases/{id}', + action='dataset_showcase_list', ckan_icon='picture') + m.connect('showcase_admins', '/ckan-admin/showcase_admins', + action='manage_showcase_admins', ckan_icon='picture'), + m.connect('showcase_admin_remove', + '/ckan-admin/showcase_admin_remove', + action='remove_showcase_admin'), + m.connect('showcase_upload', '/showcase_upload', + action='showcase_upload') + map.redirect('/showcases', '/showcase') + map.redirect('/showcases/{url:.*}', '/showcase/{url}') + return map diff --git a/ckanext/showcase/templates/admin/confirm_remove_showcase_admin.html b/ckanext/showcase/templates/admin/confirm_remove_showcase_admin.html index 73dd6a28..f37bf3bf 100644 --- a/ckanext/showcase/templates/admin/confirm_remove_showcase_admin.html +++ b/ckanext/showcase/templates/admin/confirm_remove_showcase_admin.html @@ -10,7 +10,7 @@ {% block form %} <p>{{ _('Are you sure you want to remove this user as a Showcase Admin - {name}?').format(name=c.user_dict.name) }}</p> <p class="form-actions"> - <form action="{% url_for controller='ckanext.showcase.controller:ShowcaseController', action='remove_showcase_admin' %}" method="post"> + <form action="{{ h.url_for('showcase_admin_remove') }}" method="post"> <input type="hidden" name="user" value="{{ c.user_id }}" /> <button class="btn" type="submit" name="cancel" >{{ _('Cancel') }}</button> <button class="btn btn-primary" type="submit" name="delete" >{{ _('Confirm Remove') }}</button> diff --git a/ckanext/showcase/templates/admin/manage_showcase_admins.html b/ckanext/showcase/templates/admin/manage_showcase_admins.html index 10a3d2cb..15bc37b4 100644 --- a/ckanext/showcase/templates/admin/manage_showcase_admins.html +++ b/ckanext/showcase/templates/admin/manage_showcase_admins.html @@ -50,7 +50,7 @@ <h3 class="page-heading">{{ _('Showcase Admins') }}</h3> {{ h.linked_user(user_dict['id'], maxlength=20) }} {% set locale = h.dump_json({'content': _('Are you sure you want to remove this user from the Showcase Admin list?')}) %} <div class="btn-group pull-right"> - <a class="btn btn-danger btn-small" href="{% url_for controller='ckanext.showcase.controller:ShowcaseController', action='remove_showcase_admin', user=user_dict['id'] %}" data-module="confirm-action" data-module-i18n="{{ locale }}" title="{{ _('Remove') }}">{% block delete_button_text %}<i class="icon-remove"></i> {{ _('Remove') }}{% endblock %}</a> + <a class="btn btn-danger btn-small" href="{{ h.url_for('showcase_admin_remove', user=user_dict['id']) }}" data-module="confirm-action" data-module-i18n="{{ locale }}" title="{{ _('Remove') }}">{% block delete_button_text %}<i class="icon-remove"></i> {{ _('Remove') }}{% endblock %}</a> </div> </td> </tr> diff --git a/ckanext/showcase/templates/header.html b/ckanext/showcase/templates/header.html index c66a2648..fd582d88 100644 --- a/ckanext/showcase/templates/header.html +++ b/ckanext/showcase/templates/header.html @@ -8,7 +8,7 @@ ('search', _('Datasets')), ('organizations_index', _('Organizations')), ('group_index', _('Groups')), - ('ckanext_showcase_index', _('Showcases')), + ('showcase_index', _('Showcases')), (about_route, _('About')) ) }} {% endblock %} diff --git a/ckanext/showcase/templates/home/snippets/stats.html b/ckanext/showcase/templates/home/snippets/stats.html index 48c0f0e6..4e52ab9e 100644 --- a/ckanext/showcase/templates/home/snippets/stats.html +++ b/ckanext/showcase/templates/home/snippets/stats.html @@ -23,7 +23,7 @@ <h3>{{ _('{site_title} 
statistics').format(site_title=g.site_title) }}</h3> </a> </li> <li> - <a href="{{ h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='search') }}"> + <a href="{{ h.url_for('showcase_index') }}"> <b>{{ h.SI_number_span(stats.showcase_count) }}</b> {{ ungettext('showcase', 'showcases', stats.showcase_count) }} </a> diff --git a/ckanext/showcase/templates/package/dataset_showcase_list.html b/ckanext/showcase/templates/package/dataset_showcase_list.html index 4733d72e..972b3e6e 100644 --- a/ckanext/showcase/templates/package/dataset_showcase_list.html +++ b/ckanext/showcase/templates/package/dataset_showcase_list.html @@ -3,23 +3,23 @@ {% block subtitle %}{{ _('Showcases') }} - {{ super() }}{% endblock %} {% block primary_content_inner %} - {% if h.check_access('ckanext_showcase_update') and c.showcase_dropdown %} - <form method="post" class="form-horizontal" id="showcase-add"> - <select id="field-add_showcase" name="showcase_added" data-module="autocomplete"> - {% for option in c.showcase_dropdown %} - <option value="{{ option[0] }}"> {{ option[1] }}</option> - {% endfor %} - </select> - <button type="submit" class="btn btn-primary" title="{{ _('Associate this showcase with this dataset') }}">{{ _('Add to dataset') }}</button> - </form> - {% endif %} - - <h2>{% block page_heading %}{{ _('Showcases featuring {dataset_name}').format(dataset_name=h.dataset_display_name(c.pkg_dict)) }}{% endblock %}</h2> - {% block showcase_list %} - {% if c.showcase_list %} - {% snippet "showcase/snippets/showcase_list.html", packages=c.showcase_list, pkg_id=c.pkg_dict.name, show_remove=h.check_access('ckanext_showcase_update') %} - {% else %} - <p class="empty">{{ _('There are no showcases that feature this dataset') }}</p> + {% if h.check_access('ckanext_showcase_update') and c.showcase_dropdown %} + <form method="post" class="form-horizontal" id="showcase-add"> + <select id="field-add_showcase" name="showcase_added" data-module="autocomplete"> + {% for option in c.showcase_dropdown %} + <option value="{{ option[0] }}"> {{ option[1] }}</option> + {% endfor %} + </select> + <button type="submit" class="btn btn-primary" title="{{ _('Associate this showcase with this dataset') }}">{{ _('Add to dataset') }}</button> + </form> {% endif %} - {% endblock %} + + <h2>{% block page_heading %}{{ _('Showcases featuring {dataset_name}').format(dataset_name=h.dataset_display_name(c.pkg_dict)) }}{% endblock %}</h2> + {% block showcase_list %} + {% if c.showcase_list %} + {% snippet "showcase/snippets/showcase_list.html", packages=c.showcase_list, pkg_id=c.pkg_dict.name, show_remove=h.check_access('ckanext_showcase_update') %} + {% else %} + <p class="empty">{{ _('There are no showcases that feature this dataset') }}</p> + {% endif %} + {% endblock %} {% endblock %} diff --git a/ckanext/showcase/templates/package/read_base.html b/ckanext/showcase/templates/package/read_base.html index 4a2817d5..0af5adaf 100644 --- a/ckanext/showcase/templates/package/read_base.html +++ b/ckanext/showcase/templates/package/read_base.html @@ -2,5 +2,5 @@ {% block content_primary_nav %} {{ super() }} - {{ h.build_nav_icon('dataset_showcase_list', _('Showcases'), id=pkg.name) }} + {{ h.build_nav_icon('showcase_dataset_showcase_list', _('Showcases'), id=pkg.name) }} {% endblock %} diff --git a/ckanext/showcase/templates/showcase/confirm_delete.html b/ckanext/showcase/templates/showcase/confirm_delete.html index 67b13113..6acfe581 100644 --- a/ckanext/showcase/templates/showcase/confirm_delete.html +++ 
b/ckanext/showcase/templates/showcase/confirm_delete.html @@ -1,21 +1,25 @@ {% extends "page.html" %} +{% set pkg = pkg_dict or c.pkg_dict %} + {% block subtitle %}{{ _("Confirm Delete") }}{% endblock %} -{% block maintag %}<div class="row" role="main">{% endblock %} +{% block maintag %} + <div class="row" role="main"> +{% endblock %} {% block main_content %} - <section class="module span6 offset3"> - <div class="module-content"> - {% block form %} - <p>{{ _('Are you sure you want to delete showcase - {showcase_name}?').format(showcase_name=c.pkg_dict.name) }}</p> - <p class="form-actions"> - <form action="{% url_for controller='ckanext.showcase.controller:ShowcaseController', action='delete', id=c.pkg_dict.name %}" method="post"> - <button class="btn" type="submit" name="cancel" >{{ _('Cancel') }}</button> - <button class="btn btn-primary" type="submit" name="delete" >{{ _('Confirm Delete') }}</button> - </form> - </p> - {% endblock %} - </div> - </section> + <section class="module span6 offset3"> + <div class="module-content"> + {% block form %} + <p>{{ _('Are you sure you want to delete showcase - {showcase_name}?').format(showcase_name=pkg.name) }}</p> + <p class="form-actions"> + <form action="{{ h.url_for('showcase_delete', id=c.pkg_dict.name) }}" method="post"> + <button class="btn" type="submit" name="cancel" >{{ _('Cancel') }}</button> + <button class="btn btn-primary" type="submit" name="delete" >{{ _('Confirm Delete') }}</button> + </form> + </p> + {% endblock %} + </div> + </section> {% endblock %} diff --git a/ckanext/showcase/templates/showcase/edit_base.html b/ckanext/showcase/templates/showcase/edit_base.html index fe4a4ebf..5d897678 100644 --- a/ckanext/showcase/templates/showcase/edit_base.html +++ b/ckanext/showcase/templates/showcase/edit_base.html @@ -1,12 +1,13 @@ {% extends 'page.html' %} -{% set pkg = c.pkg_dict %} +{% set pkg = pkg_dict or c.pkg_dict %} {% block subtitle %}{{ _('Showcases') }}{% endblock %} {% block styles %} {{ super() }} - {% resource "showcase/ckanext_showcase.css" %} + {% set _type = 'asset' if h.ckan_version().split('.')[1] | int >= 9 else 'resource' %} + {% snippet 'showcase/snippets/showcase_css_' ~ _type ~ '.html' %} {% endblock %} {% block breadcrumb_content_selected %}{% endblock %} @@ -14,11 +15,11 @@ {% block breadcrumb_content %} {% if pkg %} {% set showcase = pkg.title or pkg.name %} - <li>{% link_for _('Showcases'), controller='ckanext.showcase.controller:ShowcaseController', action='search' %}</li> - <li{{ self.breadcrumb_content_selected() }}>{% link_for showcase|truncate(30), controller='ckanext.showcase.controller:ShowcaseController', action='read', id=pkg.name %}</li> - <li class="active">{% link_for _('Edit'), controller='ckanext.showcase.controller:ShowcaseController', action='edit', id=pkg.name %}</li> + <li>{% link_for _('Showcases'), named_route='showcase_index' %}</li> + <li{{ self.breadcrumb_content_selected() }}>{% link_for showcase|truncate(30), named_route='showcase_read', id=pkg.name %}</li> + <li class="active">{% link_for _('Edit'), named_route='showcase_edit', id=pkg.name %}</li> {% else %} - <li>{% link_for _('Showcases'), controller='ckanext.showcase.controller:ShowcaseController', action='search' %}</li> + <li>{% link_for _('Showcases'), named_route='showcase_index' %}</li> <li class="active"><a href="">{{ _('Create Showcase') }}</a></li> {% endif %} {% endblock %} @@ -32,14 +33,14 @@ {% if self.content_action() | trim %} <div class="content_action"> {% block content_action %} - {% link_for _('View showcase'), 
controller='ckanext.showcase.controller:ShowcaseController', action='read', id=pkg.name, class_='btn', icon='eye-open' %} + {% link_for _('View showcase'), named_route='showcase_read', id=pkg.name, class_='btn', icon='eye-open' %} {% endblock %} </div> {% endif %} <ul class="nav nav-tabs"> {% block content_primary_nav %} - {{ h.build_nav_icon('ckanext_showcase_edit', _('Edit showcase'), id=pkg.name) }} - {{ h.build_nav_icon('ckanext_showcase_manage_datasets', _('Manage datasets'), id=pkg.name) }} + {{ h.build_nav_icon('showcase_edit', _('Edit showcase'), id=pkg.name) }} + {{ h.build_nav_icon('showcase_manage_datasets', _('Manage datasets'), id=pkg.name) }} {% endblock %} </ul> </header> diff --git a/ckanext/showcase/templates/showcase/manage_datasets.html b/ckanext/showcase/templates/showcase/manage_datasets.html index 10a35ef2..78eabb52 100644 --- a/ckanext/showcase/templates/showcase/manage_datasets.html +++ b/ckanext/showcase/templates/showcase/manage_datasets.html @@ -65,22 +65,22 @@ <h3 class="page-heading">{{ _('Datasets available to add to this showcase') }}</ <input type="checkbox" name="dataset_{{ package.id }}"> </td> <td class="context"> - <h3 class="dataset-heading"> - {{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='package', action='read', id=package.name)) }} - </h3> - {% if notes %} - <p>{{ notes|urlize }}</p> - {% endif %} + <h3 class="dataset-heading"> + {{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='dataset' if h.ckan_version().split('.')[1] | int >= 9 else 'package', action='read', id=package.name)) }} + </h3> + {% if notes %} + <p>{{ notes|urlize }}</p> + {% endif %} </td> </tr> {% endfor %} </tbody> {% if c.page.pager() %} - <tfoot> - <tr> - <td colspan="2" class="ckanext_showcase_pagination_footer">{{ c.page.pager(q=c.q) }}</td> - </tr> - </tfoot> + <tfoot> + <tr> + <td colspan="2" class="ckanext_showcase_pagination_footer">{{ c.page.pager(q=c.q) }}</td> + </tr> + </tfoot> {% endif %} </table> </form> @@ -92,39 +92,39 @@ <h3 class="dataset-heading"> </section> <section class="span6"> - <div class="module-content"> - <h3 class="page-heading">{{ _('Datasets in this showcase') }}</h3> - {% if c.showcase_pkgs %} - <form method="POST" data-module="basic-form"> - <table class="table table-bordered table-header table-hover table-bulk-edit table-edit-hover" data-module="table-selectable-rows"> - <col width="8"> - <col width="120"> - <thead> - <tr> - <th></th> - <th class="table-actions"> - <div class="btn-group"> - <button name="bulk_action.showcase_remove" value="remove" class="btn btn-danger" type="submit"> - <i class="fa fa-times icon-remove"></i> - {{ _('Remove from Showcase') }} - </button> - </div> - </th> - </tr> - </thead> - <tbody> - {% for package in c.showcase_pkgs %} - {% set truncate = truncate or 180 %} - {% set truncate_title = truncate_title or 80 %} - {% set title = package.title or package.name %} - {% set notes = h.markdown_extract(package.notes, extract_length=truncate) %} - <tr> - <td> - <input type="checkbox" name="dataset_{{ package.id }}"> - </td> - <td class="context"> - <h3 class="dataset-heading"> - {{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='package', action='read', id=package.name)) }} + <div class="module-content"> + <h3 class="page-heading">{{ _('Datasets in this showcase') }}</h3> + {% if c.showcase_pkgs %} + <form method="POST" data-module="basic-form"> + <table class="table table-bordered table-header table-hover table-bulk-edit table-edit-hover" 
data-module="table-selectable-rows"> + <col width="8"> + <col width="120"> + <thead> + <tr> + <th></th> + <th class="table-actions"> + <div class="btn-group"> + <button name="bulk_action.showcase_remove" value="remove" class="btn btn-danger" type="submit"> + <i class="fa fa-times icon-remove"></i> + {{ _('Remove from Showcase') }} + </button> + </div> + </th> + </tr> + </thead> + <tbody> + {% for package in c.showcase_pkgs %} + {% set truncate = truncate or 180 %} + {% set truncate_title = truncate_title or 80 %} + {% set title = package.title or package.name %} + {% set notes = h.markdown_extract(package.notes, extract_length=truncate) %} + <tr> + <td> + <input type="checkbox" name="dataset_{{ package.id }}"> + </td> + <td class="context"> + <h3 class="dataset-heading"> + {{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='dataset' if h.ckan_version().split('.')[1] | int >= 9 else 'package', action='read', id=package.name)) }} </h3> {% if notes %} <p>{{ notes|urlize }}</p> diff --git a/ckanext/showcase/templates/showcase/new_package_form.html b/ckanext/showcase/templates/showcase/new_package_form.html index dcfee8a0..4be49730 100644 --- a/ckanext/showcase/templates/showcase/new_package_form.html +++ b/ckanext/showcase/templates/showcase/new_package_form.html @@ -16,8 +16,8 @@ {% endblock %} {% block package_basic_fields_url %} - {% set prefix = h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='read', id='') %} - {% set domain = h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='read', id='', qualified=true) %} + {% set prefix = h.url_for('showcase_read', id='') %} + {% set domain = h.url_for('showcase_read', id='', qualified=true) %} {% set domain = domain|replace("http://", "")|replace("https://", "") %} {% set attrs = {'data-module': 'slug-preview-slug', 'data-module-prefix': domain, 'data-module-placeholder': '<showcase>'} %} @@ -27,8 +27,8 @@ {% block package_basic_fields_description %} {% set editor = h.get_wysiwyg_editor() %} {% if editor == 'ckeditor' %} - {% resource 'showcase/dist/ckeditor.js' %} - {% resource 'showcase/js/showcase-ckeditor.js' %} + {% set _type = 'asset' if h.ckan_version().split('.')[1] | int >= 9 else 'resource' %} + {% snippet 'showcase/snippets/ckeditor_' ~ _type ~ '.html' %} {% set ckeditor_attrs = { 'data-module': 'showcase-ckeditor', @@ -64,11 +64,11 @@ {% endblock %} {% block form_actions %} - <div class="form-actions"> - {% block delete_button %} - {% if form_style == 'edit' and h.check_access('ckanext_showcase_delete', {'id': data.id}) and not data.state == 'deleted' %} - {% set locale = h.dump_json({'content': _('Are you sure you want to delete this showcase?')}) %} - <a class="btn btn-danger pull-left" href="{% url_for controller='ckanext.showcase.controller:ShowcaseController', action='delete', id=data.id %}" data-module="confirm-action" data-module-i18n="{{ locale }}">{% block delete_button_text %}{{ _('Delete') }}{% endblock %}</a> + <div class="form-actions"> + {% block delete_button %} + {% if form_style == 'edit' and h.check_access('ckanext_showcase_delete', {'id': data.id}) and not data.state == 'deleted' %} + {% set locale = h.dump_json({'content': _('Are you sure you want to delete this showcase?')}) %} + <a class="btn btn-danger pull-left" href="{{ h.url_for('showcase_delete', id=data.id) }}" data-module="confirm-action" data-module-i18n="{{ locale }}">{% block delete_button_text %}{{ _('Delete') }}{% endblock %}</a> {% endif %} {% endblock %} {% block save_button %} 
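The templates above pick between webassets ({% asset %}) and Fanstatic ({% resource %}) snippets by parsing the minor version out of h.ckan_version(). The same branch sketched on the Python side with ckantoolkit's check_ckan_version helper, which this patch already uses in plugin/__init__.py (the use_webassets name is an assumption, not something the patch defines):

import ckantoolkit as tk

def use_webassets():
    # CKAN 2.9 replaced Fanstatic resources with webassets, so templates
    # should emit {% asset %} tags from that version onwards and fall back
    # to {% resource %} on older releases.
    return tk.check_ckan_version(u'2.9')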
diff --git a/ckanext/showcase/templates/showcase/read.html b/ckanext/showcase/templates/showcase/read.html index e2df3144..e12dd712 100644 --- a/ckanext/showcase/templates/showcase/read.html +++ b/ckanext/showcase/templates/showcase/read.html @@ -1,6 +1,6 @@ {% extends "page.html" %} -{% set pkg = c.pkg_dict %} +{% set pkg = pkg_dict or c.pkg_dict %} {% set name = pkg.title or pkg.name %} {% set editor = h.get_wysiwyg_editor() %} @@ -8,34 +8,35 @@ {% block styles %} {{ super() }} - {% resource "showcase/ckanext_showcase.css" %} + {% set _type = 'asset' if h.ckan_version().split('.')[1] | int >= 9 else 'resource' %} + {% snippet 'showcase/snippets/showcase_css_' ~ _type ~ '.html' %} {% if editor == 'ckeditor' %} - {% resource "showcase/ckeditor-content-style.css" %} + {% snippet 'showcase/snippets/ckeditor_content_css_' ~ _type ~ '.html' %} {% endif %} {% endblock %} {% block links -%} - {{ super() }} - <link rel="alternate" type="application/rdf+xml" href="{{ h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='read', id=pkg.id, format='rdf', qualified=True) }}"/> + {{ super() }} + <link rel="alternate" type="application/rdf+xml" href="{{ h.url_for('showcase_read', id=pkg.id, format='rdf', qualified=True) }}"/> {% endblock -%} {% block head_extras -%} - {{ super() }} - {% set description = h.markdown_extract(pkg.notes, extract_length=200)|forceescape %} - <meta property="og:title" content="{{ h.dataset_display_name(pkg) }} - {{ g.site_title }}"> - <meta property="og:description" content="{{ description }}"> - <meta property="og:url" content="{{ h.full_current_url() }}"> - {% if pkg.image_display_url %} - <meta property="og:image" content="{{ pkg.image_display_url }}"> - {% endif %} + {{ super() }} + {% set description = h.markdown_extract(pkg.notes, extract_length=200)|forceescape %} + <meta property="og:title" content="{{ h.dataset_display_name(pkg) }} - {{ g.site_title }}"> + <meta property="og:description" content="{{ description }}"> + <meta property="og:url" content="{{ h.full_current_url() }}"> + {% if pkg.image_display_url %} + <meta property="og:image" content="{{ pkg.image_display_url }}"> + {% endif %} {% endblock -%} {% block breadcrumb_content_selected %} class="active"{% endblock %} {% block breadcrumb_content %} {% set showcase = pkg.title or pkg.name %} - <li>{{ h.nav_link(_('Showcases'), controller='ckanext.showcase.controller:ShowcaseController', action='search', highlight_actions = 'new index') }}</li> - <li{{ self.breadcrumb_content_selected() }}>{% link_for showcase|truncate(30), controller='ckanext.showcase.controller:ShowcaseController', action='read', id=pkg.name %}</li> + <li>{{ h.nav_link(_('Showcases'), named_route='showcase_index', highlight_actions = 'new index') }}</li> + <li{{ self.breadcrumb_content_selected() }}>{% link_for showcase|truncate(30), named_route='showcase_read', id=pkg.name %}</li> {% endblock %} {% block page_header %} @@ -45,30 +46,30 @@ {% endblock %} {% block primary_content_inner %} - {% if h.check_access('ckanext_showcase_update', {'id':pkg.id }) %} - <div class="actions"> - {% link_for _('Manage'), controller='ckanext.showcase.controller:ShowcaseController', action='edit', id=pkg.name, class_='btn', icon='wrench' %} - </div> - {% endif %} - {% block package_description %} - {% if pkg.private %} - <span class="dataset-private label label-inverse pull-right"> - <i class="fa fa-lock icon-lock"></i> - {{ _('Private') }} - </span> + {% if h.check_access('ckanext_showcase_update', {'id':pkg.id }) %} + <div 
class="actions"> + {% link_for _('Manage'), named_route='showcase_edit', id=pkg.name, class_='btn btn-default', icon='wrench' %} + </div> {% endif %} - <h1> - {% block page_heading %} - {{ name }} - {% if pkg.state.startswith('draft') %} - [{{ _('Draft') }}] + {% block package_description %} + {% if pkg.private %} + <span class="dataset-private label label-inverse pull-right"> + <i class="fa fa-lock icon-lock"></i> + {{ _('Private') }} + </span> + {% endif %} + <h1> + {% block page_heading %} + {{ name }} + {% if pkg.state.startswith('draft') %} + [{{ _('Draft') }}] + {% endif %} + {% endblock %} + </h1> + + {% if pkg.image_display_url %} + <p class="ckanext-showcase-image-container"><img src="{{ pkg.image_display_url }}" alt="{{ name }}" class="media-image ckanext-showcase-image img-responsive"></p> {% endif %} - {% endblock %} - </h1> - - {% if pkg.image_display_url %} - <p class="ckanext-showcase-image-container"><img src="{{ pkg.image_display_url }}" alt="{{ name }}" class="media-image ckanext-showcase-image img-responsive"></p> - {% endif %} {% block package_notes %} {% if pkg.showcase_notes_formatted and editor == 'ckeditor' %} @@ -82,29 +83,29 @@ <h1> {% endif %} {% endblock %} - {% if pkg.url %} - <p><a class="btn btn-primary ckanext-showcase-launch" href="{{ pkg.url }}" target="_blank"><i class="fa fa-external-link icon-external-link"></i> {{ _('Launch website') }}</a></p> - {% endif %} + {% if pkg.url %} + <p><a class="btn btn-primary ckanext-showcase-launch" href="{{ pkg.url }}" target="_blank"><i class="fa fa-external-link icon-external-link"></i> {{ _('Launch website') }}</a></p> + {% endif %} - {% endblock %} + {% endblock %} - {% block package_tags %} - {% snippet "showcase/snippets/tags.html", tags=pkg.tags %} - {% endblock %} + {% block package_tags %} + {% snippet "showcase/snippets/tags.html", tags=pkg.tags %} + {% endblock %} - {% block package_search_results_list %} - {% endblock %} + {% block package_search_results_list %} + {% endblock %} {% endblock %} {% block secondary_content %} - {% block secondary_help_content %}{% endblock %} + {% block secondary_help_content %}{% endblock %} - {% block package_info %} - {% snippet 'showcase/snippets/showcase_info.html', pkg=pkg, showcase_pkgs=c.showcase_pkgs %} - {% endblock %} + {% block package_info %} + {% snippet 'showcase/snippets/showcase_info.html', pkg=pkg, showcase_pkgs=c.showcase_pkgs %} + {% endblock %} - {% block package_social %} - {% snippet "snippets/social.html" %} - {% endblock %} + {% block package_social %} + {% snippet "snippets/social.html" %} + {% endblock %} {% endblock %} diff --git a/ckanext/showcase/templates/showcase/search.html b/ckanext/showcase/templates/showcase/search.html index 4d0d35ab..488f9e97 100644 --- a/ckanext/showcase/templates/showcase/search.html +++ b/ckanext/showcase/templates/showcase/search.html @@ -4,13 +4,13 @@ {% block subtitle %}{{ _("Showcases") }}{% endblock %} {% block breadcrumb_content %} - <li class="active">{{ h.nav_link(_('Showcases'), controller='ckanext.showcase.controller:ShowcaseController', action='search', highlight_actions = 'new index') }}</li> + <li class="active">{{ h.nav_link(_('Showcases'), named_route='showcase_index', highlight_actions = 'new index') }}</li> {% endblock %} {% block page_primary_action %} {% if h.check_access('ckanext_showcase_create') %} <div class="page_primary_action"> - {% link_for _('Add Showcase'), controller='ckanext.showcase.controller:ShowcaseController', action='new', class_='btn btn-primary', icon='plus-sign-alt' %} + {% 
link_for _('Add Showcase'), named_route='showcase_new', class_='btn btn-primary', icon='plus-square' %} </div> {% endif %} {% endblock %} diff --git a/ckanext/showcase/templates/showcase/snippets/ckeditor_asset.html b/ckanext/showcase/templates/showcase/snippets/ckeditor_asset.html new file mode 100644 index 00000000..732e7e92 --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/ckeditor_asset.html @@ -0,0 +1,1 @@ +{% asset 'showcase/ckeditor' %} diff --git a/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_asset.html b/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_asset.html new file mode 100644 index 00000000..d3bc8c9c --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_asset.html @@ -0,0 +1,1 @@ +{% asset "showcase/ckeditor-content-css" %} \ No newline at end of file diff --git a/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_resource.html b/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_resource.html new file mode 100644 index 00000000..57ef2812 --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/ckeditor_content_css_resource.html @@ -0,0 +1,1 @@ +{% resource "showcase/ckeditor-content-style.css" %} \ No newline at end of file diff --git a/ckanext/showcase/templates/showcase/snippets/ckeditor_resource.html b/ckanext/showcase/templates/showcase/snippets/ckeditor_resource.html new file mode 100644 index 00000000..8a65271b --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/ckeditor_resource.html @@ -0,0 +1,1 @@ +{% resource 'showcase/ckeditor' %} diff --git a/ckanext/showcase/templates/showcase/snippets/helper.html b/ckanext/showcase/templates/showcase/snippets/helper.html index fdc1894f..0b2a7d19 100644 --- a/ckanext/showcase/templates/showcase/snippets/helper.html +++ b/ckanext/showcase/templates/showcase/snippets/helper.html @@ -9,7 +9,7 @@ <h2 class="module-heading"> </p> {% if h.check_access('ckanext_showcase_admin_add') %} <p> - {% trans url=h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='manage_showcase_admins') %}Sysadmins can manage Showcase Admins from the <a href='{{ url }}'>Showcase configuration page</a>.{% endtrans %} + {% trans url=h.url_for('showcase_admins') %}Sysadmins can manage Showcase Admins from the <a href='{{ url }}'>Showcase configuration page</a>.{% endtrans %} </p> {% endif %} </div> diff --git a/ckanext/showcase/templates/showcase/snippets/showcase_css_asset.html b/ckanext/showcase/templates/showcase/snippets/showcase_css_asset.html new file mode 100644 index 00000000..a7c6dabe --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/showcase_css_asset.html @@ -0,0 +1,1 @@ +{% asset "showcase/ckanext-showcase-css" %} \ No newline at end of file diff --git a/ckanext/showcase/templates/showcase/snippets/showcase_css_resource.html b/ckanext/showcase/templates/showcase/snippets/showcase_css_resource.html new file mode 100644 index 00000000..dee33209 --- /dev/null +++ b/ckanext/showcase/templates/showcase/snippets/showcase_css_resource.html @@ -0,0 +1,1 @@ +{% resource "showcase/ckanext_showcase.css" %} \ No newline at end of file diff --git a/ckanext/showcase/templates/showcase/snippets/showcase_info.html b/ckanext/showcase/templates/showcase/snippets/showcase_info.html index 957ae82e..a684a934 100644 --- a/ckanext/showcase/templates/showcase/snippets/showcase_info.html +++ b/ckanext/showcase/templates/showcase/snippets/showcase_info.html @@ -5,47 +5,47 @@ Example: - {% snippet 
"package/snippets/info.html", pkg=pkg %} +{% snippet "package/snippets/info.html", pkg=pkg %} #} {% block package_info %} - {% if pkg %} - <section class="module module-narrow"> - <div class="module context-info"> - <div class="module-content"> - {% block package_info_inner %} - {% block heading %} - <h1 class="heading">{{ pkg.title or pkg.name }}</h1> - {% endblock %} - {% if pkg.author %} - <span class="smallest">{{_('Submitted by')}}</span> - <p>{{ pkg.author }}</p> - {% endif %} - {% if pkg.url %} - <div class="info"> - <a class="btn btn-primary" href="{{ pkg.url }}" target="_blank"> - <i class="fa fa-external-link icon-external-link"></i> {{ _('Launch website') }} - </a> - </div> - {% endif %} - {% endblock %} - </div> - </div> - </section> + {% if pkg %} + <section class="module module-narrow"> + <div class="module context-info"> + <div class="module-content"> + {% block package_info_inner %} + {% block heading %} + <h1 class="heading">{{ pkg.title or pkg.name }}</h1> + {% endblock %} + {% if pkg.author %} + <span class="smallest">{{_('Submitted by')}}</span> + <p>{{ pkg.author }}</p> + {% endif %} + {% if pkg.url %} + <div class="info"> + <a class="btn btn-primary" href="{{ pkg.url }}" target="_blank"> + <i class="fa fa-external-link icon-external-link"></i> {{ _('Launch website') }} + </a> + </div> + {% endif %} + {% endblock %} + </div> + </div> + </section> - <section class="module module-narrow"> - <h3 class="module-heading"><i class="fa fa-sitemap icon-medium icon-sitemap"></i> {{ _('Datasets in Showcase') }}</h2> - {% if showcase_pkgs %} - <ul class="nav nav-simple"> - {% for package in showcase_pkgs %} - {% set truncate_title = truncate_title or 80 %} - {% set title = package.title or package.name %} - <li class="nav-item">{{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='package', action='read', id=package.name)) }}</li> - {% endfor %} - </ul> - {% else %} - <p class="module-content empty">{{_('There are no Datasets in this Showcase')}}</p> - {% endif %} - </section> - {% endif %} + <section class="module module-narrow"> + <h3 class="module-heading"><i class="fa fa-sitemap icon-medium icon-sitemap"></i> {{ _('Datasets in Showcase') }}</h2> + {% if showcase_pkgs %} + <ul class="nav nav-simple"> + {% for package in showcase_pkgs %} + {% set truncate_title = truncate_title or 80 %} + {% set title = package.title or package.name %} + <li class="nav-item">{{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='dataset' if h.ckan_version().split('.')[1] | int >= 9 else 'package', action='read', id=package.name)) }}</li> + {% endfor %} + </ul> + {% else %} + <p class="module-content empty">{{_('There are no Datasets in this Showcase')}}</p> + {% endif %} + </section> + {% endif %} {% endblock %} diff --git a/ckanext/showcase/templates/showcase/snippets/showcase_item.html b/ckanext/showcase/templates/showcase/snippets/showcase_item.html index 0250f9e1..10c4344b 100644 --- a/ckanext/showcase/templates/showcase/snippets/showcase_item.html +++ b/ckanext/showcase/templates/showcase/snippets/showcase_item.html @@ -21,7 +21,7 @@ <img src="{{ package.image_display_url or h.url_for_static('/base/images/placeholder-group.png') }}" alt="{{ package.name }}" class="media-image img-responsive"> {% endblock %} {% block title %} - <h3 class="media-heading">{{ h.link_to(h.truncate(title, truncate_title), h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='read', id=package.name)) }}</h3> + <h3 class="media-heading">{{ 
h.link_to(h.truncate(title, truncate_title), h.url_for('showcase_read', id=package.name)) }}</h3> {% endblock %} {% block notes %} {% if notes %} @@ -38,7 +38,7 @@ <h3 class="media-heading">{{ h.link_to(h.truncate(title, truncate_title), h.url_ {% endif %} {% endblock %} {% block link %} - <a href="{{ h.url_for(controller='ckanext.showcase.controller:ShowcaseController', action='read', id=package.name) }}" title="{{ _('View {showcase_title}').format(showcase_title=package.title) }}" class="media-view"> + <a href="{{ h.url_for('showcase_read', id=package.name) }}" title="{{ _('View {showcase_title}').format(showcase_title=package.title) }}" class="media-view"> <span>{{ _('View {showcase_title}').format(showcase_title=package.title) }}</span> </a> {% endblock %} diff --git a/ckanext/showcase/templates/showcase/snippets/tags.html b/ckanext/showcase/templates/showcase/snippets/tags.html index 7ca4d57b..41500485 100644 --- a/ckanext/showcase/templates/showcase/snippets/tags.html +++ b/ckanext/showcase/templates/showcase/snippets/tags.html @@ -5,7 +5,7 @@ <ul class="{{ _class }}"> {% for tag in tags %} <li> - <a class="{% block tag_list_item_class %}tag{% endblock %}" href="{% url_for controller='ckanext.showcase.controller:ShowcaseController', action='search', tags=tag.name %}">{{ h.truncate(tag.display_name, 22) }}</a> + <a class="{% block tag_list_item_class %}tag{% endblock %}" href="{{ h.url_for('showcase_index', tags=tag.name) }}">{{ h.truncate(tag.display_name, 22) }}</a> </li> {% endfor %} </ul> diff --git a/ckanext/showcase/utils.py b/ckanext/showcase/utils.py new file mode 100644 index 00000000..e8e12c43 --- /dev/null +++ b/ckanext/showcase/utils.py @@ -0,0 +1,729 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function +import json +import logging + +from collections import OrderedDict +import six +from six.moves.urllib.parse import urlencode + +import ckan.plugins as p +from ckan import model +from ckan.lib.munge import munge_title_to_name, substitute_ascii_equivalents +from ckan.logic import get_action +import ckan.lib.helpers as h +import ckantoolkit as tk +from ckanext.showcase.model import ShowcasePackageAssociation + +_ = tk._ +abort = tk.abort +c = tk.c + +log = logging.getLogger(__name__) +DATASET_TYPE_NAME = 'showcase' + + +def check_edit_view_auth(id): + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author, + 'auth_user_obj': c.userobj, + 'save': 'save' in tk.request.params, + 'moderated': tk.config.get('moderated'), + 'pending': True + } + + try: + tk.check_access('ckanext_showcase_update', context) + except tk.NotAuthorized: + return tk.abort( + 401, + _('User not authorized to edit {showcase_id}').format( + showcase_id=id)) + + +def check_new_view_auth(): + context = { + 'model': model, + 'session': model.Session, + 'user': tk.c.user or tk.c.author, + 'auth_user_obj': tk.c.userobj, + 'save': 'save' in tk.request.params + } + + # Check access here, then continue with PackageController.new() + # PackageController.new will also check access for package_create. + # This is okay for now, while only sysadmins can create Showcases, but + # may not work if we allow other users to create Showcases, who don't + # have access to create dataset package types. Same for edit below. 
+ try: + tk.check_access('ckanext_showcase_create', context) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to create a package')) + + +def read_view(id): + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author, + 'for_view': True, + 'auth_user_obj': c.userobj + } + data_dict = {'id': id} + + # check if showcase exists + try: + c.pkg_dict = tk.get_action('package_show')(context, data_dict) + except tk.ObjectNotFound: + return tk.abort(404, _('Showcase not found')) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to read showcase')) + + # get showcase packages + c.showcase_pkgs = tk.get_action('ckanext_showcase_package_list')( + context, { + 'showcase_id': c.pkg_dict['id'] + }) + + package_type = DATASET_TYPE_NAME + return tk.render('showcase/read.html', + extra_vars={'dataset_type': package_type}) + + +def manage_datasets_view(id): + + context = { + 'model': model, + 'session': model.Session, + 'user': tk.c.user or tk.c.author + } + data_dict = {'id': id} + + try: + tk.check_access('ckanext_showcase_update', context) + except tk.NotAuthorized: + return tk.abort( + 401, + _('User not authorized to edit {showcase_id}').format( + showcase_id=id)) + + # check if showcase exists + try: + tk.c.pkg_dict = tk.get_action('package_show')(context, data_dict) + except tk.ObjectNotFound: + return tk.abort(404, _('Showcase not found')) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to read showcase')) + + # Are we removing a showcase/dataset association? + form_data = tk.request.form if tk.check_ckan_version( + '2.9') else tk.request.params + + if (tk.request.method == 'POST' + and 'bulk_action.showcase_remove' in form_data): + # Find the datasets to perform the action on, they are prefixed by + # dataset_ in the form data + dataset_ids = [] + for param in form_data: + if param.startswith('dataset_'): + dataset_ids.append(param[8:]) + if dataset_ids: + for dataset_id in dataset_ids: + tk.get_action('ckanext_showcase_package_association_delete')( + context, { + 'showcase_id': tk.c.pkg_dict['id'], + 'package_id': dataset_id + }) + h.flash_success( + tk.ungettext( + "The dataset has been removed from the showcase.", + "The datasets have been removed from the showcase.", + len(dataset_ids))) + url = h.url_for('showcase_manage_datasets', id=id) + return h.redirect_to(url) + + # Are we creating a showcase/dataset association? 
+ elif (tk.request.method == 'POST' + and 'bulk_action.showcase_add' in form_data): + # Find the datasets to perform the action on, they are prefixed by + # dataset_ in the form data + dataset_ids = [] + for param in form_data: + if param.startswith('dataset_'): + dataset_ids.append(param[8:]) + if dataset_ids: + successful_adds = [] + for dataset_id in dataset_ids: + try: + tk.get_action( + 'ckanext_showcase_package_association_create')( + context, { + 'showcase_id': tk.c.pkg_dict['id'], + 'package_id': dataset_id + }) + except tk.ValidationError as e: + h.flash_notice(e.error_summary) + else: + successful_adds.append(dataset_id) + if successful_adds: + h.flash_success( + tk.ungettext( + "The dataset has been added to the showcase.", + "The datasets have been added to the showcase.", + len(successful_adds))) + url = h.url_for('showcase_manage_datasets', id=id) + return h.redirect_to(url) + + _add_dataset_search(tk.c.pkg_dict['id'], tk.c.pkg_dict['name']) + + # get showcase packages + tk.c.showcase_pkgs = tk.get_action('ckanext_showcase_package_list')( + context, { + 'showcase_id': tk.c.pkg_dict['id'] + }) + + return tk.render('showcase/manage_datasets.html') + + +def migrate(allow_duplicates): + related_items = get_action('related_list')(data_dict={}) + + # preflight: + # related items must have unique titles before migration + related_titles = [i['title'] for i in related_items] + # make a list of duplicate titles + duplicate_titles = _find_duplicates(related_titles) + if duplicate_titles and allow_duplicates == False: + print( + """All Related Items must have unique titles before migration. The following +Related Item titles are used more than once and need to be corrected before +migration can continue. Please correct and try again:""") + for i in duplicate_titles: + print(i) + return + + for related in related_items: + existing_showcase = get_action('package_search')(data_dict={ + 'fq': + '+dataset_type:showcase original_related_item_id:{0}'.format( + related['id']) + }) + normalized_title = substitute_ascii_equivalents(related['title']) + if existing_showcase['count'] > 0: + print('Showcase for Related Item "{0}" already exists.'.format( + normalized_title)) + else: + showcase_title = _gen_new_title(related.get('title'), + related['id']) + data_dict = { + 'original_related_item_id': related.get('id'), + 'title': showcase_title, + 'name': munge_title_to_name(showcase_title), + 'notes': related.get('description'), + 'image_url': related.get('image_url'), + 'url': related.get('url'), + 'tags': [{ + "name": related.get('type').lower() + }] + } + # make the showcase + try: + new_showcase = get_action('ckanext_showcase_create')( + data_dict=data_dict) + except Exception as e: + print('There was a problem migrating "{0}": {1}'.format( + normalized_title, e)) + else: + print('Created Showcase from the Related Item "{0}"'.format( + normalized_title)) + + # make the showcase_package_association, if needed + try: + related_pkg_id = _get_related_dataset(related['id']) + if related_pkg_id: + get_action( + 'ckanext_showcase_package_association_create')( + data_dict={ + 'showcase_id': new_showcase['id'], + 'package_id': related_pkg_id + }) + except Exception as e: + print( + 'There was a problem creating the showcase_package_association for "{0}": {1}' + .format(normalized_title, e)) + + +def _get_related_dataset(related_id): + '''Get the id of a package from related_dataset, if one exists.''' + related_dataset = model.Session.query( + model.RelatedDataset).filter_by(related_id=related_id).first() 
+ if related_dataset: + return related_dataset.dataset_id + + +def _find_duplicates(lst): + '''From a list, return a set of duplicates. + + >>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 5]) + [] + + >>> MigrationCommand('cmd')._find_duplicates([1, 2, 3, 4, 3, 1, 1]) + [1, 3] + + >>> MigrationCommand('cmd')._find_duplicates(['one', 'two', 'three', 'four', 'two', 'three']) + ['two', 'three'] + ''' + return list(set(x for x in lst if lst.count(x) >= 2)) + + +def _gen_new_title(title, related_id): + name = munge_title_to_name(title) + pkg_obj = model.Session.query(model.Package).filter_by(name=name).first() + if pkg_obj: + title.replace('duplicate_', '') + return 'duplicate_' + title + '_' + related_id + else: + return title + + +def _add_dataset_search(showcase_id, showcase_name): + ''' + Search logic for discovering datasets to add to a showcase. + ''' + + from ckan.lib.search import SearchError + + package_type = 'dataset' + + # unicode format (decoded from utf8) + q = c.q = tk.request.params.get('q', u'') + c.query_error = False + page = h.get_page_number(tk.request.params) + + limit = int(tk.config.get('ckan.datasets_per_page', 20)) + + # most search operations should reset the page counter: + params_nopage = [(k, v) for k, v in tk.request.params.items() + if k != 'page'] + + def drill_down_url(alternative_url=None, **by): + return h.add_url_param(alternative_url=alternative_url, + controller='dataset' + if tk.check_ckan_version('2.9') else 'package', + action='search', + new_params=by) + + c.drill_down_url = drill_down_url + + def remove_field(key, value=None, replace=None): + return h.remove_url_param(key, + value=value, + replace=replace, + controller='dataset' if + tk.check_ckan_version('2.9') else 'package', + action='search') + + c.remove_field = remove_field + + sort_by = tk.request.params.get('sort', None) + params_nosort = [(k, v) for k, v in params_nopage if k != 'sort'] + + def _sort_by(fields): + """ + Sort by the given list of fields. + + Each entry in the list is a 2-tuple: (fieldname, sort_order) + + eg - [('metadata_modified', 'desc'), ('name', 'asc')] + + If fields is empty, then the default ordering is used. 
+ """ + params = params_nosort[:] + + if fields: + sort_string = ', '.join('%s %s' % f for f in fields) + params.append(('sort', sort_string)) + return _search_url(params, showcase_name) + + c.sort_by = _sort_by + if sort_by is None: + c.sort_by_fields = [] + else: + c.sort_by_fields = [field.split()[0] for field in sort_by.split(',')] + + def pager_url(q=None, page=None): + params = list(params_nopage) + params.append(('page', page)) + return _search_url(params, showcase_name) + + c.search_url_params = urlencode(_encode_params(params_nopage)) + + try: + c.fields = [] + # c.fields_grouped will contain a dict of params containing + # a list of values eg {'tags':['tag1', 'tag2']} + c.fields_grouped = {} + search_extras = {} + fq = '' + for (param, value) in tk.request.params.items(): + if param not in ['q', 'page', 'sort'] \ + and len(value) and not param.startswith('_'): + if not param.startswith('ext_'): + c.fields.append((param, value)) + fq += ' %s:"%s"' % (param, value) + if param not in c.fields_grouped: + c.fields_grouped[param] = [value] + else: + c.fields_grouped[param].append(value) + else: + search_extras[param] = value + + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author, + 'for_view': True, + 'auth_user_obj': c.userobj + } + + if package_type and package_type != 'dataset': + # Only show datasets of this particular type + fq += ' +dataset_type:{type}'.format(type=package_type) + else: + # Unless changed via config options, don't show non standard + # dataset types on the default search page + if not tk.asbool( + tk.config.get('ckan.search.show_all_types', 'False')): + fq += ' +dataset_type:dataset' + + # Only search for packages that aren't already associated with the + # Showcase + associated_package_ids = ShowcasePackageAssociation.get_package_ids_for_showcase( + showcase_id) + # flatten resulting list to space separated string + if associated_package_ids: + associated_package_ids_str = \ + ' OR '.join([id[0] for id in associated_package_ids]) + fq += ' !id:({0})'.format(associated_package_ids_str) + + facets = OrderedDict() + + default_facet_titles = { + 'organization': _('Organizations'), + 'groups': _('Groups'), + 'tags': _('Tags'), + 'res_format': _('Formats'), + 'license_id': _('Licenses'), + } + + # for CKAN-Versions that do not provide the facets-method from + # helper-context, import facets from ckan.common + if hasattr(h, 'facets'): + current_facets = h.facets() + else: + from ckan.common import g + current_facets = g.facets + + for facet in current_facets: + if facet in default_facet_titles: + facets[facet] = default_facet_titles[facet] + else: + facets[facet] = facet + + # Facet titles + for plugin in p.PluginImplementations(p.IFacets): + facets = plugin.dataset_facets(facets, package_type) + + c.facet_titles = facets + + data_dict = { + 'q': q, + 'fq': fq.strip(), + 'facet.field': list(facets.keys()), + 'rows': limit, + 'start': (page - 1) * limit, + 'sort': sort_by, + 'extras': search_extras + } + + query = tk.get_action('package_search')(context, data_dict) + c.sort_by_selected = query['sort'] + + c.page = h.Page(collection=query['results'], + page=page, + url=pager_url, + item_count=query['count'], + items_per_page=limit) + c.facets = query['facets'] + c.search_facets = query['search_facets'] + c.page.items = query['results'] + except SearchError as se: + log.error('Dataset search error: %r', se.args) + c.query_error = True + c.facets = {} + c.search_facets = {} + c.page = h.Page(collection=[]) + c.search_facets_limits = {} + 
for facet in c.search_facets.keys(): + try: + limit = int( + tk.request.params.get( + '_%s_limit' % facet, + int(tk.config.get('search.facets.default', 10)))) + except tk.ValueError: + abort( + 400, + _("Parameter '{parameter_name}' is not an integer").format( + parameter_name='_%s_limit' % facet)) + c.search_facets_limits[facet] = limit + + +def _search_url(params, name): + url = h.url_for('showcase_manage_datasets', id=name) + return url_with_params(url, params) + + +def _encode_params(params): + return [(k, six.ensure_str(six.text_type(v))) for k, v in params] + + +def url_with_params(url, params): + params = _encode_params(params) + return url + u'?' + urlencode(params) + + +def delete_view(id): + if 'cancel' in tk.request.params: + tk.redirect_to('showcase_edit', id=id) + + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author, + 'auth_user_obj': c.userobj + } + + try: + tk.check_access('ckanext_showcase_delete', context, {'id': id}) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to delete showcase')) + + try: + if tk.request.method == 'POST': + tk.get_action('ckanext_showcase_delete')(context, {'id': id}) + h.flash_notice(_('Showcase has been deleted.')) + return tk.redirect_to('showcase_index') + c.pkg_dict = tk.get_action('package_show')(context, {'id': id}) + except tk.NotAuthorized: + tk.abort(401, _('Unauthorized to delete showcase')) + except tk.ObjectNotFound: + tk.abort(404, _('Showcase not found')) + return tk.render('showcase/confirm_delete.html', + extra_vars={'dataset_type': DATASET_TYPE_NAME}) + + +def dataset_showcase_list(id): + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author, + 'for_view': True, + 'auth_user_obj': c.userobj + } + data_dict = {'id': id} + + try: + tk.check_access('package_show', context, data_dict) + except tk.ObjectNotFound: + return tk.abort(404, _('Dataset not found')) + except tk.NotAuthorized: + return tk.abort(401, _('Not authorized to see this page')) + + try: + c.pkg_dict = tk.get_action('package_show')(context, data_dict) + c.showcase_list = tk.get_action('ckanext_package_showcase_list')( + context, { + 'package_id': c.pkg_dict['id'] + }) + except tk.ObjectNotFound: + return tk.abort(404, _('Dataset not found')) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to read package')) + + if tk.request.method == 'POST': + # Are we adding the dataset to a showcase? + form_data = tk.request.form if tk.check_ckan_version( + '2.9') else tk.request.params + + new_showcase = form_data.get('showcase_added') + if new_showcase: + data_dict = { + "showcase_id": new_showcase, + "package_id": c.pkg_dict['id'] + } + try: + tk.get_action('ckanext_showcase_package_association_create')( + context, data_dict) + except tk.ObjectNotFound: + return tk.abort(404, _('Showcase not found')) + else: + h.flash_success( + _("The dataset has been added to the showcase.")) + + # Are we removing a dataset from a showcase? 
+ showcase_to_remove = form_data.get('remove_showcase_id') + if showcase_to_remove: + data_dict = { + "showcase_id": showcase_to_remove, + "package_id": c.pkg_dict['id'] + } + try: + tk.get_action('ckanext_showcase_package_association_delete')( + context, data_dict) + except tk.ObjectNotFound: + return tk.abort(404, _('Showcase not found')) + else: + h.flash_success( + _("The dataset has been removed from the showcase.")) + return h.redirect_to( + h.url_for('showcase_dataset_showcase_list', id=c.pkg_dict['name'])) + + pkg_showcase_ids = [showcase['id'] for showcase in c.showcase_list] + site_showcases = tk.get_action('ckanext_showcase_list')(context, {}) + + c.showcase_dropdown = [[showcase['id'], showcase['title']] + for showcase in site_showcases + if showcase['id'] not in pkg_showcase_ids] + + return tk.render("package/dataset_showcase_list.html", + extra_vars={'pkg_dict': c.pkg_dict}) + + +def manage_showcase_admins(): + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author + } + + try: + tk.check_access('sysadmin', context, {}) + except tk.NotAuthorized: + return tk.abort(401, _('User not authorized to view page')) + + form_data = tk.request.form if tk.check_ckan_version( + '2.9') else tk.request.params + + # We're trying to add a user to the showcase admins list. + if tk.request.method == 'POST' and form_data['username']: + username = form_data['username'] + try: + tk.get_action('ckanext_showcase_admin_add')(data_dict={ + 'username': username + }) + except tk.NotAuthorized: + abort(401, _('Unauthorized to perform that action')) + except tk.ObjectNotFound: + h.flash_error( + _("User '{user_name}' not found.").format(user_name=username)) + except tk.ValidationError as e: + h.flash_notice(e.error_summary) + else: + h.flash_success(_("The user is now a Showcase Admin")) + + return tk.redirect_to(h.url_for('showcase_admins')) + + c.showcase_admins = tk.get_action('ckanext_showcase_admin_list')() + + return tk.render('admin/manage_showcase_admins.html') + + +def remove_showcase_admin(): + ''' + Remove a user from the Showcase Admin list. + ''' + context = { + 'model': model, + 'session': model.Session, + 'user': c.user or c.author + } + + try: + tk.check_access('sysadmin', context, {}) + except tk.NotAuthorized: + return tk.abort(401, _('User not authorized to view page')) + + form_data = tk.request.form if tk.check_ckan_version( + '2.9') else tk.request.params + + if 'cancel' in form_data: + return tk.redirect_to('showcase_admins') + + user_id = tk.request.params['user'] + if tk.request.method == 'POST' and user_id: + user_id = tk.request.params['user'] + try: + tk.get_action('ckanext_showcase_admin_remove')(data_dict={ + 'username': user_id + }) + except tk.NotAuthorized: + return tk.abort(401, _('Unauthorized to perform that action')) + except tk.ObjectNotFound: + h.flash_error(_('The user is not a Showcase Admin')) + else: + h.flash_success(_('The user is no longer a Showcase Admin')) + + return tk.redirect_to(h.url_for('showcase_admins')) + + c.user_dict = tk.get_action('user_show')(data_dict={'id': user_id}) + c.user_id = user_id + return tk.render('admin/confirm_remove_showcase_admin.html') + +def markdown_to_html(): + ''' Migrates the notes of all showcases from markdown to html. + + When using CKEditor, notes on showcases are stored in html instead of + markdown, this command will migrate all nothes using CKAN's + render_markdown core helper. 
+ ''' + showcases = tk.get_action('ckanext_showcase_list')(data_dict={}) + + site_user = tk.get_action('get_site_user')({ + 'model': model, + 'ignore_auth': True}, + {} + ) + context = { + 'model': model, + 'session': model.Session, + 'ignore_auth': True, + 'user': site_user['name'], + } + + for showcase in showcases: + tk.get_action('package_patch')( + context, + { + 'id': showcase['id'], + 'notes': h.render_markdown(showcase['notes']) + } + ) + log.info('All notes were migrated successfully.') + + +def upload(): + if not tk.request.method == 'POST': + tk.abort(409, _('Only Posting is availiable')) + + try: + url = tk.get_action('ckanext_showcase_upload')( + None, + dict(tk.request.POST) + ) + except tk.NotAuthorized: + tk.abort(401, _('Unauthorized to upload file %s') % id) + + return json.dumps(url) \ No newline at end of file diff --git a/ckanext/showcase/views.py b/ckanext/showcase/views.py new file mode 100644 index 00000000..e613a9cd --- /dev/null +++ b/ckanext/showcase/views.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +from flask import Blueprint + +import ckantoolkit as tk + +import ckan.lib.helpers as h +import ckan.views.dataset as dataset + +import ckanext.showcase.utils as utils + +showcase = Blueprint(u'showcase_blueprint', __name__) + + +def index(): + return dataset.search(utils.DATASET_TYPE_NAME) + + +class CreateView(dataset.CreateView): + def get(self, data=None, errors=None, error_summary=None): + utils.check_new_view_auth() + return super(CreateView, self).get(utils.DATASET_TYPE_NAME, data, + errors, error_summary) + + def post(self): + + data_dict = dataset.clean_dict( + dataset.dict_fns.unflatten( + dataset.tuplize_dict(dataset.parse_params(tk.request.form)))) + data_dict.update( + dataset.clean_dict( + dataset.dict_fns.unflatten( + dataset.tuplize_dict(dataset.parse_params( + tk.request.files))))) + context = self._prepare() + data_dict['type'] = utils.DATASET_TYPE_NAME + context['message'] = data_dict.get('log_message', '') + + try: + pkg_dict = tk.get_action('ckanext_showcase_create')(context, + data_dict) + + except tk.ValidationError as e: + errors = e.error_dict + error_summary = e.error_summary + data_dict['state'] = 'none' + return self.get(data_dict, errors, error_summary) + + # redirect to manage datasets + url = h.url_for('showcase_blueprint.manage_datasets', + id=pkg_dict['name']) + return h.redirect_to(url) + + +def manage_datasets(id): + return utils.manage_datasets_view(id) + + +def delete(id): + return utils.delete_view(id) + + +def read(id): + return utils.read_view(id) + + +class EditView(dataset.EditView): + def get(self, id, data=None, errors=None, error_summary=None): + utils.check_new_view_auth() + return super(EditView, self).get(utils.DATASET_TYPE_NAME, id, data, + errors, error_summary) + + def post(self, id): + context = self._prepare(id) + utils.check_edit_view_auth(id) + + data_dict = dataset.clean_dict( + dataset.dict_fns.unflatten( + dataset.tuplize_dict(dataset.parse_params(tk.request.form)))) + data_dict.update( + dataset.clean_dict( + dataset.dict_fns.unflatten( + dataset.tuplize_dict(dataset.parse_params( + tk.request.files))))) + + data_dict['id'] = id + try: + pkg = tk.get_action('ckanext_showcase_update')(context, data_dict) + except tk.ValidationError as e: + errors = e.error_dict + error_summary = e.error_summary + return self.get(id, data_dict, errors, error_summary) + + tk.c.pkg_dict = pkg + + # redirect to showcase details page + url = h.url_for('showcase_read', id=pkg['name']) + return h.redirect_to(url) + + +def 
dataset_showcase_list(id): + return utils.dataset_showcase_list(id) + + +def admins(): + return utils.manage_showcase_admins() + + +def admin_remove(): + return utils.remove_showcase_admin() + +def upload(): + return utils.upload() + + +showcase.add_url_rule('/showcase', view_func=index) +showcase.add_url_rule('/showcase/new', view_func=CreateView.as_view('new')) +showcase.add_url_rule('/showcase/delete/<id>', + view_func=delete, + methods=[u'GET', u'POST']) +showcase.add_url_rule('/showcase/<id>', view_func=read) +showcase.add_url_rule('/showcase/edit/<id>', + view_func=EditView.as_view('edit'), + methods=[u'GET', u'POST']) +showcase.add_url_rule('/showcase/manage_datasets/<id>', + view_func=manage_datasets, + methods=[u'GET', u'POST']) +showcase.add_url_rule('/dataset/showcases/<id>', + view_func=dataset_showcase_list, + methods=[u'GET', u'POST']) +showcase.add_url_rule('/ckan-admin/showcase_admins', + view_func=admins, + methods=[u'GET', u'POST']) +showcase.add_url_rule('/ckan-admin/showcase_admin_remove', + view_func=admin_remove, + methods=[u'GET', u'POST']) +showcase.add_url_rule('/showcase/upload', + view_func=upload, + methods=[u'POST']) + + +def get_blueprints(): + return [showcase] diff --git a/dev-requirements.txt b/dev-requirements.txt index 8f63371c..3eef46d7 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,1 +1,3 @@ -beautifulsoup4==4.5.1 +beautifulsoup4==4.8.2 +pytest==4.6.5 +pytest-cov==2.7.1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..26a6edf1 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,1 @@ +ckantoolkit>=0.0.3 diff --git a/setup.cfg b/setup.cfg index fa53fa01..5f5bdefc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -19,3 +19,9 @@ previous = true domain = ckanext-showcase directory = ckanext/showcase/i18n statistics = true + +[tool:pytest] +filterwarnings = + ignore::sqlalchemy.exc.SADeprecationWarning + ignore::sqlalchemy.exc.SAWarning + ignore::DeprecationWarning diff --git a/setup.py b/setup.py index 15efcda6..390fa78b 100644 --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). packages=find_packages(exclude=['contrib', 'docs', 'tests*']), + namespace_packages=['ckanext'], # List run-time dependencies here. These will be installed by pip when your # project is installed. For an analysis of "install_requires" vs pip's @@ -80,7 +81,7 @@ ckan = ckan.lib.extract:extract_ckan [paste.paster_command] - showcase=ckanext.showcase.commands.migrate:MigrationCommand + showcase=ckanext.showcase.commands.paster:MigrationCommand ''', message_extractors={
Ensure Travis uses CKAN 2.3 when available Change which version of CKAN Travis checks out to a stable release in travis-build.bash.
Fixed in ca98eb88f174c11869c6785b93e504c8c793bc50.
2021-02-18T14:20:08
0.0
[]
[]
DahnJ/H3-Pandas
DahnJ__H3-Pandas-21
52a311128aa0460feab9bffb7703883fc489d7ce
diff --git a/h3pandas/h3pandas.py b/h3pandas/h3pandas.py index a3d6029..5b0e93c 100644 --- a/h3pandas/h3pandas.py +++ b/h3pandas/h3pandas.py @@ -324,7 +324,11 @@ def h3_to_parent(self, resolution: int = None) -> AnyDataFrame: 881e2659c3fffff 1 851e265bfffffff """ # TODO: Test `h3_parent` case - column = self._format_resolution(resolution) if resolution else "h3_parent" + column = ( + self._format_resolution(resolution) + if resolution is not None + else "h3_parent" + ) return self._apply_index_assign( wrapped_partial(h3.h3_to_parent, res=resolution), column )
h3_to_parent at resolution 0 labels it as direct parent

`h3_to_parent` at resolution 0 calculates correctly, but returns a column called `h3_parent` rather than `h3_00` (or perhaps `h3_0`?), as the pattern for other resolutions would lead you to expect. I don't think there's anything unexpected about calculating the 0-level parent (I use it for partitioning; a higher resolution is used for my actual information). Rather, it's just that [this line](https://github.com/DahnJ/H3-Pandas/blob/master/h3pandas/h3pandas.py#L331) checks for implicit falsiness instead of strictly checking for None.

`column = self._format_resolution(resolution) if resolution else "h3_parent"`

Should be:

`column = self._format_resolution(resolution) if resolution is not None else "h3_parent"`

```
>>> 'a' if 0 is not None else 'b'
'a'
>>> 'a' if 0 else 'b'
'b'
```

The actual result is correct: it is a level-0 cell.
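A minimal standalone sketch of the truthiness pitfall described above and the `is not None` fix. The `format_resolution` helper here is a stand-in; the zero-padded naming is an assumption inferred from the `h3_00` expectation, not H3-Pandas's actual implementation:

```python
def format_resolution(resolution: int) -> str:
    # Assumed column naming: zero-padded resolution, e.g. 5 -> "h3_05".
    return f"h3_{str(resolution).zfill(2)}"


def parent_column(resolution=None) -> tuple:
    # Buggy: `if resolution` treats resolution 0 as falsy, so the
    # level-0 parent column falls through to the "h3_parent" default.
    buggy = format_resolution(resolution) if resolution else "h3_parent"
    # Fixed: only an omitted argument should use the default name.
    fixed = format_resolution(resolution) if resolution is not None else "h3_parent"
    return buggy, fixed


print(parent_column(0))     # ('h3_parent', 'h3_00') -- the reported bug vs. the fix
print(parent_column(5))     # ('h3_05', 'h3_05')     -- other resolutions agree
print(parent_column(None))  # ('h3_parent', 'h3_parent')
```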
2023-03-19T17:50:20
0.0
[]
[]
mike-oakley/openapi-pydantic
mike-oakley__openapi-pydantic-48
e93752ced788c613ec164dbbf6dfb31447923987
diff --git a/README.md b/README.md index dc395aa..ca2bc17 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ OpenAPI schema implemented in [Pydantic](https://github.com/samuelcolvin/pydantic). Both Pydantic 1.8+ and 2.x are supported. The naming of the classes follows the schema in -[OpenAPI specification](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.0.md#schema). +[OpenAPI specification](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.1.md#schema). > This library is forked from [OpenAPI Schema Pydantic](https://github.com/kuimono/openapi-schema-pydantic) (at version [1.2.4](https://github.com/kuimono/openapi-schema-pydantic/releases/tag/v1.2.4)) which is no longer actively maintained. @@ -45,7 +45,7 @@ Result: ```json { - "openapi": "3.1.0", + "openapi": "3.1.1", "info": { "title": "My own API", "version": "v0.0.1" @@ -81,7 +81,7 @@ from openapi_pydantic import parse_obj, OpenAPI, PathItem, Response # Construct OpenAPI from dict, inferring the correct schema version open_api = parse_obj({ - "openapi": "3.1.0", + "openapi": "3.1.1", "info": {"title": "My own API", "version": "v0.0.1"}, "paths": { "/ping": { @@ -91,7 +91,7 @@ open_api = parse_obj({ }) -# Construct OpenAPI v3.1.0 schema from dict +# Construct OpenAPI v3.1 schema from dict # Note: for Pydantic 1.x, replace `model_validate` with `parse_obj` open_api = OpenAPI.model_validate({ "info": {"title": "My own API", "version": "v0.0.1"}, @@ -116,7 +116,7 @@ open_api = OpenAPI.model_validate({ ## Use Pydantic classes as schema -- The [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#schemaObject) +- The [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.4.md#schemaObject) in OpenAPI has definitions and tweaks in JSON Schema, which are hard to comprehend and define a good data class - Pydantic already has a good way to [create JSON schema](https://pydantic-docs.helpmanual.io/usage/schema/). Let's not reinvent the wheel. 
@@ -175,7 +175,7 @@ Result: ```json { - "openapi": "3.1.0", + "openapi": "3.1.1", "info": { "title": "My own API", "version": "v0.0.1" @@ -286,8 +286,8 @@ More info about field aliases: | OpenAPI version | Field alias info | | --------------- | ---------------- | -| 3.1.0 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_1_0/README.md#alias) | -| 3.0.3 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_0_3/README.md#alias) | +| 3.1 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_1/README.md#alias) | +| 3.0 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_0/README.md#alias) | ### Non-pydantic schema types @@ -296,17 +296,17 @@ Please refer to the following for more info: | OpenAPI version | Non-pydantic schema type info | | --------------- | ----------------------------- | -| 3.1.0 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_1_0/README.md#non-pydantic-schema-types) | -| 3.0.3 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_0_3/README.md#non-pydantic-schema-types) | +| 3.1 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_1/README.md#non-pydantic-schema-types) | +| 3.0 | [here](https://github.com/mike-oakley/openapi-pydantic/blob/main/openapi_pydantic/v3/v3_0/README.md#non-pydantic-schema-types) | -### Use OpenAPI 3.0.3 instead of 3.1.0 +### Use OpenAPI 3.0 instead of 3.1 -Some UI renderings (e.g. Swagger) still do not support OpenAPI 3.1.0. -The old 3.0.3 version is available by importing from different paths: +Some UI renderings (e.g. Swagger) still do not support OpenAPI 3.1.x. +The old 3.0.x version is available by importing from different paths: ```python -from openapi_pydantic.v3.v3_0_3 import OpenAPI, ... -from openapi_pydantic.v3.v3_0_3.util import PydanticSchema, construct_open_api_with_schema_class +from openapi_pydantic.v3.v3_0 import OpenAPI, ... 
+from openapi_pydantic.v3.v3_0.util import PydanticSchema, construct_open_api_with_schema_class ``` ### Pydantic version compatibility diff --git a/openapi_pydantic/v3/__init__.py b/openapi_pydantic/v3/__init__.py index 859ccbe..ee575e8 100644 --- a/openapi_pydantic/v3/__init__.py +++ b/openapi_pydantic/v3/__init__.py @@ -1,34 +1,34 @@ from .parser import parse_obj as parse_obj -from .v3_1_0 import XML as XML -from .v3_1_0 import Callback as Callback -from .v3_1_0 import Components as Components -from .v3_1_0 import Contact as Contact -from .v3_1_0 import DataType as DataType -from .v3_1_0 import Discriminator as Discriminator -from .v3_1_0 import Encoding as Encoding -from .v3_1_0 import Example as Example -from .v3_1_0 import ExternalDocumentation as ExternalDocumentation -from .v3_1_0 import Header as Header -from .v3_1_0 import Info as Info -from .v3_1_0 import License as License -from .v3_1_0 import Link as Link -from .v3_1_0 import MediaType as MediaType -from .v3_1_0 import OAuthFlow as OAuthFlow -from .v3_1_0 import OAuthFlows as OAuthFlows -from .v3_1_0 import OpenAPI as OpenAPI -from .v3_1_0 import Operation as Operation -from .v3_1_0 import Parameter as Parameter -from .v3_1_0 import ParameterLocation as ParameterLocation -from .v3_1_0 import PathItem as PathItem -from .v3_1_0 import Paths as Paths -from .v3_1_0 import Reference as Reference -from .v3_1_0 import RequestBody as RequestBody -from .v3_1_0 import Response as Response -from .v3_1_0 import Responses as Responses -from .v3_1_0 import Schema as Schema -from .v3_1_0 import SecurityRequirement as SecurityRequirement -from .v3_1_0 import SecurityScheme as SecurityScheme -from .v3_1_0 import Server as Server -from .v3_1_0 import ServerVariable as ServerVariable -from .v3_1_0 import Tag as Tag -from .v3_1_0 import schema_validate as schema_validate +from .v3_1 import XML as XML +from .v3_1 import Callback as Callback +from .v3_1 import Components as Components +from .v3_1 import Contact as Contact +from .v3_1 import DataType as DataType +from .v3_1 import Discriminator as Discriminator +from .v3_1 import Encoding as Encoding +from .v3_1 import Example as Example +from .v3_1 import ExternalDocumentation as ExternalDocumentation +from .v3_1 import Header as Header +from .v3_1 import Info as Info +from .v3_1 import License as License +from .v3_1 import Link as Link +from .v3_1 import MediaType as MediaType +from .v3_1 import OAuthFlow as OAuthFlow +from .v3_1 import OAuthFlows as OAuthFlows +from .v3_1 import OpenAPI as OpenAPI +from .v3_1 import Operation as Operation +from .v3_1 import Parameter as Parameter +from .v3_1 import ParameterLocation as ParameterLocation +from .v3_1 import PathItem as PathItem +from .v3_1 import Paths as Paths +from .v3_1 import Reference as Reference +from .v3_1 import RequestBody as RequestBody +from .v3_1 import Response as Response +from .v3_1 import Responses as Responses +from .v3_1 import Schema as Schema +from .v3_1 import SecurityRequirement as SecurityRequirement +from .v3_1 import SecurityScheme as SecurityScheme +from .v3_1 import Server as Server +from .v3_1 import ServerVariable as ServerVariable +from .v3_1 import Tag as Tag +from .v3_1 import schema_validate as schema_validate diff --git a/openapi_pydantic/v3/parser.py b/openapi_pydantic/v3/parser.py index e80b52f..0149bff 100644 --- a/openapi_pydantic/v3/parser.py +++ b/openapi_pydantic/v3/parser.py @@ -4,8 +4,8 @@ from openapi_pydantic.compat import PYDANTIC_V2 -from .v3_0_3 import OpenAPI as OpenAPIv3_0 -from .v3_1_0 import 
OpenAPI as OpenAPIv3_1 +from .v3_0 import OpenAPI as OpenAPIv3_0 +from .v3_1 import OpenAPI as OpenAPIv3_1 OpenAPIv3 = Union[OpenAPIv3_1, OpenAPIv3_0] diff --git a/openapi_pydantic/v3/v3_0_3/README.md b/openapi_pydantic/v3/v3_0/README.md similarity index 98% rename from openapi_pydantic/v3/v3_0_3/README.md rename to openapi_pydantic/v3/v3_0/README.md index 7a4892a..16b9c44 100644 --- a/openapi_pydantic/v3/v3_0_3/README.md +++ b/openapi_pydantic/v3/v3_0/README.md @@ -1,4 +1,4 @@ -# OpenAPI v3.0.3 schema classes +# OpenAPI v3.0 schema classes ## Alias diff --git a/openapi_pydantic/v3/v3_1_0/__init__.py b/openapi_pydantic/v3/v3_0/__init__.py similarity index 95% rename from openapi_pydantic/v3/v3_1_0/__init__.py rename to openapi_pydantic/v3/v3_0/__init__.py index b6b2b8f..af250d4 100644 --- a/openapi_pydantic/v3/v3_1_0/__init__.py +++ b/openapi_pydantic/v3/v3_0/__init__.py @@ -1,9 +1,9 @@ """ -OpenAPI v3.1.0 schema types, created according to the specification: -https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.0.md +OpenAPI v3.0 schema types, created according to the specification: +https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.4.md The type orders are according to the contents of the specification: -https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.0.md#table-of-contents +https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.4.md#table-of-contents """ from typing import TYPE_CHECKING diff --git a/openapi_pydantic/v3/v3_0_3/callback.py b/openapi_pydantic/v3/v3_0/callback.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/callback.py rename to openapi_pydantic/v3/v3_0/callback.py diff --git a/openapi_pydantic/v3/v3_0_3/components.py b/openapi_pydantic/v3/v3_0/components.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/components.py rename to openapi_pydantic/v3/v3_0/components.py diff --git a/openapi_pydantic/v3/v3_0_3/contact.py b/openapi_pydantic/v3/v3_0/contact.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/contact.py rename to openapi_pydantic/v3/v3_0/contact.py diff --git a/openapi_pydantic/v3/v3_0_3/datatype.py b/openapi_pydantic/v3/v3_0/datatype.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/datatype.py rename to openapi_pydantic/v3/v3_0/datatype.py diff --git a/openapi_pydantic/v3/v3_0_3/discriminator.py b/openapi_pydantic/v3/v3_0/discriminator.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/discriminator.py rename to openapi_pydantic/v3/v3_0/discriminator.py diff --git a/openapi_pydantic/v3/v3_0_3/encoding.py b/openapi_pydantic/v3/v3_0/encoding.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/encoding.py rename to openapi_pydantic/v3/v3_0/encoding.py diff --git a/openapi_pydantic/v3/v3_0_3/example.py b/openapi_pydantic/v3/v3_0/example.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/example.py rename to openapi_pydantic/v3/v3_0/example.py diff --git a/openapi_pydantic/v3/v3_0_3/external_documentation.py b/openapi_pydantic/v3/v3_0/external_documentation.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/external_documentation.py rename to openapi_pydantic/v3/v3_0/external_documentation.py diff --git a/openapi_pydantic/v3/v3_0_3/header.py b/openapi_pydantic/v3/v3_0/header.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/header.py rename to openapi_pydantic/v3/v3_0/header.py diff --git a/openapi_pydantic/v3/v3_0_3/info.py b/openapi_pydantic/v3/v3_0/info.py similarity index 
100% rename from openapi_pydantic/v3/v3_0_3/info.py rename to openapi_pydantic/v3/v3_0/info.py diff --git a/openapi_pydantic/v3/v3_0_3/license.py b/openapi_pydantic/v3/v3_0/license.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/license.py rename to openapi_pydantic/v3/v3_0/license.py diff --git a/openapi_pydantic/v3/v3_0_3/link.py b/openapi_pydantic/v3/v3_0/link.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/link.py rename to openapi_pydantic/v3/v3_0/link.py diff --git a/openapi_pydantic/v3/v3_0_3/media_type.py b/openapi_pydantic/v3/v3_0/media_type.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/media_type.py rename to openapi_pydantic/v3/v3_0/media_type.py diff --git a/openapi_pydantic/v3/v3_0_3/oauth_flow.py b/openapi_pydantic/v3/v3_0/oauth_flow.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/oauth_flow.py rename to openapi_pydantic/v3/v3_0/oauth_flow.py diff --git a/openapi_pydantic/v3/v3_0_3/oauth_flows.py b/openapi_pydantic/v3/v3_0/oauth_flows.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/oauth_flows.py rename to openapi_pydantic/v3/v3_0/oauth_flows.py diff --git a/openapi_pydantic/v3/v3_0_3/open_api.py b/openapi_pydantic/v3/v3_0/open_api.py similarity index 97% rename from openapi_pydantic/v3/v3_0_3/open_api.py rename to openapi_pydantic/v3/v3_0/open_api.py index be788df..b436ffa 100644 --- a/openapi_pydantic/v3/v3_0_3/open_api.py +++ b/openapi_pydantic/v3/v3_0/open_api.py @@ -16,7 +16,7 @@ class OpenAPI(BaseModel): """This is the root document object of the OpenAPI document.""" - openapi: Literal["3.0.3", "3.0.2", "3.0.1", "3.0.0"] = "3.0.3" + openapi: Literal["3.0.4", "3.0.3", "3.0.2", "3.0.1", "3.0.0"] = "3.0.4" """ **REQUIRED**. This string MUST be the [semantic version number](https://semver.org/spec/v2.0.0.html) of the [OpenAPI Specification version](#versions) that the OpenAPI document uses. 
diff --git a/openapi_pydantic/v3/v3_0_3/operation.py b/openapi_pydantic/v3/v3_0/operation.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/operation.py rename to openapi_pydantic/v3/v3_0/operation.py diff --git a/openapi_pydantic/v3/v3_0_3/parameter.py b/openapi_pydantic/v3/v3_0/parameter.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/parameter.py rename to openapi_pydantic/v3/v3_0/parameter.py diff --git a/openapi_pydantic/v3/v3_0_3/path_item.py b/openapi_pydantic/v3/v3_0/path_item.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/path_item.py rename to openapi_pydantic/v3/v3_0/path_item.py diff --git a/openapi_pydantic/v3/v3_0_3/paths.py b/openapi_pydantic/v3/v3_0/paths.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/paths.py rename to openapi_pydantic/v3/v3_0/paths.py diff --git a/openapi_pydantic/v3/v3_0_3/reference.py b/openapi_pydantic/v3/v3_0/reference.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/reference.py rename to openapi_pydantic/v3/v3_0/reference.py diff --git a/openapi_pydantic/v3/v3_0_3/request_body.py b/openapi_pydantic/v3/v3_0/request_body.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/request_body.py rename to openapi_pydantic/v3/v3_0/request_body.py diff --git a/openapi_pydantic/v3/v3_0_3/response.py b/openapi_pydantic/v3/v3_0/response.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/response.py rename to openapi_pydantic/v3/v3_0/response.py diff --git a/openapi_pydantic/v3/v3_0_3/responses.py b/openapi_pydantic/v3/v3_0/responses.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/responses.py rename to openapi_pydantic/v3/v3_0/responses.py diff --git a/openapi_pydantic/v3/v3_0_3/schema.py b/openapi_pydantic/v3/v3_0/schema.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/schema.py rename to openapi_pydantic/v3/v3_0/schema.py diff --git a/openapi_pydantic/v3/v3_0_3/security_requirement.py b/openapi_pydantic/v3/v3_0/security_requirement.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/security_requirement.py rename to openapi_pydantic/v3/v3_0/security_requirement.py diff --git a/openapi_pydantic/v3/v3_0_3/security_scheme.py b/openapi_pydantic/v3/v3_0/security_scheme.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/security_scheme.py rename to openapi_pydantic/v3/v3_0/security_scheme.py diff --git a/openapi_pydantic/v3/v3_0_3/server.py b/openapi_pydantic/v3/v3_0/server.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/server.py rename to openapi_pydantic/v3/v3_0/server.py diff --git a/openapi_pydantic/v3/v3_0_3/server_variable.py b/openapi_pydantic/v3/v3_0/server_variable.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/server_variable.py rename to openapi_pydantic/v3/v3_0/server_variable.py diff --git a/openapi_pydantic/v3/v3_0_3/tag.py b/openapi_pydantic/v3/v3_0/tag.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/tag.py rename to openapi_pydantic/v3/v3_0/tag.py diff --git a/openapi_pydantic/v3/v3_0_3/util.py b/openapi_pydantic/v3/v3_0/util.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/util.py rename to openapi_pydantic/v3/v3_0/util.py diff --git a/openapi_pydantic/v3/v3_0_3/xml.py b/openapi_pydantic/v3/v3_0/xml.py similarity index 100% rename from openapi_pydantic/v3/v3_0_3/xml.py rename to openapi_pydantic/v3/v3_0/xml.py diff --git a/openapi_pydantic/v3/v3_1_0/README.md b/openapi_pydantic/v3/v3_1/README.md similarity index 98% rename from 
openapi_pydantic/v3/v3_1_0/README.md rename to openapi_pydantic/v3/v3_1/README.md index 22d4474..2232c46 100644 --- a/openapi_pydantic/v3/v3_1_0/README.md +++ b/openapi_pydantic/v3/v3_1/README.md @@ -1,4 +1,4 @@ -# OpenAPI v3.1.0 schema classes +# OpenAPI v3.1 schema classes ## Alias diff --git a/openapi_pydantic/v3/v3_0_3/__init__.py b/openapi_pydantic/v3/v3_1/__init__.py similarity index 95% rename from openapi_pydantic/v3/v3_0_3/__init__.py rename to openapi_pydantic/v3/v3_1/__init__.py index 73d1d3d..0574504 100644 --- a/openapi_pydantic/v3/v3_0_3/__init__.py +++ b/openapi_pydantic/v3/v3_1/__init__.py @@ -1,9 +1,9 @@ """ -OpenAPI v3.0.3 schema types, created according to the specification: -https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md +OpenAPI v3.1 schema types, created according to the specification: +https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.1.md The type orders are according to the contents of the specification: -https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#table-of-contents +https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.1.md#table-of-contents """ from typing import TYPE_CHECKING diff --git a/openapi_pydantic/v3/v3_1_0/callback.py b/openapi_pydantic/v3/v3_1/callback.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/callback.py rename to openapi_pydantic/v3/v3_1/callback.py diff --git a/openapi_pydantic/v3/v3_1_0/components.py b/openapi_pydantic/v3/v3_1/components.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/components.py rename to openapi_pydantic/v3/v3_1/components.py diff --git a/openapi_pydantic/v3/v3_1_0/contact.py b/openapi_pydantic/v3/v3_1/contact.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/contact.py rename to openapi_pydantic/v3/v3_1/contact.py diff --git a/openapi_pydantic/v3/v3_1_0/datatype.py b/openapi_pydantic/v3/v3_1/datatype.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/datatype.py rename to openapi_pydantic/v3/v3_1/datatype.py diff --git a/openapi_pydantic/v3/v3_1_0/discriminator.py b/openapi_pydantic/v3/v3_1/discriminator.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/discriminator.py rename to openapi_pydantic/v3/v3_1/discriminator.py diff --git a/openapi_pydantic/v3/v3_1_0/encoding.py b/openapi_pydantic/v3/v3_1/encoding.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/encoding.py rename to openapi_pydantic/v3/v3_1/encoding.py diff --git a/openapi_pydantic/v3/v3_1_0/example.py b/openapi_pydantic/v3/v3_1/example.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/example.py rename to openapi_pydantic/v3/v3_1/example.py diff --git a/openapi_pydantic/v3/v3_1_0/external_documentation.py b/openapi_pydantic/v3/v3_1/external_documentation.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/external_documentation.py rename to openapi_pydantic/v3/v3_1/external_documentation.py diff --git a/openapi_pydantic/v3/v3_1_0/header.py b/openapi_pydantic/v3/v3_1/header.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/header.py rename to openapi_pydantic/v3/v3_1/header.py diff --git a/openapi_pydantic/v3/v3_1_0/info.py b/openapi_pydantic/v3/v3_1/info.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/info.py rename to openapi_pydantic/v3/v3_1/info.py diff --git a/openapi_pydantic/v3/v3_1_0/license.py b/openapi_pydantic/v3/v3_1/license.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/license.py rename to 
openapi_pydantic/v3/v3_1/license.py diff --git a/openapi_pydantic/v3/v3_1_0/link.py b/openapi_pydantic/v3/v3_1/link.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/link.py rename to openapi_pydantic/v3/v3_1/link.py diff --git a/openapi_pydantic/v3/v3_1_0/media_type.py b/openapi_pydantic/v3/v3_1/media_type.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/media_type.py rename to openapi_pydantic/v3/v3_1/media_type.py diff --git a/openapi_pydantic/v3/v3_1_0/oauth_flow.py b/openapi_pydantic/v3/v3_1/oauth_flow.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/oauth_flow.py rename to openapi_pydantic/v3/v3_1/oauth_flow.py diff --git a/openapi_pydantic/v3/v3_1_0/oauth_flows.py b/openapi_pydantic/v3/v3_1/oauth_flows.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/oauth_flows.py rename to openapi_pydantic/v3/v3_1/oauth_flows.py diff --git a/openapi_pydantic/v3/v3_1_0/open_api.py b/openapi_pydantic/v3/v3_1/open_api.py similarity index 98% rename from openapi_pydantic/v3/v3_1_0/open_api.py rename to openapi_pydantic/v3/v3_1/open_api.py index 0d186c5..3427dc5 100644 --- a/openapi_pydantic/v3/v3_1_0/open_api.py +++ b/openapi_pydantic/v3/v3_1/open_api.py @@ -18,7 +18,7 @@ class OpenAPI(BaseModel): """This is the root document object of the OpenAPI document.""" - openapi: Literal["3.1.0"] = "3.1.0" + openapi: Literal["3.1.1", "3.1.0"] = "3.1.1" """ **REQUIRED**. This string MUST be the [version number](#versions) of the OpenAPI Specification that the OpenAPI document uses. diff --git a/openapi_pydantic/v3/v3_1_0/operation.py b/openapi_pydantic/v3/v3_1/operation.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/operation.py rename to openapi_pydantic/v3/v3_1/operation.py diff --git a/openapi_pydantic/v3/v3_1_0/parameter.py b/openapi_pydantic/v3/v3_1/parameter.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/parameter.py rename to openapi_pydantic/v3/v3_1/parameter.py diff --git a/openapi_pydantic/v3/v3_1_0/path_item.py b/openapi_pydantic/v3/v3_1/path_item.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/path_item.py rename to openapi_pydantic/v3/v3_1/path_item.py diff --git a/openapi_pydantic/v3/v3_1_0/paths.py b/openapi_pydantic/v3/v3_1/paths.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/paths.py rename to openapi_pydantic/v3/v3_1/paths.py diff --git a/openapi_pydantic/v3/v3_1_0/reference.py b/openapi_pydantic/v3/v3_1/reference.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/reference.py rename to openapi_pydantic/v3/v3_1/reference.py diff --git a/openapi_pydantic/v3/v3_1_0/request_body.py b/openapi_pydantic/v3/v3_1/request_body.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/request_body.py rename to openapi_pydantic/v3/v3_1/request_body.py diff --git a/openapi_pydantic/v3/v3_1_0/response.py b/openapi_pydantic/v3/v3_1/response.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/response.py rename to openapi_pydantic/v3/v3_1/response.py diff --git a/openapi_pydantic/v3/v3_1_0/responses.py b/openapi_pydantic/v3/v3_1/responses.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/responses.py rename to openapi_pydantic/v3/v3_1/responses.py diff --git a/openapi_pydantic/v3/v3_1_0/schema.py b/openapi_pydantic/v3/v3_1/schema.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/schema.py rename to openapi_pydantic/v3/v3_1/schema.py diff --git a/openapi_pydantic/v3/v3_1_0/security_requirement.py 
b/openapi_pydantic/v3/v3_1/security_requirement.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/security_requirement.py rename to openapi_pydantic/v3/v3_1/security_requirement.py diff --git a/openapi_pydantic/v3/v3_1_0/security_scheme.py b/openapi_pydantic/v3/v3_1/security_scheme.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/security_scheme.py rename to openapi_pydantic/v3/v3_1/security_scheme.py diff --git a/openapi_pydantic/v3/v3_1_0/server.py b/openapi_pydantic/v3/v3_1/server.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/server.py rename to openapi_pydantic/v3/v3_1/server.py diff --git a/openapi_pydantic/v3/v3_1_0/server_variable.py b/openapi_pydantic/v3/v3_1/server_variable.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/server_variable.py rename to openapi_pydantic/v3/v3_1/server_variable.py diff --git a/openapi_pydantic/v3/v3_1_0/tag.py b/openapi_pydantic/v3/v3_1/tag.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/tag.py rename to openapi_pydantic/v3/v3_1/tag.py diff --git a/openapi_pydantic/v3/v3_1_0/xml.py b/openapi_pydantic/v3/v3_1/xml.py similarity index 100% rename from openapi_pydantic/v3/v3_1_0/xml.py rename to openapi_pydantic/v3/v3_1/xml.py
New OAS versions

https://github.com/OAI/OpenAPI-Specification/releases/tag/3.0.4
https://github.com/OAI/OpenAPI-Specification/releases/tag/3.1.1
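A minimal sketch of what widening the version `Literal`s (as in the patch above) means for validation, assuming Pydantic v2's `model_validate`; the simplified `OpenAPI` model below keeps only the version field:

```python
from typing import Literal

from pydantic import BaseModel, ValidationError


class OpenAPI(BaseModel):
    # After the patch both 3.1.x releases validate, and new
    # documents default to the latest one.
    openapi: Literal["3.1.1", "3.1.0"] = "3.1.1"


OpenAPI.model_validate({"openapi": "3.1.0"})  # still accepted
OpenAPI.model_validate({"openapi": "3.1.1"})  # newly accepted

try:
    OpenAPI.model_validate({"openapi": "3.2.0"})  # unlisted version
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "literal_error"
```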
2024-11-04T22:11:15
0.0
[]
[]
lnxpy/pyaction
lnxpy__pyaction-25
4fd6cc6983572a30757878ee9a1c726044c9877a
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8f237f6..fe50c32 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,20 +7,11 @@ repos: - id: trailing-whitespace - id: end-of-file-fixer - id: check-added-large-files - - repo: "https://github.com/psf/black" - rev: 23.7.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.0 hooks: - - id: black - - repo: "https://github.com/PyCQA/flake8" - rev: 6.0.0 - hooks: - - id: flake8 - args: ["--ignore=E203,E501,W503"] - - repo: "https://github.com/pre-commit/mirrors-mypy" - rev: v1.4.1 - hooks: - - id: mypy - args: ["--cache-dir=/dev/null", "--ignore-missing-imports"] + - id: ruff + - id: ruff-format - repo: https://github.com/asottile/pyupgrade rev: v3.10.1 hooks: diff --git a/README.md b/README.md index 6d459ab..aa74d03 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ -## PyAction [![docs ci](https://github.com/lnxpy/pyaction/actions/workflows/docs.yml/badge.svg?branch=main)](https://github.com/lnxpy/pyaction/actions/workflows/docs.yml) [![main](https://github.com/lnxpy/pyaction/actions/workflows/main.yml/badge.svg)](https://github.com/lnxpy/pyaction/actions/workflows/main.yml) ![GitHub tag (with filter)](https://img.shields.io/github/v/tag/lnxpy/pyaction?label=Version) +## PyAction [![docs ci](https://github.com/lnxpy/pyaction/actions/workflows/docs.yml/badge.svg?branch=main)](https://github.com/lnxpy/pyaction/actions/workflows/docs.yml) [![main](https://github.com/lnxpy/pyaction/actions/workflows/main.yml/badge.svg)](https://github.com/lnxpy/pyaction/actions/workflows/main.yml) ![version)](https://img.shields.io/github/v/tag/lnxpy/pyaction?label=Version) -PyAction is a [Cookiecutter](https://cookiecutter.io) template that allows you to develop custom [GitHub Actions](https://docs.github.com/en/actions) using [Python](https://python.org/). + +PyAction is a [Cookiecutter](https://cookiecutter.io) template that allows you to develop custom [GitHub Actions](https://docs.github.com/en/actions) using [Python 3](https://python.org/). <!-- > [!NOTE] > Read ["Writing GitHub Actions in Python"](https://imsadra.me/writing-github-actions-in-python) article that walks you through a hello-world example. --> diff --git a/cookiecutter.json b/cookiecutter.json index 199e2a9..f27a569 100644 --- a/cookiecutter.json +++ b/cookiecutter.json @@ -10,10 +10,6 @@ "gplv3", "notopensource" ], - "python_version": [ - "3", - "2" - ], "include_dependencies": "n", "include_cicd_testing": "n" } diff --git a/docs/demo.md b/docs/demo.md index 865e298..a55ce23 100644 --- a/docs/demo.md +++ b/docs/demo.md @@ -20,23 +20,19 @@ cookiecutter gh:lnxpy/pyaction And here would be the prompting for my action called "PyAction Hello World". ``` { .plaintext .no-copy } -[1/8] action_name (My Awesome Action): PyAction Hello World -[2/8] action_slug (pyaction-hello-world): -[3/8] description (A short description..): This actions says Hello to you! -[4/8] author_name (John Doe): -[5/8] Select open_source_license +[1/7] action_name (My Awesome Action): PyAction Hello World +[2/7] action_slug (pyaction-hello-world): +[3/7] description (A short description..): This actions says Hello to you! 
+[4/7] author_name (John Doe): +[5/7] Select open_source_license 1 - mit 2 - bsd 3 - apache 4 - gplv3 5 - notopensource Choose from [1/2/3/4/5] (1): -[6/8] Select python_version - 1 - 3 - 2 - 2 - Choose from [1/2] (1): -[7/8] include_dependencies (n): -[8/8] include_cicd_testing (n): +[6/7] include_dependencies (n): +[7/7] include_cicd_testing (n): pyaction-hello-world is created successfully! ✅ ``` @@ -47,7 +43,6 @@ pyaction-hello-world is created successfully! ✅ | `description` | Action description | `This actions says Hello to you!` | | `author_name` | Your name | `John Doe` | | `open_source_license` | OS license | `1` = (mit) | -| `python_version` | Python version | `1` = (3) | | `include_dependencies` | Using `requirements.txt` | `y` = (action has requirements) | | `include_cicd_testing` | A workflow for testing the action | `n` | diff --git a/docs/index.md b/docs/index.md index 65f7737..a83f5bc 100644 --- a/docs/index.md +++ b/docs/index.md @@ -106,7 +106,7 @@ Here you can see a very basic hello-world example action generated with PyAction ## How It Works -Custom GitHub Actions can be developed in different ways. PyAction uses the [Docker Container](https://docs.github.com/en/actions/creating-actions/about-custom-actions#docker-container-actions) method which is highly stable with different Python environments. This way, you'll be able to specify the requirements for your actions and run them inside a lightweight isolated container with all the dependencies installed. +Custom GitHub Actions can be developed in different ways. PyAction uses the [Docker Container](https://docs.github.com/en/actions/creating-actions/about-custom-actions#docker-container-actions) method which is highly stable with Python environments. This way, you'll be able to specify the requirements for your actions and run them inside a lightweight isolated container with all the dependencies installed. ## Passion As a Python developer, I always wanted to help the community and be impactful in its growth. Watching that GitHub supports JavaScript as an official method for creating actions, made me think of inventing a way for Python developers to help the community be able to write actions in Python and benefit from the powerful packages and tools from Python's world. diff --git a/requirements-dev.ini b/requirements-dev.txt similarity index 100% rename from requirements-dev.ini rename to requirements-dev.txt diff --git a/requirements-docs.ini b/requirements-docs.txt similarity index 100% rename from requirements-docs.ini rename to requirements-docs.txt diff --git a/tox.ini b/tox.ini index d57bd4e..36ffbc3 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ skip_install = true [testenv] description = run tests deps = - -r requirements-dev.ini + -r requirements-dev.txt commands = coverage run -m pytest {posargs:tests} coverage report --omit={{cookiecutter.action_slug}}/* @@ -26,5 +26,5 @@ commands = pre-commit run --all-files --show-diff-on-failure [testenv:docs] description = docs utils deps = - -r requirements-docs.ini + -r requirements-docs.txt commands = mkdocs {posargs:tests} diff --git a/{{cookiecutter.action_slug}}/Dockerfile b/{{cookiecutter.action_slug}}/Dockerfile index 2a40ac1..9a1d477 100644 --- a/{{cookiecutter.action_slug}}/Dockerfile +++ b/{{cookiecutter.action_slug}}/Dockerfile @@ -1,5 +1,5 @@ # setting the base-image to alpine -FROM python:{{ cookiecutter.python_version }}-slim +FROM python:3-slim # importing the action COPY . 
/action diff --git a/{{cookiecutter.action_slug}}/README.md b/{{cookiecutter.action_slug}}/README.md index c3c8ac6..ddd4309 100644 --- a/{{cookiecutter.action_slug}}/README.md +++ b/{{cookiecutter.action_slug}}/README.md @@ -1,4 +1,4 @@ -## {{ cookiecutter.action_name }} <img alt="action-badge" src="https://img.shields.io/badge/{{ cookiecutter.action_name }}-white?logo=github-actions&label=GitHub%20Action&labelColor=white&color=0064D7"> <a href="https://github.com/lnxpy/pyaction"><img alt="pyaction" src="https://img.shields.io/badge/pyaction-white?logo=cookiecutter&label=Made%20with&labelColor=white&color=0064D7"></a> +## {{ cookiecutter.action_name }} <img alt="action-badge" src="https://img.shields.io/badge/{{ cookiecutter.action_name }}-white?logo=github-actions&label=GitHub%20Action&labelColor=white&color=0064D7"> <a href="https://github.com/lnxpy/pyaction"><img alt="pyaction" src="https://img.shields.io/badge/PyAction-white?logo=cookiecutter&label=Made%20with&labelColor=white&color=0064D7"></a> {{ cookiecutter.description }}
Drop support for Python 2 Running the action in `python:2-slim` Docker images causes issues, since Python 2 doesn't support some of the syntax introduced in Python 3.
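For illustration only (this snippet is not from the repo): one Python 3 construct that breaks under a `python:2-slim` image is the f-string, which does not even compile on Python 2.

```python
# Hypothetical snippet, only to illustrate the incompatibility: f-strings are
# Python 3.6+ syntax, so a file using them fails to compile at all inside a
# python:2-slim container.
name = "world"
print(f"hello {name}")  # SyntaxError on Python 2; prints "hello world" on Python 3
```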
2024-03-03T15:10:20
0.0
[]
[]
opeco17/poetry-audit-plugin
opeco17__poetry-audit-plugin-19
25327829d82bacd0e694540af00dc7d58c8d039c
diff --git a/README.md b/README.md index 5bfc9a8..b3f2134 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Scanning 19 packages... ## Installation -The easiest way to install the `export` plugin is via the `plugin add` command of Poetry. +The easiest way to install the `audit` plugin is via the `plugin add` command of Poetry. ```bash poetry plugin add poetry-audit-plugin @@ -38,20 +38,36 @@ pip install poetry-audit-plugin * `--json`: Export the result in JSON format. * `--ignore-code`: Ignore some vulnerabilities IDs. Receive a list of IDs. For example: + ```bash poetry audit --ignore-code=CVE-2022-42969,CVE-2020-10684 ``` * `--ignore-package`: Ignore some packages. Receive a list of packages. For example: + ```bash poetry audit --json --ignore-package=py,ansible-tower-cli ``` + +* `--proxy-protocol`, `--proxy-host`, `--proxy-port`: Proxy to access Safety DB. For example: + +```bash +poetry audit --proxy-protocol=http --proxy-host=localhost --proxy-port=3128 +``` + +* `--cache-sec`: How long Safety DB can be cached locally. For example: + +```bash +poetry audit --cache-sec=60 +``` + ## Exit codes `poetry audit` will exit with a code indicating its status. * `0`: Vulnerabilities were not found. * `1`: One or more vulnerabilities were found. +* Others: Something wrong happened. ## Develop poetry-audit-plugin @@ -69,6 +85,7 @@ Once you've done it, you can start developing poetry-audit-plugin. You can use t ```sh cd tests/assets/no_vulnerabilities +poetry shell poetry audit ``` diff --git a/poetry_audit_plugin/command.py b/poetry_audit_plugin/command.py index c3968ec..49cb6e0 100644 --- a/poetry_audit_plugin/command.py +++ b/poetry_audit_plugin/command.py @@ -1,16 +1,24 @@ +import copy import json import sys -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple from cleo.helpers import option from poetry.console.commands.command import Command from poetry_audit_plugin import __version__ +from poetry_audit_plugin.constants import ( + EXIT_CODE_OK, + EXIT_CODE_OPTION_INVALID, + EXIT_CODE_VULNERABILITY_FOUND, +) +from poetry_audit_plugin.errors import SafetyDBAccessError, SafetyDBSessionBuildError from poetry_audit_plugin.safety import ( Package, + Vulnerability, VulnerablePackage, + build_safety_db_session, check_vulnerable_packages, - suppress_vulnerable_packages, ) @@ -19,17 +27,59 @@ class AuditCommand(Command): description = "Check vulnerabilities in dependencies" options = [ - option("json", None, "Generate a JSON payload with the information of vulnerable packages.", flag=True), - option("ignore-code", None, "Ignore specified vulnerability codes", flag=False), - option("ignore-package", None, "Ignore specified packages", flag=False), + option( + long_name="json", + description="Generate a JSON payload with the information of vulnerable packages.", + flag=True, + ), + option( + long_name="ignore-code", + description="Ignore specified vulnerability codes.", + flag=False, + ), + option( + long_name="ignore-package", + description="Ignore specified packages.", + flag=False, + ), + option( + long_name="proxy-protocol", + description="Protocol of proxy to access Safety DB.", + flag=False, + value_required=False, + default="http", + ), + option( + long_name="proxy-host", + description="Host of proxy to access Safety DB.", + flag=False, + value_required=False, + ), + option( + long_name="proxy-port", + description="Port of proxy to access Safety DB.", + flag=False, + value_required=False, + default="80", + ), + option( + long_name="cache-sec", + description="How 
long Safety DB can be cached locally.", + flag=False, + value_required=False, + default="0", + ), ] def handle(self) -> None: self.is_quiet = self.option("json") + self.line("<b># poetry audit report</b>") + self.line("") + + self.validate_options() self.validate_lock_file() - self.line("<b># poetry audit report</b>") self.line("<info>Loading...</info>") locked_repo = self.poetry.locker.locked_repository() @@ -40,22 +90,45 @@ def handle(self) -> None: self.line(f"<info>Scanning {len(packages)} packages...</info>") self.line("") - all_vulnerable_packages = check_vulnerable_packages(packages) - ignored_packages: List[str] = self.option("ignore-package").split(",") if self.option("ignore-package") else [] ignored_codes: List[str] = self.option("ignore-code").split(",") if self.option("ignore-code") else [] is_ignore = bool(len(ignored_packages) or len(ignored_codes)) - vulnerable_packages, amount_of_ignored_vulnerabilities = suppress_vulnerable_packages( - all_vulnerable_packages, ignored_packages, ignored_codes - ) + try: + # TODO: Pass auth key to build_client_session function for advanced safety usage. + session = build_safety_db_session( + proxy_protocol=self.option("proxy-protocol"), + proxy_host=self.option("proxy-host"), + proxy_port=int(self.option("proxy-port")) if self.option("proxy-port") else None, + ) + except SafetyDBSessionBuildError as e: + self.chatty_line_error(f"<error>Error occured while building Safety DB session.</error>") + self.chatty_line_error("") + self.chatty_line_error(str(e)) + sys.exit(e.get_exit_code()) + try: + all_vulnerable_packages = check_vulnerable_packages( + session, packages, int(self.option("cache-sec")) if self.option("cache-sec") else 0 + ) + except SafetyDBAccessError as e: + self.chatty_line_error(f"<error>Error occured while accessing Safety DB.</error>") + self.chatty_line_error("") + self.chatty_line_error(str(e)) + sys.exit(e.get_exit_code()) + vulnerable_packages, amount_of_ignored_vulnerabilities = self.filter_vulnerable_packages( + all_vulnerable_packages, + ignored_packages, + ignored_codes, + ) max_line_lengths = self.calculate_line_length(vulnerable_packages) amount_of_vulnerable_packages = len(vulnerable_packages) if self.option("json"): json_report = self.get_json_report(vulnerable_packages) self.chatty_line(json_report) if amount_of_vulnerable_packages > 0: - sys.exit(1) + sys.exit(EXIT_CODE_VULNERABILITY_FOUND) + else: + sys.exit(EXIT_CODE_OK) else: amount_of_vulnerabilities = 0 for vulnerable_package in vulnerable_packages: @@ -82,10 +155,10 @@ def handle(self) -> None: self.line( f"<error>{amount_of_vulnerabilities}</error> <b>vulnerabilities found in {amount_of_vulnerable_packages} packages</b>" ) - sys.exit(1) + sys.exit(EXIT_CODE_VULNERABILITY_FOUND) else: - self.line("<b>Vulnerabilities not found</b> ✨✨") - sys.exit(0) + self.line("<b>No vulnerabilities found</b> ✨✨") + sys.exit(EXIT_CODE_OK) def line(self, *args: Any, **kwargs: Any) -> None: if not self.is_quiet: @@ -99,13 +172,34 @@ def chatty_line(self, *args: Any, **kwargs: Any) -> None: super().line(*args, **kwargs) def chatty_line_error(self, *args: Any, **kwargs: Any) -> None: - super().line(*args, **kwargs) + super().line_error(*args, **kwargs) + + def validate_options(self) -> None: + errors: List[str] = [] + if self.option("proxy-host") and (not self.option("proxy-protocol") or not self.option("proxy-port")): + errors.append("proxy-protocol and proxy-port should not be empty when proxy-host is specified.") + + if self.option("proxy-protocol") and 
(self.option("proxy-protocol") not in ["http", "https"]): + errors.append("proxy-protocol should be http or https.") + + if self.option("proxy-port") and not self.option("proxy-port").isnumeric(): + errors.append("proxy-port should be number.") + + if self.option("cache-sec") and not self.option("cache-sec").isnumeric(): + errors.append("cache-sec be number") + + if errors: + self.chatty_line_error("<error>Command line option(s) are invalid</error>") + for error in errors: + self.chatty_line_error(error) + sys.exit(EXIT_CODE_OPTION_INVALID) def validate_lock_file(self) -> None: + # Ref: https://github.com/python-poetry/poetry/blob/1.2.0b1/src/poetry/console/commands/export.py#L40 locker = self.poetry.locker if not locker.is_locked(): self.line_error("<comment>The lock file does not exist. Locking.</comment>") - option = "quiet" if self.is_quiet() else None + option = "quiet" if self.is_quiet else None self.call("lock", option) self.line("") @@ -131,8 +225,7 @@ def calculate_line_length(self, vulnerable_packages: List[VulnerablePackage]) -> else: line_length = len(getattr(vulnerability, key)) - max_line_length = max_line_lengths[key] - if line_length > max_line_length: + if line_length > max_line_lengths[key]: max_line_lengths[key] = line_length return max_line_lengths @@ -152,6 +245,39 @@ def get_json_report(self, vulnerable_packages: List[VulnerablePackage]) -> str: } return json.dumps(json_report_dict, indent=2) + def filter_vulnerable_packages( + self, vulnerable_packages: List[VulnerablePackage], ignored_packages: List[str], ignored_codes: List[str] + ) -> Tuple[List[VulnerablePackage], int]: + filtered_vulnerable_packages: List[VulnerablePackage] = [] + amount_of_ignored_vulnerabilities = 0 + + is_ignore_packages = len(ignored_packages) > 0 + is_ignore_codes = len(ignored_codes) > 0 + + for vulnerable_package in vulnerable_packages: + filtered_vulnerable_package = copy.copy(vulnerable_package) + if is_ignore_packages: + if vulnerable_package.name in ignored_packages: + amount_of_ignored_vulnerabilities += len(vulnerable_package.vulnerabilities) + continue + + if is_ignore_codes: + filtered_vulnerabilities: List[Vulnerability] = [] + for vulnerability in vulnerable_package.vulnerabilities: + if vulnerability.cve not in ignored_codes: + filtered_vulnerabilities.append(vulnerability) + else: + amount_of_ignored_vulnerabilities += 1 + + if len(filtered_vulnerabilities): + filtered_vulnerable_package.vulnerabilities = filtered_vulnerabilities + else: + continue + + filtered_vulnerable_packages.append(filtered_vulnerable_package) + + return filtered_vulnerable_packages, amount_of_ignored_vulnerabilities + def factory(): return AuditCommand() diff --git a/poetry_audit_plugin/constants.py b/poetry_audit_plugin/constants.py new file mode 100644 index 0000000..3122292 --- /dev/null +++ b/poetry_audit_plugin/constants.py @@ -0,0 +1,5 @@ +EXIT_CODE_OK = 0 +EXIT_CODE_VULNERABILITY_FOUND = 1 +EXIT_CODE_OPTION_INVALID = 64 +EXIT_CODE_SAFETY_DB_SESSION_BUILD_ERROR = 65 +EXIT_CODE_SAFETY_DB_ACCESS_ERROR = 66 diff --git a/poetry_audit_plugin/errors.py b/poetry_audit_plugin/errors.py new file mode 100644 index 0000000..75d0dbc --- /dev/null +++ b/poetry_audit_plugin/errors.py @@ -0,0 +1,22 @@ +from poetry_audit_plugin.constants import ( + EXIT_CODE_SAFETY_DB_ACCESS_ERROR, + EXIT_CODE_SAFETY_DB_SESSION_BUILD_ERROR, +) + + +class SafetyDBSessionBuildError(Exception): + def __init__(self, message) -> None: + self.message = message + super().__init__(self.message) + + def get_exit_code(self) -> int: + 
return EXIT_CODE_SAFETY_DB_SESSION_BUILD_ERROR + + +class SafetyDBAccessError(Exception): + def __init__(self, message) -> None: + self.message = message + super().__init__(self.message) + + def get_exit_code(self) -> int: + return EXIT_CODE_SAFETY_DB_ACCESS_ERROR diff --git a/poetry_audit_plugin/safety.py b/poetry_audit_plugin/safety.py index 2e2bcc1..9361b6f 100644 --- a/poetry_audit_plugin/safety.py +++ b/poetry_audit_plugin/safety.py @@ -1,8 +1,11 @@ -from typing import Any, Dict, Iterator, List, Tuple +from typing import Any, Dict, Iterator, List, Optional from packaging.specifiers import SpecifierSet +from safety.auth import build_client_session from safety.safety import fetch_database +from poetry_audit_plugin.errors import SafetyDBAccessError, SafetyDBSessionBuildError + class Package: def __init__(self, name: str, version: str) -> None: @@ -34,35 +37,69 @@ def format(self) -> Dict[str, Any]: } -def get_vulnerable_entry(pkg_name: str, spec: str, db_full: Dict[str, Any]) -> Iterator[Dict[str, Any]]: - for entry in db_full.get(pkg_name, []): +def build_safety_db_session( + key: Optional[str] = None, + proxy_protocol: Optional[str] = None, + proxy_host: Optional[str] = None, + proxy_port: Optional[int] = None, +) -> Any: + # Ref: https://github.com/pyupio/safety/blob/3.0.1/safety/auth/cli_utils.py#L130 + proxy_config: Optional[Dict[str, str]] = None + if proxy_host and proxy_port and proxy_protocol: + proxy_config = {"https": f"{proxy_protocol}://{proxy_host}:{str(proxy_port)}"} + try: + # Note: proxy_config is ignored when it's invalid or inaccessible inside build_client_session + session, _ = build_client_session(api_key=key, proxies=proxy_config) + except Exception as e: + raise SafetyDBSessionBuildError(str(e)) + + return session + + +def get_vulnerable_entry(pkg_name: str, spec: str, db_full: Dict[str, Dict[str, Any]]) -> Iterator[Dict[str, Any]]: + for entry in db_full.get("vulnerable_packages", {}).get(pkg_name, []): for entry_spec in entry.get("specs", []): if entry_spec == spec: yield entry -def check_vulnerable_packages(packages: List[Package]) -> List[VulnerablePackage]: - db: Dict[str, Any] = fetch_database() - db_full: Dict[str, Any] = {} +def check_vulnerable_packages(session: Any, packages: List[Package], cache_sec: int = 0) -> List[VulnerablePackage]: + """ + Check vulnerabilities in given packages by checking Safety DB. + + If cache_sec is not 0, Safety DB is cached in $HOME/.safety/200/ and it can be used for next scan. 
+ """ + # Ref: https://github.com/pyupio/safety/blob/2.3.5/safety/safety.py#L320 + # Ref: https://github.com/pyupio/safety/blob/3.0.1/safety/scan/finder/handlers.py#L50 + try: + db: Dict[str, Dict[str, Any]] = fetch_database( + session, full=False, db=False, cached=cache_sec, telemetry=False, from_cache=True + ) + db_full: Dict[str, Dict[str, Any]] = fetch_database( + session, full=True, db=False, cached=cache_sec, telemetry=False, from_cache=True + ) + except Exception as e: + raise SafetyDBAccessError(str(e)) + vulnerable_packages: List[VulnerablePackage] = [] for pkg in packages: name = pkg.name.replace("_", "-").lower() vulnerabilities: List[Vulnerability] = [] - if name in frozenset(db.keys()): - specifiers: List[str] = db[name] - for specifier in specifiers: - spec_set = SpecifierSet(specifiers=specifier) - if spec_set.contains(pkg.version): - if not db_full: - db_full = fetch_database(full=True) - for data in get_vulnerable_entry(pkg_name=name, spec=specifier, db_full=db_full): - cve = data.get("cve") - if cve: - cve = cve.split(",")[0].strip() - if data.get("id"): - vulnerabilities.append( - Vulnerability(advisory=data.get("advisory", ""), cve=cve, spec=specifier) - ) + if name not in db.get("vulnerable_packages", {}).keys(): + continue + + specifiers: List[str] = db["vulnerable_packages"][name] + for specifier in specifiers: + spec_set = SpecifierSet(specifiers=specifier) + if not spec_set.contains(pkg.version): + continue + + for entry in get_vulnerable_entry(pkg_name=name, spec=specifier, db_full=db_full): + for cve in entry.get("ids", []): + if cve.get("type") in ["cve", "pve"] and cve.get("id"): + vulnerabilities.append( + Vulnerability(advisory=entry.get("advisory", ""), cve=cve["id"], spec=specifier) + ) if vulnerabilities: vulnerable_packages.append( @@ -70,36 +107,3 @@ def check_vulnerable_packages(packages: List[Package]) -> List[VulnerablePackage ) return vulnerable_packages - - -def suppress_vulnerable_packages( - vulnerable_packages: List[VulnerablePackage], ignored_packages: List[str], ignored_codes: List[str] -) -> Tuple[List[VulnerablePackage], int]: - filtered_vulnerable_packages: List[VulnerablePackage] = [] - amount_of_ignored_vulnerabilities = 0 - - is_ignore_packages = len(ignored_packages) > 0 - is_ignore_codes = len(ignored_codes) > 0 - - for vulnerable_package in vulnerable_packages: - if is_ignore_packages: - if vulnerable_package.name in ignored_packages: - amount_of_ignored_vulnerabilities += len(vulnerable_package.vulnerabilities) - continue - - if is_ignore_codes: - filtered_vulnerabilities: List[Vulnerability] = [] - for vulnerability in vulnerable_package.vulnerabilities: - if vulnerability.cve not in ignored_codes: - filtered_vulnerabilities.append(vulnerability) - else: - amount_of_ignored_vulnerabilities += 1 - - if len(filtered_vulnerabilities): - vulnerable_package.vulnerabilities = filtered_vulnerabilities - else: - continue - - filtered_vulnerable_packages.append(vulnerable_package) - - return filtered_vulnerable_packages, amount_of_ignored_vulnerabilities diff --git a/pyproject.toml b/pyproject.toml index e0f73b0..accb473 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,10 +21,10 @@ classifiers = [ "Topic :: System :: Installation/Setup", "Topic :: System :: Software Distribution", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", 
"License :: OSI Approved :: MIT License" ] keywords = ["poetry", "vulnerabilities", "security", "audit"] @@ -32,7 +32,7 @@ keywords = ["poetry", "vulnerabilities", "security", "audit"] [tool.poetry.dependencies] python = "^3.8" poetry = "^1.6.1" -safety = "^2.3.5" +safety = "^3.0.0" [tool.poetry.group.dev.dependencies] pytest = "^6.2.5"
Error running with poetry 1.8.0 The combination of - `poetry-audit-plugin` 0.3.0 - `poetry` 1.8.0 - `python` 3.11.6 leads to the following error: ``` $ poetry audit No module named 'packaging.metadata' ```
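The missing module suggests the environment resolves an old `packaging` release; `packaging.metadata` only exists in recent versions of that library, and the patch sidesteps this by moving to `safety ^3.0.0`, which pulls in a compatible dependency set. A minimal diagnostic sketch, assuming it is run inside the same virtualenv where `poetry audit` fails:

```python
# Diagnostic sketch: check whether the installed `packaging` release is recent
# enough to provide the `packaging.metadata` module named in the traceback.
from importlib.metadata import version

print("packaging", version("packaging"))
try:
    import packaging.metadata  # noqa: F401 -- only present in recent releases
    print("packaging.metadata is available")
except ModuleNotFoundError:
    print("packaging is too old for this poetry/safety combination")
```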
2024-03-03T10:16:42
0.0
[]
[]
codereverser/casparser
codereverser__casparser-80
68c1e777ddaa008791bffd938a4c53e652f2be1a
diff --git a/CHANGELOG.md b/CHANGELOG.md index 73ebecb..4bc026c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # Changelog +# 0.7.1 - 2023-09-06 +- fix bug where long scheme names were getting truncated + ## 0.7.0 - 2023-09-03 - update pydantic to v2 diff --git a/casparser/process/cas_summary.py b/casparser/process/cas_summary.py index 8e65ade..1c17f06 100644 --- a/casparser/process/cas_summary.py +++ b/casparser/process/cas_summary.py @@ -13,7 +13,7 @@ StatementPeriod, ) -from .regex import SUMMARY_DATE_RE, SUMMARY_ROW_RE +from .regex import SCHEME_TAIL_RE, SUMMARY_DATE_RE, SUMMARY_ROW_RE from .utils import isin_search @@ -43,6 +43,11 @@ def process_summary_text(text): for line in lines: if len(folios) > 0 and re.search("Total", line, re.I): break + scheme_tails = [] + if m := re.findall(SCHEME_TAIL_RE, line): + for txt in m: + line = line.replace(txt, "") + scheme_tails.append(re.sub(r"\s+", " ", txt).strip()) if m := re.search(SUMMARY_ROW_RE, line, re.DOTALL | re.MULTILINE | re.I): folio = m.group("folio").strip() if current_folio is None or current_folio != folio: @@ -55,7 +60,10 @@ def process_summary_text(text): PANKYC="N/A", schemes=[], ) - scheme = re.sub(r"\(formerly.+?\)", "", m.group("name"), flags=re.I | re.DOTALL).strip() + scheme = m.group("name") + if len(scheme_tails) > 0: + scheme = " ".join([scheme, *scheme_tails]) + scheme = re.sub(r"\(formerly.+?\)", "", scheme, flags=re.I | re.DOTALL).strip() rta = m.group("rta").strip() rta_code = m.group("code").strip() isin_ = m.group("isin") diff --git a/casparser/process/regex.py b/casparser/process/regex.py index 6ec9d04..0927416 100644 --- a/casparser/process/regex.py +++ b/casparser/process/regex.py @@ -44,3 +44,4 @@ TRANSACTION_RE3 = rf"{date_re}\t\t([^0-9].*)\t\t{amt_re}(?:\t\t{amt_re}\t\t{amt_re}\t\t{amt_re})*" DESCRIPTION_TAIL_RE = r"(\n.+?)(\t\t|$)" DIVIDEND_RE = r"(?:div\.|dividend|idcw).+?(reinvest)*.*?@\s*Rs\.\s*([\d\.]+)(?:\s+per\s+unit)?" +SCHEME_TAIL_RE = r"(\n.+?)(?:\t\t|$)" diff --git a/poetry.lock b/poetry.lock index a69f2d0..5faf8a4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -29,14 +29,14 @@ files = [ [[package]] name = "asttokens" -version = "2.3.0" +version = "2.4.0" description = "Annotate AST trees with source code positions" category = "dev" optional = false python-versions = "*" files = [ - {file = "asttokens-2.3.0-py2.py3-none-any.whl", hash = "sha256:bef1a51bc256d349e9f94e7e40e44b705ed1162f55294220dd561d24583d9877"}, - {file = "asttokens-2.3.0.tar.gz", hash = "sha256:2552a88626aaa7f0f299f871479fc755bd4e7c11e89078965e928fb7bb9a6afe"}, + {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"}, + {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index 10f3209..e8a373a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "casparser" -version = "0.7.0" +version = "0.7.1" description = "(Karvy/Kfintech/CAMS) Consolidated Account Statement (CAS) PDF parser" authors = ["Sandeep Somasekharan <[email protected]>"] homepage = "https://github.com/codereverser/casparser"
Long mutual fund folio scheme name is not fully read For long mutual fund scheme names that span more than one row, only the first row is being read. Example name: """ My long mutual fund scheme name ELSS - Direct growth plan """ Only the first row is read: "My long mutual fund scheme name ELSS -"
Can you please send me a partial screenshot of the entire two rows (with all sensitive data hidden), just to get an idea of how it looks? Also, does the remaining half just get ignored, or does it appear in another place (like appended to the next scheme name, etc.)? Hi, Sure, attaching the partial screenshots of the input and output rows -- check **only the first two rows** in the screenshots. ![input](https://github.com/codereverser/casparser/assets/1196708/099ee925-fd80-482a-86c9-4b18e690dc1e) ![output](https://github.com/codereverser/casparser/assets/1196708/393c54e6-3d81-4785-9454-2a0d6447e176) As far as I found, the remaining half just gets ignored -- I didn't find it as part of any other entry. I tried to fix it for a while but failed and ended up manually copy-pasting, because there are only 3 such entries for me, but it would be nice to automate this. It looks like the CAS format has been updated recently. Working on a fix.
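A minimal sketch of the approach the fix takes (mirroring `SCHEME_TAIL_RE` from the patch above): pull the wrapped `\n...` fragment out of the summary row first, then re-attach it to the parsed scheme name. The sample row below is made up for illustration.

```python
import re

# Mirrors the fix: extract the wrapped "\n..." fragment of a long scheme name
# from the summary row, then re-attach it to the parsed name.
# SCHEME_TAIL_RE is taken from the patch; the sample row is fabricated.
SCHEME_TAIL_RE = r"(\n.+?)(?:\t\t|$)"

line = "12345678\t\tMy long mutual fund scheme name\nELSS - Direct growth plan\t\tCAMS"

tails = []
for txt in re.findall(SCHEME_TAIL_RE, line):
    line = line.replace(txt, "")
    tails.append(re.sub(r"\s+", " ", txt).strip())

scheme = " ".join([line.split("\t\t")[1], *tails])
print(scheme)  # My long mutual fund scheme name ELSS - Direct growth plan
```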
2023-09-06T08:03:29
0.0
[]
[]
codereverser/casparser
codereverser__casparser-47
53cfa2a78982259a2c81546beacfa7825578db9c
diff --git a/CHANGELOG.md b/CHANGELOG.md index dcf189e..2288d5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Changelog +## 0.5.2 - 2021-08-07 +- fix crash while generating capital gains reports on dividend payout funds +- rework capital gains algorithm +- various bug fixes + ## 0.5.1 - 2021-07-21 - gains: PnL report - support for migration of Franklin Templeton funds to CAMS RTA diff --git a/casparser/analysis/gains.py b/casparser/analysis/gains.py index 3350ca6..5b04447 100644 --- a/casparser/analysis/gains.py +++ b/casparser/analysis/gains.py @@ -15,16 +15,56 @@ from casparser.types import CASParserDataType, TransactionDataType from .utils import CII, get_fin_year, nav_search +PURCHASE_TXNS = { + TransactionType.DIVIDEND_REINVEST.name, + TransactionType.PURCHASE.name, + TransactionType.PURCHASE_SIP.name, + TransactionType.REVERSAL.name, + TransactionType.SWITCH_IN.name, + TransactionType.SWITCH_IN_MERGER.name, +} + +SALE_TXNS = { + TransactionType.REDEMPTION.name, + TransactionType.SWITCH_OUT.name, + TransactionType.SWITCH_OUT_MERGER.name, +} + @dataclass class MergedTransaction: """Represent net transaction on a given date""" dt: date - units: Decimal = Decimal(0.0) nav: Decimal = Decimal(0.0) - amount: Decimal = Decimal(0.0) - tax: Decimal = Decimal(0.0) + purchase: Decimal = Decimal(0.0) + purchase_units: Decimal = Decimal(0.0) + sale: Decimal = Decimal(0.0) + sale_units: Decimal = Decimal(0.0) + stamp_duty: Decimal = Decimal(0.0) + stt: Decimal = Decimal(0.0) + tds: Decimal = Decimal(0.0) + + def add(self, txn: TransactionDataType): + txn_type = txn["type"] + if txn_type in PURCHASE_TXNS and txn["units"] is not None: + self.nav = txn["nav"] + self.purchase_units += txn["units"] + self.purchase += txn["amount"] + elif txn_type in SALE_TXNS and txn["units"] is not None: + self.nav = txn["nav"] + self.sale_units += txn["units"] + self.sale += txn["amount"] + elif txn_type == TransactionType.STT_TAX.name: + self.stt += txn["amount"] + elif txn_type == TransactionType.STAMP_DUTY_TAX.name: + self.stamp_duty += txn["amount"] + elif txn_type == TransactionType.TDS_TAX.name: + self.tds += txn["amount"] + elif txn_type == TransactionType.SEGREGATION.name: + self.nav = Decimal(0.0) + self.purchase_units += txn["units"] + self.purchase = Decimal(0.0) @dataclass @@ -184,25 +224,17 @@ def merge_transactions(self): if dt not in merged_transactions: merged_transactions[dt] = MergedTransaction(dt) - if txn["type"] in ( - TransactionType.STT_TAX.name, - TransactionType.STAMP_DUTY_TAX.name, - ): - merged_transactions[dt].tax += txn["amount"] - elif txn["units"] is not None: - merged_transactions[dt].nav = txn["nav"] - merged_transactions[dt].units += txn["units"] - merged_transactions[dt].amount += txn["amount"] + merged_transactions[dt].add(txn) return merged_transactions def process(self): self.gains = [] for dt in sorted(self._merged_transactions.keys()): txn = self._merged_transactions[dt] - if txn.amount > 0: - self.buy(dt, txn.units, txn.nav, txn.tax) - elif txn.amount < 0: - self.sell(dt, txn.units, txn.nav, txn.tax) + if txn.purchase_units > 0: + self.buy(dt, txn.purchase_units, txn.nav, txn.stamp_duty) + if txn.sale_units < 0: + self.sell(dt, txn.sale_units, txn.nav, txn.stt) return self.gains def buy(self, txn_date: date, quantity: Decimal, nav: Decimal, tax: Decimal): @@ -214,7 +246,7 @@ def sell(self, sell_date: date, quantity: Decimal, nav: Decimal, tax: Decimal): fin_year = get_fin_year(sell_date) original_quantity = abs(quantity) pending_units = original_quantity - 
while pending_units > 0: + while pending_units >= 1e-3: purchase_date, units, purchase_nav, purchase_tax = self.transactions.popleft() if units <= pending_units: @@ -300,7 +332,7 @@ def get_summary(self): def get_summary_csv_data(self) -> str: """Return summary data as a csv string.""" - headers = ["FY", "Fund", "ISIN", "Type", "LTCG", "LTCG(Taxable)", "STCG"] + headers = ["FY", "Fund", "ISIN", "Type", "LTCG(Realized)", "LTCG(Taxable)", "STCG"] with io.StringIO() as csv_fp: writer = csv.writer(csv_fp) writer.writerow(headers) @@ -325,7 +357,7 @@ def get_gains_csv_data(self) -> str: "Sale Date", "Sale Value", "STT", - "LTCG", + "LTCG Realized", "LTCG Taxable", "STCG", ] diff --git a/casparser/cli.py b/casparser/cli.py index 683c55d..3f459f5 100644 --- a/casparser/cli.py +++ b/casparser/cli.py @@ -186,11 +186,11 @@ def print_gains(data, output_file_path=None): if not ext.lower().endswith("csv"): return fname = f"{base_path}-gains-summary.csv" - with open(fname, "w") as fp: + with open(fname, "w", newline="", encoding="utf-8") as fp: fp.write(cg.get_summary_csv_data()) console.print(f"Gains summary report saved : [bold]{fname}[/]") fname = f"{base_path}-gains-detailed.csv" - with open(fname, "w") as fp: + with open(fname, "w", newline="", encoding="utf-8") as fp: fp.write(cg.get_gains_csv_data()) console.print(f"Detailed gains report saved : [bold]{fname}[/]") diff --git a/casparser/process/cas_detailed.py b/casparser/process/cas_detailed.py index 6c768af..462dc3e 100644 --- a/casparser/process/cas_detailed.py +++ b/casparser/process/cas_detailed.py @@ -1,3 +1,4 @@ +from collections import namedtuple from decimal import Decimal import re from typing import Dict, Optional, Tuple @@ -6,12 +7,21 @@ from ..enums import TransactionType, CASFileType from ..exceptions import HeaderParseError, CASParseError -from .regex import DETAILED_DATE_RE, FOLIO_RE, SCHEME_RE, REGISTRAR_RE -from .regex import CLOSE_UNITS_RE, NAV_RE, OPEN_UNITS_RE, VALUATION_RE -from .regex import DIVIDEND_RE, TRANSACTION_RE1, TRANSACTION_RE2, DESCRIPTION_TAIL_RE +from .regex import AMC_RE, DETAILED_DATE_RE, FOLIO_RE, SCHEME_RE, REGISTRAR_RE +from .regex import CLOSE_UNITS_RE, NAV_RE, OPEN_UNITS_RE, VALUATION_RE, DESCRIPTION_TAIL_RE +from .regex import DIVIDEND_RE, TRANSACTION_RE1, TRANSACTION_RE2, TRANSACTION_RE3 from ..types import FolioType, SchemeType from .utils import isin_search +ParsedTransaction = namedtuple( + "ParsedTransaction", ("date", "description", "amount", "units", "nav", "balance") +) + + +def str_to_decimal(value: Optional[str]) -> Decimal: + if isinstance(value, str): + return Decimal(value.replace(",", "_").replace("(", "-")) + def parse_header(text): """ @@ -41,8 +51,6 @@ def get_transaction_type( txn_type = TransactionType.STT_TAX elif "stamp" in description: txn_type = TransactionType.STAMP_DUTY_TAX - elif "segregat" in description: - txn_type = TransactionType.SEGREGATION elif "tds" in description: txn_type = TransactionType.TDS_TAX else: @@ -53,7 +61,13 @@ def get_transaction_type( txn_type = TransactionType.SWITCH_IN_MERGER else: txn_type = TransactionType.SWITCH_IN - elif "sip" in description or "systematic" in description: + elif "segregat" in description: + txn_type = TransactionType.SEGREGATION + elif ( + "sip" in description + or "systematic" in description + or re.search("sys.+?invest", description, re.I | re.DOTALL) + ): txn_type = TransactionType.PURCHASE_SIP else: txn_type = TransactionType.PURCHASE @@ -78,10 +92,22 @@ def get_transaction_type( return txn_type, dividend_rate -def 
parse_transaction(line): - for regex in (TRANSACTION_RE1, TRANSACTION_RE2): +def parse_transaction(line) -> Optional[ParsedTransaction]: + for regex in (TRANSACTION_RE1, TRANSACTION_RE2, TRANSACTION_RE3): if m := re.search(regex, line, re.DOTALL | re.MULTILINE | re.I): - return m + groups = m.groups() + date = description = amount = units = nav = balance = None + if groups.count(None) == 3: + # Tax entries + date, description, amount, *_ = groups + elif groups.count(None) == 2: + # Segregated Portfolio Entries + date, description, units, balance, *_ = groups + elif groups.count(None) == 0: + # Normal entries + date, description, amount, units, nav, balance = groups + if date is not None: + return ParsedTransaction(date, description, amount, units, nav, balance) def process_detailed_text(text): @@ -104,7 +130,7 @@ def process_detailed_text(text): # "Registrar" column to the previous line if re.search(REGISTRAR_RE, line): line = "\t\t".join([lines[idx + 1], line]) - if amc_match := re.search(r"^(.+?)\s+(MF|Mutual\s+Fund)$", line, re.I | re.DOTALL): + if amc_match := re.search(AMC_RE, line, re.I | re.DOTALL): current_amc = amc_match.group(0) elif m := re.search(FOLIO_RE, line, re.I | re.DOTALL): folio = m.group(1).strip() @@ -172,24 +198,16 @@ def process_detailed_text(text): if m := re.search(DESCRIPTION_TAIL_RE, line): description_tail = m.group(1).strip() line = line.replace(m.group(1), "") - if m := parse_transaction(line): - date = date_parser.parse(m.group(1)).date() - desc = m.group(2).strip() + if parsed_txn := parse_transaction(line): + date = date_parser.parse(parsed_txn.date).date() + desc = parsed_txn.description.strip() if description_tail != "": desc = " ".join([desc, description_tail]) - amt = Decimal(m.group(3).replace(",", "_").replace("(", "-")) - if m.group(4) is None: - units = None - nav = None - else: - units = Decimal(m.group(4).replace(",", "_").replace("(", "-")) - nav = Decimal(m.group(5).replace(",", "_")) - balance = Decimal(m.group(6).replace(",", "_").replace("(", "-")) + amt = str_to_decimal(parsed_txn.amount) + units = str_to_decimal(parsed_txn.units) + nav = str_to_decimal(parsed_txn.nav) + balance = str_to_decimal(parsed_txn.balance) txn_type, dividend_rate = get_transaction_type(desc, units) - if txn_type == TransactionType.SEGREGATION: - units = balance = amt - amt = Decimal(0.0) - nav = Decimal(0.0) if units is not None: curr_scheme_data["close_calculated"] += units curr_scheme_data["transactions"].append( diff --git a/casparser/process/regex.py b/casparser/process/regex.py index a3802a4..f83ed10 100644 --- a/casparser/process/regex.py +++ b/casparser/process/regex.py @@ -12,19 +12,26 @@ ) SCHEME_TAIL_RE = r"(\n.+?)\t\t" +AMC_RE = r"^(.+?\s+(MF|Mutual\s*Fund)|franklin\s+templeton\s+investments)$" FOLIO_RE = ( r"Folio\s+No\s*:\s+([\d/\s]+)\s*.*?(?:PAN\s*:\s*([A-Z]{5}\d{4}[A-Z])\s+)?.*?" 
r"(?:KYC\s*:\s*(OK|NOT\s+OK))?\s*.*?(?:PAN\s*:\s*(OK|NOT\s+OK))?$" ) -SCHEME_RE = r"([\s\w]+)-\s*\d*\s*(.+?)\s*(?:\(Advisor\s*:\s*(.+?)\))*\s+Registrar\s*:\s*(.*)\s*$" +SCHEME_RE = ( + r"([\s\w]+-*[gdp]?)-\s*\d*\s*(.+?)\s*(?:\(Advisor\s*:\s*(.+?)\))*\s+Registrar\s*:\s*(.*)\s*$" +) REGISTRAR_RE = r"^\s*Registrar\s*:\s*(.*)\s*$" OPEN_UNITS_RE = r"Opening\s+Unit\s+Balance.+?([\d,.]+)" CLOSE_UNITS_RE = r"Closing\s+Unit\s+Balance.+?([\d,.]+)" VALUATION_RE = r"Valuation\s+on\s+(\d{2}-[A-Za-z]{3}-\d{4})\s*:\s*INR\s*([\d,.]+)" NAV_RE = r"NAV\s+on\s+(\d{2}-[A-Za-z]{3}-\d{4})\s*:\s*INR\s*([\d,.]+)" +# Normal Transaction entries TRANSACTION_RE1 = rf"{date_re}\t\t([^0-9].*)\t\t{amt_re}\t\t{amt_re}\t\t{amt_re}\t\t{amt_re}" -TRANSACTION_RE2 = rf"{date_re}\t\t([^0-9].*)\t\t{amt_re}(?:\t\t{amt_re}\t\t{amt_re}\t\t{amt_re})*" +# Segregated portfolio entries +TRANSACTION_RE2 = rf"{date_re}\t\t([^0-9].*)\t\t{amt_re}\t\t{amt_re}(?:\t\t{amt_re}\t\t{amt_re})*" +# Tax transactions +TRANSACTION_RE3 = rf"{date_re}\t\t([^0-9].*)\t\t{amt_re}(?:\t\t{amt_re}\t\t{amt_re}\t\t{amt_re})*" DESCRIPTION_TAIL_RE = r"(\n.+?)(\t\t|$)" -DIVIDEND_RE = r"(?:dividend|idcw).+?(reinvest)*.*?@\s+Rs\.\s*([\d\.]+)\s+per\s+unit" +DIVIDEND_RE = r"(?:div\.|dividend|idcw).+?(reinvest)*.*?@\s*Rs\.\s*([\d\.]+)\s+per\s+unit" diff --git a/casparser/types.py b/casparser/types.py index d1e0f05..3ec400c 100644 --- a/casparser/types.py +++ b/casparser/types.py @@ -21,10 +21,10 @@ class TransactionDataType(TypedDict): date: Union[date, str] description: str - amount: Union[Decimal, float] + amount: Union[Decimal, float, None] units: Union[Decimal, float, None] nav: Union[Decimal, float, None] - balance: Union[Decimal, float] + balance: Union[Decimal, float, None] type: str dividend_rate: Union[Decimal, float, None] diff --git a/pyproject.toml b/pyproject.toml index db81277..8baac88 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "casparser" -version = "0.5.1" +version = "0.5.2" description = "(Karvy/Kfintech/CAMS) Consolidated Account Statement (CAS) PDF parser" authors = ["Sandeep Somasekharan <[email protected]>"] homepage = "https://github.com/codereverser/casparser"
Issue in Franklin Templeton Segregated Units Hello team, Franklin Templeton created a few Segregated Portfolios for some stressed Mutual Funds. The data is read incorrectly in some cases. In the example given below, there are 2 Segregation records - one for qty 215931.176 and a second for qty 0.008 - but the parser scans the 2nd one as qty 215931.184 ... {"scheme": "Franklin India Credit Risk Fund- Segregated Portfolio 1 (8.25% Vodafone Idea Ltd-10JUL20-Growth Plan)", "advisor": "ICICIRON", "rta_code": "FTI880", "type": "DEBT", "rta": "CAMS", "isin": "INF090I01TJ6", "amfi": "147954", "open": "0.000", "close": "0.000", "close_calculated": "215931.176", "valuation": {"date": "2020-07-17", "value": "0.00", "nav": "0.0818"}, "transactions": [ {"date": "2020-01-24", "description": "Creation of units - Segregated Portfolio\t\t215,931.176", "amount": "0", "units": "215931.176", "nav": "0", "balance": "215931.176", "type": "SEGREGATION", "dividend_rate": null}, {"date": "2020-01-24", "description": "Creation of units - Segregated Portfolio\t\t0.008", "amount": "0", "units": "215931.184", "nav": "0", "balance": "215931.184", "type": "SEGREGATION", "dividend_rate": null}, {"date": "2020-06-15", "description": "Payment - Units Extinguished", "amount": "-1338.33", "units": "-16360.996", "nav": "0.0818", "balance": "199570.188", "type": "REDEMPTION", "dividend_rate": null}, {"date": "2020-07-10", "description": "Payment - Units Extinguished", "amount": "-16324.84", "units": "-199570.188", "nav": "0.0818", "balance": "0.000", "type": "REDEMPTION", "dividend_rate": null}]}
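A sketch of the direction the fix takes: segregated-portfolio rows carry only units and a balance (no amount/NAV), so matching them with the generic amount-first pattern conflates columns; a dedicated pattern (the patch's `TRANSACTION_RE2` idea) keeps them apart. `date_re` and `amt_re` below are simplified assumptions, not the exact casparser building blocks.

```python
import re

# Simplified assumptions standing in for casparser's date_re/amt_re.
date_re = r"(\d{2}-[A-Za-z]{3}-\d{4})"
amt_re = r"\(?([\d,.]+)\)?"

# Normal rows carry amount, units, nav and balance...
TXN_NORMAL = rf"{date_re}\t\t([^\t]+?)\t\t{amt_re}\t\t{amt_re}\t\t{amt_re}\t\t{amt_re}"
# ...while segregated-portfolio rows carry only units and balance
# (the patch's TRANSACTION_RE2 idea), so they need their own pattern.
TXN_SEGREGATED = rf"{date_re}\t\t([^\t]+?)\t\t{amt_re}\t\t{amt_re}$"

line = "24-Jan-2020\t\tCreation of units - Segregated Portfolio\t\t0.008\t\t215,931.184"

assert re.search(TXN_NORMAL, line) is None  # only four columns: no match
date, desc, units, balance = re.search(TXN_SEGREGATED, line).groups()
print(units, balance)  # 0.008 215,931.184 -- units no longer conflated with balance
```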
2021-07-22T21:04:05
0.0
[]
[]
codereverser/casparser
codereverser__casparser-40
e507c5305554df16fcc3d48c709cfd20fe463eb9
diff --git a/casparser/exceptions.py b/casparser/exceptions.py index 80fa315..f5e3a54 100644 --- a/casparser/exceptions.py +++ b/casparser/exceptions.py @@ -10,6 +10,10 @@ class CASParseError(ParserException): """Error while parsing pdf file.""" +class IncorrectPasswordError(CASParseError): + """Incorrect password error.""" + + class CASIntegrityError(ParserException): """Error while processing transactions""" diff --git a/casparser/parsers/mupdf.py b/casparser/parsers/mupdf.py index ff1b43a..a8dc94b 100644 --- a/casparser/parsers/mupdf.py +++ b/casparser/parsers/mupdf.py @@ -8,7 +8,7 @@ import fitz from casparser.enums import FileType -from casparser.exceptions import CASParseError +from casparser.exceptions import CASParseError, IncorrectPasswordError from .utils import is_close, InvestorInfo, PartialCASData @@ -197,7 +197,7 @@ def cas_pdf_to_text(filename: Union[str, io.IOBase], password) -> PartialCASData if doc.needsPass: rc = doc.authenticate(password) if not rc: - raise CASParseError("Incorrect PDF password!") + raise IncorrectPasswordError("Incorrect PDF password!") pages = [] investor_info = None diff --git a/casparser/parsers/pdfminer.py b/casparser/parsers/pdfminer.py index 68903d5..c70ac10 100644 --- a/casparser/parsers/pdfminer.py +++ b/casparser/parsers/pdfminer.py @@ -11,7 +11,7 @@ from pdfminer.layout import LTTextBoxHorizontal, LTTextBoxVertical from casparser.enums import FileType -from casparser.exceptions import CASParseError +from casparser.exceptions import CASParseError, IncorrectPasswordError from .utils import is_close, InvestorInfo, PartialCASData @@ -122,7 +122,7 @@ def cas_pdf_to_text(filename: Union[str, io.IOBase], password) -> PartialCASData try: document = PDFDocument(pdf_parser, password=password) except PDFPasswordIncorrect: - raise CASParseError("Incorrect PDF password!") + raise IncorrectPasswordError("Incorrect PDF password!") except PDFSyntaxError: raise CASParseError("Unhandled error while opening file")
Different exception for when the password is incorrect In the current code `CASParseError("Incorrect PDF password!")` is raised when the password is wrong. https://github.com/codereverser/casparser/blob/e507c5305554df16fcc3d48c709cfd20fe463eb9/casparser/parsers/mupdf.py#L200 So you have to do ugly things like: ```python try: read_cas_pdf("pdf", "password") except CASParseError as err: if err.args: if 'incorrect pdf password' in err.args[0].lower(): raise InvalidPasswordError raise ``` One possible solution could be to create a separate Exception for wrong password inheriting from `CASParseError`. Or a `code` attribute could be set in the `CASParseError` class, whose value could be like `incorrect_password`(or something else depending on the context where it is raised) which you can check for when handling the exception. If you don't have the bandwidth, I can make a PR for the same this weekend.
Oh! That makes sense. I'll add a separate exception for invalid passwords and make a release by this weekend.
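The released fix takes the first suggestion: a dedicated subclass, `IncorrectPasswordError(CASParseError)`, per the patch above. A minimal sketch of the shape:

```python
class CASParseError(Exception):
    """Error while parsing pdf file."""


class IncorrectPasswordError(CASParseError):
    """Incorrect password error."""


# Callers can now catch the specific failure directly, with no string
# matching, while existing `except CASParseError` handlers keep working.
try:
    raise IncorrectPasswordError("Incorrect PDF password!")
except IncorrectPasswordError:
    print("ask the user for the password again")
```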
2021-07-15T18:10:48
0.0
[]
[]
codereverser/casparser
codereverser__casparser-15
8dae4ec06d410f3299e1203ee89e6d0794bdd92d
diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f32679..fb8c0d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 0.3.8 - 2020-12-29 + +- Support for parsing folios without PAN/KYC details + ## 0.3.7 - 2020-12-24 - Support for parsing dividend transactions diff --git a/casparser/VERSION.txt b/casparser/VERSION.txt index ce4f5af..4209dba 100644 --- a/casparser/VERSION.txt +++ b/casparser/VERSION.txt @@ -1,1 +1,1 @@ -0.3.7 \ No newline at end of file +0.3.8 \ No newline at end of file diff --git a/casparser/process.py b/casparser/process.py index b2f1587..2b40216 100644 --- a/casparser/process.py +++ b/casparser/process.py @@ -42,7 +42,7 @@ def process_cas_text(text): description_tail = "" if amc_match := re.search(r"^(.+?)\s+(MF|Mutual\s+Fund)$", line, re.I | re.DOTALL): current_amc = amc_match.group(0) - elif m := re.search(FOLIO_RE, line, re.I): + elif m := re.search(FOLIO_RE, line, re.I | re.DOTALL): folio = m.group(1).strip() if current_folio is None or current_folio != folio: if curr_scheme_data and current_folio is not None: @@ -54,7 +54,7 @@ def process_cas_text(text): "amc": current_amc, "PAN": (m.group(2) or "").strip(), "KYC": m.group(3).strip(), - "PANKYC": m.group(4).strip(), + "PANKYC": None if m.group(4) is None else m.group(4).strip(), "schemes": [], } elif m := re.search(SCHEME_RE, line, re.DOTALL | re.MULTILINE | re.I): diff --git a/casparser/regex.py b/casparser/regex.py index 80fcecb..ea11283 100644 --- a/casparser/regex.py +++ b/casparser/regex.py @@ -1,8 +1,8 @@ HEADER_RE = r"(?P<from>\d{2}-[a-zA-Z]{3}-\d{4})\s+to\s+(?P<to>\d{2}-[a-zA-Z]{3}-\d{4})" FOLIO_RE = ( - r"Folio\s+No\s*:\s+(.+?)\s+(?:PAN\s*:\s+([A-Z]{5}\d{4}[A-Z])\s+)?" - r"KYC\s*:\s*(.+?)\s+PAN\s*:\s*(.+?)$" + r"Folio\s+No\s*:\s+([\d/\s]+)\s+.*?(?:PAN\s*:\s+([A-Z]{5}\d{4}[A-Z])\s+)?.*?" + r"KYC\s*:\s*(OK|NOT\s+OK)\s*.*?(?:PAN\s*:\s*(OK|NOT\s+OK))?$" ) SCHEME_RE = r"([\s\w]+)-\s*\d*\s*(.+?)\s*(?:\(Advisor\s*:\s*(.+?)\))*\s+Registrar\s*:\s*(.*)\s*$" @@ -12,7 +12,8 @@ NAV_RE = r"NAV\s+on\s+(\d{2}-[A-Za-z]{3}-\d{4})\s*:\s*INR\s*([\d,.]+)" TRANSACTION_RE = ( - r"(\d{2}-[A-Za-z]{3}-\d{4})\t\t([^\t]+?)\t\t([(\d,.]+)\)*(?:\t\t([(\d,.]+)\)*\t\t([(\d,.]+)\)*\t\t([(\d,.]+)\)*)*" + r"(\d{2}-[A-Za-z]{3}-\d{4})\t\t([^\t]+?)\t\t([(\d,.]+)\)*(?:\t\t([(\d,.]+)\)*\t\t" + r"([(\d,.]+)\)*\t\t([(\d,.]+)\)*)*" ) DIVIDEND_RE = r"dividend.+?(reinvest)*.+?@\s+Rs\.\s*([\d\.]+)\s+per\s+unit"
[CAMS CAS] Issue in folio parsing when PAN data is unavailable Hi, the folio is not getting parsed in the case below. Transactions are getting mapped to the previously parsed folio. ![image](https://user-images.githubusercontent.com/73683130/103168537-fe429e00-4859-11eb-8ce2-383aa13f364d.png) Below are the details of the pdf elements and lines, for debugging: `[28.93000030517578, 93.44519805908203, 553.7244873046875, 103.9654769897461, 'Date\t\tTransaction\t\tAmount\t\tUnits\t\tPrice\t\tUnit']` `[358.6300048828125, 102.31519317626953, 566.5147705078125, 124.0643310546875, '(INR)\t\t(INR)\t\tBalance\nKYC: OK']` `[28.93000030517578, 113.60517120361328, 99.20275115966797, 124.12545013427734, 'Folio No: 99999999']` `'Date\t\tTransaction\t\tAmount\t\tUnits\t\tPrice\t\tUnit'` `'Folio No: 99999999\t\t(INR)\t\t(INR)\t\tBalance\nKYC: OK'`
Looks like the PANKYC field is missing for this folio. Shall fix this soon.
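The patched `FOLIO_RE` makes both the PAN and the trailing PAN-KYC status optional groups, so the reported line parses even without PAN data. A quick check, using the regex from the patch above against the debug line from the report:

```python
import re

# FOLIO_RE as patched: the PAN and the trailing PAN-KYC status are both
# optional groups, so folios without PAN data still match.
FOLIO_RE = (
    r"Folio\s+No\s*:\s+([\d/\s]+)\s+.*?(?:PAN\s*:\s+([A-Z]{5}\d{4}[A-Z])\s+)?.*?"
    r"KYC\s*:\s*(OK|NOT\s+OK)\s*.*?(?:PAN\s*:\s*(OK|NOT\s+OK))?$"
)

line = "Folio No: 99999999\t\t(INR)\t\t(INR)\t\tBalance\nKYC: OK"
m = re.search(FOLIO_RE, line, re.I | re.DOTALL)
print(m.group(1).strip())      # 99999999
print(m.group(3))              # OK
print(m.group(2), m.group(4))  # None None -- PAN and PANKYC are absent
```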
2020-12-27T16:59:57
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-1002
deea29aec9348a96d4e2d4c9207c7a2ceb0bdbce
diff --git a/docs_src/src/pages/documentation/api_reference/openapi.mdx b/docs_src/src/pages/documentation/api_reference/openapi.mdx index b732fc154..6159f87dc 100644 --- a/docs_src/src/pages/documentation/api_reference/openapi.mdx +++ b/docs_src/src/pages/documentation/api_reference/openapi.mdx @@ -11,6 +11,10 @@ Out of the box, the following endpoints are setup for you: - `/docs` The Swagger UI - `/openapi.json` The JSON Specification +To use a custom openapi configuration, you can: + + - Place the `openapi.json` config file in the root directory. + - Or, pass the file path to the `openapi_file_path` parameter in the `Robyn()` constructor. (the parameter gets priority over the file). However, if you don't want to generate the OpenAPI docs, you can disable it by passing `--disable-openapi` flag while starting the application. diff --git a/docs_src/src/pages/documentation/example_app/openapi.mdx b/docs_src/src/pages/documentation/example_app/openapi.mdx index 9a0a693b4..8a3bbe735 100644 --- a/docs_src/src/pages/documentation/example_app/openapi.mdx +++ b/docs_src/src/pages/documentation/example_app/openapi.mdx @@ -13,6 +13,11 @@ Out of the box, the following endpoints are setup for you: However, if you don't want to generate the OpenAPI docs, you can disable it by passing `--disable-openapi` flag while starting the application. +To use a custom openapi configuration, you can: + + - Place the `openapi.json` config file in the root directory. + - Or, pass the file path to the `openapi_file_path` parameter in the `Robyn()` constructor. (the parameter gets priority over the file). + ```bash python app.py --disable-openapi ``` diff --git a/robyn/__init__.py b/robyn/__init__.py index 09e9a204f..f5222acda 100644 --- a/robyn/__init__.py +++ b/robyn/__init__.py @@ -2,6 +2,7 @@ import logging import os import socket +from pathlib import Path from typing import Callable, List, Optional, Tuple, Union import multiprocess as mp @@ -41,6 +42,7 @@ def __init__( self, file_object: str, config: Config = Config(), + openapi_file_path: str = None, openapi: OpenAPI = OpenAPI(), dependencies: DependencyMap = DependencyMap(), ) -> None: @@ -51,6 +53,11 @@ def __init__( self.dependencies = dependencies self.openapi = openapi + if openapi_file_path: + openapi.override_openapi(Path(self.directory_path).joinpath(openapi_file_path)) + elif Path(self.directory_path).joinpath("openapi.json").exists(): + openapi.override_openapi(Path(self.directory_path).joinpath("openapi.json")) + if not bool(os.environ.get("ROBYN_CLI", False)): # the env variables are already set when are running through the cli load_vars(project_root=directory_path) @@ -583,7 +590,7 @@ def configure_authentication(self, authentication_handler: AuthenticationHandler class SubRouter(Robyn): def __init__(self, file_object: str, prefix: str = "", config: Config = Config(), openapi: OpenAPI = OpenAPI()) -> None: - super().__init__(file_object, config, openapi) + super().__init__(file_object=file_object, config=config, openapi=openapi) self.prefix = prefix def __add_prefix(self, endpoint: str): diff --git a/robyn/openapi.py b/robyn/openapi.py index e04d2faa6..5d4868c33 100644 --- a/robyn/openapi.py +++ b/robyn/openapi.py @@ -1,8 +1,10 @@ import inspect +import json import typing from dataclasses import asdict, dataclass, field from importlib import resources from inspect import Signature +from pathlib import Path from typing import Any, Callable, Dict, List, Optional, TypedDict from robyn.responses import FileResponse, html @@ -138,11 +140,15 @@ class 
OpenAPI: info: OpenAPIInfo = field(default_factory=OpenAPIInfo) openapi_spec: dict = field(init=False) + openapi_file_override: bool = False # denotes whether there is an override or not. def __post_init__(self): """ Initializes the openapi_spec dict """ + if self.openapi_file_override: + return + self.openapi_spec = { "openapi": "3.1.0", "info": asdict(self.info), @@ -163,6 +169,9 @@ def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_name: str @param handler: Callable the handler function for the endpoint """ + if self.openapi_file_override: + return + query_params = None request_body = None return_annotation = None @@ -212,6 +221,10 @@ def add_subrouter_paths(self, subrouter_openapi: "OpenAPI"): @param subrouter_openapi: OpenAPI the OpenAPI object of the current subrouter """ + + if self.openapi_file_override: + return + paths = subrouter_openapi.openapi_spec["paths"] for path in paths: @@ -393,6 +406,16 @@ def get_schema_object(self, parameter: str, param_type: Any) -> dict: return properties + def override_openapi(self, openapi_json_spec_path: Path): + """ + Set a pre-configured OpenAPI spec + @param openapi_json_spec_path: str the path to the json file + """ + with open(openapi_json_spec_path) as json_file: + json_file_content = json.load(json_file) + self.openapi_spec = dict(json_file_content) + self.openapi_file_override = True + def get_openapi_docs_page(self) -> FileResponse: """ Handler to the swagger html page to be deployed to the endpoint `/docs`
feat: override openapi json Let the user override the openapi json file. Refrain from generating it if: a) the file is present somewhere, OR b) the user passes it as a param to Robyn()
@VishnuSanal , let us restrict the file only to the root of the project for this. a) restrict the file to the root. b) and override based on the path provided by the user. Give priority to b over a in case of a clash. @VishnuSanal , can you please have a look at this? - check how fastapi does this > * check how fastapi does this I can't find docs on whether fastapi supports this. 🤔 Hey @VishnuSanal , I've given the implementation a lot of thought. Here's what we should do: 1. Restrict the OpenAPI JSON file to the root: This rule ensures consistency and simplifies file location, making debugging easier. 2. Allow users to override the OpenAPI JSON by passing a file path: Users who want custom configurations can pass their own file path via a parameter to Robyn(). If the file is passed as a parameter, it takes priority over the root file, ensuring flexibility. 3. Resolve potential conflicts: In case both options (root file and file passed as a parameter) exist, give priority to the user-passed path, as you suggested. Alright, will do this. btw, I still think placing a file with a certain name in a certain path feels an "old" approach of doing things. (it reminds me of running windows xp 🙈). but, let's agree to disagree!
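A minimal sketch of the resolution order that was agreed on, and that the patch implements in `Robyn.__init__`: an explicit `openapi_file_path` argument wins; otherwise an `openapi.json` in the project root is used; otherwise the spec is generated as before.

```python
from pathlib import Path
from typing import Optional


# Resolution order from the patch: an explicit openapi_file_path argument
# wins; otherwise fall back to an openapi.json in the project root;
# otherwise return None and let Robyn generate the spec.
def resolve_openapi_spec(directory_path: str, openapi_file_path: Optional[str] = None) -> Optional[Path]:
    root = Path(directory_path)
    if openapi_file_path:                       # (b) user-passed path: highest priority
        return root.joinpath(openapi_file_path)
    if root.joinpath("openapi.json").exists():  # (a) conventional root file
        return root.joinpath("openapi.json")
    return None                                 # no override: generate the spec


# e.g. Robyn(__file__, openapi_file_path="docs/openapi.json") picks (b).
```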
2024-10-28T04:53:09
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-960
e6872af923f4962bd61a3e6d9d429cd37b62283d
diff --git a/docs_src/src/pages/documentation/api_reference/openapi.mdx b/docs_src/src/pages/documentation/api_reference/openapi.mdx index 013033510..6057f94c6 100644 --- a/docs_src/src/pages/documentation/api_reference/openapi.mdx +++ b/docs_src/src/pages/documentation/api_reference/openapi.mdx @@ -217,11 +217,13 @@ app.include_router(subrouter) ## Other Specification Params -We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). See an example of using request body below. +We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). See an example using request & response bodies below: -<CodeGroup title="Request Body"> +<CodeGroup title="Request & Response Body"> ```python {{ title: 'untyped' }} +from robyn.types import JSONResponse + class Initial(TypedDict): is_present: bool letter: Optional[str] @@ -240,12 +242,19 @@ class CreateItemBody(TypedDict): tax: float +class CreateResponse(JSONResponse): + success: bool + items_changed: int + + @app.post("/") -def create_item(request, body=CreateItemBody): - return request.body +def create_item(request: Request, body=CreateItemBody) -> CreateResponse: + return CreateResponse(success=True, items_changed=2) ``` ```python {{ title: 'typed' }} +from robyn.types import JSONResponse + class Initial(TypedDict): is_present: bool letter: Optional[str] @@ -263,10 +272,14 @@ class CreateItemBody(TypedDict): price: float tax: float +class CreateResponse(JSONResponse): + success: bool + items_changed: int + @app.post("/") -def create_item(request: Request, body=CreateItemBody): - return request.body +def create_item(request: Request, body=CreateItemBody) -> CreateResponse: + return CreateResponse(success=True, items_changed=2) ``` </CodeGroup> diff --git a/docs_src/src/pages/documentation/example_app/openapi.mdx b/docs_src/src/pages/documentation/example_app/openapi.mdx index 389917a74..f9ecfcde9 100644 --- a/docs_src/src/pages/documentation/example_app/openapi.mdx +++ b/docs_src/src/pages/documentation/example_app/openapi.mdx @@ -175,11 +175,13 @@ app.include_router(subrouter) ## Other Specification Params -We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). See an example of using request body below. +We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). 
See an example using request & response bodies below: -<CodeGroup title="Request Body"> +<CodeGroup title="Request & Response Body"> ```python {{ title: 'untyped' }} +from robyn.types import JSONResponse + class Initial(TypedDict): is_present: bool letter: Optional[str] @@ -198,12 +200,19 @@ class CreateItemBody(TypedDict): tax: float +class CreateResponse(JSONResponse): + success: bool + items_changed: int + + @app.post("/") -def create_item(request, body=CreateItemBody): - return request.body +def create_item(request: Request, body=CreateItemBody) -> CreateResponse: + return {"success": True, "items_changed": 2} ``` ```python {{ title: 'typed' }} +from robyn.types import JSONResponse + class Initial(TypedDict): is_present: bool letter: Optional[str] @@ -221,10 +230,14 @@ class CreateItemBody(TypedDict): price: float tax: float +class CreateResponse(JSONResponse): + success: bool + items_changed: int + @app.post("/") -def create_item(request: Request, body=CreateItemBody): - return request.body +def create_item(request: Request, body=CreateItemBody) -> CreateResponse: + return CreateResponse(success=True, items_changed=2) ``` </CodeGroup> diff --git a/robyn/openapi.py b/robyn/openapi.py index f5419139e..0037ba466 100644 --- a/robyn/openapi.py +++ b/robyn/openapi.py @@ -4,6 +4,7 @@ from inspect import Signature from typing import Callable, Dict, List, Optional, TypedDict, Any +from robyn import Response from robyn.responses import FileResponse, html @@ -162,21 +163,24 @@ def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_name: str query_params = None request_body = None + return_annotation = None signature = inspect.signature(handler) openapi_description = inspect.getdoc(handler) or "" - if signature and "query_params" in signature.parameters: - query_params = signature.parameters["query_params"].default + if signature: + if "query_params" in signature.parameters: + query_params = signature.parameters["query_params"].default - if signature and "body" in signature.parameters: - request_body = signature.parameters["body"].default + if "body" in signature.parameters: + request_body = signature.parameters["body"].default - return_annotation = signature.return_annotation + if signature.return_annotation is not Signature.empty: + return_annotation = signature.return_annotation - return_type = "text/plain" if return_annotation == Signature.empty or return_annotation is str else "application/json" - - modified_endpoint, path_obj = self.get_path_obj(endpoint, openapi_name, openapi_description, openapi_tags, query_params, request_body, return_type) + modified_endpoint, path_obj = self.get_path_obj( + endpoint, openapi_name, openapi_description, openapi_tags, query_params, request_body, return_annotation + ) if modified_endpoint not in self.openapi_spec["paths"]: self.openapi_spec["paths"][modified_endpoint] = {} @@ -201,7 +205,7 @@ def get_path_obj( tags: List[str], query_params: Optional[TypedDict], request_body: Optional[TypedDict], - return_type: str, + return_annotation: Optional[TypedDict], ) -> (str, dict): """ Get the "path" openapi object according to spec @@ -212,7 +216,7 @@ def get_path_obj( @param tags: List[str] for grouping of endpoints @param query_params: Optional[TypedDict] query params for the function @param request_body: Optional[TypedDict] request body for the function - @param return_type: str return type of the endpoint handler + @param return_annotation: Optional[TypedDict] return type of the endpoint handler @return: (str, dict) a tuple containing the 
endpoint with path params wrapped in braces and the "path" openapi object according to spec @@ -226,7 +230,6 @@ def get_path_obj( "description": description, "parameters": [], "tags": tags, - "responses": {"200": {"description": "Successful Response", "content": {return_type: {"schema": {}}}}}, } # robyn has paths like /:url/:etc whereas openapi requires path like /{url}/{path} @@ -273,7 +276,7 @@ def get_path_obj( properties = {} for body_item in request_body.__annotations__: - properties[body_item] = self.get_properties_object(body_item, request_body.__annotations__[body_item]) + properties[body_item] = self.get_schema_object(body_item, request_body.__annotations__[body_item]) request_body_object = { "content": { @@ -288,6 +291,15 @@ def get_path_obj( openapi_path_object["requestBody"] = request_body_object + response_schema = {} + response_type = "text/plain" + + if return_annotation and return_annotation is not Response: + response_type = "application/json" + response_schema = self.get_schema_object("response object", return_annotation) + + openapi_path_object["responses"] = {"200": {"description": "Successful Response", "content": {response_type: {"schema": response_schema}}}} + return endpoint_with_path_params_wrapped_in_braces, openapi_path_object def get_openapi_type(self, typed_dict: TypedDict) -> str: @@ -313,9 +325,9 @@ def get_openapi_type(self, typed_dict: TypedDict) -> str: # default to "string" if type is not found return "string" - def get_properties_object(self, parameter: str, param_type: Any) -> dict: + def get_schema_object(self, parameter: str, param_type: Any) -> dict: """ - Get the properties object for request body + Get the schema object for request/response body @param parameter: name of the parameter @param param_type: Any the type to be inferred @@ -351,7 +363,7 @@ def get_properties_object(self, parameter: str, param_type: Any) -> dict: properties["properties"] = {} for e in param_type.__annotations__: - properties["properties"][e] = self.get_properties_object(e, param_type.__annotations__[e]) + properties["properties"][e] = self.get_schema_object(e, param_type.__annotations__[e]) properties["type"] = "object" diff --git a/robyn/types.py b/robyn/types.py index b7ed7d30f..7d13be7f6 100644 --- a/robyn/types.py +++ b/robyn/types.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Optional +from typing import Optional, TypedDict @dataclass @@ -16,3 +16,7 @@ def as_list(self): self.show_files_listing, self.index_file, ] + + +class JSONResponse(TypedDict): + pass
Response schema missing in openapi docs

<img width="530" alt="Screenshot 2024-08-17 at 18 58 39" src="https://github.com/user-attachments/assets/71465eb3-86d8-4c2e-8769-bc9330ef203e">
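For illustration, a minimal sketch of what the patch above enables, assuming the new `JSONResponse` marker type and the return-annotation handling it introduces; the route and the `ItemResponse` fields are made up:

```python
from robyn import Robyn, Request
from robyn.types import JSONResponse

app = Robyn(__file__)


# Hypothetical response type; inheriting from JSONResponse (a TypedDict)
# is what lets the OpenAPI generator emit a response schema.
class ItemResponse(JSONResponse):
    success: bool
    items_changed: int


@app.post("/item")
def create_item(request: Request) -> ItemResponse:
    # The return annotation drives the "responses" schema in the spec.
    return ItemResponse(success=True, items_changed=2)
```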
@VishnuSanal, could you please have a look?
2024-09-14T07:00:56
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-952
028f1615469c7237c95fbda416fb93a882c9cce2
diff --git a/docs_src/src/pages/documentation/api_reference/getting_started.mdx b/docs_src/src/pages/documentation/api_reference/getting_started.mdx index 82f7921ff..77c6629d5 100644 --- a/docs_src/src/pages/documentation/api_reference/getting_started.mdx +++ b/docs_src/src/pages/documentation/api_reference/getting_started.mdx @@ -244,19 +244,24 @@ Batman was curious about how to access path parameters and query parameters from from robyn import jsonify @app.post("/jsonify/:id") - async def json(request): + async def json(request, path_params): print(request.path_params["id"]) + print(path_params["id"]) + assert request.path_params["id"] == path_params["id"] return {"hello": "world"} ``` ```python {{ title: 'typed' }} - from robyn import jsonify, Request + from robyn import jsonify + from robyn.types import PathParams @app.post("/jsonify/:id") - async def json(request: Request): - print(request.path_params["id"]) + async def json(req_obj: Request, path_parameters: PathParams): + print(req_obj.path_params["id"]) + print(path_params["id"]) + assert req_obj.path_params["id"] == path_parameters["id"] return {"hello": "world"} ``` @@ -282,17 +287,20 @@ Batman was curious about how to access path parameters and query parameters from ```python {{ title: 'untyped' }} @app.get("/query") - async def query_get(request): - query_data = request.query_params.to_dict() + async def query_get(request, query_params): + query_data = query_params.to_dict() + assert query_data == request.query_params.to_dict() return jsonify(query_data) ``` ```python {{ title: 'typed' }} from robyn import Request + from robyn.robyn import QueryParams @app.get("/query") - async def query_get(request: Request): - query_data = request.query_params.to_dict() + async def query_get(req_obj: Request, query_params: QueryParams): + query_data = query_params.to_dict() + assert query_data == req_obj.query_params.to_dict() return jsonify(query_data) ``` @@ -300,6 +308,65 @@ Batman was curious about how to access path parameters and query parameters from </Col> </Row> +<Row> + <Col> + + Any request param can be used in the handler function either using type annotations or using the reserved names. + + <b> + Do note that the type annotations will take precedence over the reserved names. 
+ </b> + + Robyn showed Batman example syntaxes of accessing the request params: + + </Col> + <Col sticky> + + <CodeGroup title="Request" tag="GET" label="/split_request_params"> + + ```python + from robyn.robyn import QueryParams, Headers + from robyn.types import PathParams, RequestMethod, RequestBody, RequestURL + + @app.get("/untyped/query_params") + def untyped_basic(query_params): + return query_params.to_dict() + + + @app.get("/typed/query_params") + def typed_basic(query_data: QueryParams): + return query_data.to_dict() + + + @app.get("/untyped/path_params/:id") + def untyped_path_params(query_params: PathParams): + return query_params # contains the path params since the type annotations takes precedence over the reserved names + + + @app.post("/typed_untyped/combined") + def typed_untyped_combined( + query_params, + method_data: RequestMethod, + body_data: RequestBody, + url: RequestURL, + headers_item: Headers, + ): + return { + "body": body_data, + "query_params": query_params.to_dict(), + "method": method_data, + "url": url.path, + "headers": headers_item.get("server"), + } + ``` + + </CodeGroup> + </Col> +</Row> + +Type Aliases: `Request`, `QueryParams`, `Headers`, `PathParams`, `RequestBody`, `RequestMethod`, `RequestURL`, `FormData`, `RequestFiles`, `RequestIP`, `RequestIdentity` + +Reserved Names: `r`, `req`, `request`, `query_params`, `headers`, `path_params`, `body`, `method`, `url`, `ip_addr`, `identity`, `form_data`, `files` --- diff --git a/docs_src/src/pages/documentation/api_reference/openapi.mdx b/docs_src/src/pages/documentation/api_reference/openapi.mdx index 6057f94c6..b732fc154 100644 --- a/docs_src/src/pages/documentation/api_reference/openapi.mdx +++ b/docs_src/src/pages/documentation/api_reference/openapi.mdx @@ -20,16 +20,14 @@ python app.py --disable-openapi ## How to use? 
-- Query Params: The typing for query params can be added as `def get(r: Request, query_params=GetRequestParams)` where `GetRequestParams` is a `TypedDict` +- Query Params: The typing for query params can be added as `def get(r: Request, query_params: GetRequestParams)` where `GetRequestParams` is a subclass of `QueryParams` - Path Params are defaulted to string type (ref: https://en.wikipedia.org/wiki/Query_string) <CodeGroup title="Basic App"> ```python {{ title: 'untyped' }} -from typing import TypedDict - -from robyn import Robyn, OpenAPI -from robyn.openapi import OpenAPIInfo, Contact, License, ExternalDocumentation, Components +from robyn import Robyn +from robyn.robyn import QueryParams app = Robyn( file_object=__file__, @@ -61,13 +59,13 @@ async def welcome(): return "hi" -class GetRequestParams(TypedDict): +class GetRequestParams(QueryParams): appointment_id: str year: int @app.get("/api/v1/name", openapi_name="Name Route", openapi_tags=["Name"]) -async def get(r, query_params=GetRequestParams): +async def get(r, query_params: GetRequestParams): """Get Name by ID""" return r.query_params @@ -83,12 +81,11 @@ if __name__ == "__main__": ``` ```python {{ title: 'typed' }} -from typing import TypedDict +from robyn.robyn import QueryParams -from robyn import Robyn, OpenAPI, Request -from robyn.openapi import OpenAPIInfo, Contact, License, ExternalDocumentation, Components +from robyn import Robyn, Request -app: Robyn = Robyn( +app = Robyn( file_object=__file__, openapi=OpenAPI( info=OpenAPIInfo( @@ -118,13 +115,13 @@ async def welcome(): return "hi" -class GetRequestParams(TypedDict): +class GetRequestParams(QueryParams): appointment_id: str year: int @app.get("/api/v1/name", openapi_name="Name Route", openapi_tags=["Name"]) -async def get(r: Request, query_params=GetRequestParams): +async def get(r: Request, query_params: GetRequestParams): """Get Name by ID""" return r.query_params @@ -146,9 +143,8 @@ if __name__ == "__main__": <CodeGroup title="Subrouters"> ```python {{ title: 'untyped' }} -from typing import TypedDict - from robyn import SubRouter +from robyn.robyn import QueryParams subrouter = SubRouter(__name__, prefix="/sub") @@ -159,13 +155,13 @@ async def subrouter_welcome(): return "hiiiiii subrouter" -class SubRouterGetRequestParams(TypedDict): +class SubRouterGetRequestParams(QueryParams): _id: int value: str @subrouter.get("/name") -async def subrouter_get(r, query_params=SubRouterGetRequestParams): +async def subrouter_get(r, query_params: SubRouterGetRequestParams): """Get Name by ID""" return r.query_params @@ -180,7 +176,7 @@ app.include_router(subrouter) ``` ```python {{ title: 'typed' }} -from typing import TypedDict +from robyn.robyn import QueryParams from robyn import Request, SubRouter @@ -193,13 +189,13 @@ async def subrouter_welcome(): return "hiiiiii subrouter" -class SubRouterGetRequestParams(TypedDict): +class SubRouterGetRequestParams(QueryParams): _id: int value: str @subrouter.get("/name") -async def subrouter_get(r: Request, query_params=SubRouterGetRequestParams): +async def subrouter_get(r: Request, query_params: SubRouterGetRequestParams): """Get Name by ID""" return r.query_params @@ -222,20 +218,20 @@ We support all the params mentioned in the latest OpenAPI specifications (https: <CodeGroup title="Request & Response Body"> ```python {{ title: 'untyped' }} -from robyn.types import JSONResponse +from robyn.types import JSONResponse, Body -class Initial(TypedDict): +class Initial(Body): is_present: bool letter: Optional[str] -class 
FullName(TypedDict): +class FullName(Body): first: str second: str initial: Initial -class CreateItemBody(TypedDict): +class CreateItemBody(Body): name: FullName description: str price: float @@ -248,37 +244,38 @@ class CreateResponse(JSONResponse): @app.post("/") -def create_item(request: Request, body=CreateItemBody) -> CreateResponse: +def create_item(request: Request, body: CreateItemBody) -> CreateResponse: return CreateResponse(success=True, items_changed=2) ``` ```python {{ title: 'typed' }} -from robyn.types import JSONResponse +from robyn.types import JSONResponse, Body -class Initial(TypedDict): +class Initial(Body): is_present: bool letter: Optional[str] -class FullName(TypedDict): +class FullName(Body): first: str second: str initial: Initial -class CreateItemBody(TypedDict): +class CreateItemBody(Body): name: FullName description: str price: float tax: float + class CreateResponse(JSONResponse): success: bool items_changed: int @app.post("/") -def create_item(request: Request, body=CreateItemBody) -> CreateResponse: +def create_item(request: Request, body: CreateItemBody) -> CreateResponse: return CreateResponse(success=True, items_changed=2) ``` diff --git a/docs_src/src/pages/documentation/example_app/openapi.mdx b/docs_src/src/pages/documentation/example_app/openapi.mdx index f9ecfcde9..9a0a693b4 100644 --- a/docs_src/src/pages/documentation/example_app/openapi.mdx +++ b/docs_src/src/pages/documentation/example_app/openapi.mdx @@ -19,19 +19,37 @@ python app.py --disable-openapi ## How to use? -- Query Params: The typing for query params can be added as `def get(r: Request, query_params=GetRequestParams)` where `GetRequestParams` is a `TypedDict` +- Query Params: The typing for query params can be added as `def get(r: Request, query_params: GetRequestParams)` where `GetRequestParams` is a subclass of `QueryParams` - Path Params are defaulted to string type (ref: https://en.wikipedia.org/wiki/Query_string) <CodeGroup title="Basic App"> ```python {{ title: 'untyped' }} -from typing import TypedDict - -from robyn import Robyn, OpenAPI -from robyn.openapi import OpenAPIInfo, Contact, License, ExternalDocumentation, Components - - -app = Robyn(file_object=__file__) +from robyn import Robyn +from robyn.robyn import QueryParams + +app = Robyn( + file_object=__file__, + openapi=OpenAPI( + info=OpenAPIInfo( + title="Sample App", + description="This is a sample server application.", + termsOfService="https://example.com/terms/", + version="1.0.0", + contact=Contact( + name="API Support", + url="https://www.example.com/support", + email="[email protected]", + ), + license=License( + name="BSD2.0", + url="https://opensource.org/license/bsd-2-clause", + ), + externalDocs=ExternalDocumentation(description="Find more info here", url="https://example.com/"), + components=Components(), + ), + ), +) @app.get("/") @@ -40,13 +58,13 @@ async def welcome(): return "hi" -class GetRequestParams(TypedDict): +class GetRequestParams(QueryParams): appointment_id: str year: int @app.get("/api/v1/name", openapi_name="Name Route", openapi_tags=["Name"]) -async def get(r, query_params=GetRequestParams): +async def get(r, query_params: GetRequestParams): """Get Name by ID""" return r.query_params @@ -62,12 +80,32 @@ if __name__ == "__main__": ``` ```python {{ title: 'typed' }} -from typing import TypedDict - -from robyn import Robyn, OpenAPI, Request -from robyn.openapi import OpenAPIInfo, Contact, License, ExternalDocumentation, Components - -app: Robyn = Robyn(file_object=__file__) +from robyn.robyn 
import QueryParams + +from robyn import Robyn, Request + +app = Robyn( + file_object=__file__, + openapi=OpenAPI( + info=OpenAPIInfo( + title="Sample App", + description="This is a sample server application.", + termsOfService="https://example.com/terms/", + version="1.0.0", + contact=Contact( + name="API Support", + url="https://www.example.com/support", + email="[email protected]", + ), + license=License( + name="BSD2.0", + url="https://opensource.org/license/bsd-2-clause", + ), + externalDocs=ExternalDocumentation(description="Find more info here", url="https://example.com/"), + components=Components(), + ), + ), +) @app.get("/") @@ -76,13 +114,13 @@ async def welcome(): return "hi" -class GetRequestParams(TypedDict): +class GetRequestParams(QueryParams): appointment_id: str year: int @app.get("/api/v1/name", openapi_name="Name Route", openapi_tags=["Name"]) -async def get(r: Request, query_params=GetRequestParams): +async def get(r: Request, query_params: GetRequestParams): """Get Name by ID""" return r.query_params @@ -104,9 +142,8 @@ if __name__ == "__main__": <CodeGroup title="Subrouters"> ```python {{ title: 'untyped' }} -from typing import TypedDict - from robyn import SubRouter +from robyn.robyn import QueryParams subrouter = SubRouter(__name__, prefix="/sub") @@ -117,13 +154,13 @@ async def subrouter_welcome(): return "hiiiiii subrouter" -class SubRouterGetRequestParams(TypedDict): +class SubRouterGetRequestParams(QueryParams): _id: int value: str @subrouter.get("/name") -async def subrouter_get(r, query_params=SubRouterGetRequestParams): +async def subrouter_get(r, query_params: SubRouterGetRequestParams): """Get Name by ID""" return r.query_params @@ -138,7 +175,7 @@ app.include_router(subrouter) ``` ```python {{ title: 'typed' }} -from typing import TypedDict +from robyn.robyn import QueryParams from robyn import Request, SubRouter @@ -151,13 +188,13 @@ async def subrouter_welcome(): return "hiiiiii subrouter" -class SubRouterGetRequestParams(TypedDict): +class SubRouterGetRequestParams(QueryParams): _id: int value: str @subrouter.get("/name") -async def subrouter_get(r: Request, query_params=SubRouterGetRequestParams): +async def subrouter_get(r: Request, query_params: SubRouterGetRequestParams): """Get Name by ID""" return r.query_params @@ -180,20 +217,20 @@ We support all the params mentioned in the latest OpenAPI specifications (https: <CodeGroup title="Request & Response Body"> ```python {{ title: 'untyped' }} -from robyn.types import JSONResponse +from robyn.types import JSONResponse, Body -class Initial(TypedDict): +class Initial(Body): is_present: bool letter: Optional[str] -class FullName(TypedDict): +class FullName(Body): first: str second: str initial: Initial -class CreateItemBody(TypedDict): +class CreateItemBody(Body): name: FullName description: str price: float @@ -206,37 +243,38 @@ class CreateResponse(JSONResponse): @app.post("/") -def create_item(request: Request, body=CreateItemBody) -> CreateResponse: - return {"success": True, "items_changed": 2} +def create_item(request: Request, body: CreateItemBody) -> CreateResponse: + return CreateResponse(success=True, items_changed=2) ``` ```python {{ title: 'typed' }} -from robyn.types import JSONResponse +from robyn.types import JSONResponse, Body -class Initial(TypedDict): +class Initial(Body): is_present: bool letter: Optional[str] -class FullName(TypedDict): +class FullName(Body): first: str second: str initial: Initial -class CreateItemBody(TypedDict): +class CreateItemBody(Body): name: FullName 
description: str price: float tax: float + class CreateResponse(JSONResponse): success: bool items_changed: int @app.post("/") -def create_item(request: Request, body=CreateItemBody) -> CreateResponse: +def create_item(request: Request, body: CreateItemBody) -> CreateResponse: return CreateResponse(success=True, items_changed=2) ``` diff --git a/robyn/openapi.py b/robyn/openapi.py index dc1b85416..e04d2faa6 100644 --- a/robyn/openapi.py +++ b/robyn/openapi.py @@ -1,11 +1,13 @@ import inspect +import typing from dataclasses import asdict, dataclass, field from importlib import resources from inspect import Signature from typing import Any, Callable, Dict, List, Optional, TypedDict from robyn.responses import FileResponse, html -from robyn.robyn import Response +from robyn.robyn import QueryParams, Response +from robyn.types import Body @dataclass @@ -169,11 +171,29 @@ def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_name: str openapi_description = inspect.getdoc(handler) or "" if signature: - if "query_params" in signature.parameters: - query_params = signature.parameters["query_params"].default + parameters = signature.parameters - if "body" in signature.parameters: - request_body = signature.parameters["body"].default + if "query_params" in parameters: + query_params = parameters["query_params"].default + + if query_params is Signature.empty: + query_params = None + + if "body" in parameters: + request_body = parameters["body"].default + + if request_body is Signature.empty: + request_body = None + + # priority to typing + for parameter in parameters: + param_annotation = parameters[parameter].annotation + + if inspect.isclass(param_annotation): + if issubclass(param_annotation, Body): + request_body = param_annotation + elif issubclass(param_annotation, QueryParams): + query_params = param_annotation if signature.return_annotation is not Signature.empty: return_annotation = signature.return_annotation @@ -260,8 +280,10 @@ def get_path_obj( endpoint_with_path_params_wrapped_in_braces += "/{" + path_param_name + "}" if query_params: - for query_param in query_params.__annotations__: - query_param_type = self.get_openapi_type(query_params.__annotations__[query_param]) + query_param_annotations = query_params.__annotations__ if query_params is TypedDict else typing.get_type_hints(query_params) + + for query_param in query_param_annotations: + query_param_type = self.get_openapi_type(query_param_annotations[query_param]) openapi_path_object["parameters"].append( { @@ -275,8 +297,10 @@ def get_path_obj( if request_body: properties = {} - for body_item in request_body.__annotations__: - properties[body_item] = self.get_schema_object(body_item, request_body.__annotations__[body_item]) + request_body_annotations = request_body.__annotations__ if request_body is TypedDict else typing.get_type_hints(request_body) + + for body_item in request_body_annotations: + properties[body_item] = self.get_schema_object(body_item, request_body_annotations[body_item]) request_body_object = { "content": { diff --git a/robyn/router.py b/robyn/router.py index 965856ac3..d63017768 100644 --- a/robyn/router.py +++ b/robyn/router.py @@ -12,7 +12,8 @@ from robyn.dependency_injection import DependencyMap from robyn.jsonify import jsonify from robyn.responses import FileResponse -from robyn.robyn import FunctionInfo, Headers, HttpMethod, MiddlewareType, Request, Response +from robyn.robyn import FunctionInfo, Headers, HttpMethod, Identity, MiddlewareType, QueryParams, Request, Response, Url +from 
robyn.types import Body, Files, FormData, IPAddress, Method, PathParams from robyn.ws import WebSocket _logger = logging.getLogger(__name__) @@ -115,11 +116,79 @@ def add_route( exception_handler: Optional[Callable], injected_dependencies: dict, ) -> Union[Callable, CoroutineType]: + def wrapped_handler(*args, **kwargs): + # In the execute functions the request is passed into *args + request = next(filter(lambda it: isinstance(it, Request), args), None) + + handler_params = signature(handler).parameters + + if not request or (len(handler_params) == 1 and next(iter(handler_params)) is Request): + return handler(*args, **kwargs) + + type_mapping = { + "request": Request, + "query_params": QueryParams, + "headers": Headers, + "path_params": PathParams, + "body": Body, + "method": Method, + "url": Url, + "form_data": FormData, + "files": Files, + "ip_addr": IPAddress, + "identity": Identity, + } + + type_filtered_params = {} + + for handler_param in iter(handler_params): + for type_name in type_mapping: + handler_param_type = handler_params[handler_param].annotation + handler_param_name = handler_params[handler_param].name + if handler_param_type is Request: + type_filtered_params[handler_param_name] = request + elif handler_param_type is type_mapping[type_name]: + type_filtered_params[handler_param_name] = getattr(request, type_name) + elif inspect.isclass(handler_param_type): + if issubclass(handler_param_type, Body): + type_filtered_params[handler_param_name] = getattr(request, "body") + elif issubclass(handler_param_type, QueryParams): + type_filtered_params[handler_param_name] = getattr(request, "query_params") + + request_components = { + "r": request, + "req": request, + "request": request, + "query_params": request.query_params, + "headers": request.headers, + "path_params": request.path_params, + "body": request.body, + "method": request.method, + "url": request.url, + "ip_addr": request.ip_addr, + "identity": request.identity, + "form_data": request.form_data, + "files": request.files, + "router_dependencies": injected_dependencies["router_dependencies"], + "global_dependencies": injected_dependencies["global_dependencies"], + **kwargs, + } + + name_filtered_params = {k: v for k, v in request_components.items() if k in handler_params and k not in type_filtered_params} + + filtered_params = dict(**type_filtered_params, **name_filtered_params) + + if len(filtered_params) != len(handler_params): + invalid_args = set(handler_params) - set(filtered_params) + raise SyntaxError(f"Unexpected request params found: {invalid_args}") + + return handler(**filtered_params) + @wraps(handler) async def async_inner_handler(*args, **kwargs): try: response = self._format_response( - await handler(*args, **kwargs), + await wrapped_handler(*args, **kwargs), ) except Exception as err: if exception_handler is None: @@ -133,7 +202,7 @@ async def async_inner_handler(*args, **kwargs): def inner_handler(*args, **kwargs): try: response = self._format_response( - handler(*args, **kwargs), + wrapped_handler(*args, **kwargs), ) except Exception as err: if exception_handler is None: diff --git a/robyn/types.py b/robyn/types.py index 7d13be7f6..071e62a60 100644 --- a/robyn/types.py +++ b/robyn/types.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Optional, TypedDict +from typing import Dict, NewType, Optional, TypedDict @dataclass @@ -18,5 +18,27 @@ def as_list(self): ] +PathParams = NewType("PathParams", Dict[str, str]) +Method = NewType("Method", str) +FormData = NewType("FormData", 
Dict[str, str]) +Files = NewType("Files", Dict[str, bytes]) +IPAddress = NewType("IPAddress", Optional[str]) + + class JSONResponse(TypedDict): + """ + A type alias for openapi response bodies. This class should be inherited by the response class type definition. + """ + pass + + +class Body: + """ + A type alias for openapi request bodies. This class should be inherited by the request body class annotation. + """ + + pass + + +__all__ = ["JSONResponse", "Body"] diff --git a/src/types/multimap.rs b/src/types/multimap.rs index ac78e68aa..70ed480b7 100644 --- a/src/types/multimap.rs +++ b/src/types/multimap.rs @@ -4,7 +4,7 @@ use pyo3::types::{PyDict, PyList}; use std::collections::HashMap; // Custom Multimap class -#[pyclass(name = "QueryParams")] +#[pyclass(subclass)] #[derive(Clone, Debug, Default)] pub struct QueryParams { pub queries: HashMap<String, Vec<String>>,
feat: split request params

**Description**

This PR fixes #
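A rough sketch of the handler signatures this PR appears to allow, inferred from the doc changes in the patch; the routes and parameter names here are illustrative, not taken from the PR:

```python
from robyn import Robyn
from robyn.robyn import Headers, QueryParams

app = Robyn(__file__)


# Reserved name: the parameter name `query_params` alone pulls the
# query string off the incoming request.
@app.get("/by-name")
def by_name(query_params):
    return query_params.to_dict()


# Type annotations: the annotation decides what gets injected and,
# per the docs above, takes precedence over the reserved names.
@app.get("/by-type")
def by_type(q: QueryParams, h: Headers):
    return {"query": q.to_dict(), "server": h.get("server")}
```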
Need to sort out middlewares
2024-08-26T06:27:42
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-946
b5797a87c3faca2dd7cf5c6eae595e1e1661ab80
diff --git a/docs_src/src/pages/documentation/api_reference/websockets.mdx b/docs_src/src/pages/documentation/api_reference/websockets.mdx index c111d154a..d048bbe40 100644 --- a/docs_src/src/pages/documentation/api_reference/websockets.mdx +++ b/docs_src/src/pages/documentation/api_reference/websockets.mdx @@ -2,7 +2,7 @@ export const description = 'On this page, we’ll dive into the different conversation endpoints you can use to manage conversations programmatically.' -## WebSockets {{ tag: 'GET', label: '/hello_world' }} +## WebSockets {{ tag: 'WebSockets', label: 'WebSockets' }} <Row> <Col> @@ -214,6 +214,43 @@ To handle real-time communication, Batman learned how to work with WebSockets. H </Col> </Row> + + +<Row> + <Col> + To programmatically close a WebSocket connection from the server side, Batman learned to use the `close()` method: + The `close()` method does the following: +1. Sends a close message to the client. +2. Removes the client from the WebSocket registry. +3. Closes the WebSocket connection. + +This method is useful for scenarios where you need to programmatically end a WebSocket connection based on certain conditions or events on the server side. + + </Col> + <Col> + <CodeGroup title="Request" tag="GET" label="/hello_world"> + + ```python {{ title: 'untyped' }} + @websocket.on("message") + def message(ws, msg): + if msg == "disconnect": + ws.close() + return "Closing connection" + return "Message received" + ``` + + ```python {{title: 'typed'}} + @websocket.on("message") + def message(ws: WebSocketConnector, msg: str) -> str: + if msg == "disconnect": + ws.close() + return "Closing connection" + return "Message received" + ``` + </CodeGroup> + </Col> +</Row> + --- ## What's next? diff --git a/robyn/robyn.pyi b/robyn/robyn.pyi index 595f22536..f354f99bd 100644 --- a/robyn/robyn.pyi +++ b/robyn/robyn.pyi @@ -236,6 +236,10 @@ class Headers: pass def is_empty(self) -> bool: + """ + Returns: + True if the headers are empty, False otherwise + """ pass @dataclass @@ -295,15 +299,20 @@ class Response: def set_cookie(self, key: str, value: str) -> None: """ + Sets the cookie in the response. - The function to set a cookie (onto headers). - - @param key: the cookie name - @param value: the actual cookie value + Args: + key (str): The key of the cookie + value (str): The value of the cookie """ pass class Server: + """ + The Server object used to create a Robyn server. + + This object is used to create a Robyn server and add routes, middlewares, etc. + """ def __init__(self) -> None: pass def add_directory( @@ -369,10 +378,41 @@ class WebSocketConnector: query_params: QueryParams async def async_broadcast(self, message: str) -> None: + """ + Broadcasts a message to all clients. + + Args: + message (str): The message to broadcast + """ pass async def async_send_to(self, sender_id: str, message: str) -> None: + """ + Sends a message to a specific client. + + Args: + sender_id (str): The id of the sender + message (str): The message to send + """ pass def sync_broadcast(self, message: str) -> None: + """ + Broadcasts a message to all clients. + + Args: + message (str): The message to broadcast + """ pass def sync_send_to(self, sender_id: str, message: str) -> None: + """ + Sends a message to a specific client. + + Args: + sender_id (str): The id of the sender + message (str): The message to send + """ + pass + def close(self) -> None: + """ + Closes the connection. 
+ """ pass diff --git a/src/websockets/mod.rs b/src/websockets/mod.rs index 8a3394b12..51f51a47c 100644 --- a/src/websockets/mod.rs +++ b/src/websockets/mod.rs @@ -3,7 +3,7 @@ pub mod registry; use crate::executors::web_socket_executors::execute_ws_function; use crate::types::function_info::FunctionInfo; use crate::types::multimap::QueryParams; -use registry::{SendMessageToAll, SendText}; +use registry::{Close, SendMessageToAll, SendText}; use actix::prelude::*; use actix::{Actor, AsyncContext, StreamHandler}; @@ -60,7 +60,12 @@ impl Handler<SendText> for WebSocketConnector { fn handle(&mut self, msg: SendText, ctx: &mut Self::Context) { if self.id == msg.recipient_id { + let message = msg.message.clone(); ctx.text(msg.message); + if message == "Connection closed" { + // Close the WebSocket connection + ctx.stop(); + } } } } @@ -171,6 +176,10 @@ impl WebSocketConnector { Ok(awaitable.into_py(py)) } + pub fn close(&self) { + self.registry_addr.do_send(Close { id: self.id }); + } + #[getter] pub fn get_id(&self) -> String { self.id.to_string() diff --git a/src/websockets/registry.rs b/src/websockets/registry.rs index 8f9ddbe9d..702a50ac1 100644 --- a/src/websockets/registry.rs +++ b/src/websockets/registry.rs @@ -70,6 +70,8 @@ impl Handler<SendText> for WebSocketRegistry { if let Some(client_addr) = self.clients.get(&recipient_id) { client_addr.do_send(msg); + } else { + log::warn!("No client found for id: {}", recipient_id); } } } @@ -96,3 +98,26 @@ impl Handler<SendMessageToAll> for WebSocketRegistry { } } } + +pub struct Close { + pub id: Uuid, +} + +impl Message for Close { + type Result = (); +} + +impl Handler<Close> for WebSocketRegistry { + type Result = (); + + fn handle(&mut self, msg: Close, _ctx: &mut Self::Context) { + if let Some(client) = self.clients.remove(&msg.id) { + // Send a close message to the client before removing it + client.do_send(SendText { + recipient_id: msg.id, + message: "Connection closed".to_string(), + sender_id: msg.id, + }); + } + } +}
Websocket close method feature

### Description

Currently, the Robyn framework lacks the capability to programmatically close a WebSocket connection from within the handler method. This feature is critical for implementing server-side logic that demands the closure of a WebSocket connection under specific conditions, such as the expiration of a user's authorization period. Almost all major frameworks have this functionality.

### Use Case

A practical example of this feature would be in scenarios where a user's temporary access for WebSocket communication expires. For instance, if a user is only authorized to maintain a WebSocket connection for a certain period, the server should have the ability to terminate the connection from within the WebSocket view/handler once this period elapses. This is crucial for maintaining security and efficient resource management.

### Suggested Implementation

The implementation could involve enhancing the existing WebSocket handler class/methods, allowing them to invoke a close connection method. This method should be able to send a closing handshake message to the client and terminate the connection gracefully.

Example pseudo-code:

```python
@websocket.on("message")
def message(ws, msg):
    if not is_valid(msg):
        ws.close()  # <----- implement this
```
As you suggested earlier, the WebSocket handler class would first need to be enhanced to include a `close` method. This method would be responsible for sending a closing handshake message to the client and terminating the connection. Here's a simplified example:

```python
class WebSocketHandler:
    def __init__(self, ws):
        self.ws = ws

    def close(self, code=1000, reason=""):
        # Send a closing handshake message to the client
        self.ws.send_close(code, reason)
        # Close the WebSocket connection
        self.ws.close()
```

Then, in the WebSocket view, we could use this `close` method to terminate the connection under specific conditions, as in the example pseudo-code you suggested:

```python
@websocket.on("message")
def message(ws, msg):
    if not is_valid(msg):
        ws.close()  # <----- implement this
```

Can you please provide a reference to the code so I can contribute to it?
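A minimal usage sketch of the `close()` method the patch above adds, assuming the usual `WebSocket(app, path)` registration; the `"disconnect"` trigger is a stand-in for real validation logic:

```python
from robyn import Robyn, WebSocket
from robyn.robyn import WebSocketConnector

app = Robyn(__file__)
websocket = WebSocket(app, "/ws")


@websocket.on("message")
def message(ws: WebSocketConnector, msg: str) -> str:
    # Hypothetical check standing in for real authorization logic.
    if msg == "disconnect":
        # close() sends the close message, drops the client from the
        # registry, and stops the connection, per the patch above.
        ws.close()
        return "Closing connection"
    return "Message received"
```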
2024-08-25T14:11:34
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-937
12d8c0b9f1b43fd04198f2e288486c71d286191d
diff --git a/docs_src/src/pages/documentation/api_reference/openapi.mdx b/docs_src/src/pages/documentation/api_reference/openapi.mdx index b2239647c..013033510 100644 --- a/docs_src/src/pages/documentation/api_reference/openapi.mdx +++ b/docs_src/src/pages/documentation/api_reference/openapi.mdx @@ -215,6 +215,62 @@ app.include_router(subrouter) </CodeGroup> +## Other Specification Params + +We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). See an example of using request body below. + +<CodeGroup title="Request Body"> + +```python {{ title: 'untyped' }} +class Initial(TypedDict): + is_present: bool + letter: Optional[str] + + +class FullName(TypedDict): + first: str + second: str + initial: Initial + + +class CreateItemBody(TypedDict): + name: FullName + description: str + price: float + tax: float + + [email protected]("/") +def create_item(request, body=CreateItemBody): + return request.body +``` + +```python {{ title: 'typed' }} +class Initial(TypedDict): + is_present: bool + letter: Optional[str] + + +class FullName(TypedDict): + first: str + second: str + initial: Initial + + +class CreateItemBody(TypedDict): + name: FullName + description: str + price: float + tax: float + + [email protected]("/") +def create_item(request: Request, body=CreateItemBody): + return request.body +``` + +</CodeGroup> + With the reference documentation deployed and running smoothly, Batman had a powerful new tool at his disposal. The Robyn framework had provided him with the flexibility, scalability, and performance needed to create an effective crime-fighting application, giving him a technological edge in his ongoing battle to protect Gotham City. diff --git a/docs_src/src/pages/documentation/example_app/openapi.mdx b/docs_src/src/pages/documentation/example_app/openapi.mdx index 6197dfa39..389917a74 100644 --- a/docs_src/src/pages/documentation/example_app/openapi.mdx +++ b/docs_src/src/pages/documentation/example_app/openapi.mdx @@ -173,6 +173,62 @@ app.include_router(subrouter) </CodeGroup> +## Other Specification Params + +We support all the params mentioned in the latest OpenAPI specifications (https://swagger.io/specification/). See an example of using request body below. + +<CodeGroup title="Request Body"> + +```python {{ title: 'untyped' }} +class Initial(TypedDict): + is_present: bool + letter: Optional[str] + + +class FullName(TypedDict): + first: str + second: str + initial: Initial + + +class CreateItemBody(TypedDict): + name: FullName + description: str + price: float + tax: float + + [email protected]("/") +def create_item(request, body=CreateItemBody): + return request.body +``` + +```python {{ title: 'typed' }} +class Initial(TypedDict): + is_present: bool + letter: Optional[str] + + +class FullName(TypedDict): + first: str + second: str + initial: Initial + + +class CreateItemBody(TypedDict): + name: FullName + description: str + price: float + tax: float + + [email protected]("/") +def create_item(request: Request, body=CreateItemBody): + return request.body +``` + +</CodeGroup> + With the reference documentation deployed and running smoothly, Batman had a powerful new tool at his disposal. The Robyn framework had provided him with the flexibility, scalability, and performance needed to create an effective crime-fighting application, giving him a technological edge in his ongoing battle to protect Gotham City. 
diff --git a/robyn/openapi.py b/robyn/openapi.py index 5229a90a5..f5419139e 100644 --- a/robyn/openapi.py +++ b/robyn/openapi.py @@ -1,8 +1,8 @@ -from dataclasses import asdict, dataclass, field import inspect -from inspect import Signature +from dataclasses import asdict, dataclass, field from importlib import resources -from typing import Callable, Optional, TypedDict, List, Dict +from inspect import Signature +from typing import Callable, Dict, List, Optional, TypedDict, Any from robyn.responses import FileResponse, html @@ -161,17 +161,22 @@ def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_name: str """ query_params = None + request_body = None + signature = inspect.signature(handler) openapi_description = inspect.getdoc(handler) or "" if signature and "query_params" in signature.parameters: query_params = signature.parameters["query_params"].default + if signature and "body" in signature.parameters: + request_body = signature.parameters["body"].default + return_annotation = signature.return_annotation return_type = "text/plain" if return_annotation == Signature.empty or return_annotation is str else "application/json" - modified_endpoint, path_obj = self.get_path_obj(endpoint, openapi_name, openapi_description, openapi_tags, query_params, return_type) + modified_endpoint, path_obj = self.get_path_obj(endpoint, openapi_name, openapi_description, openapi_tags, query_params, request_body, return_type) if modified_endpoint not in self.openapi_spec["paths"]: self.openapi_spec["paths"][modified_endpoint] = {} @@ -188,7 +193,16 @@ def add_subrouter_paths(self, subrouter_openapi: "OpenAPI"): for path in paths: self.openapi_spec["paths"][path] = paths[path] - def get_path_obj(self, endpoint: str, name: str, description: str, tags: List[str], query_params: Optional[TypedDict], return_type: str) -> (str, dict): + def get_path_obj( + self, + endpoint: str, + name: str, + description: str, + tags: List[str], + query_params: Optional[TypedDict], + request_body: Optional[TypedDict], + return_type: str, + ) -> (str, dict): """ Get the "path" openapi object according to spec @@ -197,18 +211,30 @@ def get_path_obj(self, endpoint: str, name: str, description: str, tags: List[st @param description: Optional[str] short description of the endpoint (to be fetched from the endpoint defenition by default) @param tags: List[str] for grouping of endpoints @param query_params: Optional[TypedDict] query params for the function + @param request_body: Optional[TypedDict] request body for the function @param return_type: str return type of the endpoint handler @return: (str, dict) a tuple containing the endpoint with path params wrapped in braces and the "path" openapi object according to spec """ + + if not description: + description = "No description provided" + + openapi_path_object = { + "summary": name, + "description": description, + "parameters": [], + "tags": tags, + "responses": {"200": {"description": "Successful Response", "content": {return_type: {"schema": {}}}}}, + } + # robyn has paths like /:url/:etc whereas openapi requires path like /{url}/{path} # this function is used for converting path params to the required form # initialized with endpoint for handling endpoints without path params endpoint_with_path_params_wrapped_in_braces = endpoint endpoint_path_params_split = endpoint.split(":") - openapi_parameter_object = [] if len(endpoint_path_params_split) > 1: endpoint_without_path_params = endpoint_path_params_split[0] @@ -220,7 +246,7 @@ def get_path_obj(self, endpoint: str, 
name: str, description: str, tags: List[st for path_param in endpoint_path_params_split[1:]: path_param_name = path_param[:-1] if path_param.endswith("/") else path_param - openapi_parameter_object.append( + openapi_path_object["parameters"].append( { "name": path_param_name, "in": "path", @@ -234,7 +260,7 @@ def get_path_obj(self, endpoint: str, name: str, description: str, tags: List[st for query_param in query_params.__annotations__: query_param_type = self.get_openapi_type(query_params.__annotations__[query_param]) - openapi_parameter_object.append( + openapi_path_object["parameters"].append( { "name": query_param, "in": "query", @@ -243,16 +269,26 @@ def get_path_obj(self, endpoint: str, name: str, description: str, tags: List[st } ) - if not description: - description = "No description provided" + if request_body: + properties = {} - return endpoint_with_path_params_wrapped_in_braces, { - "summary": name, - "description": description, - "tags": tags, - "parameters": openapi_parameter_object, - "responses": {"200": {"description": "Successful Response", "content": {return_type: {"schema": {}}}}}, - } + for body_item in request_body.__annotations__: + properties[body_item] = self.get_properties_object(body_item, request_body.__annotations__[body_item]) + + request_body_object = { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": properties, + } + } + } + } + + openapi_path_object["requestBody"] = request_body_object + + return endpoint_with_path_params_wrapped_in_braces, openapi_path_object def get_openapi_type(self, typed_dict: TypedDict) -> str: """ @@ -277,6 +313,50 @@ def get_openapi_type(self, typed_dict: TypedDict) -> str: # default to "string" if type is not found return "string" + def get_properties_object(self, parameter: str, param_type: Any) -> dict: + """ + Get the properties object for request body + + @param parameter: name of the parameter + @param param_type: Any the type to be inferred + @return: dict the properties object + """ + + properties = { + "title": parameter.capitalize(), + } + + type_mapping = { + int: "integer", + str: "string", + bool: "boolean", + float: "number", + dict: "object", + list: "array", + } + + for type_name in type_mapping: + if param_type is type_name: + properties["type"] = type_mapping[type_name] + return properties + + # check for Optional type + if param_type.__module__ == "typing": + properties["anyOf"] = [{"type": self.get_openapi_type(param_type.__args__[0])}, {"type": "null"}] + return properties + # check for custom classes and TypedDicts + elif inspect.isclass(param_type): + properties["type"] = "object" + + properties["properties"] = {} + + for e in param_type.__annotations__: + properties["properties"][e] = self.get_properties_object(e, param_type.__annotations__[e]) + + properties["type"] = "object" + + return properties + def get_openapi_docs_page(self) -> FileResponse: """ Handler to the swagger html page to be deployed to the endpoint `/docs`
Expose the operation object params to the user in the openapi spec, e.g. requestBody, operationId, etc.

PS: I am working on it.
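A small sketch of the request-body support this issue asks for, following the doc examples added in the patch above; the `CreateItemBody` fields are illustrative:

```python
from typing import TypedDict

from robyn import Robyn, Request

app = Robyn(__file__)


class CreateItemBody(TypedDict):
    name: str
    description: str
    price: float
    tax: float


# Supplying the TypedDict as the default of the reserved `body`
# parameter is what surfaces a requestBody object in the spec.
@app.post("/item")
def create_item(request: Request, body=CreateItemBody):
    return request.body
```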
2024-08-22T06:07:04
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-932
b7a3bc83f084d8b696f210dc8a04296ebd82e184
diff --git a/docs_src/src/pages/documentation/api_reference/future-roadmap.mdx b/docs_src/src/pages/documentation/api_reference/future-roadmap.mdx index c5a3725fe..1dcd18826 100644 --- a/docs_src/src/pages/documentation/api_reference/future-roadmap.mdx +++ b/docs_src/src/pages/documentation/api_reference/future-roadmap.mdx @@ -3,7 +3,6 @@ export const description = - Add performance optimizations -- OpenAPI Integration - Pydantic Integration - Implement Auto Const Requests - Add ORM support, especially Prisma integration diff --git a/robyn/__init__.py b/robyn/__init__.py index ac7e4bb5e..8c2b7be20 100644 --- a/robyn/__init__.py +++ b/robyn/__init__.py @@ -1,5 +1,4 @@ import asyncio -import inspect import logging import os import socket @@ -315,6 +314,7 @@ def get( endpoint: str, const: bool = False, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["get"], ): """ @@ -323,11 +323,12 @@ def get( :param endpoint str: endpoint for the route added :param const bool: represents if the handler is a const function or not :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("get", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("get", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.GET, endpoint, handler, const, auth_required) @@ -337,6 +338,7 @@ def post( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["post"], ): """ @@ -344,11 +346,12 @@ def post( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("post", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("post", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.POST, endpoint, handler, auth_required=auth_required) @@ -358,6 +361,7 @@ def put( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["put"], ): """ @@ -365,11 +369,12 @@ def put( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("put", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("put", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.PUT, endpoint, handler, auth_required=auth_required) @@ -379,6 +384,7 @@ def delete( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["delete"], ): """ @@ -386,11 +392,12 @@ def delete( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in 
the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("delete", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("delete", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.DELETE, endpoint, handler, auth_required=auth_required) @@ -400,6 +407,7 @@ def patch( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["patch"], ): """ @@ -407,11 +415,12 @@ def patch( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("patch", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("patch", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.PATCH, endpoint, handler, auth_required=auth_required) @@ -421,6 +430,7 @@ def head( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["head"], ): """ @@ -428,11 +438,12 @@ def head( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("head", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("head", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.HEAD, endpoint, handler, auth_required=auth_required) @@ -442,6 +453,7 @@ def options( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["options"], ): """ @@ -449,11 +461,12 @@ def options( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("options", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("options", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.OPTIONS, endpoint, handler, auth_required=auth_required) @@ -463,6 +476,7 @@ def connect( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["connect"], ): """ @@ -470,12 +484,12 @@ def connect( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("connect", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) - + self.openapi.add_openapi_path_obj("connect", endpoint, openapi_name, openapi_tags, handler) return 
self.add_route(HttpMethod.CONNECT, endpoint, handler, auth_required=auth_required) return inner @@ -484,6 +498,7 @@ def trace( self, endpoint: str, auth_required: bool = False, + openapi_name: str = "", openapi_tags: List[str] = ["trace"], ): """ @@ -491,11 +506,12 @@ def trace( :param endpoint str: endpoint for the route added :param auth_required bool: represents if the route needs authentication or not + :param openapi_name: str -- the name of the endpoint in the openapi spec :param openapi_tags: List[str] -- for grouping of endpoints in the openapi spec """ def inner(handler): - self.openapi.add_openapi_path_obj("trace", endpoint, inspect.getdoc(handler), openapi_tags, inspect.signature(handler)) + self.openapi.add_openapi_path_obj("trace", endpoint, openapi_name, openapi_tags, handler) return self.add_route(HttpMethod.TRACE, endpoint, handler, auth_required=auth_required) @@ -539,29 +555,29 @@ def __init__(self, file_object: str, prefix: str = "", config: Config = Config() def __add_prefix(self, endpoint: str): return f"{self.prefix}{endpoint}" - def get(self, endpoint: str, const: bool = False, openapi_tags: List[str] = ["get"]): - return super().get(endpoint=self.__add_prefix(endpoint), const=const, openapi_tags=openapi_tags) + def get(self, endpoint: str, const: bool = False, openapi_name: str = "", openapi_tags: List[str] = ["get"]): + return super().get(endpoint=self.__add_prefix(endpoint), const=const, openapi_name=openapi_name, openapi_tags=openapi_tags) - def post(self, endpoint: str, openapi_tags: List[str] = ["post"]): - return super().post(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def post(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["post"]): + return super().post(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def put(self, endpoint: str, openapi_tags: List[str] = ["put"]): - return super().put(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def put(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["put"]): + return super().put(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def delete(self, endpoint: str, openapi_tags: List[str] = ["delete"]): - return super().delete(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def delete(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["delete"]): + return super().delete(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def patch(self, endpoint: str, openapi_tags: List[str] = ["patch"]): - return super().patch(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def patch(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["patch"]): + return super().patch(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def head(self, endpoint: str, openapi_tags: List[str] = ["head"]): - return super().head(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def head(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["head"]): + return super().head(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def trace(self, endpoint: str, openapi_tags: List[str] = ["trace"]): - return super().trace(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def trace(self, endpoint: str, openapi_name: str = "", openapi_tags: 
List[str] = ["trace"]): + return super().trace(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) - def options(self, endpoint: str, openapi_tags: List[str] = ["options"]): - return super().options(endpoint=self.__add_prefix(endpoint), openapi_tags=openapi_tags) + def options(self, endpoint: str, openapi_name: str = "", openapi_tags: List[str] = ["options"]): + return super().options(endpoint=self.__add_prefix(endpoint), openapi_name=openapi_name, openapi_tags=openapi_tags) def ALLOW_CORS(app: Robyn, origins: List[str]): diff --git a/robyn/openapi.py b/robyn/openapi.py index 653131911..e28839298 100644 --- a/robyn/openapi.py +++ b/robyn/openapi.py @@ -1,7 +1,8 @@ from dataclasses import asdict, dataclass, field +import inspect from inspect import Signature from pathlib import Path -from typing import Optional, TypedDict, List, Dict +from typing import Callable, Optional, TypedDict, List, Dict from robyn.responses import FileResponse, serve_html @@ -140,7 +141,7 @@ def __post_init__(self): Initializes the openapi_spec dict """ self.openapi_spec = { - "openapi": "3.0.0", + "openapi": "3.1.0", "info": asdict(self.info), "paths": {}, "components": asdict(self.info.components), @@ -148,23 +149,29 @@ def __post_init__(self): "externalDocs": asdict(self.info.externalDocs) if self.info.externalDocs.url else None, } - def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_summary: str, openapi_tags: List[str], signature: Optional[Signature]): + def add_openapi_path_obj(self, route_type: str, endpoint: str, openapi_name: str, openapi_tags: List[str], handler: Callable): """ Adds the given path to openapi spec @param route_type: str the http method as string (get, post ...) - @param endpoint: srt the endpoint to be added - @param openapi_summary: str short summary of the endpoint (to be fetched from the endpoint defenition by default) + @param endpoint: str the endpoint to be added + @param openapi_name: str the name of the endpoint @param openapi_tags: List[str] for grouping of endpoints - @param signature: Optional[Signature] the function signature -- to grab the typed dict annotations for query params + @param handler: Callable the handler function for the endpoint """ query_params = None + signature = inspect.signature(handler) + openapi_description = inspect.getdoc(handler) or "" if signature and "query_params" in signature.parameters: query_params = signature.parameters["query_params"].default - modified_endpoint, path_obj = self.get_path_obj(endpoint, openapi_summary, openapi_tags, query_params) + return_annotation = signature.return_annotation + + return_type = "text/plain" if return_annotation == Signature.empty or return_annotation is str else "application/json" + + modified_endpoint, path_obj = self.get_path_obj(endpoint, openapi_name, openapi_description, openapi_tags, query_params, return_type) if modified_endpoint not in self.openapi_spec["paths"]: self.openapi_spec["paths"][modified_endpoint] = {} @@ -181,14 +188,16 @@ def add_subrouter_paths(self, subrouter_openapi: "OpenAPI"): for path in paths: self.openapi_spec["paths"][path] = paths[path] - def get_path_obj(self, endpoint: str, summary: str, tags: List[str], query_params: Optional[TypedDict]) -> (str, dict): + def get_path_obj(self, endpoint: str, name: str, description: str, tags: List[str], query_params: Optional[TypedDict], return_type: str) -> (str, dict): """ Get the "path" openapi object according to spec @param endpoint: str the endpoint to be added - @param summary: 
Optional[str] short summary of the endpoint (to be fetched from the endpoint defenition by default) + @param name: str the name of the endpoint + @param description: Optional[str] short description of the endpoint (to be fetched from the endpoint defenition by default) @param tags: List[str] for grouping of endpoints @param query_params: Optional[TypedDict] query params for the function + @param return_type: str return type of the endpoint handler @return: (str, dict) a tuple containing the endpoint with path params wrapped in braces and the "path" openapi object according to spec @@ -234,18 +243,15 @@ def get_path_obj(self, endpoint: str, summary: str, tags: List[str], query_param } ) - if not summary: - summary = "No summary provided" + if not description: + description = "No description provided" return endpoint_with_path_params_wrapped_in_braces, { - "summary": summary, + "summary": name, + "description": description, "tags": tags, "parameters": openapi_parameter_object, - "responses": { - "200": { - "description": "Successful Response", - } - }, + "responses": {"200": {"description": "Successful Response", "content": {return_type: {"schema": {}}}}}, } def get_openapi_type(self, typed_dict: TypedDict) -> str: diff --git a/robyn/swagger.html b/robyn/swagger.html index 5805b114a..6fabf69b8 100644 --- a/robyn/swagger.html +++ b/robyn/swagger.html @@ -1,14 +1,19 @@ <!DOCTYPE html> <html> <head> - <title>Swagger UI</title> + <title>Robyn OpenAPI Docs</title> <link rel="stylesheet" type="text/css" - href="https://unpkg.com/[email protected]/swagger-ui.css" + href="https://unpkg.com/swagger-ui-dist@5/swagger-ui.css" /> - <script src="https://unpkg.com/[email protected]/swagger-ui-bundle.js"></script> - <script src="https://unpkg.com/[email protected]/swagger-ui-standalone-preset.js"></script> + <link + rel="icon" + type="image/png" + href="https://user-images.githubusercontent.com/29942790/140995889-5d91dcff-3aa7-4cfb-8a90-2cddf1337dca.png" + /> + <script src="https://unpkg.com/swagger-ui-dist@5/swagger-ui-bundle.js"></script> + <script src="https://unpkg.com/swagger-ui-dist@5/swagger-ui-standalone-preset.js"></script> </head> <body> <div id="swagger-ui"></div>
Response schema missing in openapi docs

<img width="530" alt="Screenshot 2024-08-17 at 18 58 39" src="https://github.com/user-attachments/assets/71465eb3-86d8-4c2e-8769-bc9330ef203e">
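For context, the patch above derives a response content type from the handler's return annotation. Below is a minimal sketch of that selection logic using only the standard library; `response_content_type` is an illustrative helper name, not Robyn API.

```python
import inspect
from inspect import Signature

def response_content_type(handler) -> str:
    # Mirrors the patch: handlers with no return annotation, or annotated as
    # str, are documented as text/plain; everything else as application/json
    return_annotation = inspect.signature(handler).return_annotation
    if return_annotation is Signature.empty or return_annotation is str:
        return "text/plain"
    return "application/json"

def greet() -> dict:
    return {"hello": "world"}

# The "200" response object then carries a content entry instead of being bare
responses = {"200": {"description": "Successful Response",
                     "content": {response_content_type(greet): {"schema": {}}}}}
```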
@VishnuSanal, could you please have a look?
2024-08-19T05:14:08
0.0
[]
[]
sparckles/Robyn
sparckles__Robyn-861
c93d8f9b25e5c225ce0435eb2aafd0dd3a5a6f75
diff --git a/docs_src/src/components/documentation/ApiDocs.jsx b/docs_src/src/components/documentation/ApiDocs.jsx index d54f7f984..14864cc86 100644 --- a/docs_src/src/components/documentation/ApiDocs.jsx +++ b/docs_src/src/components/documentation/ApiDocs.jsx @@ -50,8 +50,7 @@ const guides = [ { href: '/documentation/api_reference/redirection', name: 'Redirection', - description: - 'Learn how to redirect requests to different endpoints.', + description: 'Learn how to redirect requests to different endpoints.', }, { href: '/documentation/api_reference/file-uploads', @@ -61,7 +60,7 @@ const guides = [ }, { href: '/documentation/api_reference/form_data', - name: 'Form Data', + name: 'Form Data and Multi Part Form Data', description: 'Learn how to handle form data.', }, { diff --git a/docs_src/src/components/documentation/Navigation.jsx b/docs_src/src/components/documentation/Navigation.jsx index 5548d16c5..10859d8f6 100644 --- a/docs_src/src/components/documentation/Navigation.jsx +++ b/docs_src/src/components/documentation/Navigation.jsx @@ -256,7 +256,10 @@ export const navigation = [ href: '/documentation/api_reference/templating', title: 'Templating', }, - { title: 'Redirection', href: '/documentation/api_reference/redirection' }, + { + title: 'Redirection', + href: '/documentation/api_reference/redirection', + }, { href: '/documentation/api_reference/file-uploads', title: 'File Uploads', diff --git a/docs_src/src/pages/documentation/api_reference/dependency_injection.mdx b/docs_src/src/pages/documentation/api_reference/dependency_injection.mdx index 64f990637..b942f83f0 100644 --- a/docs_src/src/pages/documentation/api_reference/dependency_injection.mdx +++ b/docs_src/src/pages/documentation/api_reference/dependency_injection.mdx @@ -71,12 +71,12 @@ Router level dependency injection is used to inject dependencies into the router app.inject(ROUTER_DEPENDENCY=ROUTER_DEPENDENCY) @app.get("/sync/global_di") - def sync_global_di(request, router_dependencies): + def sync_global_di(r, router_dependencies): # r is the request object return router_dependencies["ROUTER_DEPENDENCY"] ``` ```python {{ title: 'typed' }} - from robyn import Robyn, ALLOW_CORS + from robyn import Robyn, ALLOW_CORS, Request app = Robyn(__file__) ROUTER_DEPENDENCY = "ROUTER DEPENDENCY" @@ -84,7 +84,7 @@ Router level dependency injection is used to inject dependencies into the router app.inject(ROUTER_DEPENDENCY=ROUTER_DEPENDENCY) @app.get("/sync/global_di") - def sync_global_di(request, router_dependencies): + def sync_global_di(r: Request, router_dependencies): return router_dependencies["ROUTER_DEPENDENCY"] ``` </CodeGroup> @@ -93,7 +93,7 @@ Router level dependency injection is used to inject dependencies into the router <Row> <Col> -Note: `router_dependencies`, `global_dependencies` , `request` are named parameters and **must** be named as such. The order of the parameters does not matter. +Note: `router_dependencies`, `global_dependencies` are reserved parameters and **must** be named as such. The order of the parameters does not matter among them. However, the `router_dependencies` and `global_dependencies` must only come after the `request` parameter. 
</Col> </Row> diff --git a/docs_src/src/pages/documentation/api_reference/file-uploads.mdx b/docs_src/src/pages/documentation/api_reference/file-uploads.mdx index dec4aa926..10d0ece73 100644 --- a/docs_src/src/pages/documentation/api_reference/file-uploads.mdx +++ b/docs_src/src/pages/documentation/api_reference/file-uploads.mdx @@ -57,26 +57,20 @@ Batman scaled his application across multiple cores for better performance. He u <CodeGroup title="Request" tag="GET" label="/hello_world"> ```python {{ title: 'untyped' }} - @app.post("/upload") - async def upload(request): - file = request.files['filename'] - # write whatever filename - with open('filename', 'wb') as f: - f.write(file) - - return {'message': 'success'} + @app.post("/sync/multipart-file") + def sync_multipart_file(request: Request): + files = request.files + file_names = files.keys() + return {"file_names": list(file_names)} ``` ```python {{ title: 'typed' }} - @app.post("/upload") - async def upload(request: Request): - file = request.files['filename'] - # write whatever filename - with open('filename', 'wb') as f: - f.write(file) - - return {'message': 'success'} + @app.post("/sync/multipart-file") + def sync_multipart_file(request: Request): + files = request.files + file_names = files.keys() + return {"file_names": list(file_names)} ``` </CodeGroup> diff --git a/docs_src/src/pages/documentation/api_reference/form_data.mdx b/docs_src/src/pages/documentation/api_reference/form_data.mdx index 11f528c14..b8021d9cc 100644 --- a/docs_src/src/pages/documentation/api_reference/form_data.mdx +++ b/docs_src/src/pages/documentation/api_reference/form_data.mdx @@ -1,12 +1,12 @@ export const description = 'On this page, we’ll dive into using the form data.' -## Form Data +## Form Data and Multi Part Form Data Batman learned how to handle file uploads using Robyn. Now, he wanted to handle the form data. -## Handling Form Data +## Handling Multi Part Form Data <Row> <Col> diff --git a/docs_src/src/pages/documentation/api_reference/redirection.mdx b/docs_src/src/pages/documentation/api_reference/redirection.mdx index d241fa546..b2427d15b 100644 --- a/docs_src/src/pages/documentation/api_reference/redirection.mdx +++ b/docs_src/src/pages/documentation/api_reference/redirection.mdx @@ -32,6 +32,6 @@ Batman wanted to redirect some endpoints to others. Robyn helped him do so by th ## What's next? -Now, Batman wanted to have the ability to upload files to the server if any new villain appeared. Robyn introduced him to the file upload feature. +Now, Batman wanted to have the ability to upload files to the server if any new villain appeared. Robyn introduced him to the file upload and some of the form data features. 
-- [File Uploads](/documentation/api_reference/file-uploads) \ No newline at end of file +- [File Uploads](/documentation/api_reference/file-uploads) diff --git a/src/executors/mod.rs b/src/executors/mod.rs index 50da0f896..d47fa7da9 100644 --- a/src/executors/mod.rs +++ b/src/executors/mod.rs @@ -24,37 +24,23 @@ where T: ToPyObject, { let handler = function.handler.as_ref(py); - // kwargs are handled let kwargs = function.kwargs.as_ref(py); - let function_args = function_args.to_object(py); debug!("Function args: {:?}", function_args); match function.number_of_params { 0 => handler.call0(), 1 => { - if function.args.as_ref(py).get_item("request").is_some() - || function.args.as_ref(py).get_item("response").is_some() + if kwargs.get_item("global_dependencies").is_some() + || kwargs.get_item("router_dependencies").is_some() + // these are reserved keywords { - // If 'request' is present, call handler with 'function_args' - handler.call1((function_args,)) - } else { - // If neither 'request' nor 'response' is present handler.call((), Some(kwargs)) - } - } - 2 => { - if function.args.as_ref(py).get_item("request").is_some() - || function.args.as_ref(py).get_item("response").is_some() - { - // If either 'request' or 'response' is present, call handler with 'function_args' and 'kwargs' - handler.call((function_args,), Some(kwargs)) } else { - // If neither 'request' nor 'response' is present - handler.call((), Some(kwargs)) + handler.call1((function_args,)) } } - 3..=u8::MAX => handler.call((function_args,), Some(kwargs)), + _ => handler.call((function_args,), Some(kwargs)), } }
fix: allow generic parameter names in function callbacks

e.g. allow

```python
@app.get('/')
def fx(r: Request):
    ...
```
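A hedged usage sketch of what the fix enables, assuming the decorator behaviour documented in the docs diff above:

```python
from robyn import Robyn, Request

app = Robyn(__file__)
app.inject(ROUTER_DEPENDENCY="ROUTER DEPENDENCY")

# After the fix, the request parameter may carry any name, not just "request"
@app.get("/untyped")
def handler_a(r):
    return "ok"

# "router_dependencies" stays a reserved name and must come after the request
@app.get("/with-deps")
def handler_b(r: Request, router_dependencies):
    return router_dependencies["ROUTER_DEPENDENCY"]
```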
2024-06-18T00:27:44
0.0
[]
[]
teamtomo/mdocfile
teamtomo__mdocfile-28
7084db94d58a511795fae2e0d4567ff9a81ddecd
diff --git a/README.md b/README.md index ec2c00b..58996fd 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,6 @@ df = mdocfile.read('my_mdoc_file.mdoc') For writing valid mdoc files, please see [writing mdoc files](https://teamtomo.org/mdocfile/writing/). - - # Installation pip: @@ -40,3 +38,16 @@ pip: ```shell pip install mdocfile ``` + +# Parsing from text + +`Mdoc.from_string().as_dataframe()` will return the contents of string mdoc data as a pandas dataframe. +This is useful for mdoc data that is not stored in a file (e.g. from a database or a web request). + +```python +from mdocfile.data_models import Mdoc + +mdoc_data = ... + +mdoc = Mdoc.from_string(mdoc_data).as_dataframe() +``` diff --git a/docs/index.md b/docs/index.md index 4f872f1..fefda90 100644 --- a/docs/index.md +++ b/docs/index.md @@ -29,14 +29,29 @@ import mdocfile df = mdocfile.read('my_mdoc_file.mdoc') ``` ---- - For writing valid mdoc files, please see [writing mdoc files](./writing.md). +--- + # Installation pip: ```shell pip install mdocfile -``` \ No newline at end of file +``` + +--- + +# Parsing from text + +`Mdoc.from_string().as_dataframe()` will return the contents of string mdoc data as a pandas dataframe. +This is useful for mdoc data that is not stored in a file (e.g. from a database or a web request). + +```python +from mdocfile.data_models import Mdoc + +mdoc_data = ... + +mdoc = Mdoc.from_string(mdoc_data).as_dataframe() +``` diff --git a/src/mdocfile/__init__.py b/src/mdocfile/__init__.py index 8acbf99..1cb2442 100644 --- a/src/mdocfile/__init__.py +++ b/src/mdocfile/__init__.py @@ -1,1 +1,1 @@ -from .functions import read +from .functions import read \ No newline at end of file diff --git a/src/mdocfile/data_models.py b/src/mdocfile/data_models.py index 0bd5b84..2e30eac 100644 --- a/src/mdocfile/data_models.py +++ b/src/mdocfile/data_models.py @@ -1,3 +1,4 @@ +import pandas as pd from pydantic import field_validator, BaseModel from pathlib import Path, PureWindowsPath from typing import List, Optional, Tuple, Union, Sequence @@ -170,7 +171,17 @@ class Mdoc(BaseModel): @classmethod def from_file(cls, filename: str): with open(filename) as file: - lines = [line.strip() for line in file.readlines()] + return cls.from_lines(file.readlines()) + + @classmethod + def from_string(cls, string: str): + lines = string.split('\n') + + return cls.from_lines(lines) + + @classmethod + def from_lines(cls, file_lines: List[str]) -> 'Mdoc': + lines = [line.strip() for line in file_lines] split_idxs = find_section_entries(lines) split_idxs.append(len(lines)) @@ -185,6 +196,26 @@ def from_file(cls, filename: str): in zip(split_idxs, split_idxs[1:]) ] return cls(titles=titles, global_data=global_data, section_data=section_data) + + def as_dataframe(self) -> pd.DataFrame: + """ + Convert an Mdoc object to a pandas DataFrame + """ + global_data = self.global_data.model_dump() + section_data = { + k: [section.model_dump()[k] for section in self.section_data] + for k + in self.section_data[0].model_dump().keys() + } + df = pd.DataFrame(data=section_data) + + # add duplicate copies of global data and mdoc file titles to each row of + # the dataframe - tidy data is easier to analyse + for k, v in global_data.items(): + df[k] = [v] * len(df) + df['titles'] = [self.titles] * len(df) + df = df.dropna(axis='columns', how='all') + return df def to_string(self): """ diff --git a/src/mdocfile/functions.py b/src/mdocfile/functions.py index 78036c7..3ec1e3e 100644 --- a/src/mdocfile/functions.py +++ b/src/mdocfile/functions.py @@ 
-18,19 +18,4 @@ def read(filename: PathLike) -> pd.DataFrame: df : pd.DataFrame dataframe containing info from mdoc file """ - mdoc = Mdoc.from_file(filename) - global_data = mdoc.global_data.model_dump() - section_data = { - k: [section.model_dump()[k] for section in mdoc.section_data] - for k - in mdoc.section_data[0].model_dump().keys() - } - df = pd.DataFrame(data=section_data) - - # add duplicate copies of global data and mdoc file titles to each row of - # the dataframe - tidy data is easier to analyse - for k, v in global_data.items(): - df[k] = [v] * len(df) - df['titles'] = [mdoc.titles] * len(df) - df = df.dropna(axis='columns', how='all') - return df + return Mdoc.from_file(filename).as_dataframe()
Read mdoc file data from string (not filename)

Hi, I was trying to use mdocfile to read an S3 file, but I noticed that it only accepted local files as input. I was hoping that I could also pass in the raw data / string, but it didn't seem like that was an option. I was hoping that could be supported?

I've created a PR to support this functionality, if it is of interest. Thank you!
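A short sketch of the entry point this PR adds; the local file read below merely stands in for fetching the same text from S3 or any other remote source.

```python
from mdocfile.data_models import Mdoc

# Stand-in for a remote fetch (e.g. an S3 object's body); any source of raw
# mdoc text works here, since no filesystem path reaches the parser
with open('my_mdoc_file.mdoc') as f:
    mdoc_data = f.read()

df = Mdoc.from_string(mdoc_data).as_dataframe()
```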
2024-08-14T01:23:16
0.0
[]
[]
teamtomo/mdocfile
teamtomo__mdocfile-26
96b7b22b67ccac554519e31fe173052469bf1699
diff --git a/src/mdocfile/data_models.py b/src/mdocfile/data_models.py index d8c8e45..0bd5b84 100644 --- a/src/mdocfile/data_models.py +++ b/src/mdocfile/data_models.py @@ -98,7 +98,7 @@ class MdocSectionData(BaseModel): AlignedPieceCoords: Optional[Union[Tuple[float, float], Tuple[float, float, float]]] = None AlignedPieceCoordsVS: Optional[ Union[Tuple[float, float], Tuple[float, float, float]]] = None - SubFramePath: Optional[PureWindowsPath] = None + SubFramePath: Optional[Union[PureWindowsPath, Path]] = None NumSubFrames: Optional[int] = None FrameDosesAndNumbers: Optional[Sequence[Tuple[float, int]]] = None DateTime: Optional[str] = None
PureWindowsPath validation breaks mdocfile for linux

Hi folks - not being familiar with the codebase I'm reluctant to make changes, but I had to replace PureWindowsPath with PurePosixPath throughout data_models.py for it to work on Linux (needless to say, this will break it for Windows).
Hey Morgan, thanks for the report! @shahpnmlab any thoughts on this? I know you pushed for the change to WindowsPath

I just tried this out (on MacOS, not linux) and don't run into the issue when constructing from a string

```ipython
In [1]: from mdocfile.data_models import MdocSectionData

In [2]: MdocSectionData(SubFramePath='bla.tif')
Out[2]: MdocSectionData(ZValue=None, MontSection=None, FrameSet=None, TiltAngle=None, PieceCoordinates=None, StagePosition=None, StageZ=None, Magnification=None, CameraLength=None, MagIndex=None, Intensity=None, SuperMontCoords=None, PixelSpacing=None, ExposureDose=None, DoseRate=None, SpotSize=None, Defocus=None, TargetDefocus=None, ImageShift=None, RotationAngle=None, ExposureTime=None, Binning=None, UsingCDS=None, CameraIndex=None, DividedBy2=None, LowDoseConSet=None, MinMaxMean=None, PriorRecordDose=None, XedgeDxy=None, YedgeDxy=None, XedgeDxyVS=None, YedgeDxyVS=None, StageOffsets=None, AlignedPieceCoords=None, AlignedPieceCoordsVS=None, SubFramePath=PureWindowsPath('bla.tif'), NumSubFrames=None, FrameDosesAndNumbers=None, DateTime=None, NavigatorLabel=None, FilterSlitAndLoss=None, ChannelName=None, MultiShotHoleAndPosition=None, CameraPixelSize=None, Voltage=None)
```

i.e. I can construct the section data and the path is coerced to a Windows path from a string

I see the issue when constructing from a path though and agree that should work

```ipython
In [3]: MdocSectionData(SubFramePath=Path('bla.tif'))
---------------------------------------------------------------------------
ValidationError                           Traceback (most recent call last)
Cell In[3], line 1
----> 1 MdocSectionData(SubFramePath=Path('bla.tif'))

File ~/mambaforge/envs/mdocfile/lib/python3.10/site-packages/pydantic/main.py:176, in BaseModel.__init__(self, **data)
    174 # `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks
    175 __tracebackhide__ = True
--> 176 self.__pydantic_validator__.validate_python(data, self_instance=self)

ValidationError: 1 validation error for MdocSectionData
SubFramePath
  Input is not a valid path [type=path_type, input_value=PosixPath('bla.tif'), input_type=PosixPath]
```

I have an idea how I might fix it so this works as expected for the future, glad you got yourself sorted for now :)
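The merged fix (in the patch above) widens the field to a union. A minimal pydantic sketch of that shape, assuming pydantic v2 semantics as used by the package; `SectionSketch` is an illustrative stand-in for `MdocSectionData`:

```python
from pathlib import Path, PureWindowsPath
from typing import Optional, Union
from pydantic import BaseModel

class SectionSketch(BaseModel):
    # The union lets native paths validate on any OS, while bare strings
    # (as written by SerialEM) still coerce to PureWindowsPath as before
    SubFramePath: Optional[Union[PureWindowsPath, Path]] = None

SectionSketch(SubFramePath='bla.tif')        # coerced to PureWindowsPath
SectionSketch(SubFramePath=Path('bla.tif'))  # now also valid on posix systems
```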
2024-05-08T08:35:55
0.0
[]
[]
teamtomo/mdocfile
teamtomo__mdocfile-10
8965df863c51c456c4666626571245bac636b919
diff --git a/mdocfile/mdoc.py b/mdocfile/mdoc.py index bbe866e..942f5c4 100644 --- a/mdocfile/mdoc.py +++ b/mdocfile/mdoc.py @@ -3,7 +3,7 @@ from pydantic import BaseModel from .global_data import MdocGlobalData from .section_data import MdocSectionData -from .utils import find_z_value_entries, find_title_entries +from .utils import find_section_entries, find_title_entries class Mdoc(BaseModel): @@ -15,7 +15,7 @@ class Mdoc(BaseModel): def from_file(cls, filename: str): with open(filename) as file: lines = [line.strip() for line in file.readlines()] - split_idxs = find_z_value_entries(lines) + split_idxs = find_section_entries(lines) split_idxs.append(len(lines)) header_lines = lines[0:split_idxs[0]] @@ -27,7 +27,7 @@ def from_file(cls, filename: str): MdocSectionData.from_lines(lines[start_idx:end_idx]) for start_idx, end_idx in zip(split_idxs, split_idxs[1:]) - ] + ] return cls(titles=titles, global_data=global_data, section_data=section_data) def to_string(self): diff --git a/mdocfile/section_data.py b/mdocfile/section_data.py index 7bf07d5..4b0f149 100644 --- a/mdocfile/section_data.py +++ b/mdocfile/section_data.py @@ -10,6 +10,7 @@ class MdocSectionData(BaseModel): https://bio3d.colorado.edu/SerialEM/hlp/html/about_formats.htm """ ZValue: Optional[int] + MontSection: Optional[int] TiltAngle: Optional[float] PieceCoordinates: Optional[Tuple[float, float, int]] StagePosition: Tuple[float, float] @@ -37,11 +38,11 @@ class MdocSectionData(BaseModel): PriorRecordDose: Optional[float] XedgeDxy: Optional[Tuple[float, float]] YedgeDxy: Optional[Tuple[float, float]] - XedgeDxyVS: Optional[Tuple[float, float]] - YedgeDxyVS: Optional[Tuple[float, float]] + XedgeDxyVS: Optional[Union[Tuple[float, float], Tuple[float, float, float]]] + YedgeDxyVS: Optional[Union[Tuple[float, float], Tuple[float, float, float]]] StageOffsets: Optional[Tuple[float, float]] AlignedPieceCoords: Optional[Tuple[float, float]] - AlignedPieceCoordsVS: Optional[Tuple[float, float]] + AlignedPieceCoordsVS: Optional[Union[Tuple[float, float], Tuple[float, float, float]]] SubFramePath: Optional[Path] NumSubFrames: Optional[int] FrameDosesAndNumbers: Optional[Sequence[Tuple[float, int]]] @@ -62,7 +63,7 @@ class MdocSectionData(BaseModel): 'XedgeDxy', 'YedgeDxy', 'XedgeDxyVS', - 'XedgeDxyVS', + 'YedgeDxyVS', 'StageOffsets', 'AlignedPieceCoords', 'AlignedPieceCoordsVS', diff --git a/mdocfile/utils.py b/mdocfile/utils.py index fcc6f5b..9715cd4 100644 --- a/mdocfile/utils.py +++ b/mdocfile/utils.py @@ -8,14 +8,18 @@ def camel_to_snake(word: str) -> str: return camel_to_snake_regex.sub('_', word).lower() -def find_z_value_entries(lines: List[str]) -> List[int]: - """Find the strings which contains a z-value entry. - """ - z_value_idxs = [] - for idx, line in enumerate(lines): - if line.startswith('[ZValue ='): - z_value_idxs.append(idx) - return z_value_idxs +def find_section_entries(lines: List[str]) -> List[int]: + """Find the strings which contains a z-value or montage section entry.""" + section_idx = [ + idx + for idx, line + in enumerate(lines) + if line.startswith('[ZValue =') or line.startswith('[MontSection =') + ] + # for idx, line in enumerate(lines): + # if line.startswith('[ZValue =') or line.startswith('[MontSection ='): + # section_idx.append(idx) + return section_idx def find_title_entries(lines: List[str]) -> List[int]:
Error when reading mdocs with "...VS" entries of length 3

Hi, I'm starting to work with SerialEM data and stumbled upon this great package :) I wanted to report a problem I encountered: `mdocfile.read('example.mrc.mdoc')` complains about the following:

```
ValidationError: 3 validation errors for MdocSectionData
XedgeDxyVS
  wrong tuple length 3, expected 2 (type=value_error.tuple.length; actual_length=3; expected_length=2)
YedgeDxyVS
  value is not a valid tuple (type=type_error.tuple)
AlignedPieceCoordsVS
  wrong tuple length 3, expected 2 (type=value_error.tuple.length; actual_length=3; expected_length=2)
```

Indeed the entries for "XedgeDxyVS" and "AlignedPieceCoordsVS" in my file are of length 3. While on one side I'm trying to understand how to interpret these three values, I'd guess `mdocfile` would want to read them (since my mdoc should be a valid SerialEM output). Cheers!
As a follow up, if I modify the types in `section_data` as such: `XedgeDxyVS: Optional[Tuple[float, float, float]]`, I'm getting a different error instead:

```
ValidationError: 1 validation error for MdocSectionData
YedgeDxyVS
  value is not a valid tuple (type=type_error.tuple)
```

Thanks for the kind words and the report @m-albert! I'm sorry the package wasn't quite working out of the box with your data, hopefully we can get you up and running quickly - I'll take a look tomorrow

Could you share your file so I can use it for debugging? Ideally as a GitHub gist or similar 🙂

Thanks for your quick response @alisterburt :) [Here's](https://gist.github.com/m-albert/3755e09511f717bf6b15efe0226166be) the file as a gist.
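The fix in the patch above accepts both tuple lengths via a union rather than replacing one fixed length with another. A minimal pydantic sketch of that shape, assuming a recent pydantic; `EdgeSketch` is an illustrative stand-in for the real model:

```python
from typing import Optional, Tuple, Union
from pydantic import BaseModel

class EdgeSketch(BaseModel):
    # Accept both the 2-value and the 3-value forms SerialEM can write
    XedgeDxyVS: Optional[Union[Tuple[float, float], Tuple[float, float, float]]] = None

EdgeSketch(XedgeDxyVS=(0.1, 0.2))        # 2-value entries still validate
EdgeSketch(XedgeDxyVS=(0.1, 0.2, 3.0))   # and so does the 3-value variant
```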
2023-04-06T11:12:32
0.0
[]
[]
mlrun/storey
mlrun__storey-543
c1710a2604ea68e8a3a7e07c0da15f34537fa3a9
diff --git a/storey/__init__.py b/storey/__init__.py index 0f0664c0..6017a30e 100644 --- a/storey/__init__.py +++ b/storey/__init__.py @@ -50,6 +50,8 @@ from .flow import Map # noqa: F401 from .flow import MapClass # noqa: F401 from .flow import MapWithState # noqa: F401 +from .flow import ParallelExecution # noqa: F401 +from .flow import ParallelExecutionRunnable # noqa: F401 from .flow import Recover # noqa: F401 from .flow import Reduce # noqa: F401 from .flow import Rename # noqa: F401 diff --git a/storey/flow.py b/storey/flow.py index 6956df30..70d566e1 100644 --- a/storey/flow.py +++ b/storey/flow.py @@ -16,6 +16,8 @@ import copy import datetime import inspect +import multiprocessing +import os import pickle import time import traceback @@ -1435,3 +1437,183 @@ def get_table(self, key): def set_table(self, key, table): self._tables[key] = table + + +class _ParallelExecutionRunnableResult: + def __init__(self, runnable_name: str, data: Any, runtime: float): + self.runnable_name = runnable_name + self.data = data + self.runtime = runtime + + +parallel_execution_mechanisms = ("multiprocessing", "threading", "asyncio", "naive") + + +class ParallelExecutionRunnable: + """ + Runnable to be run by a ParallelExecution step. Subclasses must assign execution_mechanism with one of: + * "multiprocessing" – To run in a separate process. This is appropriate for CPU or GPU intensive tasks as they + would otherwise block the main process by holding Python's Global Interpreter Lock (GIL). + * "threading" – To run in a separate thread. This is appropriate for blocking I/O tasks, as they would otherwise + block the main event loop thread. + * "asyncio" – To run in an asyncio task. This is appropriate for I/O tasks that use asyncio, allowing the event + loop to continue running while waiting for a response. + * "naive" – To run in the main event loop. This is appropriate only for trivial computation and/or file I/O. It + means that the runnable will not actually be run in parallel to anything else. + + Subclasses must also override the run() method, or run_async() when execution_mechanism="asyncio", with user code + that handles the event and returns a result. + + Subclasses may optionally override the init() method if the user's implementation of run() requires prior + initialization. + + :param name: Runnable name + """ + + execution_mechanism: Optional[str] = None + + # ignore unused keyword arguments such as context which may be passed in by mlrun + def __init__(self, name: str, **kwargs): + if self.execution_mechanism not in parallel_execution_mechanisms: + raise ValueError( + "ParallelExecutionRunnable's execution_mechanism attribute must be overridden with one of: " + '"multiprocessing", "threading", "asyncio", "naive"' + ) + self.name = name + + def init(self) -> None: + """Override this method to add initialization logic.""" + pass + + def run(self, body: Any, path: str) -> Any: + """ + Override this method with the code this runnable should run. If execution_mechanism is "asyncio", override + run_async() instead. + + :param body: Event body + :param path: Event path + """ + return body + + async def run_async(self, body: Any, path: str) -> Any: + """ + If execution_mechanism is "asyncio", override this method with the code this runnable should run. Otherwise, + override run() instead. 
+ + :param body: Event body + :param path: Event path + """ + return body + + def _run(self, body: Any, path: str) -> Any: + start = time.monotonic() + body = self.run(body, path) + end = time.monotonic() + return _ParallelExecutionRunnableResult(self.name, body, end - start) + + async def _async_run(self, body: Any, path: str) -> Any: + start = time.monotonic() + body = await self.run_async(body, path) + end = time.monotonic() + return _ParallelExecutionRunnableResult(self.name, body, end - start) + + +class ParallelExecution(Flow): + """ + Runs multiple jobs in parallel for each event. + + :param runnables: A list of ParallelExecutionRunnable instances. + :param max_processes: Maximum number of processes to spawn. Defaults to the number of available CPUs, or 16 if + number of CPUs can't be determined. + :param max_threads: Maximum number of threads to start. Defaults to 32. + """ + + def __init__( + self, + runnables: list[ParallelExecutionRunnable], + max_processes: Optional[int] = None, + max_threads: Optional[int] = None, + **kwargs, + ): + super().__init__(**kwargs) + + if not runnables: + raise ValueError("ParallelExecution cannot be instantiated without at least one runnable") + + self.runnables = runnables + self._runnable_by_name = {} + + self.max_processes = max_processes or os.cpu_count() or 16 + self.max_threads = max_threads or 32 + + def select_runnables(self, event) -> Optional[Union[list[str], list[ParallelExecutionRunnable]]]: + """ + Given an event, returns a list of runnables (or a list of runnable names) to execute on it. It can also return + None, in which case all runnables are executed on the event, which is also the default. + + :param event: Event object + """ + pass + + def _init(self): + super()._init() + num_processes = 0 + num_threads = 0 + for runnable in self.runnables: + if runnable.name in self._runnable_by_name: + raise ValueError(f"ParallelExecutionRunnable name '{runnable.name}' is not unique") + self._runnable_by_name[runnable.name] = runnable + runnable.init() + if runnable.execution_mechanism == "multiprocessing": + num_processes += 1 + elif runnable.execution_mechanism == "threading": + num_threads += 1 + elif runnable.execution_mechanism not in ("asyncio", "naive"): + raise ValueError(f"Unsupported execution mechanism: {runnable.execution_mechanism}") + + # enforce max + num_processes = min(num_processes, self.max_processes) + num_threads = min(num_threads, self.max_threads) + + self._executors = {} + if num_processes: + mp_context = multiprocessing.get_context("spawn") + self._executors["multiprocessing"] = ProcessPoolExecutor(max_workers=num_processes, mp_context=mp_context) + if num_threads: + self._executors["threading"] = ThreadPoolExecutor(max_workers=num_threads) + + async def _do(self, event): + if event is _termination_obj: + return await self._do_downstream(_termination_obj) + else: + runnables = self.select_runnables(event) + if runnables is None: + runnables = self.runnables + futures = [] + runnables_encountered = set() + for runnable in runnables: + if isinstance(runnable, str): + runnable = self._runnable_by_name[runnable] + if id(runnable) in runnables_encountered: + raise ValueError(f"select_runnables() returned more than one outlet named '{runnable.name}'") + input = event.body if runnable.execution_mechanism == "multiprocessing" else copy.deepcopy(event.body) + runnables_encountered.add(id(runnable)) + if runnable.execution_mechanism == "asyncio": + future = asyncio.get_running_loop().create_task(runnable._async_run(input, 
event.path)) + elif runnable.execution_mechanism == "naive": + future = asyncio.get_running_loop().create_future() + future.set_result(runnable._run(input, event.path)) + else: + executor = self._executors[runnable.execution_mechanism] + future = asyncio.get_running_loop().run_in_executor( + executor, + runnable._run, + input, + event.path, + ) + futures.append(future) + results: list[_ParallelExecutionRunnableResult] = await asyncio.gather(*futures) + event.body = {"input": event.body, "results": {}} + for result in results: + event.body["results"][result.runnable_name] = {"runtime": result.runtime, "output": result.data} + return await self._do_downstream(event)
Raise error in `Choice` on duplicate outlets

[ML-8287](https://iguazio.atlassian.net/browse/ML-8287)
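A hedged usage sketch of the `ParallelExecution` step the patch above introduces, grounded only in the code shown there:

```python
from storey import ParallelExecution, ParallelExecutionRunnable

class Doubler(ParallelExecutionRunnable):
    # "naive" runs in the main event loop itself; per the docstring above,
    # this is appropriate only for trivial computation
    execution_mechanism = "naive"

    def run(self, body, path):
        return body * 2

# Each event's body becomes {"input": ..., "results": {"double": {...}}};
# reusing a runnable name raises ValueError once the step is initialized
step = ParallelExecution([Doubler("double")])
```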
2024-11-05T11:02:06
0.0
[]
[]
mlrun/storey
mlrun__storey-494
51016709a12c56e50f78ffff95e74ee8081f94c0
diff --git a/storey/sources.py b/storey/sources.py index 4c6613d3..22f09b49 100644 --- a/storey/sources.py +++ b/storey/sources.py @@ -142,7 +142,7 @@ def _build_event(self, element, key): else: body = element - if hasattr(body, "__getitem__") and not key and self._key_field: + if isinstance(body, (dict, list)) and not key and self._key_field: if isinstance(self._key_field, str) or isinstance(self._key_field, int): key = body[self._key_field] else:
Extract key only if event body is of a suitable type

[ML-5442](https://jira.iguazeng.com/browse/ML-5442)
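A minimal sketch of the guard this one-line fix introduces; `extract_key` is an illustrative name, not storey API:

```python
def extract_key(body, key_field, key=None):
    # Mirrors the fix: only index into the body when it is a dict or a list.
    # Before, any body with __getitem__ (a str included) was indexed, which
    # could crash or silently pick out a single character as the key.
    if isinstance(body, (dict, list)) and not key and key_field:
        return body[key_field]
    return key

extract_key({"id": 7, "value": 1.0}, "id")  # -> 7
extract_key("plain string body", "id")      # -> None instead of an indexing attempt
```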
2024-01-11T07:07:16
0.0
[]
[]
mlrun/storey
mlrun__storey-451
69276d9bf88ad9859df069838b9f105cedda5f30
diff --git a/storey/flow.py b/storey/flow.py index c69423bc..25ac225d 100644 --- a/storey/flow.py +++ b/storey/flow.py @@ -269,13 +269,16 @@ async def _do_downstream(self, event): if len(self._outlets) > 1: awaitable_result = event._awaitable_result event._awaitable_result = None - original_events = event._original_events + original_events = getattr(event, "_original_events", None) + # Temporarily delete self-reference to avoid deepcopy getting stuck in an infinite loop event._original_events = None for i in range(1, len(self._outlets)): event_copy = copy.deepcopy(event) event_copy._awaitable_result = awaitable_result - event._original_events = original_events + event_copy._original_events = original_events tasks.append(asyncio.get_running_loop().create_task(self._outlets[i]._do_and_recover(event_copy))) + # Set self-reference back after deepcopy + event._original_events = original_events event._awaitable_result = awaitable_result if self.verbose and self.logger: step_name = self.name
ML-1325: Commit stream offsets using new nuclio API

Instead of relying on auto-commit. Known limitation: the committer is always 1 record behind. This can be resolved by transitioning from `SyncEmitSource` to `AsyncEmitSource`, which is desirable anyway.
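The flow.py change above boils down to a detach/copy/reattach pattern around `deepcopy`; here is that pattern in isolation, with `copy_for_fanout` as an illustrative name:

```python
import copy

def copy_for_fanout(event):
    # Pattern from the patch: detach the event's back-reference before
    # deepcopy, give the copy its own reference, then restore the original
    original_events = getattr(event, "_original_events", None)
    event._original_events = None
    event_copy = copy.deepcopy(event)
    event_copy._original_events = original_events
    event._original_events = original_events
    return event_copy
```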
2023-07-18T06:57:27
0.0
[]
[]
mlrun/storey
mlrun__storey-402
0b90c5a9d799e8d04b4e6359589d607a9c2b113d
diff --git a/storey/dtypes.py b/storey/dtypes.py index 08fe9635..812a6159 100644 --- a/storey/dtypes.py +++ b/storey/dtypes.py @@ -23,6 +23,8 @@ _termination_obj = object() +known_driver_schemes = ["v3io", "redis", "rediss"] + class Event: """The basic unit of data in storey. All steps receive and emit events. diff --git a/storey/flow.py b/storey/flow.py index d65a9b3c..f2c57288 100644 --- a/storey/flow.py +++ b/storey/flow.py @@ -24,7 +24,7 @@ import aiohttp -from .dtypes import Event, FlowError, V3ioError, _termination_obj +from .dtypes import Event, FlowError, V3ioError, _termination_obj, known_driver_schemes from .queue import AsyncQueue from .table import Table from .utils import _split_path, get_in, stringify_key, update_in @@ -501,10 +501,16 @@ def __init__(self, initial_state, fn, group_by_key=False, **kwargs): raise TypeError(f"Expected a callable, got {type(fn)}") self._is_async = asyncio.iscoroutinefunction(fn) self._state = initial_state - if isinstance(self._state, str) and self._state.startswith("v3io://"): - if not self.context: - raise TypeError("Table can not be string if no context was provided to the step") - self._state = self.context.get_table(self._state) + if isinstance(self._state, str): + should_get_from_context = False + for known_scheme in known_driver_schemes: + if self._state.startswith(f"{known_scheme}://"): + should_get_from_context = True + break + if should_get_from_context: + if not self.context: + raise TypeError("Table can not be string if no context was provided to the step") + self._state = self.context.get_table(self._state) self._fn = fn self._group_by_key = group_by_key if hasattr(self._state, "close"):
Support `redis` scheme in `MapWithState` [1.2.x]
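A minimal sketch of the scheme check this change introduces, using the `known_driver_schemes` list from the patch; the helper name is illustrative, not storey API:

```python
known_driver_schemes = ["v3io", "redis", "rediss"]

def table_reference_needs_context(state) -> bool:
    # A string state like "redis://my-table" is a table reference that must
    # be resolved via context.get_table(); other strings are left untouched
    return isinstance(state, str) and any(
        state.startswith(f"{scheme}://") for scheme in known_driver_schemes
    )

assert table_reference_needs_context("redis://cache-table")
assert not table_reference_needs_context("just-a-name")
```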
Approved
2022-12-15T06:58:17
0.0
[]
[]
mlrun/storey
mlrun__storey-400
3335c941a103f03a2e104ce2db850debcf09f2ad
diff --git a/storey/flow.py b/storey/flow.py index 3e99c4e1..7a136ac0 100644 --- a/storey/flow.py +++ b/storey/flow.py @@ -491,8 +491,8 @@ def __init__(self, initial_state, fn, group_by_key=False, **kwargs): self._state = self.context.get_table(self._state) self._fn = fn self._group_by_key = group_by_key - if hasattr(initial_state, "close"): - self._closeables = [initial_state] + if hasattr(self._state, "close"): + self._closeables = [self._state] async def _call(self, event): element = self._get_event_or_body(event) diff --git a/storey/sources.py b/storey/sources.py index c7204c02..fe9d5355 100644 --- a/storey/sources.py +++ b/storey/sources.py @@ -305,7 +305,13 @@ async def _run_loop(self): break for closeable in self._closeables: - await closeable.close() + try: + maybe_coroutine = closeable.close() + if asyncio.iscoroutine(maybe_coroutine): + await maybe_coroutine + except Exception as ex: + if self.context: + self.context.logger.error(f"Error trying to close {closeable}: {ex}") def _loop_thread_main(self): asyncio.run(self._run_loop()) @@ -529,7 +535,13 @@ async def _run_loop(self): finally: if event is _termination_obj or self._ex: for closeable in self._closeables: - await closeable.close() + try: + maybe_coroutine = closeable.close() + if asyncio.iscoroutine(maybe_coroutine): + await maybe_coroutine + except Exception as ex: + if self.context: + self.context.logger.error(f"Error trying to close {closeable}: {ex}") def _raise_on_error(self): if self._ex:
Close `Table` object on `MapWithState` termination
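The close-on-termination handling in the patch generalizes to this small pattern; `close_all` is an illustrative name, not part of storey:

```python
import asyncio

async def close_all(closeables, logger=None):
    # From the patch: close() may be sync (Table) or async (drivers), so
    # await only when a coroutine actually comes back, and never let one
    # failing close() abort flow termination
    for closeable in closeables:
        try:
            maybe_coroutine = closeable.close()
            if asyncio.iscoroutine(maybe_coroutine):
                await maybe_coroutine
        except Exception as ex:
            if logger:
                logger.error(f"Error trying to close {closeable}: {ex}")
```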
2022-12-14T12:24:28
0.0
[]
[]
mlrun/storey
mlrun__storey-310
b1f2a889fb309d931fb6c8dc4023d4a6184fc309
diff --git a/storey/flow.py b/storey/flow.py index 53f17744..7458ea86 100644 --- a/storey/flow.py +++ b/storey/flow.py @@ -603,9 +603,13 @@ def __init__(self, status, body): class _ConcurrentJobExecution(Flow): - def __init__(self, max_in_flight=8, **kwargs): + _BACKOFF_MAX = 120 + + def __init__(self, max_in_flight=8, retries=0, backoff_factor=1, **kwargs): Flow.__init__(self, **kwargs) self._max_in_flight = max_in_flight + self._retries = retries + self._backoff_factor = backoff_factor def _init(self): self._q = None @@ -643,17 +647,23 @@ async def _cleanup(self): async def _lazy_init(self): pass - async def _safe_process_event(self, event): - if event._awaitable_result: + async def _process_event_with_retries(self, event): + times_attempted = 0 + max_attempts = self._retries + 1 + while True: try: return await self._process_event(event) - except BaseException as ex: - none_or_coroutine = event._awaitable_result._set_error(ex) - if none_or_coroutine: - await none_or_coroutine - raise ex - else: - return await self._process_event(event) + except Exception as ex: + times_attempted += 1 + attempts_left = max_attempts - times_attempted + if self.logger: + self.logger.warn(f'{self.name} failed to process event ({attempts_left} retries left): {ex}') + if attempts_left <= 0: + raise ex + backoff_value = self._backoff_factor * (2 ** (times_attempted - 1)) + backoff_value = min(self._BACKOFF_MAX, backoff_value) + if backoff_value >= 0: + await asyncio.sleep(backoff_value) async def _do(self, event): if not self._q: @@ -670,7 +680,7 @@ async def _do(self, event): await self._worker_awaitable return await self._do_downstream(_termination_obj) else: - task = self._safe_process_event(event) + task = self._process_event_with_retries(event) await self._q.put((event, asyncio.get_running_loop().create_task(task))) if self._worker_awaitable.done(): await self._worker_awaitable
Fix completion on error in concurrent execution step.
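The retry path added in the patch computes exponential backoff between attempts; a worked sketch of that arithmetic, with illustrative names:

```python
BACKOFF_MAX = 120  # seconds, matching _BACKOFF_MAX in the patch

def backoff_seconds(times_attempted: int, backoff_factor: float = 1) -> float:
    # As in the patch: factor * 2**(attempt - 1), capped at 120 seconds,
    # so attempt 1 waits `factor`, attempt 2 waits 2*factor, and so on
    return min(BACKOFF_MAX, backoff_factor * (2 ** (times_attempted - 1)))

assert [backoff_seconds(n) for n in (1, 2, 3, 4)] == [1, 2, 4, 8]
```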
2021-11-04T16:06:14
0.0
[]
[]
benber86/mamushi
benber86__mamushi-35
1d887cee5013097e82aa3c1e6e52d49f0ca8d03b
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 440a115..48a150b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,14 +10,14 @@ repos: rev: 23.1.0 hooks: - id: black - language_version: python3.10 + language_version: python3.11 args: [ --line-length=79 ] - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 - language_version: python3.10 + language_version: python3.11 args: [ --max-line-length=79 ] stages: [ commit, push ] @@ -25,6 +25,6 @@ repos: rev: v0.971 hooks: - id: mypy - language_version: python3.10 + language_version: python3.11 additional_dependencies: ['types-requests'] stages: [ commit, push ] diff --git a/Pipfile b/Pipfile index 21e4d54..625323a 100644 --- a/Pipfile +++ b/Pipfile @@ -13,6 +13,7 @@ pytest = ">=7.0.0" pytest-cov = ">=3.0.0" coverage = ">=6.5.0" bump2version = ">=1.0.0" +mypy-extensions = "==0.4.3" [dev-packages] diff --git a/Pipfile.lock b/Pipfile.lock index bc50bd8..fdeec64 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "d61e2339a4ab6564a611fe2f7f84ac134d0965e2cc0e60dc84b7e3ef9243204b" + "sha256": "70e45ef62f8fd37fbcafd929fb884f30089ac8edc8d10a5056334b998a975ac8" }, "pipfile-spec": 6, "requires": { @@ -16,14 +16,6 @@ ] }, "default": { - "attrs": { - "hashes": [ - "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836", - "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99" - ], - "markers": "python_version >= '3.6'", - "version": "==22.2.0" - }, "black": { "hashes": [ "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd", @@ -53,6 +45,7 @@ "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==23.1.0" }, "bump2version": { @@ -61,15 +54,16 @@ "sha256:762cb2bfad61f4ec8e2bdf452c7c267416f8c70dd9ecb1653fd0bbb01fa936e6" ], "index": "pypi", + "markers": "python_version >= '3.5'", "version": "==1.0.1" }, "cfgv": { "hashes": [ - "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426", - "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736" + "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", + "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560" ], - "markers": "python_full_version >= '3.6.1'", - "version": "==3.3.1" + "markers": "python_version >= '3.8'", + "version": "==3.4.0" }, "click": { "hashes": [ @@ -77,95 +71,113 @@ "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==8.1.3" }, "coverage": { + "extras": [ + "toml" + ], "hashes": [ - "sha256:04481245ef966fbd24ae9b9e537ce899ae584d521dfbe78f89cad003c38ca2ab", - "sha256:0c45948f613d5d18c9ec5eaa203ce06a653334cf1bd47c783a12d0dd4fd9c851", - "sha256:10188fe543560ec4874f974b5305cd1a8bdcfa885ee00ea3a03733464c4ca265", - "sha256:218fe982371ac7387304153ecd51205f14e9d731b34fb0568181abaf7b443ba0", - "sha256:29571503c37f2ef2138a306d23e7270687c0efb9cab4bd8038d609b5c2393a3a", - "sha256:2a60d6513781e87047c3e630b33b4d1e89f39836dac6e069ffee28c4786715f5", - "sha256:2bf1d5f2084c3932b56b962a683074a3692bce7cabd3aa023c987a2a8e7612f6", - "sha256:3164d31078fa9efe406e198aecd2a02d32a62fecbdef74f76dad6a46c7e48311", - "sha256:32df215215f3af2c1617a55dbdfb403b772d463d54d219985ac7cd3bf124cada", - "sha256:33d1ae9d4079e05ac4cc1ef9e20c648f5afabf1a92adfaf2ccf509c50b85717f", - 
"sha256:33ff26d0f6cc3ca8de13d14fde1ff8efe1456b53e3f0273e63cc8b3c84a063d8", - "sha256:38da2db80cc505a611938d8624801158e409928b136c8916cd2e203970dde4dc", - "sha256:3b155caf3760408d1cb903b21e6a97ad4e2bdad43cbc265e3ce0afb8e0057e73", - "sha256:3b946bbcd5a8231383450b195cfb58cb01cbe7f8949f5758566b881df4b33baf", - "sha256:3baf5f126f30781b5e93dbefcc8271cb2491647f8283f20ac54d12161dff080e", - "sha256:4b14d5e09c656de5038a3f9bfe5228f53439282abcab87317c9f7f1acb280352", - "sha256:51b236e764840a6df0661b67e50697aaa0e7d4124ca95e5058fa3d7cbc240b7c", - "sha256:63ffd21aa133ff48c4dff7adcc46b7ec8b565491bfc371212122dd999812ea1c", - "sha256:6a43c7823cd7427b4ed763aa7fb63901ca8288591323b58c9cd6ec31ad910f3c", - "sha256:755e89e32376c850f826c425ece2c35a4fc266c081490eb0a841e7c1cb0d3bda", - "sha256:7a726d742816cb3a8973c8c9a97539c734b3a309345236cd533c4883dda05b8d", - "sha256:7c7c0d0827e853315c9bbd43c1162c006dd808dbbe297db7ae66cd17b07830f0", - "sha256:7ed681b0f8e8bcbbffa58ba26fcf5dbc8f79e7997595bf071ed5430d8c08d6f3", - "sha256:7ee5c9bb51695f80878faaa5598040dd6c9e172ddcf490382e8aedb8ec3fec8d", - "sha256:8361be1c2c073919500b6601220a6f2f98ea0b6d2fec5014c1d9cfa23dd07038", - "sha256:8ae125d1134bf236acba8b83e74c603d1b30e207266121e76484562bc816344c", - "sha256:9817733f0d3ea91bea80de0f79ef971ae94f81ca52f9b66500c6a2fea8e4b4f8", - "sha256:98b85dd86514d889a2e3dd22ab3c18c9d0019e696478391d86708b805f4ea0fa", - "sha256:9ccb092c9ede70b2517a57382a601619d20981f56f440eae7e4d7eaafd1d1d09", - "sha256:9d58885215094ab4a86a6aef044e42994a2bd76a446dc59b352622655ba6621b", - "sha256:b643cb30821e7570c0aaf54feaf0bfb630b79059f85741843e9dc23f33aaca2c", - "sha256:bc7c85a150501286f8b56bd8ed3aa4093f4b88fb68c0843d21ff9656f0009d6a", - "sha256:beeb129cacea34490ffd4d6153af70509aa3cda20fdda2ea1a2be870dfec8d52", - "sha256:c31b75ae466c053a98bf26843563b3b3517b8f37da4d47b1c582fdc703112bc3", - "sha256:c4e4881fa9e9667afcc742f0c244d9364d197490fbc91d12ac3b5de0bf2df146", - "sha256:c5b15ed7644ae4bee0ecf74fee95808dcc34ba6ace87e8dfbf5cb0dc20eab45a", - "sha256:d12d076582507ea460ea2a89a8c85cb558f83406c8a41dd641d7be9a32e1274f", - "sha256:d248cd4a92065a4d4543b8331660121b31c4148dd00a691bfb7a5cdc7483cfa4", - "sha256:d47dd659a4ee952e90dc56c97d78132573dc5c7b09d61b416a9deef4ebe01a0c", - "sha256:d4a5a5879a939cb84959d86869132b00176197ca561c664fc21478c1eee60d75", - "sha256:da9b41d4539eefd408c46725fb76ecba3a50a3367cafb7dea5f250d0653c1040", - "sha256:db61a79c07331e88b9a9974815c075fbd812bc9dbc4dc44b366b5368a2936063", - "sha256:ddb726cb861c3117a553f940372a495fe1078249ff5f8a5478c0576c7be12050", - "sha256:ded59300d6330be27bc6cf0b74b89ada58069ced87c48eaf9344e5e84b0072f7", - "sha256:e2617759031dae1bf183c16cef8fcfb3de7617f394c813fa5e8e46e9b82d4222", - "sha256:e5cdbb5cafcedea04924568d990e20ce7f1945a1dd54b560f879ee2d57226912", - "sha256:ec8e767f13be637d056f7e07e61d089e555f719b387a7070154ad80a0ff31801", - "sha256:ef382417db92ba23dfb5864a3fc9be27ea4894e86620d342a116b243ade5d35d", - "sha256:f2cba5c6db29ce991029b5e4ac51eb36774458f0a3b8d3137241b32d1bb91f06", - "sha256:f5b4198d85a3755d27e64c52f8c95d6333119e49fd001ae5798dac872c95e0f8", - "sha256:ffeeb38ee4a80a30a6877c5c4c359e5498eec095878f1581453202bfacc8fbc2" + "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca", + "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d", + "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6", + "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989", + "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c", + 
"sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b", + "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223", + "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f", + "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56", + "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3", + "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8", + "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb", + "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388", + "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0", + "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a", + "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8", + "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f", + "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a", + "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962", + "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8", + "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391", + "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc", + "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2", + "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155", + "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb", + "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0", + "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c", + "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a", + "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004", + "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060", + "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232", + "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93", + "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129", + "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163", + "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de", + "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6", + "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23", + "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569", + "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d", + "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778", + "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d", + "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36", + "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a", + "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6", + "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34", + "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704", + "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106", + "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9", + "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862", + "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b", + "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255", + 
"sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16", + "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3", + "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133", + "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb", + "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657", + "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d", + "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca", + "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36", + "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c", + "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e", + "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff", + "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7", + "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5", + "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02", + "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c", + "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df", + "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3", + "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a", + "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959", + "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234", + "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc" ], "index": "pypi", - "version": "==7.1.0" + "markers": "python_version >= '3.8'", + "version": "==7.6.1" }, "distlib": { "hashes": [ - "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46", - "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e" + "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784", + "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64" ], - "version": "==0.3.6" - }, - "exceptiongroup": { - "hashes": [ - "sha256:327cbda3da756e2de031a3107b81ab7b3770a602c4d16ca618298c526f4bec1e", - "sha256:bcb67d800a4497e1b404c2dd44fca47d3b7a5e5433dbab67f96c1a685cdfdf23" - ], - "markers": "python_version < '3.11'", - "version": "==1.1.0" + "version": "==0.3.8" }, "filelock": { "hashes": [ - "sha256:7b319f24340b51f55a2bf7a12ac0755a9b03e718311dac567a0f4f7fabd2f5de", - "sha256:f58d535af89bb9ad5cd4df046f741f8553a418c01a7856bf0d173bbc9f6bd16d" + "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb", + "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7" ], - "markers": "python_version >= '3.7'", - "version": "==3.9.0" + "markers": "python_version >= '3.8'", + "version": "==3.15.4" }, "identify": { "hashes": [ - "sha256:89e144fa560cc4cffb6ef2ab5e9fb18ed9f9b3cb054384bab4b95c12f6c309fe", - "sha256:93aac7ecf2f6abf879b8f29a8002d3c6de7086b8c28d88e1ad15045a15ab63f9" + "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf", + "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0" ], - "markers": "python_version >= '3.7'", - "version": "==2.5.18" + "markers": "python_version >= '3.8'", + "version": "==2.6.0" }, "iniconfig": { "hashes": [ @@ -177,59 +189,61 @@ }, "lark": { "hashes": [ - "sha256:4b534eae1f9af5b4ea000bea95776350befe1981658eea3820a01c37e504bb4d", - "sha256:8476f9903e93fbde4f6c327f74d79e9b4bd0ed9294c5dfa3164ab8c581b5de2a" + 
"sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c", + "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80" ], "index": "pypi", - "version": "==1.1.5" + "markers": "python_version >= '3.8'", + "version": "==1.2.2" }, "mypy-extensions": { "hashes": [ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" ], + "index": "pypi", "markers": "python_version >= '3.5'", "version": "==1.0.0" }, "nodeenv": { "hashes": [ - "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e", - "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b" + "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", + "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'", - "version": "==1.7.0" + "version": "==1.9.1" }, "packaging": { "hashes": [ - "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2", - "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97" + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" ], - "markers": "python_version >= '3.7'", - "version": "==23.0" + "markers": "python_version >= '3.8'", + "version": "==24.1" }, "pathspec": { "hashes": [ - "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229", - "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.0" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, "platformdirs": { "hashes": [ - "sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9", - "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567" + "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", + "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3" ], - "markers": "python_version >= '3.7'", - "version": "==3.0.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.2" }, "pluggy": { "hashes": [ - "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159", - "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3" + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" ], - "markers": "python_version >= '3.6'", - "version": "==1.0.0" + "markers": "python_version >= '3.8'", + "version": "==1.5.0" }, "pre-commit": { "hashes": [ @@ -237,77 +251,85 @@ "sha256:a978dac7bc9ec0bcee55c18a277d553b0f419d259dadb4b9418ff2d00eb43959" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==2.20.0" }, "pytest": { "hashes": [ - "sha256:c7c6ca206e93355074ae32f7403e8ea12163b1163c976fee7d4d84027c162be5", - "sha256:d45e0952f3727241918b8fd0f376f5ff6b301cc0777c6f9a556935c92d8a7d42" + "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5", + "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce" ], "index": "pypi", - "version": "==7.2.1" + "markers": "python_version >= '3.8'", + "version": "==8.3.2" }, "pytest-cov": { "hashes": [ - 
"sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b", - "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470" + "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652", + "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857" ], "index": "pypi", - "version": "==4.0.0" + "markers": "python_version >= '3.8'", + "version": "==5.0.0" }, "pyyaml": { "hashes": [ - "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf", - "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293", - "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b", - "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57", - "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b", - "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4", - "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07", - "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba", - "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9", - "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287", - "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513", - "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0", - "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782", - "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0", - "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92", - "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f", - "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2", - "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc", - "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1", - "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c", - "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86", - "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4", - "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c", - "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34", - "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b", - "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d", - "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c", - "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb", - "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7", - "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737", - "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3", - "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d", - "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358", - "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53", - "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78", - "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803", - "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a", - "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f", - "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174", - "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5" - ], - "markers": "python_version >= 
'3.6'", - "version": "==6.0" - }, - "setuptools": { - "hashes": [ - "sha256:e5fd0a713141a4a105412233c63dc4e17ba0090c8e8334594ac790ec97792330", - "sha256:f106dee1b506dee5102cc3f3e9e68137bbad6d47b616be7991714b0c62204251" + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + 
"sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", + "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], - "markers": "python_version >= '3.7'", - "version": "==67.4.0" + "markers": "python_version >= '3.8'", + "version": "==6.0.2" }, "toml": { "hashes": [ @@ -317,14 +339,6 @@ "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.10.2" }, - "tomli": { - "hashes": [ - "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", - "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f" - ], - "markers": "python_version < '3.11'", - "version": "==2.0.1" - }, "types-attrs": { "hashes": [ "sha256:d11acf7a2531a7c52a740c30fa3eb8d01d3066c10d34c01ff5e59502caac5352" @@ -334,11 +348,11 @@ }, "virtualenv": { "hashes": [ - "sha256:37a640ba82ed40b226599c522d411e4be5edb339a0c0de030c0dc7b646d61590", - "sha256:54eb59e7352b573aa04d53f80fc9736ed0ad5143af445a1e539aada6eb947dd1" + "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a", + "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589" ], "markers": "python_version >= '3.7'", - "version": "==20.19.0" + "version": "==20.26.3" } }, "develop": {} diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..64c1611 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,558 @@ +# This file is automatically @generated by Poetry and should not be changed by hand. + +[[package]] +name = "black" +version = "23.1.0" +description = "The uncompromising code formatter." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, + {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, + {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, + {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, + {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, + {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, + {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, + {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, + {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, + {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, + {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, + {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, + {file = "black-23.1.0-py3-none-any.whl", hash = 
"sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, + {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "bump2version" +version = "1.0.1" +description = "Version-bump your software with a single command!" +category = "dev" +optional = false +python-versions = ">=3.5" +files = [ + {file = "bump2version-1.0.1-py2.py3-none-any.whl", hash = "sha256:37f927ea17cde7ae2d7baf832f8e80ce3777624554a653006c9144f8017fe410"}, + {file = "bump2version-1.0.1.tar.gz", hash = "sha256:762cb2bfad61f4ec8e2bdf452c7c267416f8c70dd9ecb1653fd0bbb01fa936e6"}, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.6.1" +description = "Code coverage measurement for Python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = 
"sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = 
"coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = 
"coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "distlib" +version = "0.3.8" +description = "Distribution utilities" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "filelock" +version = "3.15.4" +description = "A platform independent file lock." 
+category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, + {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] +typing = ["typing-extensions (>=4.8)"] + +[[package]] +name = "identify" +version = "2.6.0" +description = "File identification library for Python" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, + {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "lark" +version = "1.2.2" +description = "a modern parsing library" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c"}, + {file = "lark-1.2.2.tar.gz", hash = "sha256:ca807d0162cd16cef15a8feecb862d7319e7a09bdb13aef927968e45040fed80"}, +] + +[package.extras] +atomic-cache = ["atomicwrites"] +interegular = ["interegular (>=0.3.1,<0.4.0)"] +nearley = ["js2py"] +regex = ["regex"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "2.20.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pre_commit-2.20.0-py2.py3-none-any.whl", hash = "sha256:51a5ba7c480ae8072ecdb6933df22d2f812dc897d5fe848778116129a681aac7"}, + {file = "pre_commit-2.20.0.tar.gz", hash = "sha256:a978dac7bc9ec0bcee55c18a277d553b0f419d259dadb4b9418ff2d00eb43959"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +toml = "*" +virtualenv = ">=20.0.8" + +[[package]] +name = "pytest" +version = "8.3.2" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "5.0.0" +description = "Pytest plugin for measuring coverage." +category = "dev" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = 
"main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "types-attrs" +version = "19.1.0" +description = "Typing stubs for attrs" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "types_attrs-19.1.0-py2.py3-none-any.whl", hash = "sha256:d11acf7a2531a7c52a740c30fa3eb8d01d3066c10d34c01ff5e59502caac5352"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "virtualenv" +version = "20.26.3" +description = "Virtual Python Environment builder" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, + {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "fcdb99926e0e3627abff0679d89ee9a543332acedfc38eddd296bf845b57da13" diff --git a/pyproject.toml b/pyproject.toml index a616815..9dbfabd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,3 +35,27 @@ ignore_missing_imports = true [build-system] requires = ["setuptools>=42"] build-backend = "setuptools.build_meta" + +[tool.poetry] +name = "mamushi" +version = "0.0.3" +description = "Vyper Formatter" +authors = ["benny <[email protected]>"] + +[tool.poetry.dependencies] +python = "^3.8" +click = "8.1.3" +black = "23.1.0" +pre-commit = "2.20.0" +types-attrs = "^19.1.0" +lark = ">=1.0.0" +mypy-extensions = "0.4.3" + +[tool.poetry.dev-dependencies] +pytest = ">=7.0.0" +pytest-cov = ">=3.0.0" +coverage = ">=6.5.0" +bump2version = ">=1.0.0" + +[tool.poetry.scripts] +mamushi = "mamushi.__main__:main" diff --git a/setup.cfg b/setup.cfg index fa04ae3..f8f43b2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,21 +1,22 @@ [bumpversion] -current_version = 0.0.2-b0 +current_version = 0.0.3 commit = False tag = True parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\-(?P<release>[a-z]+)(?P<build>\d+))? 
-serialize = +serialize = {major}.{minor}.{patch}-{release}{build} {major}.{minor}.{patch} [options] -package_dir = +package_dir = = src packages = find: python_requires = >= 3.9 -install_requires = +install_requires = click>=8.0.0 pathspec>=0.9.0 lark>=1.0.0 + mypy-extensions==0.4.3 py_modules = mamushi include_package_data = True @@ -23,12 +24,13 @@ include_package_data = True mamushi.parsing = *.lark [options.entry_points] -console_scripts = +console_scripts = mamushi=mamushi:main [options.extras_require] -dev = - black>=22.6.0 +dev = + black==23.1.0 + pre-commit==2.20.0 mypy_extensions==0.4.3 mypy>=0.900 flake8==5.0.4 @@ -40,7 +42,7 @@ dev = [options.packages.find] where = src -exclude = +exclude = tests [metadata] @@ -51,10 +53,10 @@ long_description = file: README.md long_description_content_type = text/markdown version = attr: mamushi.__version__.__version__ url = https://github.com/benber86/mamushi -project_urls = +project_urls = Source = https://github.com/benber86/mamushi Tracker = https://github.com/benber86/mamushi/issues -classifiers = +classifiers = Development Status :: 4 - Beta Environment :: Console Intended Audience :: Developers @@ -69,7 +71,7 @@ classifiers = [bumpversion:part:release] optional_value = release -values = +values = a b release @@ -77,3 +79,7 @@ values = [bumpversion:file:./src/mamushi/__version__.py] [bumpversion:part:build] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" diff --git a/src/mamushi/__version__.py b/src/mamushi/__version__.py index e4b4786..27fdca4 100644 --- a/src/mamushi/__version__.py +++ b/src/mamushi/__version__.py @@ -1,1 +1,1 @@ -__version__ = "0.0.2-b0" +__version__ = "0.0.3" diff --git a/src/mamushi/formatting/whitespace.py b/src/mamushi/formatting/whitespace.py index 30b369f..72c7e89 100644 --- a/src/mamushi/formatting/whitespace.py +++ b/src/mamushi/formatting/whitespace.py @@ -61,6 +61,13 @@ def whitespace(leaf: Leaf) -> str: ): return NO + elif ( + prevp.parent + and prevp.parent.parent + and prevp.parent.parent.type == tokens.INITIALIZES_STMT + ): + return NO + elif p.type == tokens.KWARG: if prev.type != tokens.COMMA: return NO @@ -80,7 +87,7 @@ def whitespace(leaf: Leaf) -> str: ): return NO - elif p.type == tokens.GET_ATTR: + elif p.type == tokens.ATTRIBUTE: # variable access if t == tokens.DOT or prev.type == tokens.DOT: return NO @@ -93,10 +100,13 @@ def whitespace(leaf: Leaf) -> str: tokens.FUNCTION_SIG, tokens.CALL, tokens.EMPTY, - tokens.GET_ITEM, + tokens.ABI_DECODE, + tokens.SUBSCRIPT, tokens.INDEXED_ARGS, tokens.LOG_STMT, tokens.CONSTANT, + tokens.IMPLEMENTS, + tokens.USES, tokens.IMMUTABLE, } ): diff --git a/src/mamushi/parsing/grammar.lark b/src/mamushi/parsing/grammar.lark index 55daeaf..128a38b 100644 --- a/src/mamushi/parsing/grammar.lark +++ b/src/mamushi/parsing/grammar.lark @@ -11,7 +11,13 @@ module: ( DOCSTRING | constant_def | variable_def | enum_def + | flag_def | event_def + | transient_def + | implements_def + | uses_def + | exports_def + | initializes_stmt | function_def | immutable_def | _NEWLINE )* @@ -39,16 +45,20 @@ import: _IMPORT DOT* _import_path [import_alias] constant: "constant" "(" type ")" constant_private: NAME ":" constant constant_with_getter: NAME ":" "public" "(" constant ")" -constant_def: (constant_private | constant_with_getter) "=" _expr +constant_def: (constant_private | constant_with_getter) "=" expr // immutable definitions // NOTE: Temporary until decorators used immutable: "immutable" "(" type ")" immutable_def: 
NAME ":" immutable +// transient definitions +transient: "transient" "(" type ")" +transient_def: NAME ":" transient + variable: NAME ":" type // NOTE: Temporary until decorators used -variable_with_getter: NAME ":" "public" "(" (type | immutable) ")" +variable_with_getter: NAME ":" "public" "(" (type | immutable | transient) ")" variable_def: variable | variable_with_getter // A decorator "wraps" a method, modifying it's context. @@ -60,7 +70,7 @@ decorators: decorator+ // and can return up to one parameter. // NOTE: Parameters can have a default value, // which must be a constant or environment variable. -parameter: NAME ":" type ["=" _expr] +parameter: NAME ":" type ["=" expr] parameters: parameter ("," parameter?)* _FUNC_DECL: "def" @@ -73,7 +83,7 @@ function_def: [decorators] function_sig ":" body _EVENT_DECL: "event" event_member: NAME ":" type indexed_event_arg: NAME ":" "indexed" "(" type ")" -event_body: _NEWLINE _INDENT ((event_member | indexed_event_arg) _NEWLINE)+ _DEDENT +event_body: _NEWLINE _INDENT (((event_member | indexed_event_arg ) _NEWLINE)+ | _PASS _NEWLINE) _DEDENT // Events which use no args use a pass statement instead event_def: _EVENT_DECL NAME ":" ( event_body | _PASS ) @@ -83,14 +93,21 @@ enum_member: NAME enum_body: _NEWLINE _INDENT (enum_member _NEWLINE)+ _DEDENT enum_def: _ENUM_DECL NAME ":" enum_body +// Flags +_FLAG_DECL: "flag" +flag_member: NAME +flag_body: _NEWLINE _INDENT (flag_member _NEWLINE)+ _DEDENT +flag_def: _FLAG_DECL NAME ":" flag_body + // Types -array_def: (NAME | array_def | dyn_array_def) "[" _expr "]" -dyn_array_def: "DynArray" "[" (NAME | array_def | dyn_array_def) "," _expr "]" +array_def: (NAME | array_def | dyn_array_def) "[" expr "]" +dyn_array_def: "DynArray" "[" (NAME | array_def | dyn_array_def) "," expr "]" tuple_def: "(" ( NAME | array_def | dyn_array_def | tuple_def ) ( "," ( NAME | array_def | dyn_array_def | tuple_def ) )* [","] ")" // NOTE: Map takes a basic type and maps to another type (can be non-basic, including maps) _MAP: "HashMap" map_def: _MAP "[" ( NAME | array_def ) "," type "]" -type: ( NAME | array_def | tuple_def | map_def | dyn_array_def ) +imported_type: NAME ("." 
NAME)+ +type: ( NAME | imported_type | array_def | tuple_def | map_def | dyn_array_def ) // Structs can be composed of 1+ basic types or other custom_types _STRUCT_DECL: "struct" @@ -103,6 +120,15 @@ mutability: NAME interface_function: function_sig ":" mutability interface_def: _INTERFACE_DECL NAME ":" _NEWLINE _INDENT ( interface_function _NEWLINE)+ _DEDENT +implements_def: "implements" ":" NAME +uses_def: "uses" ":" NAME +exports_def: "exports" ":" (attribute | tuple) + +_INITIALIZES_DECL: "initializes" +initializes_stmt: _INITIALIZES_DECL ":" initializes_list +initializes_list: NAME ("," NAME)* [initializes_dependency] +initializes_dependency: "[" NAME ":=" NAME ("," NAME ":=" NAME)* "]" + // Statements // If and For blocks create a new block, and thus are complete when de-indented @@ -120,17 +146,18 @@ _simple_stmt: (declaration | log_stmt | raise_stmt | assert_stmt - | _expr ) [COMMENT] _NEWLINE + | expr ) [COMMENT] _NEWLINE -declaration: variable ["=" _expr] +declaration: variable ["=" expr] skip_assign: "_" -multiple_assign: (variable_access | skip_assign) ("," (variable_access | skip_assign))+ [","] -assign: (variable_access | multiple_assign | "(" multiple_assign ")" ) "=" _expr +multiple_assign: (atom_expr | skip_assign) ("," (atom_expr | skip_assign))+ [","] +assign: (atom_expr | multiple_assign | "(" multiple_assign ")" ) "=" expr // NOTE: Keep these in sync with bin_op below ?aug_operator: "+" -> add | "-" -> sub | "*" -> mul | "/" -> div + | "//" -> floordiv | "%" -> mod | "**" -> pow | "<<" -> shl @@ -141,7 +168,7 @@ assign: (variable_access | multiple_assign | "(" multiple_assign ")" ) "=" _expr | _AND -> and | _OR -> or // NOTE: Post-process into a normal assign -aug_assign: variable_access aug_operator "=" _expr +aug_assign: atom_expr aug_operator "=" expr _PASS: "pass" _BREAK: "break" @@ -155,47 +182,32 @@ pass_stmt: _PASS break_stmt: _BREAK continue_stmt: _CONTINUE -log_stmt: _LOG NAME "(" [arguments] ")" -return_stmt: _RETURN [_expr ("," _expr)*] +log_stmt: _LOG (NAME | atom_expr) "(" [arguments] ")" +return_stmt: _RETURN [expr ("," expr)*] _UNREACHABLE: "UNREACHABLE" raise_stmt: _RAISE -> raise - | _RAISE _expr -> raise_with_reason + | _RAISE expr -> raise_with_reason | _RAISE _UNREACHABLE -> raise_unreachable -assert_stmt: _ASSERT _expr -> assert - | _ASSERT _expr "," _expr -> assert_with_reason - | _ASSERT _expr "," _UNREACHABLE -> assert_unreachable +assert_stmt: _ASSERT expr -> assert + | _ASSERT expr "," expr -> assert_with_reason + | _ASSERT expr "," _UNREACHABLE -> assert_unreachable body: _simple_stmt | _NEWLINE _INDENT ([COMMENT] _NEWLINE | _stmt)+ _DEDENT -cond_exec: _expr ":" body +cond_exec: expr ":" body default_exec: body if_stmt: "if" cond_exec ("elif" cond_exec)* ["else" ":" default_exec] -// TODO: make this into a variable definition e.g. `for i: uint256 in range(0, 5): ...` -loop_variable: NAME [":" NAME] -loop_iterator: _expr +loop_variable: NAME [":" type] +loop_iterator: expr for_stmt: "for" loop_variable "in" loop_iterator ":" body - -// Expressions -_expr: operation - | dict - -get_item: variable_access "[" _expr "]" -get_attr: variable_access "." 
NAME -call: variable_access "(" [arguments] ")" -?variable_access: NAME -> get_var - | get_item - | get_attr - | call - | "(" variable_access ")" - -arg: _expr -kwarg: NAME "=" _expr +arg: expr +kwarg: NAME "=" expr ?argument: (arg | kwarg) arguments: argument ("," argument)* [","] -tuple: "(" "," ")" | "(" _expr ( ("," _expr)+ [","] | "," ) ")" -list: "[" "]" | "[" _expr ("," _expr)* [","] "]" -dict: "{" "}" | "{" (NAME ":" _expr) ("," (NAME ":" _expr))* [","] "}" +tuple: "(" "," ")" | "(" expr ( ("," expr)+ [","] | "," ) ")" +list: "[" "]" | "[" expr ("," expr)* [","] "]" +dict: "{" "}" | "{" (NAME ":" expr) ("," (NAME ":" expr))* [","] "}" // Operators @@ -203,7 +215,14 @@ dict: "{" "}" | "{" (NAME ":" _expr) ("," (NAME ":" _expr))* [","] "}" // See https://docs.python.org/3/reference/expressions.html#operator-precedence // NOTE: The recursive cycle here helps enforce operator precedence // Precedence goes up the lower down you go +?expr: assignment_expr +// "walrus" operator +?assignment_expr: ternary + | NAME ":=" assignment_expr +// ternary operator ?operation: bool_or +?ternary: bool_or + | ternary "if" ternary "else" ternary _AND: "and" _OR: "or" @@ -226,7 +245,7 @@ _BITAND: "&" _BITOR: "|" _BITXOR: "^" -// Comparisions +// Comparisons _EQ: "==" _NE: "!=" _LE: "<=" @@ -259,29 +278,43 @@ _IN: "in" ?product: unary | product "*" unary -> mul | product "/" unary -> div + | product "//" unary -> floordiv | product "%" unary -> mod ?unary: power | "+" power -> uadd | "-" power -> usub | "~" power -> invert -?power: atom + +// TODO: add factor rule +?power: external_call | power _POW atom -> pow + | external_call _POW power -> pow + +?external_call: ("extcall" | "staticcall")? atom_expr +subscript: (atom_expr | list) "[" expr "]" +attribute: atom_expr "." NAME +call: atom_expr "(" [arguments] ")" +?atom_expr: NAME -> get_var + | subscript + | attribute + | call + | atom // special rule to handle types as "arguments" (for `empty` builtin) empty: "empty" "(" type ")" // special rule to handle types as "arguments" (for `_abi_decode` builtin) -abi_decode: "_abi_decode" "(" arg "," type ( "," kwarg )* ")" +abi_decode: ("_abi_decode" | "abi_decode") "(" arg "," type ( "," kwarg )* ")" special_builtins: empty | abi_decode // NOTE: Must end recursive cycle like this (with `atom` calling `operation`) -?atom: variable_access - | literal +?atom: literal | special_builtins | tuple | list - | "(" operation ")" + | dict + | "(" expr ")" // Tokens @@ -289,7 +322,7 @@ special_builtins: empty | abi_decode // Adapted from: https://docs.python.org/3/reference/grammar.html // Adapted by: Erez Shinan NAME: /[a-zA-Z_]\w*/ -COMMENT.3: /#[^\n]*/ +COMMENT.3: /#[^\n\r]*/ _NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ STRING: /b?("(?!"").*?(?<!\\)(\\\\)*?"|'(?!'').*?(?<!\\)(\\\\)*?')/i @@ -310,8 +343,10 @@ _number: DEC_NUMBER BOOL.2: "True" | "False" +ELLIPSIS: "..." 
+ // TODO: Remove Docstring from here, and add to first part of body -?literal: ( _number | strings | DOCSTRING | BOOL ) +?literal: ( _number | strings | DOCSTRING | BOOL | ELLIPSIS) %ignore /[\t \f]+/ // WS %ignore /\\[\t \f]*\r?\n/ // LINE_CONT diff --git a/src/mamushi/parsing/tokens.py b/src/mamushi/parsing/tokens.py index 63fbffe..055b96f 100644 --- a/src/mamushi/parsing/tokens.py +++ b/src/mamushi/parsing/tokens.py @@ -44,6 +44,7 @@ MUL = "mul" MOD = "mod" DIV = "div" +FLOORDIV = "floordiv" POW = "pow" SHL = "shl" SHR = "shr" @@ -53,7 +54,7 @@ SLASH = "SLASH" STAR = "WILDCARD" PERCENT = "PERCENT" -OPERATIONS = {ADD, SUB, MUL, MOD, DIV} +OPERATIONS = {ADD, SUB, MUL, MOD, DIV, FLOORDIV} EQUAL = "EQUAL" ASSIGN = "assign" AUG_ASSIGN = "aug_assign" @@ -62,6 +63,7 @@ ADD, SUB, DIV, + FLOORDIV, MUL, MOD, POW, @@ -132,13 +134,16 @@ DINARRAY_DEF = "dyn_array_def" CALL = "call" EMPTY = "empty" -GET_ATTR = "get_attr" +ABI_DECODE = "abi_decode" +ATTRIBUTE = "attribute" GET_VAR = "get_var" LOG_STMT = "log_stmt" -GET_ITEM = "get_item" +SUBSCRIPT = "subscript" GETTER_SUFIX = "_with_getter" CONSTANT = "constant" IMMUTABLE = "immutable" +IMPLEMENTS = "implements" +USES = "uses" INDEXED_ARGS = "indexed_event_arg" ATOM = "atom" @@ -168,10 +173,14 @@ PASS_STMT = "pass_stmt" BREAK_STMT = "break_stmt" CONTINUE_STMT = "continue_stmt" +INITIALIZES_STMT = "initializes_stmt" RAISE = "raise" RAISE_WITH_REASON = "raise_with_reason" IMMUTABLE_DEF = "immutable_def" INTERFACE_DEF = "interface_def" +IMPLEMENTS_DEF = "implements_def" +USES_DEF = "uses_def" +EXPORTS_DEF = "exports_def" STRUCT_DEF = "struct_def" ENUM_DEF = "enum_def" EVENT_DEF = "event_def" @@ -189,6 +198,10 @@ RAISE, RAISE_WITH_REASON, LOG_STMT, + INITIALIZES_STMT, + IMPLEMENTS_DEF, + USES_DEF, + EXPORTS_DEF, CONSTANT_DEF, IMMUTABLE_DEF, INTERFACE_DEF, diff --git a/src/mamushi/utils/files.py b/src/mamushi/utils/files.py index 282c5b9..9a10bc3 100644 --- a/src/mamushi/utils/files.py +++ b/src/mamushi/utils/files.py @@ -1,7 +1,7 @@ -from typing import Iterator, Optional +from typing import Iterator from pathlib import Path -VYPER_EXTENSIONS = {".vy"} +VYPER_EXTENSIONS = {".vy", ".vyi"} BLACKLISTED_DIRECTORIES = { "build", "buck-out",
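The grammar diff above introduces, among other additions, a recursive `ternary` rule (`?ternary: bool_or | ternary "if" ternary "else" ternary`). As a toy illustration of how that rule shape behaves, here is a heavily reduced Lark sketch — a few lines of hypothetical grammar, not the real mamushi grammar, which is far larger:

```python
# Minimal sketch: a cut-down ternary rule in the same shape as the one
# added in the grammar diff above. Parsed with Lark's default Earley parser.
from lark import Lark

toy_grammar = r"""
?start: ternary
?ternary: NAME
        | ternary "if" ternary "else" ternary
NAME: /[a-zA-Z_]\w*/
%ignore /[\t ]+/
"""

parser = Lark(toy_grammar)
print(parser.parse("a if cond else b").pretty())
# ternary
#   a
#   cond
#   b
```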
[Bug] Missing mypy extension

**Describe the bug**

![image](https://github.com/user-attachments/assets/c921d782-bd12-4a66-8d71-90e2d12eaa0f)
2024-08-24T08:05:02
0.0
[]
[]
benber86/mamushi
benber86__mamushi-32
33012ca6f57b7f66cea72820c4419d3fb1f0b31d
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 63de4ae..440a115 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: language_version: python3.10 args: [ --line-length=79 ] - - repo: https://gitlab.com/pycqa/flake8 + - repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 diff --git a/src/mamushi/parsing/grammar.lark b/src/mamushi/parsing/grammar.lark index 85816fa..55daeaf 100644 --- a/src/mamushi/parsing/grammar.lark +++ b/src/mamushi/parsing/grammar.lark @@ -296,11 +296,11 @@ STRING: /b?("(?!"").*?(?<!\\)(\\\\)*?"|'(?!'').*?(?<!\\)(\\\\)*?')/i DOCSTRING: /(""".*?(?<!\\)(\\\\)*?"""|'''.*?(?<!\\)(\\\\)*?''')/is ?strings: STRING+ -DEC_NUMBER: /0|[1-9]\d*/i -HEX_NUMBER.2: /0x[\da-f]*/i -OCT_NUMBER.2: /0o[0-7]*/i -BIN_NUMBER.2 : /0b[0-1]*/i -FLOAT_NUMBER.2: /((\d+\.\d*|\.\d+)(e[-+]?\d+)?|\d+(e[-+]?\d+))/i +DEC_NUMBER: /0|[1-9](_?\d)*/ +HEX_NUMBER.2: /0x([_]?[0-9a-f])+/i +OCT_NUMBER.2: /0o([_]?[0-7])+/i +BIN_NUMBER.2 : /0b([_]?[01])+/i +FLOAT_NUMBER.2: /(([1-9]\d*(_?\d*)*\.?\d*(_?\d*)?|\.\d+(_?\d*)?)(e[-+]?\d+(_?\d*)?)?|[1-9]\d*(_?\d*)?(e[-+]?\d+(_?\d*)?)?)/i _number: DEC_NUMBER | HEX_NUMBER
[Feature] Support numeric separator

Example:

```python
# @version 0.3.9

@external
def bar() -> uint256:
    return 1_000_000_000_000
```

It won't parse at all for now. If I change `1_000_000_000_000` to `1000000000000`, it works.
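As a quick sanity check of the change, the patched `DEC_NUMBER` pattern (copied verbatim from the diff above) accepts single underscores between digits but still rejects doubled ones:

```python
# Check the patched DEC_NUMBER terminal against a few literals.
import re

DEC_NUMBER = re.compile(r"0|[1-9](_?\d)*")

for lit in ["0", "1000000000000", "1_000_000_000_000", "1__0"]:
    verdict = "matches" if DEC_NUMBER.fullmatch(lit) else "no match"
    print(f"{lit}: {verdict}")
# 0: matches
# 1000000000000: matches
# 1_000_000_000_000: matches
# 1__0: no match
```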
2023-07-16T03:30:13
0.0
[]
[]
pyro-ppl/pyro
pyro-ppl__pyro-3378
64e71eee1c14dc926d5cbc5e762b6337bb4750a6
diff --git a/pyro/distributions/hmm.py b/pyro/distributions/hmm.py index 9e5d714aa4..9f2a242682 100644 --- a/pyro/distributions/hmm.py +++ b/pyro/distributions/hmm.py @@ -1091,7 +1091,7 @@ def __init__( self.transforms = transforms @constraints.dependent_property(event_dim=2) - def support(self): + def support(self): # noqa: F811 return constraints.independent(self.observation_dist.support, 1) def expand(self, batch_shape, _instance=None): diff --git a/pyro/distributions/stable_log_prob.py b/pyro/distributions/stable_log_prob.py index c5c953c393..fa173e58f9 100644 --- a/pyro/distributions/stable_log_prob.py +++ b/pyro/distributions/stable_log_prob.py @@ -44,7 +44,7 @@ def set_integrator(num_points): # Stub which is replaced by the default integrator when called for the first time # if a default integrator has not already been set. -def integrate(*args, **kwargs): +def integrate(*args, **kwargs): # noqa: F811 set_integrator(num_points=501) return integrate(*args, **kwargs) diff --git a/pyro/infer/inspect.py b/pyro/infer/inspect.py index 88a722fd5d..2580301dba 100644 --- a/pyro/infer/inspect.py +++ b/pyro/infer/inspect.py @@ -331,7 +331,7 @@ def _get_type_from_frozenname(frozen_name): sample_param[name] = [ upstream - for upstream in get_provenance(site["fn"].log_prob(site["value"])) + for upstream in provenance if upstream != name and _get_type_from_frozenname(upstream) == "param" ]
[discussion] Parameters not rendering when feeding into `pyro.deterministic`

### Issue Description

Parts of my programs are deterministic, and these parts still have latent parameters I would like to make inferences about. But when I try to render my program, my registered parameters are not showing up. It looks like this only happens when these parameters feed into strictly deterministic variables. I suspect they're not rendering because parameters that feed only into deterministic "variables" will not actually be part of optimization. Is this true?

### Environment

- OS: macOS Sonoma 14.5
- Python version: 3.11.4
- PyTorch version: 2.0.0
- Pyro version: 1.9.0+f02dfb9

### Code Snippet

```
import pyro
import pyro.distributions as dist
import torch

data = torch.tensor([1.0, 2.0, 3.0])

def deterministic_model(data):
    value = pyro.param("param", torch.tensor(0.))
    with pyro.plate("plate", len(data)):
        pyro.deterministic("deterministic", data + value)

pyro.render_model(deterministic_model, model_args=(data,), render_params=True)
```

```
def probabilistic_model(data):
    value = pyro.param("param", torch.tensor(0.))
    with pyro.plate("plate", len(data)):
        pyro.sample("probabilistic", dist.Normal(value, 1), obs=data)

pyro.render_model(probabilistic_model, model_args=(data,), render_params=True)
```
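For context on the fix in the patch above: it replaces a recomputation of upstream sites via `get_provenance(site["fn"].log_prob(site["value"]))` with the already-tracked `provenance` set, presumably because the recomputation loses provenance for deterministic sites. A toy, self-contained rendition of the filtering comprehension, with made-up site names:

```python
# Toy version of the patched comprehension in pyro/infer/inspect.py:
# keep only upstream sites that are params and are not the site itself.
site_types = {"param": "param", "plate": "plate", "deterministic": "sample"}

def upstream_params(name: str, provenance: frozenset) -> list:
    return [
        upstream
        for upstream in provenance
        if upstream != name and site_types[upstream] == "param"
    ]

print(upstream_params("deterministic", frozenset({"param", "plate"})))
# ['param']
```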
2024-06-27T16:43:40
0.0
[]
[]
pyro-ppl/pyro
pyro-ppl__pyro-3366
ca36025a3502c0160395b53145d2e95b56eaf15f
diff --git a/pyro/nn/module.py b/pyro/nn/module.py index 553b33d95d..afa1ac5851 100644 --- a/pyro/nn/module.py +++ b/pyro/nn/module.py @@ -582,7 +582,12 @@ def __getattr__(self, name: str) -> Any: constrained_value.unconstrained = weakref.ref(unconstrained_value) return pyro.poutine.runtime.effectful(type="param")( lambda *_, **__: constrained_value - )(fullname, event_dim=event_dim, name=fullname) + )( + fullname, + constraint=constraint, + event_dim=event_dim, + name=fullname, + ) else: # Cannot determine supermodule and hence cannot compute fullname. constrained_value = transform_to(constraint)(unconstrained_value) constrained_value.unconstrained = weakref.ref(unconstrained_value) @@ -621,7 +626,7 @@ def __getattr__(self, name: str) -> Any: # even though we don't use the contents of the local parameter store fullname = self._pyro_get_fullname(name) pyro.poutine.runtime.effectful(type="param")(lambda *_, **__: result)( - fullname, result, name=fullname + fullname, result, constraint=constraints.real, name=fullname ) if isinstance(result, torch.nn.Module): @@ -645,7 +650,12 @@ def __getattr__(self, name: str) -> Any: ) pyro.poutine.runtime.effectful(type="param")( lambda *_, **__: param_value - )(fullname_param, param_value, name=fullname_param) + )( + fullname_param, + param_value, + constraint=constraints.real, + name=fullname_param, + ) return result
Rendering PyroModules can fail with local parameter mode enabled When using `module_local_params=True`, calling `pyro.render_model` on a `PyroModule` with constrained parameters can fail with a `KeyError`, as shown by the test case in this stack trace: ``` _________________________________________________________________________________________________ test_render_constrained_param[True] __________________________________________________________________________________________________ use_module_local_params = True @pytest.mark.parametrize("use_module_local_params", [True, False]) def test_render_constrained_param(use_module_local_params): class Model(PyroModule): @PyroParam(constraint=constraints.positive) def x(self): return torch.tensor(1.234) @PyroParam(constraint=constraints.real) def y(self): return torch.tensor(0.456) def forward(self): return self.x + self.y with pyro.settings.context(module_local_params=use_module_local_params): model = Model() > pyro.render_model(model) tests/nn/test_module.py:1068: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ pyro/infer/inspect.py:630: in render_model get_model_relations( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ model = Model(), model_args = (), model_kwargs = {}, include_deterministic = False def get_model_relations( model: Callable, model_args: Optional[tuple] = None, model_kwargs: Optional[dict] = None, include_deterministic: bool = False, ): """ Infer relations of RVs and plates from given model and optionally data. See https://github.com/pyro-ppl/pyro/issues/949 for more details. This returns a dictionary with keys: - "sample_sample" map each downstream sample site to a list of the upstream sample sites on which it depend; - "sample_dist" maps each sample site to the name of the distribution at that site; - "plate_sample" maps each plate name to a list of the sample sites within that plate; and - "observe" is a list of observed sample sites. For example for the model:: def model(data): m = pyro.sample('m', dist.Normal(0, 1)) sd = pyro.sample('sd', dist.LogNormal(m, 1)) with pyro.plate('N', len(data)): pyro.sample('obs', dist.Normal(m, sd), obs=data) the relation is:: {'sample_sample': {'m': [], 'sd': ['m'], 'obs': ['m', 'sd']}, 'sample_dist': {'m': 'Normal', 'sd': 'LogNormal', 'obs': 'Normal'}, 'plate_sample': {'N': ['obs']}, 'observed': ['obs']} :param callable model: A model to inspect. :param model_args: Optional tuple of model args. :param model_kwargs: Optional dict of model kwargs. :param bool include_deterministic: Whether to include deterministic sites. 
:rtype: dict """ if model_args is None: model_args = () if model_kwargs is None: model_kwargs = {} assert isinstance(model_args, tuple) assert isinstance(model_kwargs, dict) with torch.random.fork_rng(), torch.no_grad(), pyro.validation_enabled(False): with TrackProvenance(include_deterministic=include_deterministic): trace = poutine.trace(model).get_trace(*model_args, **model_kwargs) sample_sample = {} sample_param = {} sample_dist = {} param_constraint = {} plate_sample = defaultdict(list) observed = [] def _get_type_from_frozenname(frozen_name): return trace.nodes[frozen_name]["type"] for name, site in trace.nodes.items(): if site["type"] == "param": > param_constraint[name] = str(site["kwargs"]["constraint"]) E KeyError: 'constraint' pyro/infer/inspect.py:316: KeyError ```
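The `KeyError` at the bottom of the trace comes from `param_constraint[name] = str(site["kwargs"]["constraint"])`: in local-param mode the effectful `param` calls never put `constraint` into the site's kwargs. The merged patch adds `constraint=` at each call site in `pyro/nn/module.py`; purely as an illustration of the failure mode (not the actual fix), a defensive consumer-side guard would look like this:

```python
# Minimal reproduction of the failure mode with a hand-built site dict.
from torch.distributions import constraints

site = {"type": "param", "kwargs": {"event_dim": 0, "name": "model.x"}}

# site["kwargs"]["constraint"]  # would raise KeyError: 'constraint'
constraint = site["kwargs"].get("constraint", constraints.real)
print(str(constraint))  # Real()
```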
2024-05-07T13:53:26
0.0
[]
[]
pjaselin/Cubist
pjaselin__Cubist-147
d3e7d6e603d8d527f03a3cb1f78c1fd08d403781
diff --git a/cubist/_make_data_string.py b/cubist/_make_data_string.py index 701f89e..82ad1d2 100644 --- a/cubist/_make_data_string.py +++ b/cubist/_make_data_string.py @@ -64,7 +64,11 @@ def _make_data_string(x, y=None, w=None): x[col] = x[col].astype(str) # remove leading whitespace from all elements - x = x.applymap(lambda a: a.lstrip()) + # handling pandas 2.2.2 feature change (applymap -> map) + if hasattr(x, "map"): + x = x.map(lambda a: a.lstrip()) + else: # pragma: no cover + x = x.applymap(lambda a: a.lstrip()) # replace missing values with ? x = x.fillna("?") diff --git a/pyproject.toml b/pyproject.toml index ce42cf9..f7b28de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "cubist" -version = "0.1.3" +version = "0.1.4" authors = [ {name = "John Ross Quinlan"}, {name = "Max Kuhn"},
Replace deprecated DataFrame.applymap

While using cubist, I'm receiving the following warning:

```bash
/opt/conda/lib/python3.10/site-packages/cubist/_make_data_string.py:67: FutureWarning: DataFrame.applymap has been deprecated. Use DataFrame.map instead.
  x = x.applymap(lambda a: a.lstrip())
```

I suppose that it's possible to solve this by fixing this line: https://github.com/pjaselin/Cubist/blob/d3e7d6e603d8d527f03a3cb1f78c1fd08d403781/cubist/_make_data_string.py#L67
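The merged fix (see the patch above) feature-detects the renamed method, since `DataFrame.map` only exists on newer pandas while older versions only have `applymap`. A minimal standalone version of that check:

```python
# Backwards-compatible element-wise apply, mirroring the patch.
import pandas as pd

x = pd.DataFrame({"a": ["  foo", " bar"]})

if hasattr(x, "map"):  # newer pandas: applymap was renamed to map
    x = x.map(lambda a: a.lstrip())
else:  # older pandas without DataFrame.map
    x = x.applymap(lambda a: a.lstrip())

print(x)
```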
Hi @ramongss! I'm assuming you're using pandas 2.2. I'm wondering if I need to test whether pandas has the attribute and use one or the other to be safe and backwards compatible.
2024-05-19T18:11:01
0.0
[]
[]
pjaselin/Cubist
pjaselin__Cubist-112
764e12aca9eb86fc0139022ae600ad253c5f07f9
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8eee31f..a5030df 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -22,7 +22,7 @@ on: jobs: test: - name: Test and lint on Python ${{ matrix.python_version }} and ${{ matrix.os }} + name: Test and lint on Python ${{ matrix.python-version }} and ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] diff --git a/cubist/cubist.py b/cubist/cubist.py index 60f2f77..5351351 100644 --- a/cubist/cubist.py +++ b/cubist/cubist.py @@ -183,7 +183,7 @@ def _validate_model_parameters(self): else: if self.composite: warn("Cubist will choose an appropriate value for `neighbor` " - "as this is not set.", stacklevel=2) + "as this is not set.", stacklevel=3) self.neighbors_ = 0 if self.composite not in [True, False, 'auto']: @@ -279,7 +279,7 @@ def fit(self, X, y, sample_weight = None): f"sampling percent of {self.sample} means Cubist will " f"train with {trained_num_samples} rows. This may lead " f"to incorrect or failing predictions. Please increase " - f"or remove the `sample` parameter.\n", stacklevel=2) + f"or remove the `sample` parameter.\n", stacklevel=3) self.n_features_in_ = X.shape[1] self.n_outputs_ = 1 @@ -326,7 +326,7 @@ def fit(self, X, y, sample_weight = None): # inform user that they may want to use rules only if "Recommend using rules only" in output: warn("Cubist recommends using rules only " - "(i.e. set composite=False)", stacklevel=2) + "(i.e. set composite=False)", stacklevel=3) # print model output if using verbose output if self.verbose:
Not going to check exceptions because the macOS runners are passing for some reason
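The substantive change in the patch is bumping `stacklevel` from 2 to 3 in the `warn` calls, which moves the reported source location one frame further up the call stack. A minimal standalone demo of what that changes:

```python
# With stacklevel=2 the warning is attributed to validate()'s caller
# (the line inside fit()); with stacklevel=3 it is attributed to
# fit()'s caller, i.e. the user's own code.
import warnings

def validate(stacklevel):
    warnings.warn("neighbors not set", stacklevel=stacklevel)

def fit(stacklevel):
    validate(stacklevel)

fit(2)  # reported at the validate(...) line inside fit()
fit(3)  # reported at the fit(3) line below
```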
2021-12-29T03:52:59
0.0
[]
[]
pjaselin/Cubist
pjaselin__Cubist-107
0ba82fe193e3c69aff97769675c4d5911606178b
diff --git a/README.md b/README.md index 79ac113..9c0b191 100644 --- a/README.md +++ b/README.md @@ -12,24 +12,30 @@ pip install --upgrade cubist ``` ## Background -Cubist is a regression algorithm develped by John Ross Quinlan for generating rule-based predictive models. This has been available in the R world thanks to the work of Max Kuhn and his colleagues. With this package it is introduced to the Python ecosystem and made scikit-learn compatible for easy use with existing data and model pipelines. Additionally, cross-validation and control over whether Cubist creates a composite model is added here. +Cubist is a regression algorithm develped by John Ross Quinlan for generating rule-based predictive models. This has been available in the R world thanks to the work of Max Kuhn and his colleagues. With this package it is introduced to Python and made scikit-learn compatible for easy use with existing data and model pipelines. Additionally, cross-validation and control over whether Cubist creates a composite model is added here. ## Advantages -Unlike other ensemble models such as RandomForest and XGBoost, Cubist generates a set of rules, making it easy to understand precisely how the model makes it's predictive decisions. Thus tools such as SHAP and LIME are not needed as Cubist doesn't exhibit black box behavior. Like XGBoost, Cubist can perform boosting by the addition of more models (here called committees) that correct for the error of prior models (i.e. the second model created corrects for the prediction error of the first, the third for the error of the second, etc.). In addition to boosting, the model can perform instance-based (nearest-neighbor) corrections to create composite models, thus combining the advantages of these two methods. Note that with instance-based correction, model performance may be improved at the expense of some interpretability as the linear regression rules are no longer completely followed. It should also be noted that a composite model might be quite large as the full training dataset must be stored in order to perform instance-based corrections for inferencing. Note that this is not the case when `composite=False`. +Unlike other ensemble models such as RandomForest and XGBoost, Cubist generates a set of rules, making it easy to understand precisely how the model makes it's predictive decisions. Thus tools such as SHAP and LIME are unnecessary as Cubist doesn't exhibit black box behavior. + +Like XGBoost, Cubist can perform boosting by the addition of more models (here called committees) that correct for the error of prior models (i.e. the second model created corrects for the prediction error of the first, the third for the error of the second, etc.). + +In addition to boosting, the model can perform instance-based (nearest-neighbor) corrections to create composite models, thus combining the advantages of these two methods. Note that with instance-based correction, model accuracy may be improved at the expense of computing time (this extra step takes longer) and some interpretability as the linear regression rules are no longer completely followed. It should also be noted that a composite model might be quite large as the full training dataset must be stored in order to perform instance-based corrections for inferencing. A composite model will be used when `composite=True` or Cubist can be allowed to decide whether to take advantage of this feature with `composite='auto'`. 
## Use ```python from sklearn.datasets import fetch_california_housing from cubist import Cubist X, y = fetch_california_housing(return_X_y=True, as_frame=True) -model = Cubist() # <- model paramters here +model = Cubist() # <- model parameters here model.fit(X, y) model.predict(X) model.score(X, y) ``` ## Sample Output -![Sample Cubist output for Iris dataset](www/iris_cubist_output.png) +<p align="center"> + <img src="www/iris_cubist_output.png" alt="[Sample Cubist output for Iris dataset" width="400"/> +</p> The above image is a sample of the verbose output produced by Cubist. It first reports the total number of cases (rows) and attributes (columns) in the training dataset. Below that it summarizes the model by committee (if used but not in this sample) and rule where each rule is definined by an if..then statement along with metrics for this rule in the training data and the linear regression equation used for each rule. The 'if' section of each rule identifies the training input columns and feature value ranges for which this rule holds true. The 'then' statement shows the linear regressor for this rule. The model performance is then summarized by the average and relative absolute errors as well as with the Pearson correlation coefficient r. Finally, the output reports the usage of training features in the model and rules as well as the time taken to complete training. @@ -41,12 +47,16 @@ The following parameters can be passed as arguments to the ```Cubist()``` class - unbiased (bool, default=False): Should unbiased rules be used? Since Cubist minimizes the MAE of the predicted values, the rules may be biased and the mean predicted value may differ from the actual mean. This is recommended when there are frequent occurrences of the same value in a training dataset. Note that MAE may be slightly higher. - composite (True, False, or 'auto', default=False): A composite model is a combination of Cubist's rule-based model and instance-based or nearest-neighbor models to improve the predictive performance of the returned model. A value of True requires Cubist to include the nearest-neighbor model, False will ensure Cubist only generates a rule-based model, and 'auto' allows the algorithm to choose whether to use nearest-neighbor corrections. - extrapolation (float, default=0.05): Adjusts how much rule predictions are adjusted to be consistent with the training dataset. Recommended value is 5% as a decimal (0.05) -- sample (float, default=0.0): Percentage of the data set to be randomly selected for model building. +- sample (float, default=None): Percentage of the data set to be randomly selected for model building (0.0 or greater but less than 1.0). - cv (int, default=None): Whether to carry out cross-validation (recommended value is 10) - random_state (int, default=randint(0, 4095)): An integer to set the random seed for the C Cubist code. - target_label (str, default="outcome"): A label for the outcome variable. This is only used for printing rules. - verbose (int, default=0) Should the Cubist output be printed? 1 if yes, 0 if no. +## Considerations +- For small datasets, using the `sample` parameter is probably inadvisable because Cubist won't have enough samples to produce a representative model. +- If you are looking for fast inferencing and can spare accuracy, skip using a composite model with `composite=False`. 
+ ## Model Attributes The following attributes are exposed to understand the Cubist model results: - feature_importances_ (pd.DataFrame): Table of how training data variables are used in the Cubist model. @@ -57,8 +67,7 @@ The following attributes are exposed to understand the Cubist model results: ## Benchmarks There are many literature examples demonstrating the power of Cubist and comparing it to Random Forest as well as other bootstrapped/boosted models. Some of these are compiled here: https://www.rulequest.com/cubist-pubs.html. To demonstrate this, some benchmark scripts are provided in the respectively named folder. - -## Literature for Cubist Model +## Literature for Cubist - https://sci2s.ugr.es/keel/pdf/algorithm/congreso/1992-Quinlan-AI.pdf - http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.34.6358&rep=rep1&type=pdf @@ -68,4 +77,4 @@ There are many literature examples demonstrating the power of Cubist and compari ## To Do - Add visualization utilities -- Add benchmark scripts \ No newline at end of file +- Add benchmark scripts diff --git a/cubist/__init__.py b/cubist/__init__.py index e62200c..5575562 100644 --- a/cubist/__init__.py +++ b/cubist/__init__.py @@ -14,7 +14,8 @@ into existing sklearn-based ML pipelines or scripts for experimenting. """ from .cubist import Cubist +from .exceptions import CubistError from ._version import __version__ -__all__ = ['Cubist', '__version__'] +__all__ = ['Cubist', 'CubistError', '__version__'] diff --git a/cubist/_version.py b/cubist/_version.py index 6561790..d62d967 100644 --- a/cubist/_version.py +++ b/cubist/_version.py @@ -1,1 +1,1 @@ -__version__ = "0.0.15" +__version__ = "0.0.16" diff --git a/cubist/cubist.py b/cubist/cubist.py index 33f1550..60f2f77 100644 --- a/cubist/cubist.py +++ b/cubist/cubist.py @@ -14,6 +14,7 @@ from ._parse_model import parse_model from ._variable_usage import get_variable_usage from _cubist import _cubist, _predictions +from .exceptions import CubistError class Cubist(BaseEstimator, RegressorMixin): @@ -60,7 +61,7 @@ class Cubist(BaseEstimator, RegressorMixin): Adjusts how much rule predictions are adjusted to be consistent with the training dataset. Recommended value is 5% as a decimal (0.05) - sample : float, default=0.0 + sample : float, default=None Percentage of the data set to be randomly selected for model building. 
cv : int or None, default=None @@ -113,7 +114,9 @@ class Cubist(BaseEstimator, RegressorMixin): >>> from sklearn.datasets import fetch_california_housing >>> from sklearn.model_selection import train_test_split >>> X, y = fetch_california_housing(return_X_y=True, as_frame=True) - >>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) + >>> X_train, X_test, y_train, y_test = train_test_split(X, y, + test_size=0.2, + random_state=42) >>> model = Cubist() >>> model.fit(X_train, y_train) >>> model.predict(X_test) @@ -128,7 +131,7 @@ def __init__(self, unbiased: bool = False, composite: Union[bool, str] = False, extrapolation: float = 0.05, - sample: float = 0.0, + sample: float = None, cv: int = None, random_state: int = None, target_label: str = "outcome", @@ -165,11 +168,12 @@ def _validate_model_parameters(self): if not isinstance(self.n_committees, int): raise TypeError("Number of committees must be an integer") elif self.n_committees < 1 or self.n_committees > 100: - raise ValueError("Number of committees must be 1 or greater") + raise ValueError("Number of committees must be between 1 and 100") if self.neighbors: if self.composite is False: - raise ValueError("`neighbors` should not be set when `composite` is False") + raise ValueError("`neighbors` should not be set when " + "`composite` is False") elif not isinstance(self.neighbors, int): raise TypeError("Number of neighbors must be an integer") elif self.neighbors < 1 or self.neighbors > 9: @@ -177,10 +181,14 @@ def _validate_model_parameters(self): else: self.neighbors_ = self.neighbors else: + if self.composite: + warn("Cubist will choose an appropriate value for `neighbor` " + "as this is not set.", stacklevel=2) self.neighbors_ = 0 if self.composite not in [True, False, 'auto']: - raise ValueError(f"Wrong input for parameter `composite`. Expected True, False, or 'auto', got {self.composite}") + raise ValueError(f"Wrong input for parameter `composite`. 
Expected " + f"True, False, or 'auto', got {self.composite}") else: if self.composite is True: self.composite_ = 'yes' @@ -192,18 +200,26 @@ def _validate_model_parameters(self): if not isinstance(self.extrapolation, float): raise TypeError("Extrapolation percentage must be a float") elif self.extrapolation < 0.0 or self.extrapolation > 1.0: - raise ValueError("Extrapolation percentage must be between 0.0 and 1.0") - - if not isinstance(self.sample, float): - raise TypeError("Sampling percentage must be a float") - if self.sample < 0.0 or self.sample > 1.0: - raise ValueError("Sampling percentage must be between 0.0 and 1.0") + raise ValueError("Extrapolation percentage must be between " + "0.0 and 1.0") + + if self.sample: + if not isinstance(self.sample, float): + raise TypeError("Sampling percentage must be a float") + if self.sample < 0.0 or self.sample >= 1.0: + raise ValueError("Sampling percentage must be between " + "0.0 and 1.0") + self.sample_ = self.sample + else: + self.sample_ = 0.0 if not isinstance(self.cv, (int, type(None))): - raise TypeError("Number of cross-validation folds must be an integer or None") + raise TypeError("Number of cross-validation folds must be an \ + integer or None") if isinstance(self.cv, int): if self.cv <= 1 and self.cv != 0: - raise ValueError("Number of cross-validation folds must be greater than 1") + raise ValueError("Number of cross-validation folds must be \ + greater than 1") else: self.cv_ = self.cv else: @@ -228,6 +244,12 @@ def fit(self, X, y, sample_weight = None): ------- self : object """ + # get column name from y if it is a Pandas Series + if isinstance(y, pd.Series): + target_label_ = y.name + else: + target_label_ = None + # scikit-learn checks X, y = self._validate_data(X, y, dtype=None, @@ -248,6 +270,16 @@ def fit(self, X, y, sample_weight = None): # validate model parameters self._validate_model_parameters() + + # raise warning if sampling a small dataset + if self.sample: + trained_num_samples = int(round(self.sample * X.shape[0], 0)) + if trained_num_samples < 10: + warn(f"Sampling a dataset with {X.shape[0]} rows and a " + f"sampling percent of {self.sample} means Cubist will " + f"train with {trained_num_samples} rows. This may lead " + f"to incorrect or failing predictions. 
Please increase " + f"or remove the `sample` parameter.\n", stacklevel=2) self.n_features_in_ = X.shape[1] self.n_outputs_ = 1 @@ -258,9 +290,14 @@ def fit(self, X, y, sample_weight = None): random_state = check_random_state(self.random_state) + # if a Pandas series wasn't used or it has no name, + # use the passed target_label feature, otherwise use + # the name of the Pandas series + self.target_label_ = target_label_ or self.target_label + # create the names and data strings required for cubist names_string = make_names_string(X, w=sample_weight, - label=self.target_label) + label=self.target_label_) data_string = make_data_string(X, y, w=sample_weight) # call the C implementation of cubist @@ -270,7 +307,7 @@ def fit(self, X, y, sample_weight = None): compositev_=self.composite_.encode(), neighbors_=self.neighbors_, committees_=self.n_committees, - sample_=self.sample, + sample_=self.sample_, seed_=random_state.randint(0, 4095) % 4096, rules_=self.n_rules, extrapolation_=self.extrapolation, @@ -282,13 +319,14 @@ def fit(self, X, y, sample_weight = None): self.model_ = model.decode() output = output.decode() - # raise cubist errors - if "Error" in output: - raise Exception(output) + # raise Cubist training errors + if "***" in output or "Error" in output: + raise CubistError(output) # inform user that they may want to use rules only if "Recommend using rules only" in output: - warn("Cubist recommends using rules only (i.e. set composite=False)") + warn("Cubist recommends using rules only " + "(i.e. set composite=False)", stacklevel=2) # print model output if using verbose output if self.verbose: @@ -309,11 +347,12 @@ def fit(self, X, y, sample_weight = None): # compress and save descriptors self.names_string_ = zlib.compress(names_string.encode()) - # TODO: check to see when a composite model has been used - # compress and save training data if using a composite model + # when a composite model has been used compress and save training data if self.composite is True or "nearest neighbors" in output \ or self.neighbors_ > 0: self.data_string_ = zlib.compress(data_string.encode()) + else: + self.data_string_ = zlib.compress("1".encode()) # parse model contents and store useful information self.rules_, self.coeff_ = parse_model(self.model_, X) @@ -372,21 +411,22 @@ def predict(self, X): # make data string for predictions data_string = make_data_string(X) - # if a composite model was used, get the training data - if hasattr(self, "data_string_"): - training_data_string = zlib.decompress(self.data_string_) - else: - training_data_string = b"1" - # get cubist predictions from trained model pred, output = _predictions(data_string.encode(), zlib.decompress(self.names_string_), - training_data_string, + zlib.decompress(self.data_string_), self.model_.encode(), np.zeros(X.shape[0]), b"1") + + # decode output + output = output.decode() - # TODO: parse and handle errors in output + # raise Cubist prediction errors + if "***" in output or "Error" in output: + raise CubistError(output) + if output: - print(output.decode()) + print(output) + return pred diff --git a/cubist/exceptions.py b/cubist/exceptions.py new file mode 100644 index 0000000..b887d5c --- /dev/null +++ b/cubist/exceptions.py @@ -0,0 +1,8 @@ +class Error(Exception): + """Base class for exceptions in this module.""" + pass + + +class CubistError(Error): + """Raised when the C Cubist library raises errors""" + pass diff --git a/cubist/src/top.c b/cubist/src/top.c index 076893a..d09b761 100644 --- a/cubist/src/top.c +++ b/cubist/src/top.c 
@@ -65,9 +65,9 @@ static void cubist(char **namesv, char **datav, int *unbiased, // I think the previous value of *modelv will be garbage collected *modelv = model; } - } else { - printf("cubist code called exit with value %d\n", val - JMP_OFFSET); - } + } //else { + // printf("training code called exit with value %d\n", val - JMP_OFFSET); + //} // Close file object "Of", and return its contents via argument outputv char *outputString = closeOf(); @@ -117,9 +117,9 @@ static void predictions(char **casev, char **namesv, char **datav, // Real work is done here samplemain(predv); - } else { - printf("sample code called exit with value %d\n", val - JMP_OFFSET); - } + } //else { + // printf("prediction code called exit with value %d\n", val - JMP_OFFSET); + // } // Close file object "Of", and return its contents via argument outputv char *outputString = closeOf(); diff --git a/examples/california_housing.py b/examples/california_housing.py index 6719904..6e89ef2 100644 --- a/examples/california_housing.py +++ b/examples/california_housing.py @@ -15,7 +15,7 @@ # model.fit(X, y) # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) -model = Cubist(composite=True, neighbors=9, verbose=True) +model = Cubist(composite=False, verbose=True) model.fit(X, y) print("score", model.score(X, y)) pred_list = model.predict(X).tolist() diff --git a/examples/iris.py b/examples/iris.py index 8540ad4..d5084e2 100644 --- a/examples/iris.py +++ b/examples/iris.py @@ -18,18 +18,18 @@ # X, y = load_diabetes(return_X_y=True, as_frame=True) -# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) +X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # model = Cubist(verbose=True) # model.fit(X, y) # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) -model = Cubist(composite=True, n_committees=879870, verbose=True) -model.fit(X, y) -print("score", model.score(X, y)) -pred_list = model.predict(X).tolist() - -print("pearson", pearsonr(pred_list, y.tolist())[0]) +model = Cubist(composite=True, verbose=True, neighbors=1) +model.fit(X_train, y_train) +print("score", model.score(X_train, y_train)) +pred_list = model.predict(X_test).tolist() +print(pred_list) +print("pearson", pearsonr(pred_list, y_test.tolist())[0]) # print(model.data_string_) # print(model.fit(X_train, y_train, sample_weight=np.ones(y_train.shape[0])).predict(X_test)) # print(model.feature_importances_) diff --git a/examples/jwgda.py b/examples/jwgda.py new file mode 100644 index 0000000..616c773 --- /dev/null +++ b/examples/jwgda.py @@ -0,0 +1,10 @@ +import pandas as pd +from cubist import Cubist + +X = pd.DataFrame([0.803949,0.792776,0.698957,0.889468,0.898589,0.780680,0.763039,0.733734,0.880098,0.749216,0.772409,0.397889,0.569687,0.863047,0.901520,0.903672,0.850031,0.967257,0.924921],columns=['X']) +y = [9.101007550426122, 10.105041400955075, 9.665428777858954, 11.108833294273804, 10.543686195210283, 10.041306334130462, 10.29213332885503, 10.229124133661102, 10.793787318304165, 9.726502315371054, 10.102863786063018, 7.843001261439629, 9.474707491805574, 10.290439628383432, 11.106171764961289, 11.73360218758805, 11.607825754410422, 11.168213131314303, 11.230738412087772] +predict = pd.DataFrame([0.795757],columns=['X']) + +tree = Cubist(n_committees=5, n_rules=500, neighbors=None, unbiased=False, composite=False, extrapolation=0.05, sample=0.1, cv=None, target_label="outcome", random_state=1, verbose=0) 
+tree.fit(X, y) +tree.predict(predict) \ No newline at end of file
The target outcome label could take its name from a pandas Series
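A minimal sketch of the behavior added in `fit` by the patch above (the helper name `resolve_target_label` is hypothetical; the real code inlines this logic):

```python
# If y is a pandas Series with a name, use it as the target label;
# otherwise fall back to the target_label argument.
import pandas as pd

def resolve_target_label(y, target_label="outcome"):
    series_name = y.name if isinstance(y, pd.Series) else None
    return series_name or target_label

print(resolve_target_label(pd.Series([1.0, 2.0], name="price")))  # price
print(resolve_target_label([1.0, 2.0]))                           # outcome
```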
2021-12-24T02:21:36
0.0
[]
[]
Ceyron/exponax
Ceyron__exponax-54
e761498d7a24071bf9c4cd938411582546f9b4f0
diff --git a/exponax/stepper/_kuramoto_sivashinsky.py b/exponax/stepper/_kuramoto_sivashinsky.py index b4f84c4..fcc39ec 100644 --- a/exponax/stepper/_kuramoto_sivashinsky.py +++ b/exponax/stepper/_kuramoto_sivashinsky.py @@ -7,8 +7,8 @@ class KuramotoSivashinsky(BaseStepper): gradient_norm_scale: float - second_order_diffusivity: float - fourth_order_diffusivity: float + second_order_scale: float + fourth_order_scale: float dealiasing_fraction: float def __init__( @@ -19,8 +19,8 @@ def __init__( dt: float, *, gradient_norm_scale: float = 1.0, - second_order_diffusivity: float = 1.0, - fourth_order_diffusivity: float = 1.0, + second_order_scale: float = 1.0, + fourth_order_scale: float = 1.0, dealiasing_fraction: float = 2 / 3, order: int = 2, num_circle_points: int = 16, @@ -36,24 +36,25 @@ def __init__( In 1d, the KS equation is given by ``` - uₜ + b₂ 1/2 (uₓ)² + ν uₓₓ + μ uₓₓₓₓ = 0 + uₜ + b₂ 1/2 (uₓ)² + ψ₁ uₓₓ + ψ₂ uₓₓₓₓ = 0 ``` - with `b₂` the gradient-norm coefficient, `ν` the diffusivity and `μ` the - hyper viscosity. Note that both viscosity terms are on the left-hand - side. As such for `ν, μ > 0`, the second-order term acts destabilizing - (increases the energy of the system) and the fourth-order term acts - stabilizing (decreases the energy of the system). A common configuration - is `b₂ = ν = μ = 1` and the dynamics are only adapted using the - `domain_extent`. For this, we espect the KS equation to experience - spatio-temporal chaos roughly once `L > 60`. + with `b₂` the gradient-norm coefficient, `ψ₁` the second-order scale and + `ψ₂` the fourth-order. If the latter two terms were on the right-hand + side, they could be interpreted as diffusivity and hyper-diffusivity, + respectively. Here, the second-order term acts destabilizing (increases + the energy of the system) and the fourth-order term acts stabilizing + (decreases the energy of the system). A common configuration is `b₂ = ψ₁ + = ψ₂ = 1` and the dynamics are only adapted using the `domain_extent`. + For this, we espect the KS equation to experience spatio-temporal chaos + roughly once `L > 60`. In this combustion (=non-conservative) format, the number of channels does **not** grow with the spatial dimension. A 2d KS still only has a single channel. In higher dimensions, the equation reads ``` - uₜ + b₂ 1/2 ‖ ∇u ‖₂² + ν (∇ ⋅ ∇) u + μ ((∇ ⊗ ∇) ⋅ (∇ ⊗ ∇))u = 0 + uₜ + b₂ 1/2 ‖ ∇u ‖₂² + ψ₁ν (∇ ⋅ ∇) u + ψ₂ ((∇ ⊗ ∇) ⋅ (∇ ⊗ ∇))u = 0 ``` with `‖ ∇u ‖₂` the gradient norm, `∇ ⋅ ∇` effectively is the Laplace @@ -75,14 +76,9 @@ def __init__( - `gradient_norm_scale`: The gradient-norm coefficient `b₂`. Note that the gradient norm is already scaled by 1/2. This factor allows for further modification. Default: 1.0. - - `second_order_diffusivity`: The diffusivity `ν` in the KS - equation. The sign of this coefficient is interpreted as if the term - was on the left-hand side. Hence it should have a positive value to - act destabilizing. Default: 1.0. - - `fourth_order_diffusivity`: The hyper viscosity `μ` in the KS - equation. The sign of this coefficient is interpreted as if the term - was on the left-hand side. Hence it should have a positive value to - act stabilizing. Default: 1.0. + - `second_order_scale`: The "diffusivity" `ψ₁` in the KS equation. + - `fourth_order_diffusivity`: The "hyper-diffusivity" `ψ₂` in the KS + equation. - `order`: The order of the Exponential Time Differencing Runge Kutta method. Must be one of {0, 1, 2, 3, 4}. The option `0` only solves the linear part of the equation. 
Use higher values for higher @@ -132,8 +128,8 @@ def __init__( the transitional phase, after that the chaotic attractor is reached. """ self.gradient_norm_scale = gradient_norm_scale - self.second_order_diffusivity = second_order_diffusivity - self.fourth_order_diffusivity = fourth_order_diffusivity + self.second_order_scale = second_order_scale + self.fourth_order_scale = fourth_order_scale self.dealiasing_fraction = dealiasing_fraction super().__init__( num_spatial_dims=num_spatial_dims, @@ -150,9 +146,10 @@ def _build_linear_operator( self, derivative_operator: Complex[Array, "D ... (N//2)+1"], ) -> Complex[Array, "1 ... (N//2)+1"]: - linear_operator = -self.second_order_diffusivity * build_laplace_operator( + # Minuses are required to move the terms to the right-hand side + linear_operator = -self.second_order_scale * build_laplace_operator( derivative_operator, order=2 - ) - self.fourth_order_diffusivity * build_laplace_operator( + ) - self.fourth_order_scale * build_laplace_operator( derivative_operator, order=4 ) return linear_operator @@ -173,8 +170,8 @@ def _build_nonlinear_fun( class KuramotoSivashinskyConservative(BaseStepper): convection_scale: float - second_order_diffusivity: float - fourth_order_diffusivity: float + second_order_scale: float + fourth_order_scale: float single_channel: bool conservative: bool dealiasing_fraction: float @@ -187,8 +184,8 @@ def __init__( dt: float, *, convection_scale: float = 1.0, - second_order_diffusivity: float = 1.0, - fourth_order_diffusivity: float = 1.0, + second_order_scale: float = 1.0, + fourth_order_scale: float = 1.0, single_channel: bool = False, conservative: bool = True, dealiasing_fraction: float = 2 / 3, @@ -202,8 +199,8 @@ def __init__( the number of spatial dimensions. """ self.convection_scale = convection_scale - self.second_order_diffusivity = second_order_diffusivity - self.fourth_order_diffusivity = fourth_order_diffusivity + self.second_order_scale = second_order_scale + self.fourth_order_scale = fourth_order_scale self.single_channel = single_channel self.conservative = conservative self.dealiasing_fraction = dealiasing_fraction @@ -237,9 +234,10 @@ def _build_linear_operator( self, derivative_operator: Complex[Array, "D ... (N//2)+1"], ) -> Complex[Array, "1 ... (N//2)+1"]: - linear_operator = -self.second_order_diffusivity * build_laplace_operator( + # Minuses are required to move the terms to the right-hand side + linear_operator = -self.second_order_scale * build_laplace_operator( derivative_operator, order=2 - ) - self.fourth_order_diffusivity * build_laplace_operator( + ) - self.fourth_order_scale * build_laplace_operator( derivative_operator, order=4 ) return linear_operator
Make attribute names of specific steppers more concrete

For example, the KS stepper has `second_order_diffusivity`; technically, it is not a diffusivity because it is on the left-hand side. Hence `second_order_factor` is more appropriate.
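After the rename, construction of the KS stepper uses the new keyword names. A sketch grounded in the diff above, assuming the class is re-exported under `exponax.stepper` as the module layout suggests; the concrete values are only illustrative:

```python
import exponax as ex

# Keyword names taken from the patched constructor signature.
ks_stepper = ex.stepper.KuramotoSivashinsky(
    num_spatial_dims=1,
    domain_extent=60.0,  # spatio-temporal chaos is expected roughly once L > 60
    num_points=128,
    dt=0.1,
    second_order_scale=1.0,   # was `second_order_diffusivity`
    fourth_order_scale=1.0,   # was `fourth_order_diffusivity`
)
```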
2024-10-22T12:55:38
0.0
[]
[]
Ceyron/exponax
Ceyron__exponax-52
ec4854df572b5a051d7037d3b39ca3a93026cc89
diff --git a/exponax/stepper/generic/_convection.py b/exponax/stepper/generic/_convection.py index b4241f4..82b9e29 100644 --- a/exponax/stepper/generic/_convection.py +++ b/exponax/stepper/generic/_convection.py @@ -10,7 +10,7 @@ class GeneralConvectionStepper(BaseStepper): - coefficients: tuple[float, ...] + linear_coefficients: tuple[float, ...] convection_scale: float dealiasing_fraction: float single_channel: bool @@ -23,7 +23,7 @@ def __init__( num_points: int, dt: float, *, - coefficients: tuple[float, ...] = (0.0, 0.0, 0.01), + linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01), convection_scale: float = 1.0, single_channel: bool = False, conservative: bool = False, @@ -74,7 +74,7 @@ def __init__( in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients` (keyword-only): The list of coefficients `a_j` + - `linear_coefficients` (keyword-only): The list of coefficients `a_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, 0.01)` corresponds to the Burgers equation (because of the @@ -103,7 +103,7 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. Default: 1.0. """ - self.coefficients = coefficients + self.linear_coefficients = linear_coefficients self.convection_scale = convection_scale self.single_channel = single_channel self.dealiasing_fraction = dealiasing_fraction @@ -136,7 +136,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator @@ -156,7 +156,7 @@ def _build_nonlinear_fun( class NormalizedConvectionStepper(GeneralConvectionStepper): - normalized_coefficients: tuple[float, ...] + normalized_linear_coefficients: tuple[float, ...] normalized_convection_scale: float def __init__( @@ -164,7 +164,7 @@ def __init__( num_spatial_dims: int, num_points: int, *, - normalized_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01 * 0.1), + normalized_linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01 * 0.1), normalized_convection_scale: float = 1.0 * 0.1, single_channel: bool = False, conservative: bool = False, @@ -205,7 +205,7 @@ def __init__( boundary point. In higher dimensions; the number of points in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - - `normalized_coefficients`: The list of coefficients + - `normalized_linear_coefficients`: The list of coefficients `α_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, 0.01)` corresponds to the Burgers equation (because of the @@ -235,14 +235,14 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. Default: 1.0. 
""" - self.normalized_coefficients = normalized_coefficients + self.normalized_linear_coefficients = normalized_linear_coefficients self.normalized_convection_scale = normalized_convection_scale super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=1.0, # Derivative operator is just scaled with 2 * jnp.pi num_points=num_points, dt=1.0, - coefficients=normalized_coefficients, + linear_coefficients=normalized_linear_coefficients, convection_scale=normalized_convection_scale, order=order, dealiasing_fraction=dealiasing_fraction, @@ -364,7 +364,7 @@ def __init__( super().__init__( num_spatial_dims=num_spatial_dims, num_points=num_points, - normalized_coefficients=normalized_coefficients, + normalized_linear_coefficients=normalized_coefficients, normalized_convection_scale=normalized_convection_scale, single_channel=single_channel, order=order, diff --git a/exponax/stepper/generic/_gradient_norm.py b/exponax/stepper/generic/_gradient_norm.py index cd6e299..3df31a8 100644 --- a/exponax/stepper/generic/_gradient_norm.py +++ b/exponax/stepper/generic/_gradient_norm.py @@ -10,7 +10,7 @@ class GeneralGradientNormStepper(BaseStepper): - coefficients: tuple[float, ...] + linear_coefficients: tuple[float, ...] gradient_norm_scale: float dealiasing_fraction: float @@ -21,7 +21,7 @@ def __init__( num_points: int, dt: float, *, - coefficients: tuple[float, ...] = (0.0, 0.0, -1.0, 0.0, -1.0), + linear_coefficients: tuple[float, ...] = (0.0, 0.0, -1.0, 0.0, -1.0), gradient_norm_scale: float = 1.0, order=2, dealiasing_fraction: float = 2 / 3, @@ -66,7 +66,7 @@ def __init__( in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients` (keyword-only): The list of coefficients `a_j` + - `linear_coefficients` (keyword-only): The list of coefficients `a_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, -1.0, 0.0, -1.0)` corresponds to the Kuramoto- Sivashinsky @@ -89,7 +89,7 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. Default: 1.0. """ - self.coefficients = coefficients + self.linear_coefficients = linear_coefficients self.gradient_norm_scale = gradient_norm_scale self.dealiasing_fraction = dealiasing_fraction super().__init__( @@ -113,7 +113,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator @@ -132,7 +132,7 @@ def _build_nonlinear_fun( class NormalizedGradientNormStepper(GeneralGradientNormStepper): - normalized_coefficients: tuple[float, ...] + normalized_linear_coefficients: tuple[float, ...] normalized_gradient_norm_scale: float def __init__( @@ -140,7 +140,7 @@ def __init__( num_spatial_dims: int, num_points: int, *, - normalized_coefficients: tuple[float, ...] = ( + normalized_linear_coefficients: tuple[float, ...] = ( 0.0, 0.0, -1.0 * 0.1 / (60.0**2), @@ -217,14 +217,14 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. Default: 1.0. 
""" - self.normalized_coefficients = normalized_coefficients + self.normalized_linear_coefficients = normalized_linear_coefficients self.normalized_gradient_norm_scale = normalized_gradient_norm_scale super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=1.0, num_points=num_points, dt=1.0, - coefficients=normalized_coefficients, + linear_coefficients=normalized_linear_coefficients, gradient_norm_scale=normalized_gradient_norm_scale, order=order, dealiasing_fraction=dealiasing_fraction, @@ -339,7 +339,7 @@ def __init__( super().__init__( num_spatial_dims=num_spatial_dims, num_points=num_points, - normalized_coefficients=normalized_coefficients, + normalized_linear_coefficients=normalized_coefficients, normalized_gradient_norm_scale=normalized_gradient_norm_scale, order=order, dealiasing_fraction=dealiasing_fraction, diff --git a/exponax/stepper/generic/_linear.py b/exponax/stepper/generic/_linear.py index 2c9fc15..8a35dca 100644 --- a/exponax/stepper/generic/_linear.py +++ b/exponax/stepper/generic/_linear.py @@ -11,7 +11,7 @@ class GeneralLinearStepper(BaseStepper): - coefficients: tuple[float, ...] + linear_coefficients: tuple[float, ...] def __init__( self, @@ -20,7 +20,7 @@ def __init__( num_points: int, dt: float, *, - coefficients: tuple[float, ...] = (0.0, -0.1, 0.01), + linear_coefficients: tuple[float, ...] = (0.0, -0.1, 0.01), ): """ General timestepper for a d-dimensional (`d ∈ {1, 2, 3}`) linear @@ -67,7 +67,7 @@ def __init__( number of points in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients` (keyword-only): The list of coefficients `a_j` + - `linear_coefficients` (keyword-only): The list of coefficients `a_j` corresponding to the derivatives. Default: `[0.0, -0.1, 0.01]`. **Notes:** @@ -137,7 +137,7 @@ def __init__( the function [`exponax.stepper.generic.normalize_coefficients`][] to obtain the normalized coefficients. """ - self.coefficients = coefficients + self.linear_coefficients = linear_coefficients super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=domain_extent, @@ -157,7 +157,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator @@ -172,14 +172,14 @@ def _build_nonlinear_fun( class NormalizedLinearStepper(GeneralLinearStepper): - normalized_coefficients: tuple[float, ...] + normalized_linear_coefficients: tuple[float, ...] def __init__( self, num_spatial_dims: int, num_points: int, *, - normalized_coefficients: tuple[float, ...] = (0.0, -0.5, 0.01), + normalized_linear_coefficients: tuple[float, ...] = (0.0, -0.5, 0.01), ): """ Timestepper for d-dimensional (`d ∈ {1, 2, 3}`) linear PDEs on periodic @@ -218,25 +218,25 @@ def __init__( dynamics. This must a tuple of floats. The length of the tuple defines the highest occuring linear derivative in the PDE. """ - self.normalized_coefficients = normalized_coefficients + self.normalized_linear_coefficients = normalized_linear_coefficients super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=1.0, num_points=num_points, dt=1.0, - coefficients=normalized_coefficients, + linear_coefficients=normalized_linear_coefficients, ) class DifficultyLinearStepper(NormalizedLinearStepper): - difficulties: tuple[float, ...] + linear_difficulties: tuple[float, ...] 
def __init__( self, num_spatial_dims: int = 1, num_points: int = 48, *, - difficulties: tuple[float, ...] = (0.0, -2.0), + linear_difficulties: tuple[float, ...] = (0.0, -2.0), ): """ Timestepper for d-dimensional (`d ∈ {1, 2, 3}`) linear PDEs on periodic @@ -275,9 +275,9 @@ def __init__( be a tuple of floats. The length of the tuple defines the highest occuring linear derivative in the PDE. Default is `(0.0, -2.0)`. """ - self.difficulties = difficulties + self.linear_difficulties = linear_difficulties normalized_coefficients = extract_normalized_coefficients_from_difficulty( - difficulties, + linear_difficulties, num_spatial_dims=num_spatial_dims, num_points=num_points, ) @@ -285,7 +285,7 @@ def __init__( super().__init__( num_spatial_dims=num_spatial_dims, num_points=num_points, - normalized_coefficients=normalized_coefficients, + normalized_linear_coefficients=normalized_coefficients, ) @@ -318,7 +318,7 @@ def __init__( """ difficulties = (0.0,) * (order) + (difficulty,) super().__init__( - difficulties=difficulties, + linear_difficulties=difficulties, num_spatial_dims=num_spatial_dims, num_points=num_points, ) diff --git a/exponax/stepper/generic/_nonlinear.py b/exponax/stepper/generic/_nonlinear.py index 5c3e875..45fd76c 100644 --- a/exponax/stepper/generic/_nonlinear.py +++ b/exponax/stepper/generic/_nonlinear.py @@ -10,8 +10,8 @@ class GeneralNonlinearStepper(BaseStepper): - coefficients_linear: tuple[float, ...] - coefficients_nonlinear: tuple[float, float, float] + linear_coefficients: tuple[float, ...] + nonlinear_coefficients: tuple[float, float, float] dealiasing_fraction: float def __init__( @@ -21,8 +21,8 @@ def __init__( num_points: int, dt: float, *, - coefficients_linear: tuple[float, ...] = (0.0, 0.0, 0.01), - coefficients_nonlinear: tuple[float, float, float] = (0.0, -1.0, 0.0), + linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.01), + nonlinear_coefficients: tuple[float, float, float] = (0.0, -1.0, 0.0), order=2, dealiasing_fraction: float = 2 / 3, num_circle_points: int = 16, @@ -83,12 +83,12 @@ def __init__( dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients_linear`: The list of coefficients `a_j` corresponding to + - `linear_coefficients`: The list of coefficients `a_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, 0.01)` together - with the default `coefficients_nonlinear` corresponds to the Burgers + with the default `nonlinear_coefficients` corresponds to the Burgers equation. - - `coefficients_nonlinear`: The list of coefficients `b₀`, `b₁`, `b₂` + - `nonlinear_coefficients`: The list of coefficients `b₀`, `b₁`, `b₂` (in this order). The default value `(0.0, -1.0, 0.0)` corresponds to a (single-channel) convection nonlinearity scaled with `1.0`. Note that all nonlinear contributions are considered to be on the @@ -107,12 +107,12 @@ def __init__( - `circle_radius`: The radius of the contour used to compute the coefficients of the exponential time differencing Runge Kutta method. 
""" - if len(coefficients_nonlinear) != 3: + if len(nonlinear_coefficients) != 3: raise ValueError( "The nonlinear coefficients list must have exactly 3 elements" ) - self.coefficients_linear = coefficients_linear - self.coefficients_nonlinear = coefficients_nonlinear + self.linear_coefficients = linear_coefficients + self.nonlinear_coefficients = nonlinear_coefficients self.dealiasing_fraction = dealiasing_fraction super().__init__( @@ -136,7 +136,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients_linear) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator @@ -149,22 +149,22 @@ def _build_nonlinear_fun( self.num_points, derivative_operator=derivative_operator, dealiasing_fraction=self.dealiasing_fraction, - scale_list=self.coefficients_nonlinear, + scale_list=self.nonlinear_coefficients, zero_mode_fix=True, # ToDo: check this ) class NormalizedNonlinearStepper(GeneralNonlinearStepper): - normalized_coefficients_linear: tuple[float, ...] - normalized_coefficients_nonlinear: tuple[float, float, float] + normalized_linear_coefficients: tuple[float, ...] + normalized_nonlinear_coefficients: tuple[float, float, float] def __init__( self, num_spatial_dims: int, num_points: int, *, - normalized_coefficients_linear: tuple[float, ...] = (0.0, 0.0, 0.1 * 0.1), - normalized_coefficients_nonlinear: tuple[float, float, float] = ( + normalized_linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.1 * 0.1), + normalized_nonlinear_coefficients: tuple[float, float, float] = ( 0.0, -1.0 * 0.1, 0.0, @@ -227,13 +227,13 @@ def __init__( boundary point. In higher dimensions; the number of points in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - - `normalized_coefficients_linear`: The list of coefficients `αⱼ` + - `normalized_linear_coefficients`: The list of coefficients `αⱼ` corresponding to the linear derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, 0.1 * 0.1)` together with the default - `normalized_coefficients_nonlinear` corresponds to the Burgers + `normalized_nonlinear_coefficients` corresponds to the Burgers equation (in single-channel mode). - - `normalized_coefficients_nonlinear`: The list of coefficients `β₀`, + - `normalized_nonlinear_coefficients`: The list of coefficients `β₀`, `β₁`, and `β₂` (in this order) corresponding to the quadratic, (single-channel) convection, and gradient norm nonlinearity, respectively. The default value `(0.0, -1.0 * 0.1, 0.0)` corresponds @@ -256,16 +256,16 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. 
""" - self.normalized_coefficients_linear = normalized_coefficients_linear - self.normalized_coefficients_nonlinear = normalized_coefficients_nonlinear + self.normalized_linear_coefficients = normalized_linear_coefficients + self.normalized_nonlinear_coefficients = normalized_nonlinear_coefficients super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=1.0, # Derivative operator is just scaled with 2 * jnp.pi num_points=num_points, dt=1.0, - coefficients_linear=normalized_coefficients_linear, - coefficients_nonlinear=normalized_coefficients_nonlinear, + linear_coefficients=normalized_linear_coefficients, + nonlinear_coefficients=normalized_nonlinear_coefficients, order=order, dealiasing_fraction=dealiasing_fraction, num_circle_points=num_circle_points, @@ -394,8 +394,8 @@ def __init__( super().__init__( num_spatial_dims=num_spatial_dims, num_points=num_points, - normalized_coefficients_linear=normalized_coefficients_linear, - normalized_coefficients_nonlinear=normalized_coefficients_nonlinear, + normalized_linear_coefficients=normalized_coefficients_linear, + normalized_nonlinear_coefficients=normalized_coefficients_nonlinear, order=order, dealiasing_fraction=dealiasing_fraction, num_circle_points=num_circle_points, diff --git a/exponax/stepper/generic/_polynomial.py b/exponax/stepper/generic/_polynomial.py index 2780569..4523536 100644 --- a/exponax/stepper/generic/_polynomial.py +++ b/exponax/stepper/generic/_polynomial.py @@ -7,8 +7,8 @@ class GeneralPolynomialStepper(BaseStepper): - coefficients: tuple[float, ...] - polynomial_scales: tuple[float, ...] + linear_coefficients: tuple[float, ...] + polynomial_coefficients: tuple[float, ...] dealiasing_fraction: float def __init__( @@ -18,8 +18,8 @@ def __init__( num_points: int, dt: float, *, - coefficients: tuple[float, ...] = (10.0, 0.0, 1.0), - polynomial_scales: tuple[float, ...] = (0.0, 0.0, -10.0), + linear_coefficients: tuple[float, ...] = (10.0, 0.0, 1.0), + polynomial_coefficients: tuple[float, ...] = (0.0, 0.0, -10.0), order=2, dealiasing_fraction: float = 2 / 3, num_circle_points: int = 16, @@ -78,15 +78,15 @@ def __init__( in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients`: The list of coefficients `a_j` corresponding to the + - `linear_coefficients`: The list of coefficients `a_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(10.0, 0.0, 0.01)` in - combination with the default `polynomial_scales` corresponds to the + combination with the default `polynomial_coefficients` corresponds to the Fisher-KPP equation. - - `polynomial_scales`: The list of scales `pₖ` corresponding to the + - `polynomial_coefficients`: The list of scales `pₖ` corresponding to the polynomial contributions. The length of this tuple represents the highest occuring polynomial. The default value `(0.0, 0.0, 10.0)` in - combination with the default `coefficients` corresponds to the + combination with the default `linear_coefficients` corresponds to the Fisher-KPP equation. - `order`: The order of the Exponential Time Differencing Runge Kutta method. Must be one of {0, 1, 2, 3, 4}. The option `0` only @@ -105,8 +105,8 @@ def __init__( coefficients of the exponential time differencing Runge Kutta method. 
""" - self.coefficients = coefficients - self.polynomial_scales = polynomial_scales + self.linear_coefficients = linear_coefficients + self.polynomial_coefficients = polynomial_coefficients self.dealiasing_fraction = dealiasing_fraction super().__init__( @@ -130,7 +130,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator @@ -142,25 +142,25 @@ def _build_nonlinear_fun( self.num_spatial_dims, self.num_points, dealiasing_fraction=self.dealiasing_fraction, - coefficients=self.polynomial_scales, + coefficients=self.polynomial_coefficients, ) class NormalizedPolynomialStepper(GeneralPolynomialStepper): - normalized_coefficients: tuple[float, ...] - normalized_polynomial_scales: tuple[float, ...] + normalized_linear_coefficients: tuple[float, ...] + normalized_polynomial_coefficients: tuple[float, ...] def __init__( self, num_spatial_dims: int, num_points: int, *, - normalized_coefficients: tuple[float, ...] = ( + normalized_linear_coefficients: tuple[float, ...] = ( 10.0 * 0.001 / (10.0**0), 0.0, 1.0 * 0.001 / (10.0**2), ), - normalized_polynomial_scales: tuple[float, ...] = ( + normalized_polynomial_coefficients: tuple[float, ...] = ( 0.0, 0.0, -10.0 * 0.001, @@ -190,19 +190,19 @@ def __init__( boundary point. In higher dimensions; the number of points in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - - `normalized_coefficients`: The list of coefficients `α_j` corresponding - to the derivatives. The length of this tuple represents the highest - occuring derivative. The default value corresponds to the Fisher-KPP - equation. - - `normalized_polynomial_scales`: The list of scales `βₖ` corresponding - to the polynomial contributions. The length of this tuple represents - the highest occuring polynomial. The default value corresponds to the - Fisher-KPP equation. + - `normalized_linear_coefficients`: The list of coefficients `α_j` + corresponding to the derivatives. The length of this tuple + represents the highest occuring derivative. The default value + corresponds to the Fisher-KPP equation. + - `normalized_polynomial_coefficients`: The list of scales `βₖ` + corresponding to the polynomial contributions. The length of this + tuple represents the highest occuring polynomial. The default value + corresponds to the Fisher-KPP equation. - `order`: The order of the Exponential Time Differencing Runge Kutta method. Must be one of {0, 1, 2, 3, 4}. The option `0` only solves - the linear part of the equation. Use higher values for higher accuracy - and stability. The default choice of `2` is a good compromise for - single precision floats. + the linear part of the equation. Use higher values for higher + accuracy and stability. The default choice of `2` is a good + compromise for single precision floats. - `dealiasing_fraction`: The fraction of the wavenumbers to keep before evaluating the nonlinearity. The default 2/3 corresponds to Orszag's 2/3 rule which is sufficient if the highest occuring polynomial is @@ -212,18 +212,19 @@ def __init__( integral method to compute the coefficients of the exponential time differencing Runge Kutta method. - `circle_radius`: The radius of the contour used to compute the - coefficients of the exponential time differencing Runge Kutta method. + coefficients of the exponential time differencing Runge Kutta + method. 
""" - self.normalized_coefficients = normalized_coefficients - self.normalized_polynomial_scales = normalized_polynomial_scales + self.normalized_linear_coefficients = normalized_linear_coefficients + self.normalized_polynomial_coefficients = normalized_polynomial_coefficients super().__init__( num_spatial_dims=num_spatial_dims, domain_extent=1.0, # Derivative operator is just scaled with 2 * jnp.pi num_points=num_points, dt=1.0, - coefficients=normalized_coefficients, - polynomial_scales=normalized_polynomial_scales, + linear_coefficients=normalized_linear_coefficients, + polynomial_coefficients=normalized_polynomial_coefficients, order=order, dealiasing_fraction=dealiasing_fraction, num_circle_points=num_circle_points, @@ -328,8 +329,8 @@ def __init__( super().__init__( num_spatial_dims=num_spatial_dims, num_points=num_points, - normalized_coefficients=normalized_coefficients, - normalized_polynomial_scales=normalized_polynomial_scales, + normalized_linear_coefficients=normalized_coefficients, + normalized_polynomial_coefficients=normalized_polynomial_scales, order=order, dealiasing_fraction=dealiasing_fraction, num_circle_points=num_circle_points, diff --git a/exponax/stepper/generic/_vorticity_convection.py b/exponax/stepper/generic/_vorticity_convection.py index f75b2c4..d4d83c3 100644 --- a/exponax/stepper/generic/_vorticity_convection.py +++ b/exponax/stepper/generic/_vorticity_convection.py @@ -7,7 +7,7 @@ class GeneralVorticityConvectionStepper(BaseStepper): vorticity_convection_scale: float - coefficients: tuple[float, ...] + linear_coefficients: tuple[float, ...] injection_mode: int injection_scale: float dealiasing_fraction: float @@ -20,7 +20,7 @@ def __init__( dt: float, *, vorticity_convection_scale: float = 1.0, - coefficients: tuple[float, ...] = (0.0, 0.0, 0.001), + linear_coefficients: tuple[float, ...] = (0.0, 0.0, 0.001), injection_mode: int = 4, injection_scale: float = 0.0, order: int = 2, @@ -61,7 +61,7 @@ def __init__( in each dimension is the same. Hence, the total number of degrees of freedom is `Nᵈ`. - `dt`: The timestep size `Δt` between two consecutive states. - - `coefficients`: The list of coefficients `a_j` + - `linear_coefficients`: The list of coefficients `a_j` corresponding to the derivatives. The length of this tuple represents the highest occuring derivative. The default value `(0.0, 0.0, 0.001)` corresponds to pure regular diffusion. 
@@ -89,7 +89,7 @@ def __init__( if num_spatial_dims != 2: raise ValueError(f"Expected num_spatial_dims = 2, got {num_spatial_dims}.") self.vorticity_convection_scale = vorticity_convection_scale - self.coefficients = coefficients + self.linear_coefficients = linear_coefficients self.injection_mode = injection_mode self.injection_scale = injection_scale self.dealiasing_fraction = dealiasing_fraction @@ -114,7 +114,7 @@ def _build_linear_operator( axis=0, keepdims=True, ) - for i, c in enumerate(self.coefficients) + for i, c in enumerate(self.linear_coefficients) ) return linear_operator diff --git a/validation/qualitative_rollouts.py b/validation/qualitative_rollouts.py index 3e89dd5..8460548 100644 --- a/validation/qualitative_rollouts.py +++ b/validation/qualitative_rollouts.py @@ -66,7 +66,7 @@ 3.0, 110, 0.01, - coefficients=[0.0, 0.0, 0.1, 0.0001], + linear_coefficients=[0.0, 0.0, 0.1, 0.0001], ), "dispersion_diffusion", ex.ic.RandomTruncatedFourierSeries(1, cutoff=5), @@ -80,7 +80,7 @@ 3.0, 110, 0.01, - coefficients=[0.0, 0.0, 0.0, 0.0001, -0.001], + linear_coefficients=[0.0, 0.0, 0.0, 0.0001, -0.001], ), "dispersion_hyper_diffusion", ex.ic.RandomTruncatedFourierSeries(1, cutoff=5),
Have consistent attributes for generic time steppers
2024-10-22T12:31:39
0.0
[]
[]
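The patch above only renames constructor keywords (`coefficients_linear` → `linear_coefficients`, `coefficients_nonlinear` → `nonlinear_coefficients`, `polynomial_scales` → `polynomial_coefficients`); behavior is unchanged. A minimal usage sketch of the new names, assuming the patched `exponax` revision is installed and that `GeneralNonlinearStepper` is exported under `exponax.stepper.generic` (the module path shown in the diff):

```python
import exponax as ex

# Default Burgers-like configuration from the patched docstring:
# linear part 0.01 * u_xx, nonlinear part a convection term scaled by -1.0.
# Before this patch the keywords were `coefficients_linear` /
# `coefficients_nonlinear`.
stepper = ex.stepper.generic.GeneralNonlinearStepper(
    num_spatial_dims=1,
    domain_extent=3.0,
    num_points=110,
    dt=0.01,
    linear_coefficients=(0.0, 0.0, 0.01),
    nonlinear_coefficients=(0.0, -1.0, 0.0),
)
```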
openapi-generators/openapi-python-client
openapi-generators__openapi-python-client-1141
2fcd3cbd0e68ba2fb5702554db6b7cf331ea750c
diff --git a/openapi_python_client/parser/properties/list_property.py b/openapi_python_client/parser/properties/list_property.py index c78e50513..47a52cda3 100644 --- a/openapi_python_client/parser/properties/list_property.py +++ b/openapi_python_client/parser/properties/list_property.py @@ -58,12 +58,28 @@ def build( """ from . import property_from_data - if data.items is None: - return PropertyError(data=data, detail="type array must have items defined"), schemas + if data.items is None and not data.prefixItems: + return ( + PropertyError( + data=data, + detail="type array must have items or prefixItems defined", + ), + schemas, + ) + + items = data.prefixItems or [] + if data.items: + items.append(data.items) + + if len(items) == 1: + inner_schema = items[0] + else: + inner_schema = oai.Schema(anyOf=items) + inner_prop, schemas = property_from_data( name=f"{name}_item", required=True, - data=data.items, + data=inner_schema, schemas=schemas, parent_name=parent_name, config=config, diff --git a/openapi_python_client/schema/openapi_schema_pydantic/schema.py b/openapi_python_client/schema/openapi_schema_pydantic/schema.py index 9bd6f5cde..a3e4cb522 100644 --- a/openapi_python_client/schema/openapi_schema_pydantic/schema.py +++ b/openapi_python_client/schema/openapi_schema_pydantic/schema.py @@ -43,6 +43,7 @@ class Schema(BaseModel): anyOf: List[Union[Reference, "Schema"]] = Field(default_factory=list) schema_not: Optional[Union[Reference, "Schema"]] = Field(default=None, alias="not") items: Optional[Union[Reference, "Schema"]] = None + prefixItems: Optional[List[Union[Reference, "Schema"]]] = Field(default_factory=list) properties: Optional[Dict[str, Union[Reference, "Schema"]]] = None additionalProperties: Optional[Union[bool, Reference, "Schema"]] = None description: Optional[str] = None
feat: support OpenAPI 3.1 prefixItems property for arrays

Generates a union of all types in `prefixItems` and `items` for the inner list item type. This could probably be made stricter by validating each prefix item against its specific type, but this felt good enough for a start.
2024-10-20T21:49:17
0.0
[]
[]
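The folding logic in this record's patch is small enough to restate standalone. A sketch using plain dicts in place of the generator's internal `oai.Schema` objects (the helper name below is hypothetical, not part of the project):

```python
from typing import Any, Dict, List, Optional

def inner_item_schema(
    items: Optional[Dict[str, Any]],
    prefix_items: Optional[List[Dict[str, Any]]],
) -> Dict[str, Any]:
    """Collapse `prefixItems` + `items` into one schema for the list's inner type."""
    candidates = list(prefix_items or [])
    if items:
        candidates.append(items)
    if not candidates:
        # Mirrors the PropertyError raised in ListProperty.build.
        raise ValueError("type array must have items or prefixItems defined")
    if len(candidates) == 1:
        return candidates[0]          # a single schema needs no union
    return {"anyOf": candidates}      # otherwise: union of all possible item types

# prefixItems of [integer, string] plus items of boolean yields an anyOf of
# all three, so the generated list type becomes List[Union[int, str, bool]].
print(inner_item_schema({"type": "boolean"},
                        [{"type": "integer"}, {"type": "string"}]))
```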
openapi-generators/openapi-python-client
openapi-generators__openapi-python-client-1042
9b55d70a85400fbd623a77510ab5bf01cebf0336
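The patch below replaces PyYAML's YAML-1.1-semantics `safe_load` with ruamel.yaml's 1.2-compliant loader. A minimal sketch of the user-visible difference, assuming both libraries are installed; the `yes`/`no`/`on`/`off` literals are the classic breaking case between the two specs:

```python
import yaml                        # PyYAML: YAML 1.1 semantics
from ruamel.yaml import YAML       # ruamel.yaml: YAML 1.2 by default

doc = "flag: yes"                  # `yes` is a boolean in 1.1, a string in 1.2

print(yaml.safe_load(doc))         # {'flag': True}
print(YAML(typ="safe").load(doc))  # {'flag': 'yes'}
```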
diff --git a/.changeset/switch_yaml_parsing_to_12.md b/.changeset/switch_yaml_parsing_to_12.md new file mode 100644 index 000000000..44d945ad7 --- /dev/null +++ b/.changeset/switch_yaml_parsing_to_12.md @@ -0,0 +1,11 @@ +--- +default: major +--- + +# Switch YAML parsing to 1.2 + +This change switches the YAML parsing library to `ruamel.yaml` which follows the YAML 1.2 specification. +[There are breaking changes](https://yaml.readthedocs.io/en/latest/pyyaml/#defaulting-to-yaml-12-support) from YAML 1.1 to 1.2, +though they will not affect most use cases. + +PR #1042 fixes #1041. Thanks @rtaycher! diff --git a/openapi_python_client/__init__.py b/openapi_python_client/__init__.py index 23d972eac..5f36e2fbc 100644 --- a/openapi_python_client/__init__.py +++ b/openapi_python_client/__init__.py @@ -11,8 +11,9 @@ import httpcore import httpx -import yaml from jinja2 import BaseLoader, ChoiceLoader, Environment, FileSystemLoader, PackageLoader +from ruamel.yaml import YAML +from ruamel.yaml.error import YAMLError from openapi_python_client import utils @@ -350,8 +351,9 @@ def _load_yaml_or_json(data: bytes, content_type: Optional[str]) -> Union[Dict[s return GeneratorError(header=f"Invalid JSON from provided source: {err}") else: try: - return yaml.safe_load(data) - except yaml.YAMLError as err: + yaml = YAML(typ="safe") + return yaml.load(data) + except YAMLError as err: return GeneratorError(header=f"Invalid YAML from provided source: {err}") diff --git a/openapi_python_client/config.py b/openapi_python_client/config.py index f779d90ac..535755cca 100644 --- a/openapi_python_client/config.py +++ b/openapi_python_client/config.py @@ -4,9 +4,9 @@ from pathlib import Path from typing import Dict, List, Optional, Union -import yaml from attr import define from pydantic import BaseModel +from ruamel.yaml import YAML class ClassOverride(BaseModel): @@ -51,7 +51,8 @@ def load_from_path(path: Path) -> "ConfigFile": if mime == "application/json": config_data = json.loads(path.read_text()) else: - config_data = yaml.safe_load(path.read_text()) + yaml = YAML(typ="safe") + config_data = yaml.load(path) config = ConfigFile(**config_data) return config diff --git a/pdm.lock b/pdm.lock index 395ecd6c3..892f64929 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "dev"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:33fd6653aa69618cd07823ced1ee6f90250d4f22e8fdd90012950d043cbc18bf" +content_hash = "sha256:cf2b9eebb1ee290dba283b2732207f7c20bc4d8920e071179eb7f7da975ff2b9" [[package]] name = "annotated-types" @@ -23,18 +23,19 @@ files = [ [[package]] name = "anyio" -version = "3.7.1" -requires_python = ">=3.7" +version = "4.3.0" +requires_python = ">=3.8" summary = "High level compatibility layer for multiple asynchronous event loop implementations" groups = ["default"] dependencies = [ - "exceptiongroup; python_version < \"3.11\"", + "exceptiongroup>=1.0.2; python_version < \"3.11\"", "idna>=2.8", "sniffio>=1.1", + "typing-extensions>=4.1; python_version < \"3.11\"", ] files = [ - {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, - {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, + {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, + {file = "anyio-4.3.0.tar.gz", hash = 
"sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, ] [[package]] @@ -64,13 +65,13 @@ files = [ [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" requires_python = ">=3.6" summary = "Python package for providing Mozilla's CA Bundle." groups = ["default", "dev"] files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -251,134 +252,134 @@ files = [ [[package]] name = "coverage" -version = "7.4.0" +version = "7.5.1" requires_python = ">=3.8" summary = "Code coverage measurement for Python" groups = ["dev"] files = [ - {file = "coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, - {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, - {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, - {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, - {file = 
"coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, - {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, - {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, - {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, - {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, - {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, - {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, - {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, - {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, - {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, - {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, + {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = 
"sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, + {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, + {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, + {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, + {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, + {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, + {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, + {file = 
"coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, + {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, + {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, + {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, + {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, + {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, + {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, + {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, + {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, ] [[package]] name = "coverage" -version = "7.4.0" +version = "7.5.1" extras = ["toml"] requires_python = ">=3.8" summary = "Code coverage measurement for Python" groups = ["dev"] dependencies = [ - "coverage==7.4.0", + "coverage==7.5.1", "tomli; python_full_version <= \"3.11.0a6\"", ] files = [ - {file = 
"coverage-7.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a"}, - {file = "coverage-7.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516"}, - {file = "coverage-7.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae"}, - {file = "coverage-7.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43"}, - {file = "coverage-7.4.0-cp310-cp310-win32.whl", hash = "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451"}, - {file = "coverage-7.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca"}, - {file = "coverage-7.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc"}, - {file = "coverage-7.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09"}, - {file = "coverage-7.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26"}, - {file = "coverage-7.4.0-cp311-cp311-win32.whl", hash = "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614"}, - {file = "coverage-7.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143"}, - {file = "coverage-7.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446"}, - {file = "coverage-7.4.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a"}, - {file = "coverage-7.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa"}, - {file = "coverage-7.4.0-cp312-cp312-win32.whl", hash = "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450"}, - {file = "coverage-7.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e"}, - {file = "coverage-7.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1"}, - {file = "coverage-7.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e"}, - {file = "coverage-7.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105"}, - {file = "coverage-7.4.0-cp38-cp38-win32.whl", hash = "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2"}, - {file = "coverage-7.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42"}, - {file = "coverage-7.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed"}, - {file = "coverage-7.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058"}, - {file = "coverage-7.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f"}, - {file = "coverage-7.4.0-cp39-cp39-win32.whl", hash = "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932"}, - {file = "coverage-7.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e"}, - {file = "coverage-7.4.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6"}, - {file = "coverage-7.4.0.tar.gz", hash = "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"}, + {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"}, + {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"}, + {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"}, + {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = "sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"}, + {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"}, + {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"}, + {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"}, + {file = 
"coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"}, + {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"}, + {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"}, + {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"}, + {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"}, + {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"}, + {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"}, + {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"}, + {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"}, + {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"}, + {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"}, + {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"}, + {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"}, + {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"}, + {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"}, + {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"}, + {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"}, + {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"}, + {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"}, + {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"}, + {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"}, + {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"}, + {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"}, ] [[package]] name = "cryptography" -version = "42.0.5" +version = "42.0.7" requires_python = ">=3.7" summary = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
groups = ["dev"] @@ -386,38 +387,38 @@ dependencies = [ "cffi>=1.12; platform_python_implementation != \"PyPy\"", ] files = [ - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, - {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, - {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, - {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, - {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, - {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, - {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, - {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, - {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, - {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, - {file = 
"cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, - {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, - {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, - {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"}, + {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"}, + {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"}, + {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"}, + {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"}, + {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"}, + {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"}, + {file = 
"cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"}, + {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"}, + {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"}, + {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"}, + {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"}, + {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"}, + {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"}, + {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"}, ] [[package]] @@ -437,14 +438,14 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.1" requires_python = ">=3.7" summary = "Backport of PEP 654 (exception groups)" groups = ["default", "dev"] marker = "python_version < \"3.11\"" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = 
"sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, + {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, ] [[package]] @@ -460,7 +461,7 @@ files = [ [[package]] name = "httpcore" -version = "1.0.2" +version = "1.0.5" requires_python = ">=3.8" summary = "A minimal low-level HTTP client." groups = ["default"] @@ -469,8 +470,8 @@ dependencies = [ "h11<0.15,>=0.13", ] files = [ - {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, - {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [[package]] @@ -493,13 +494,13 @@ files = [ [[package]] name = "idna" -version = "3.6" +version = "3.7" requires_python = ">=3.5" summary = "Internationalized Domain Names in Applications (IDNA)" groups = ["default", "dev"] files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -515,7 +516,7 @@ files = [ [[package]] name = "jinja2" -version = "3.1.3" +version = "3.1.4" requires_python = ">=3.7" summary = "A very fast and expressive template engine." groups = ["default", "dev"] @@ -523,8 +524,8 @@ dependencies = [ "MarkupSafe>=2.0", ] files = [ - {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, - {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [[package]] @@ -543,67 +544,67 @@ files = [ [[package]] name = "markupsafe" -version = "2.1.3" +version = "2.1.5" requires_python = ">=3.7" summary = "Safely add untrusted strings to HTML/XML markup." 
groups = ["default", "dev"] files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = 
"MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", 
hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] name = "marshmallow" -version = "3.21.1" +version = "3.21.2" requires_python = ">=3.8" summary = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
groups = ["dev"] @@ -611,8 +612,8 @@ dependencies = [ "packaging>=17.0", ] files = [ - {file = "marshmallow-3.21.1-py3-none-any.whl", hash = "sha256:f085493f79efb0644f270a9bf2892843142d80d7174bbbd2f3713f2a589dc633"}, - {file = "marshmallow-3.21.1.tar.gz", hash = "sha256:4e65e9e0d80fc9e609574b9983cf32579f305c718afb30d7233ab818571768c3"}, + {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, + {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, ] [[package]] @@ -628,19 +629,19 @@ files = [ [[package]] name = "mslex" -version = "1.1.0" +version = "1.2.0" requires_python = ">=3.5" summary = "shlex for windows" groups = ["dev"] marker = "sys_platform == \"win32\"" files = [ - {file = "mslex-1.1.0-py2.py3-none-any.whl", hash = "sha256:8826f4bb8d8c63402203d921dc8c2df0c7fec0d9c91d020ddf02fc9d0dce81bd"}, - {file = "mslex-1.1.0.tar.gz", hash = "sha256:7fe305fbdc9721283875e0b737fdb344374b761338a7f41af91875de278568e4"}, + {file = "mslex-1.2.0-py3-none-any.whl", hash = "sha256:c68ec637485ee3544c5847c1b4e78b02940b32708568fb1d8715491815aa2341"}, + {file = "mslex-1.2.0.tar.gz", hash = "sha256:79e2abc5a129dd71cdde58a22a2039abb7fa8afcbac498b723ba6e9b9fbacc14"}, ] [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.0" requires_python = ">=3.8" summary = "Optional static typing for Python" groups = ["dev"] @@ -650,33 +651,33 @@ dependencies = [ "typing-extensions>=4.1.0", ] files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, ] [[package]] @@ -692,76 +693,73 @@ files = [ [[package]] name = "packaging" -version = "21.3" -requires_python = ">=3.6" +version = "24.0" +requires_python = ">=3.7" summary = "Core utilities for Python packages" groups = ["dev"] -dependencies = [ - "pyparsing!=3.0.5,>=2.0.2", -] files = [ - {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, - {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" requires_python = ">=3.8" summary = "plugin and hook calling mechanisms for python" groups = ["dev"] files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = 
"pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [[package]] name = "psutil" -version = "5.9.7" +version = "5.9.8" requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" summary = "Cross-platform lib for process and system monitoring in Python." groups = ["dev"] files = [ - {file = "psutil-5.9.7-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"}, - {file = "psutil-5.9.7-cp37-abi3-win32.whl", hash = "sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68"}, - {file = "psutil-5.9.7-cp37-abi3-win_amd64.whl", hash = "sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414"}, - {file = "psutil-5.9.7-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340"}, - {file = "psutil-5.9.7.tar.gz", hash = "sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c"}, + {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, + {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, + {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, + {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, + {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, ] [[package]] name = "pycparser" -version = "2.21" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.22" +requires_python = ">=3.8" summary = "C parser in Python" groups = ["dev"] marker = "platform_python_implementation != \"PyPy\"" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] [[package]] name = "pydantic" -version = "2.7.0" +version = "2.7.1" requires_python = ">=3.8" summary = "Data validation using Python type hints" groups = ["default", "dev"] dependencies = [ "annotated-types>=0.4.0", - "pydantic-core==2.18.1", + "pydantic-core==2.18.2", 
"typing-extensions>=4.6.1", ] files = [ - {file = "pydantic-2.7.0-py3-none-any.whl", hash = "sha256:9dee74a271705f14f9a1567671d144a851c675b072736f0a7b2608fd9e495352"}, - {file = "pydantic-2.7.0.tar.gz", hash = "sha256:b5ecdd42262ca2462e2624793551e80911a1e989f462910bb81aef974b4bb383"}, + {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, + {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, ] [[package]] name = "pydantic-core" -version = "2.18.1" +version = "2.18.2" requires_python = ">=3.8" summary = "Core functionality for Pydantic validation and serialization" groups = ["default", "dev"] @@ -769,112 +767,101 @@ dependencies = [ "typing-extensions!=4.7.0,>=4.6.0", ] files = [ - {file = "pydantic_core-2.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ee9cf33e7fe14243f5ca6977658eb7d1042caaa66847daacbd2117adb258b226"}, - {file = "pydantic_core-2.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6b7bbb97d82659ac8b37450c60ff2e9f97e4eb0f8a8a3645a5568b9334b08b50"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df4249b579e75094f7e9bb4bd28231acf55e308bf686b952f43100a5a0be394c"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0491006a6ad20507aec2be72e7831a42efc93193d2402018007ff827dc62926"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ae80f72bb7a3e397ab37b53a2b49c62cc5496412e71bc4f1277620a7ce3f52b"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58aca931bef83217fca7a390e0486ae327c4af9c3e941adb75f8772f8eeb03a1"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be91ad664fc9245404a789d60cba1e91c26b1454ba136d2a1bf0c2ac0c0505a"}, - {file = "pydantic_core-2.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:667880321e916a8920ef49f5d50e7983792cf59f3b6079f3c9dac2b88a311d17"}, - {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f7054fdc556f5421f01e39cbb767d5ec5c1139ea98c3e5b350e02e62201740c7"}, - {file = "pydantic_core-2.18.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:030e4f9516f9947f38179249778709a460a3adb516bf39b5eb9066fcfe43d0e6"}, - {file = "pydantic_core-2.18.1-cp310-none-win32.whl", hash = "sha256:2e91711e36e229978d92642bfc3546333a9127ecebb3f2761372e096395fc649"}, - {file = "pydantic_core-2.18.1-cp310-none-win_amd64.whl", hash = "sha256:9a29726f91c6cb390b3c2338f0df5cd3e216ad7a938762d11c994bb37552edb0"}, - {file = "pydantic_core-2.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9ece8a49696669d483d206b4474c367852c44815fca23ac4e48b72b339807f80"}, - {file = "pydantic_core-2.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a5d83efc109ceddb99abd2c1316298ced2adb4570410defe766851a804fcd5b"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7973c381283783cd1043a8c8f61ea5ce7a3a58b0369f0ee0ee975eaf2f2a1b"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:54c7375c62190a7845091f521add19b0f026bcf6ae674bdb89f296972272e86d"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:dd63cec4e26e790b70544ae5cc48d11b515b09e05fdd5eff12e3195f54b8a586"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:561cf62c8a3498406495cfc49eee086ed2bb186d08bcc65812b75fda42c38294"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68717c38a68e37af87c4da20e08f3e27d7e4212e99e96c3d875fbf3f4812abfc"}, - {file = "pydantic_core-2.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d5728e93d28a3c63ee513d9ffbac9c5989de8c76e049dbcb5bfe4b923a9739d"}, - {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f0f17814c505f07806e22b28856c59ac80cee7dd0fbb152aed273e116378f519"}, - {file = "pydantic_core-2.18.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d816f44a51ba5175394bc6c7879ca0bd2be560b2c9e9f3411ef3a4cbe644c2e9"}, - {file = "pydantic_core-2.18.1-cp311-none-win32.whl", hash = "sha256:09f03dfc0ef8c22622eaa8608caa4a1e189cfb83ce847045eca34f690895eccb"}, - {file = "pydantic_core-2.18.1-cp311-none-win_amd64.whl", hash = "sha256:27f1009dc292f3b7ca77feb3571c537276b9aad5dd4efb471ac88a8bd09024e9"}, - {file = "pydantic_core-2.18.1-cp311-none-win_arm64.whl", hash = "sha256:48dd883db92e92519201f2b01cafa881e5f7125666141a49ffba8b9facc072b0"}, - {file = "pydantic_core-2.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b6b0e4912030c6f28bcb72b9ebe4989d6dc2eebcd2a9cdc35fefc38052dd4fe8"}, - {file = "pydantic_core-2.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3202a429fe825b699c57892d4371c74cc3456d8d71b7f35d6028c96dfecad31"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3982b0a32d0a88b3907e4b0dc36809fda477f0757c59a505d4e9b455f384b8b"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25595ac311f20e5324d1941909b0d12933f1fd2171075fcff763e90f43e92a0d"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14fe73881cf8e4cbdaded8ca0aa671635b597e42447fec7060d0868b52d074e6"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca976884ce34070799e4dfc6fbd68cb1d181db1eefe4a3a94798ddfb34b8867f"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:684d840d2c9ec5de9cb397fcb3f36d5ebb6fa0d94734f9886032dd796c1ead06"}, - {file = "pydantic_core-2.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:54764c083bbe0264f0f746cefcded6cb08fbbaaf1ad1d78fb8a4c30cff999a90"}, - {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:201713f2f462e5c015b343e86e68bd8a530a4f76609b33d8f0ec65d2b921712a"}, - {file = "pydantic_core-2.18.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd1a9edb9dd9d79fbeac1ea1f9a8dd527a6113b18d2e9bcc0d541d308dae639b"}, - {file = "pydantic_core-2.18.1-cp312-none-win32.whl", hash = "sha256:d5e6b7155b8197b329dc787356cfd2684c9d6a6b1a197f6bbf45f5555a98d411"}, - {file = "pydantic_core-2.18.1-cp312-none-win_amd64.whl", hash = "sha256:9376d83d686ec62e8b19c0ac3bf8d28d8a5981d0df290196fb6ef24d8a26f0d6"}, - {file = "pydantic_core-2.18.1-cp312-none-win_arm64.whl", hash = "sha256:c562b49c96906b4029b5685075fe1ebd3b5cc2601dfa0b9e16c2c09d6cbce048"}, - {file = "pydantic_core-2.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:3e352f0191d99fe617371096845070dee295444979efb8f27ad941227de6ad09"}, - {file = "pydantic_core-2.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0295d52b012cbe0d3059b1dba99159c3be55e632aae1999ab74ae2bd86a33d7"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56823a92075780582d1ffd4489a2e61d56fd3ebb4b40b713d63f96dd92d28144"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd3f79e17b56741b5177bcc36307750d50ea0698df6aa82f69c7db32d968c1c2"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38a5024de321d672a132b1834a66eeb7931959c59964b777e8f32dbe9523f6b1"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2ce426ee691319d4767748c8e0895cfc56593d725594e415f274059bcf3cb76"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2adaeea59849ec0939af5c5d476935f2bab4b7f0335b0110f0f069a41024278e"}, - {file = "pydantic_core-2.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9b6431559676a1079eac0f52d6d0721fb8e3c5ba43c37bc537c8c83724031feb"}, - {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:85233abb44bc18d16e72dc05bf13848a36f363f83757541f1a97db2f8d58cfd9"}, - {file = "pydantic_core-2.18.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:641a018af4fe48be57a2b3d7a1f0f5dbca07c1d00951d3d7463f0ac9dac66622"}, - {file = "pydantic_core-2.18.1-cp38-none-win32.whl", hash = "sha256:63d7523cd95d2fde0d28dc42968ac731b5bb1e516cc56b93a50ab293f4daeaad"}, - {file = "pydantic_core-2.18.1-cp38-none-win_amd64.whl", hash = "sha256:907a4d7720abfcb1c81619863efd47c8a85d26a257a2dbebdb87c3b847df0278"}, - {file = "pydantic_core-2.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:aad17e462f42ddbef5984d70c40bfc4146c322a2da79715932cd8976317054de"}, - {file = "pydantic_core-2.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:94b9769ba435b598b547c762184bcfc4783d0d4c7771b04a3b45775c3589ca44"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80e0e57cc704a52fb1b48f16d5b2c8818da087dbee6f98d9bf19546930dc64b5"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76b86e24039c35280ceee6dce7e62945eb93a5175d43689ba98360ab31eebc4a"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a05db5013ec0ca4a32cc6433f53faa2a014ec364031408540ba858c2172bb0"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:250ae39445cb5475e483a36b1061af1bc233de3e9ad0f4f76a71b66231b07f88"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a32204489259786a923e02990249c65b0f17235073149d0033efcebe80095570"}, - {file = "pydantic_core-2.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6395a4435fa26519fd96fdccb77e9d00ddae9dd6c742309bd0b5610609ad7fb2"}, - {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2533ad2883f001efa72f3d0e733fb846710c3af6dcdd544fe5bf14fa5fe2d7db"}, - {file = "pydantic_core-2.18.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b560b72ed4816aee52783c66854d96157fd8175631f01ef58e894cc57c84f0f6"}, - {file = "pydantic_core-2.18.1-cp39-none-win32.whl", hash = 
"sha256:582cf2cead97c9e382a7f4d3b744cf0ef1a6e815e44d3aa81af3ad98762f5a9b"}, - {file = "pydantic_core-2.18.1-cp39-none-win_amd64.whl", hash = "sha256:ca71d501629d1fa50ea7fa3b08ba884fe10cefc559f5c6c8dfe9036c16e8ae89"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e178e5b66a06ec5bf51668ec0d4ac8cfb2bdcb553b2c207d58148340efd00143"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:72722ce529a76a4637a60be18bd789d8fb871e84472490ed7ddff62d5fed620d"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe0c1ce5b129455e43f941f7a46f61f3d3861e571f2905d55cdbb8b5c6f5e2c"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4284c621f06a72ce2cb55f74ea3150113d926a6eb78ab38340c08f770eb9b4d"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a0c3e718f4e064efde68092d9d974e39572c14e56726ecfaeebbe6544521f47"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2027493cc44c23b598cfaf200936110433d9caa84e2c6cf487a83999638a96ac"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76909849d1a6bffa5a07742294f3fa1d357dc917cb1fe7b470afbc3a7579d539"}, - {file = "pydantic_core-2.18.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ee7ccc7fb7e921d767f853b47814c3048c7de536663e82fbc37f5eb0d532224b"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ee2794111c188548a4547eccc73a6a8527fe2af6cf25e1a4ebda2fd01cdd2e60"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a139fe9f298dc097349fb4f28c8b81cc7a202dbfba66af0e14be5cfca4ef7ce5"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d074b07a10c391fc5bbdcb37b2f16f20fcd9e51e10d01652ab298c0d07908ee2"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c69567ddbac186e8c0aadc1f324a60a564cfe25e43ef2ce81bcc4b8c3abffbae"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:baf1c7b78cddb5af00971ad5294a4583188bda1495b13760d9f03c9483bb6203"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2684a94fdfd1b146ff10689c6e4e815f6a01141781c493b97342cdc5b06f4d5d"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:73c1bc8a86a5c9e8721a088df234265317692d0b5cd9e86e975ce3bc3db62a59"}, - {file = "pydantic_core-2.18.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e60defc3c15defb70bb38dd605ff7e0fae5f6c9c7cbfe0ad7868582cb7e844a6"}, - {file = "pydantic_core-2.18.1.tar.gz", hash = "sha256:de9d3e8717560eb05e28739d1b35e4eac2e458553a52a301e51352a7ffc86a35"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, + {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, + {file = 
"pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, + {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, + {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, + {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, + {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, + {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, + {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, + {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, + {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, + {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, + {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, + {file = 
"pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, + {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, + {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, + {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, + {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, + {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, + {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9fdad8e35f278b2c3eb77cbdc5c0a49dada440657bf738d6905ce106dc1de439"}, + {file = "pydantic_core-2.18.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1d90c3265ae107f91a4f279f4d6f6f1d4907ac76c6868b27dc7fb33688cfb347"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:390193c770399861d8df9670fb0d1874f330c79caaca4642332df7c682bf6b91"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:82d5d4d78e4448683cb467897fe24e2b74bb7b973a541ea1dcfec1d3cbce39fb"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4774f3184d2ef3e14e8693194f661dea5a4d6ca4e3dc8e39786d33a94865cefd"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4d938ec0adf5167cb335acb25a4ee69a8107e4984f8fbd2e897021d9e4ca21b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0e8b1be28239fc64a88a8189d1df7fad8be8c1ae47fcc33e43d4be15f99cc70"}, + {file = "pydantic_core-2.18.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868649da93e5a3d5eacc2b5b3b9235c98ccdbfd443832f31e075f54419e1b96b"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:78363590ef93d5d226ba21a90a03ea89a20738ee5b7da83d771d283fd8a56761"}, + {file = "pydantic_core-2.18.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:852e966fbd035a6468fc0a3496589b45e2208ec7ca95c26470a54daed82a0788"}, + {file = "pydantic_core-2.18.2-cp38-none-win32.whl", hash = "sha256:6a46e22a707e7ad4484ac9ee9f290f9d501df45954184e23fc29408dfad61350"}, + {file = "pydantic_core-2.18.2-cp38-none-win_amd64.whl", hash = "sha256:d91cb5ea8b11607cc757675051f61b3d93f15eca3cefb3e6c704a5d6e8440f4e"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ae0a8a797a5e56c053610fa7be147993fe50960fa43609ff2a9552b0e07013e8"}, + {file = "pydantic_core-2.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:042473b6280246b1dbf530559246f6842b56119c2926d1e52b631bdc46075f2a"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a388a77e629b9ec814c1b1e6b3b595fe521d2cdc625fcca26fbc2d44c816804"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25add29b8f3b233ae90ccef2d902d0ae0432eb0d45370fe315d1a5cf231004b"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f459a5ce8434614dfd39bbebf1041952ae01da6bed9855008cb33b875cb024c0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eff2de745698eb46eeb51193a9f41d67d834d50e424aef27df2fcdee1b153845"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8309f67285bdfe65c372ea3722b7a5642680f3dba538566340a9d36e920b5f0"}, + {file = "pydantic_core-2.18.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f93a8a2e3938ff656a7c1bc57193b1319960ac015b6e87d76c76bf14fe0244b4"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:22057013c8c1e272eb8d0eebc796701167d8377441ec894a8fed1af64a0bf399"}, + {file = "pydantic_core-2.18.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfeecd1ac6cc1fb2692c3d5110781c965aabd4ec5d32799773ca7b1456ac636b"}, + {file = "pydantic_core-2.18.2-cp39-none-win32.whl", hash = "sha256:0d69b4c2f6bb3e130dba60d34c0845ba31b69babdd3f78f7c0c8fae5021a253e"}, + {file = "pydantic_core-2.18.2-cp39-none-win_amd64.whl", hash = "sha256:d9319e499827271b09b4e411905b24a426b8fb69464dfa1696258f53a3334641"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, + {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, + {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, + {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, ] [[package]] name = "pygments" -version = "2.17.2" -requires_python = ">=3.7" +version = "2.18.0" +requires_python = ">=3.8" summary = "Pygments is a syntax highlighting package written in Python." 
groups = ["default", "dev"] files = [ - {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, - {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, -] - -[[package]] -name = "pyparsing" -version = "3.1.1" -requires_python = ">=3.6.8" -summary = "pyparsing module - Classes and methods to define and execute parsing grammars" -groups = ["dev"] -files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.0" requires_python = ">=3.8" summary = "pytest: simple powerful testing with Python" groups = ["dev"] @@ -883,12 +870,12 @@ dependencies = [ "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", "iniconfig", "packaging", - "pluggy<2.0,>=1.4", + "pluggy<2.0,>=1.5", "tomli>=1; python_version < \"3.11\"", ] files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, + {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, ] [[package]] @@ -945,54 +932,6 @@ files = [ {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, ] -[[package]] -name = "pyyaml" -version = "6.0.1" -requires_python = ">=3.6" -summary = "YAML parser and emitter for Python" -groups = ["default"] -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = 
"PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - [[package]] name = "requests" version = "2.31.0" @@ -1028,16 +967,16 @@ files = [ [[package]] name = "ruamel-yaml" -version = "0.18.5" +version = "0.18.6" requires_python = ">=3.7" summary = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" -groups = ["dev"] +groups = ["default", "dev"] dependencies = [ "ruamel-yaml-clib>=0.2.7; platform_python_implementation == \"CPython\" and python_version < \"3.13\"", ] files = [ - {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, - {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [[package]] @@ -1045,7 +984,7 @@ name = "ruamel-yaml-clib" version = "0.2.8" requires_python = ">=3.6" summary = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" -groups = ["dev"] +groups = ["default", "dev"] marker = "platform_python_implementation == \"CPython\" and python_version < \"3.13\"" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, @@ -1091,35 +1030,49 @@ files = [ {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, ] +[[package]] +name = "ruamel-yaml-string" +version = "0.1.1" +requires_python = ">=3" +summary = "add dump_to_string/dumps method that returns YAML document as string" +groups = ["dev"] +dependencies = [ + "ruamel-yaml>=0.17.17", +] +files = [ + {file = "ruamel.yaml.string-0.1.1-py3-none-any.whl", hash = "sha256:eb146bcb42b116216638034a434e9cf3ae2a5d3933aa37183a9854b5f3ff42de"}, + {file = "ruamel.yaml.string-0.1.1.tar.gz", hash = "sha256:7a7aedcc055d45c004d38b756f58474ebefb106851f4ce56ce58415709784350"}, +] + [[package]] name = "ruff" -version = "0.4.1" +version = "0.4.4" requires_python = ">=3.7" summary = "An extremely fast Python linter and code formatter, written in Rust." 
groups = ["default"] files = [ - {file = "ruff-0.4.1-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2d9ef6231e3fbdc0b8c72404a1a0c46fd0dcea84efca83beb4681c318ea6a953"}, - {file = "ruff-0.4.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9485f54a7189e6f7433e0058cf8581bee45c31a25cd69009d2a040d1bd4bfaef"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2921ac03ce1383e360e8a95442ffb0d757a6a7ddd9a5be68561a671e0e5807e"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eec8d185fe193ad053eda3a6be23069e0c8ba8c5d20bc5ace6e3b9e37d246d3f"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:baa27d9d72a94574d250f42b7640b3bd2edc4c58ac8ac2778a8c82374bb27984"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f1ee41580bff1a651339eb3337c20c12f4037f6110a36ae4a2d864c52e5ef954"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0926cefb57fc5fced629603fbd1a23d458b25418681d96823992ba975f050c2b"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c6e37f2e3cd74496a74af9a4fa67b547ab3ca137688c484749189bf3a686ceb"}, - {file = "ruff-0.4.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd703a5975ac1998c2cc5e9494e13b28f31e66c616b0a76e206de2562e0843c"}, - {file = "ruff-0.4.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b92f03b4aa9fa23e1799b40f15f8b95cdc418782a567d6c43def65e1bbb7f1cf"}, - {file = "ruff-0.4.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1c859f294f8633889e7d77de228b203eb0e9a03071b72b5989d89a0cf98ee262"}, - {file = "ruff-0.4.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:b34510141e393519a47f2d7b8216fec747ea1f2c81e85f076e9f2910588d4b64"}, - {file = "ruff-0.4.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6e68d248ed688b9d69fd4d18737edcbb79c98b251bba5a2b031ce2470224bdf9"}, - {file = "ruff-0.4.1-py3-none-win32.whl", hash = "sha256:b90506f3d6d1f41f43f9b7b5ff845aeefabed6d2494307bc7b178360a8805252"}, - {file = "ruff-0.4.1-py3-none-win_amd64.whl", hash = "sha256:c7d391e5936af5c9e252743d767c564670dc3889aff460d35c518ee76e4b26d7"}, - {file = "ruff-0.4.1-py3-none-win_arm64.whl", hash = "sha256:a1eaf03d87e6a7cd5e661d36d8c6e874693cb9bc3049d110bc9a97b350680c43"}, - {file = "ruff-0.4.1.tar.gz", hash = "sha256:d592116cdbb65f8b1b7e2a2b48297eb865f6bdc20641879aa9d7b9c11d86db79"}, + {file = "ruff-0.4.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:29d44ef5bb6a08e235c8249294fa8d431adc1426bfda99ed493119e6f9ea1bf6"}, + {file = "ruff-0.4.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c4efe62b5bbb24178c950732ddd40712b878a9b96b1d02b0ff0b08a090cbd891"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c8e2f1e8fc12d07ab521a9005d68a969e167b589cbcaee354cb61e9d9de9c15"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60ed88b636a463214905c002fa3eaab19795679ed55529f91e488db3fe8976ab"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b90fc5e170fc71c712cc4d9ab0e24ea505c6a9e4ebf346787a67e691dfb72e85"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8e7e6ebc10ef16dcdc77fd5557ee60647512b400e4a60bdc4849468f076f6eef"}, + {file = 
"ruff-0.4.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9ddb2c494fb79fc208cd15ffe08f32b7682519e067413dbaf5f4b01a6087bcd"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c51c928a14f9f0a871082603e25a1588059b7e08a920f2f9fa7157b5bf08cfe9"}, + {file = "ruff-0.4.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5eb0a4bfd6400b7d07c09a7725e1a98c3b838be557fee229ac0f84d9aa49c36"}, + {file = "ruff-0.4.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b1867ee9bf3acc21778dcb293db504692eda5f7a11a6e6cc40890182a9f9e595"}, + {file = "ruff-0.4.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1aecced1269481ef2894cc495647392a34b0bf3e28ff53ed95a385b13aa45768"}, + {file = "ruff-0.4.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9da73eb616b3241a307b837f32756dc20a0b07e2bcb694fec73699c93d04a69e"}, + {file = "ruff-0.4.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:958b4ea5589706a81065e2a776237de2ecc3e763342e5cc8e02a4a4d8a5e6f95"}, + {file = "ruff-0.4.4-py3-none-win32.whl", hash = "sha256:cb53473849f011bca6e754f2cdf47cafc9c4f4ff4570003a0dad0b9b6890e876"}, + {file = "ruff-0.4.4-py3-none-win_amd64.whl", hash = "sha256:424e5b72597482543b684c11def82669cc6b395aa8cc69acc1858b5ef3e5daae"}, + {file = "ruff-0.4.4-py3-none-win_arm64.whl", hash = "sha256:39df0537b47d3b597293edbb95baf54ff5b49589eb7ff41926d8243caa995ea6"}, + {file = "ruff-0.4.4.tar.gz", hash = "sha256:f87ea42d5cdebdc6a69761a9d0bc83ae9b3b30d0ad78952005ba6568d6c022af"}, ] [[package]] name = "safety" -version = "3.1.0" +version = "3.2.0" requires_python = ">=3.7" summary = "Checks installed dependencies for known vulnerabilities and licenses." groups = ["dev"] @@ -1141,8 +1094,8 @@ dependencies = [ "urllib3>=1.26.5", ] files = [ - {file = "safety-3.1.0-py3-none-any.whl", hash = "sha256:f2ba2d36f15ac1e24751547a73b854509a7d6db31efd30b57f64ffdf9d021934"}, - {file = "safety-3.1.0.tar.gz", hash = "sha256:71f47b82ece153ec2f240e277f7cbfa70d5da2e0d143162c67f63b2f7459a1aa"}, + {file = "safety-3.2.0-py3-none-any.whl", hash = "sha256:a432fc9d17e79a4386c4f093656b617c56f839cde022649cfa796d72c7a544de"}, + {file = "safety-3.2.0.tar.gz", hash = "sha256:8bd5cab5f3d8a61ce0ea6e98f267c1006d056097c45c644fee7afeff7d5949c1"}, ] [[package]] @@ -1165,13 +1118,13 @@ files = [ [[package]] name = "setuptools" -version = "69.0.3" +version = "69.5.1" requires_python = ">=3.8" summary = "Easily download, build, install, upgrade, and uninstall Python packages" groups = ["dev"] files = [ - {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, - {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, + {file = "setuptools-69.5.1-py3-none-any.whl", hash = "sha256:c636ac361bc47580504644275c9ad802c50415c7522212252c033bd15f301f32"}, + {file = "setuptools-69.5.1.tar.gz", hash = "sha256:6c1fccdac05a97e598fb0ae3bbed5904ccb317337a51139dcd51453611bbb987"}, ] [[package]] @@ -1198,13 +1151,13 @@ files = [ [[package]] name = "sniffio" -version = "1.3.0" +version = "1.3.1" requires_python = ">=3.7" summary = "Sniff out which async library your code is running under" groups = ["default"] files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, + {file = 
"sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] [[package]] @@ -1298,11 +1251,11 @@ files = [ [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" requires_python = ">=3.8" summary = "HTTP library with thread-safe connection pooling, file post, and more." groups = ["dev"] files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] diff --git a/pyproject.toml b/pyproject.toml index c828b564e..b91ba6e8d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ dependencies = [ "attrs>=21.3.0", "python-dateutil>=2.8.1,<3.0.0", "httpx>=0.20.0,<0.28.0", - "PyYAML>=6.0,<7.0", + "ruamel.yaml>=0.18.6,<0.19.0", "ruff>=0.2,<0.5", "typing-extensions>=4.8.0,<5.0.0", ] @@ -97,6 +97,7 @@ dev = [ "types-PyYAML<7.0.0,>=6.0.3", "types-certifi<2021.10.9,>=2020.0.0", "types-python-dateutil<3.0.0,>=2.0.0", + "ruamel-yaml-string>=0.1.1", ] [tool.pdm.build]
Use ruamel.yaml instead of pyyaml to avoid the "NO"rway problem **Describe the bug** openapi-python-client uses pyyaml, which is sadly stuck on yaml 1.1 (yaml 1.2 came out in 2009; since then there have only been minor revisions). One of the main benefits of yaml 1.2 is that it treats strings like no/No/NO/OFF/Off/off as strings and not as booleans. I ran into this issue and had the person maintaining the API ask what version of yaml I was using, since the problem doesn't appear in yaml 1.2. ruamel also supports comments, if we ever, say, want to paste those in the documentation strings or something.
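The coercion the report describes is easy to reproduce side by side. A minimal sketch, assuming both PyYAML and ruamel.yaml are installed (this snippet is not part of the original report):

```python
import yaml                   # PyYAML: YAML 1.1 semantics
from ruamel.yaml import YAML  # ruamel.yaml: YAML 1.2 by default

doc = "country: NO"

print(yaml.safe_load(doc)["country"])  # False -- "NO" parsed as a boolean
print(YAML().load(doc)["country"])     # NO    -- stays the string "NO"
```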
I think I tried ruamel at one point and had some issues. Can’t remember what they were… Definitely open to a PR!
2024-05-14T09:55:16
0.0
[]
[]
openapi-generators/openapi-python-client
openapi-generators__openapi-python-client-899
87b969c2fe29cd206ad66558441ef9ee5a56aea0
diff --git a/.changeset/support_applicationoctet_stream_request_bodies.md b/.changeset/support_applicationoctet_stream_request_bodies.md new file mode 100644 index 000000000..8fd9df8f7 --- /dev/null +++ b/.changeset/support_applicationoctet_stream_request_bodies.md @@ -0,0 +1,11 @@ +--- +default: minor +--- + +# Support `application/octet-stream` request bodies + +Endpoints that accept `application/octet-stream` request bodies are now supported using the same `File` type as octet-stream responses. + +Thanks to @kgutwin for the implementation and @rtaycher for the discussion! + +PR #899 closes #588 diff --git a/openapi_python_client/parser/openapi.py b/openapi_python_client/parser/openapi.py index 94a42998e..0ab5cd26c 100644 --- a/openapi_python_client/parser/openapi.py +++ b/openapi_python_client/parser/openapi.py @@ -3,7 +3,7 @@ from copy import deepcopy from dataclasses import dataclass, field from http import HTTPStatus -from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union +from typing import Any, Dict, Iterator, List, Optional, Protocol, Set, Tuple, Union import attr from pydantic import ValidationError @@ -111,6 +111,15 @@ def generate_operation_id(*, path: str, method: str) -> str: models_relative_prefix: str = "..." +class RequestBodyParser(Protocol): + __name__: str = "RequestBodyParser" + + def __call__( + self, *, body: oai.RequestBody, schemas: Schemas, parent_name: str, config: Config + ) -> Tuple[Union[Property, PropertyError, None], Schemas]: + ... # pragma: no cover + + @dataclass class Endpoint: """ @@ -133,6 +142,7 @@ class Endpoint: form_body: Optional[Property] = None json_body: Optional[Property] = None multipart_body: Optional[Property] = None + binary_body: Optional[Property] = None errors: List[ParseError] = field(default_factory=list) used_python_identifiers: Set[PythonIdentifier] = field(default_factory=set) @@ -217,6 +227,30 @@ def parse_request_json_body( ) return None, schemas + @staticmethod + def parse_request_binary_body( + *, body: oai.RequestBody, schemas: Schemas, parent_name: str, config: Config + ) -> Tuple[Union[Property, PropertyError, None], Schemas]: + """Return binary_body""" + binary_body = None + for content_type, schema in body.content.items(): + parsed_content_type = get_content_type(content_type) + + if parsed_content_type == "application/octet-stream": + binary_body = schema + break + + if binary_body is not None and binary_body.media_type_schema is not None: + return property_from_data( + name="binary_body", + required=True, + data=binary_body.media_type_schema, + schemas=schemas, + parent_name=parent_name, + config=config, + ) + return None, schemas + @staticmethod def _add_body( *, @@ -230,68 +264,34 @@ def _add_body( if data.requestBody is None or isinstance(data.requestBody, oai.Reference): return endpoint, schemas - form_body, schemas = Endpoint.parse_request_form_body( - body=data.requestBody, - schemas=schemas, - parent_name=endpoint.name, - config=config, - ) + request_body_parsers: List[Tuple[str, RequestBodyParser]] = [ + ("form_body", Endpoint.parse_request_form_body), + ("json_body", Endpoint.parse_request_json_body), + ("binary_body", Endpoint.parse_request_binary_body), + ("multipart_body", Endpoint.parse_multipart_body), + ] - if isinstance(form_body, ParseError): - return ( - ParseError( - header=f"Cannot parse form body of endpoint {endpoint.name}", - detail=form_body.detail, - data=form_body.data, - ), - schemas, - ) + for property_name, parser in request_body_parsers: + body, schemas = 
parser(body=data.requestBody, schemas=schemas, parent_name=endpoint.name, config=config) - json_body, schemas = Endpoint.parse_request_json_body( - body=data.requestBody, - schemas=schemas, - parent_name=endpoint.name, - config=config, - ) - if isinstance(json_body, ParseError): - return ( - ParseError( - header=f"Cannot parse JSON body of endpoint {endpoint.name}", - detail=json_body.detail, - data=json_body.data, - ), - schemas, - ) + if isinstance(body, ParseError): + property_type = property_name + if property_type.endswith("_body"): + property_type = property_type[:-5] + return ( + ParseError( + header=f"Cannot parse {property_type} request body of endpoint {endpoint.name}", + detail=body.detail, + data=body.data, + ), + schemas, + ) - multipart_body, schemas = Endpoint.parse_multipart_body( - body=data.requestBody, - schemas=schemas, - parent_name=endpoint.name, - config=config, - ) - if isinstance(multipart_body, ParseError): - return ( - ParseError( - header=f"Cannot parse multipart body of endpoint {endpoint.name}", - detail=multipart_body.detail, - data=multipart_body.data, - ), - schemas, - ) + if body is not None: + setattr(endpoint, property_name, body) + endpoint.relative_imports.update(body.get_imports(prefix=models_relative_prefix)) + endpoint.relative_imports.update(body.get_lazy_imports(prefix=models_relative_prefix)) - # No reasons to use lazy imports in endpoints, so add lazy imports to relative here. - if form_body is not None: - endpoint.form_body = form_body - endpoint.relative_imports.update(endpoint.form_body.get_imports(prefix=models_relative_prefix)) - endpoint.relative_imports.update(endpoint.form_body.get_lazy_imports(prefix=models_relative_prefix)) - if multipart_body is not None: - endpoint.multipart_body = multipart_body - endpoint.relative_imports.update(endpoint.multipart_body.get_imports(prefix=models_relative_prefix)) - endpoint.relative_imports.update(endpoint.multipart_body.get_lazy_imports(prefix=models_relative_prefix)) - if json_body is not None: - endpoint.json_body = json_body - endpoint.relative_imports.update(endpoint.json_body.get_imports(prefix=models_relative_prefix)) - endpoint.relative_imports.update(endpoint.json_body.get_lazy_imports(prefix=models_relative_prefix)) return endpoint, schemas @staticmethod @@ -586,6 +586,8 @@ def iter_all_parameters(self) -> Iterator[Property]: yield self.multipart_body if self.json_body: yield self.json_body + if self.binary_body: + yield self.binary_body def list_all_parameters(self) -> List[Property]: """Return a List of all the parameters of this endpoint""" diff --git a/openapi_python_client/templates/endpoint_macros.py.jinja b/openapi_python_client/templates/endpoint_macros.py.jinja index 931554299..000f2c368 100644 --- a/openapi_python_client/templates/endpoint_macros.py.jinja +++ b/openapi_python_client/templates/endpoint_macros.py.jinja @@ -2,8 +2,9 @@ {% from "helpers.jinja" import safe_docstring %} {% macro header_params(endpoint) %} -{% if endpoint.header_parameters %} +{% if endpoint.header_parameters or endpoint.binary_body %} headers = {} +{% if endpoint.header_parameters %} {% for parameter in endpoint.header_parameters.values() %} {% import "property_templates/" + parameter.template as param_template %} {% if param_template.transform_header %} @@ -15,6 +16,10 @@ headers = {} {{ guarded_statement(parameter, parameter.python_name, statement) }} {% endfor %} {% endif %} +{% if endpoint.binary_body %} +headers['Content-Type'] = {{ endpoint.binary_body.python_name }}.mime_type if {{ 
endpoint.binary_body.python_name}}.mime_type else 'application/octet-stream' +{% endif %} +{% endif %} {% endmacro %} {% macro cookie_params(endpoint) %} @@ -108,6 +113,9 @@ multipart_data: {{ endpoint.multipart_body.get_type_string() }}, {% if endpoint.json_body %} json_body: {{ endpoint.json_body.get_type_string() }}, {% endif %} +{% if endpoint.binary_body %} +binary_body: {{ endpoint.binary_body.get_type_string() }}, +{% endif %} {# query parameters #} {% for parameter in endpoint.query_parameters.values() %} {{ parameter.to_string() }}, @@ -138,6 +146,9 @@ multipart_data=multipart_data, {% if endpoint.json_body %} json_body=json_body, {% endif %} +{% if endpoint.binary_body %} +binary_body=binary_body, +{% endif %} {% for parameter in endpoint.query_parameters.values() %} {{ parameter.python_name }}={{ parameter.python_name }}, {% endfor %} diff --git a/openapi_python_client/templates/endpoint_module.py.jinja b/openapi_python_client/templates/endpoint_module.py.jinja index 6a9921e8a..3aec2dcfc 100644 --- a/openapi_python_client/templates/endpoint_module.py.jinja +++ b/openapi_python_client/templates/endpoint_module.py.jinja @@ -47,11 +47,13 @@ def _get_kwargs( "files": {{ "multipart_" + endpoint.multipart_body.python_name }}, {% elif endpoint.json_body %} "json": {{ "json_" + endpoint.json_body.python_name }}, + {% elif endpoint.binary_body %} + "content": {{ endpoint.binary_body.python_name }}.payload, {% endif %} {% if endpoint.query_parameters %} "params": params, {% endif %} - {% if endpoint.header_parameters %} + {% if endpoint.header_parameters or endpoint.binary_body %} "headers": headers, {% endif %} {% if endpoint.cookie_parameters %}
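Stripped of the templating, the request the updated templates build for an `application/octet-stream` body comes down to an httpx call like the one below. This is a rough sketch: the URL and payload are illustrative, and the Content-Type fallback mirrors the `endpoint_macros.py.jinja` change above.

```python
import httpx

payload = b"\x89PNG..."  # raw bytes to send (illustrative)
mime_type = None         # the generated File type may or may not carry one

headers = {"Content-Type": mime_type if mime_type else "application/octet-stream"}
response = httpx.post("https://example.com/upload", content=payload, headers=headers)
```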
Support all `text/*` content types in responses. Replaces #780. Closes #797, #821. TODO: - [x] Fix unnecessary cast
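Stated as code, the selection rule this describes looks roughly like the sketch below; the shipped implementation (`_source_by_content_type` in `openapi_python_client/parser/responses.py`, shown in the patch for #897 further down) also records the matching return type for each source.

```python
from typing import Optional

def source_attribute(content_type: str) -> Optional[str]:
    """Which attribute of the httpx Response the generated code should read."""
    if content_type.startswith("text/"):
        return "response.text"  # any text/* comes back as str
    if content_type == "application/json" or content_type.endswith("+json"):
        return "response.json()"
    if content_type == "application/octet-stream":
        return "response.content"
    return None  # unsupported content type
```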
2023-12-06T17:54:38
0.0
[]
[]
openapi-generators/openapi-python-client
openapi-generators__openapi-python-client-897
ea4b545732bba85fdbd3d8ba00436cc61507eddc
diff --git a/.changeset/support_all_text_content_types_in_responses.md b/.changeset/support_all_text_content_types_in_responses.md new file mode 100644 index 000000000..36c06a97f --- /dev/null +++ b/.changeset/support_all_text_content_types_in_responses.md @@ -0,0 +1,11 @@ +--- +default: minor +--- + +# Support all `text/*` content types in responses + +Within an API response, any content type which starts with `text/` will now be treated the same as `text/html` already was—they will return the `response.text` attribute from the [httpx Response](https://www.python-httpx.org/api/#response). + +Thanks to @fdintino for the initial implementation, and thanks for the discussions from @kairntech, @rubenfiszel, and @antoneladestito. + +Closes #797 and #821. diff --git a/openapi_python_client/parser/openapi.py b/openapi_python_client/parser/openapi.py index dc6e9f47b..94a42998e 100644 --- a/openapi_python_client/parser/openapi.py +++ b/openapi_python_client/parser/openapi.py @@ -46,7 +46,11 @@ class EndpointCollection: @staticmethod def from_data( - *, data: Dict[str, oai.PathItem], schemas: Schemas, parameters: Parameters, config: Config + *, + data: Dict[str, oai.PathItem], + schemas: Schemas, + parameters: Parameters, + config: Config, ) -> Tuple[Dict[utils.PythonIdentifier, "EndpointCollection"], Schemas, Parameters]: """Parse the openapi paths data to get EndpointCollections by tag""" endpoints_by_tag: Dict[utils.PythonIdentifier, EndpointCollection] = {} @@ -72,7 +76,11 @@ def from_data( # Add `PathItem` parameters if not isinstance(endpoint, ParseError): endpoint, schemas, parameters = Endpoint.add_parameters( - endpoint=endpoint, data=path_data, schemas=schemas, parameters=parameters, config=config + endpoint=endpoint, + data=path_data, + schemas=schemas, + parameters=parameters, + config=config, ) if not isinstance(endpoint, ParseError): endpoint = Endpoint.sort_parameters(endpoint=endpoint) @@ -145,7 +153,13 @@ def parse_request_form_body( config=config, ) if isinstance(prop, ModelProperty): - schemas = attr.evolve(schemas, classes_by_name={**schemas.classes_by_name, prop.class_info.name: prop}) + schemas = attr.evolve( + schemas, + classes_by_name={ + **schemas.classes_by_name, + prop.class_info.name: prop, + }, + ) return prop, schemas return None, schemas @@ -167,7 +181,13 @@ def parse_multipart_body( ) if isinstance(prop, ModelProperty): prop = attr.evolve(prop, is_multipart_body=True) - schemas = attr.evolve(schemas, classes_by_name={**schemas.classes_by_name, prop.class_info.name: prop}) + schemas = attr.evolve( + schemas, + classes_by_name={ + **schemas.classes_by_name, + prop.class_info.name: prop, + }, + ) return prop, schemas return None, schemas @@ -178,9 +198,11 @@ def parse_request_json_body( """Return json_body""" json_body = None for content_type, schema in body.content.items(): - content_type = get_content_type(content_type) # noqa: PLW2901 + parsed_content_type = get_content_type(content_type) - if content_type == "application/json" or content_type.endswith("+json"): + if parsed_content_type is not None and ( + parsed_content_type == "application/json" or parsed_content_type.endswith("+json") + ): json_body = schema break @@ -209,7 +231,10 @@ def _add_body( return endpoint, schemas form_body, schemas = Endpoint.parse_request_form_body( - body=data.requestBody, schemas=schemas, parent_name=endpoint.name, config=config + body=data.requestBody, + schemas=schemas, + parent_name=endpoint.name, + config=config, ) if isinstance(form_body, ParseError): @@ -223,7 +248,10 @@ def 
_add_body( ) json_body, schemas = Endpoint.parse_request_json_body( - body=data.requestBody, schemas=schemas, parent_name=endpoint.name, config=config + body=data.requestBody, + schemas=schemas, + parent_name=endpoint.name, + config=config, ) if isinstance(json_body, ParseError): return ( @@ -236,7 +264,10 @@ def _add_body( ) multipart_body, schemas = Endpoint.parse_multipart_body( - body=data.requestBody, schemas=schemas, parent_name=endpoint.name, config=config + body=data.requestBody, + schemas=schemas, + parent_name=endpoint.name, + config=config, ) if isinstance(multipart_body, ParseError): return ( @@ -285,7 +316,11 @@ def _add_responses( continue response, schemas = response_from_data( - status_code=status_code, data=response_data, schemas=schemas, parent_name=endpoint.name, config=config + status_code=status_code, + data=response_data, + schemas=schemas, + parent_name=endpoint.name, + config=config, ) if isinstance(response, ParseError): detail_suffix = "" if response.detail is None else f" ({response.detail})" @@ -350,7 +385,15 @@ def add_parameters( # noqa: PLR0911, PLR0912 oai.ParameterLocation.HEADER: endpoint.header_parameters, oai.ParameterLocation.COOKIE: endpoint.cookie_parameters, "RESERVED": { # These can't be param names because codegen needs them as vars, the properties don't matter - "client": AnyProperty("client", True, False, None, PythonIdentifier("client", ""), None, None), + "client": AnyProperty( + "client", + True, + False, + None, + PythonIdentifier("client", ""), + None, + None, + ), "url": AnyProperty("url", True, False, None, PythonIdentifier("url", ""), None, None), }, } @@ -393,7 +436,10 @@ def add_parameters( # noqa: PLR0911, PLR0912 if isinstance(prop, ParseError): return ( - ParseError(detail=f"cannot parse parameter of endpoint {endpoint.name}", data=prop.data), + ParseError( + detail=f"cannot parse parameter of endpoint {endpoint.name}", + data=prop.data, + ), schemas, parameters, ) @@ -432,7 +478,8 @@ def add_parameters( # noqa: PLR0911, PLR0912 if prop.python_name in endpoint.used_python_identifiers: return ( ParseError( - detail=f"Parameters with same Python identifier `{prop.python_name}` detected", data=data + detail=f"Parameters with same Python identifier `{prop.python_name}` detected", + data=data, ), schemas, parameters, @@ -465,7 +512,8 @@ def sort_parameters(*, endpoint: "Endpoint") -> Union["Endpoint", ParseError]: parameters_from_path = re.findall(_PATH_PARAM_REGEX, endpoint.path) try: sorted_params = sorted( - endpoint.path_parameters.values(), key=lambda param: parameters_from_path.index(param.name) + endpoint.path_parameters.values(), + key=lambda param: parameters_from_path.index(param.name), ) endpoint.path_parameters = OrderedDict((param.name, param) for param in sorted_params) except ValueError: @@ -506,7 +554,11 @@ def from_data( ) result, schemas, parameters = Endpoint.add_parameters( - endpoint=endpoint, data=data, schemas=schemas, parameters=parameters, config=config + endpoint=endpoint, + data=data, + schemas=schemas, + parameters=parameters, + config=config, ) if isinstance(result, ParseError): return result, schemas, parameters @@ -570,7 +622,9 @@ def from_dict(data: Dict[str, Any], *, config: Config) -> Union["GeneratorData", schemas = build_schemas(components=openapi.components.schemas, schemas=schemas, config=config) if openapi.components and openapi.components.parameters: parameters = build_parameters( - components=openapi.components.parameters, parameters=parameters, config=config + 
components=openapi.components.parameters, + parameters=parameters, + config=config, ) endpoint_collections_by_tag, schemas, parameters = EndpointCollection.from_data( data=openapi.paths, schemas=schemas, parameters=parameters, config=config diff --git a/openapi_python_client/parser/responses.py b/openapi_python_client/parser/responses.py index 2b41eac8d..97909a40c 100644 --- a/openapi_python_client/parser/responses.py +++ b/openapi_python_client/parser/responses.py @@ -1,7 +1,7 @@ __all__ = ["Response", "response_from_data"] from http import HTTPStatus -from typing import Optional, Tuple, Union +from typing import Optional, Tuple, TypedDict, Union from attrs import define @@ -14,32 +14,53 @@ from .properties import AnyProperty, Property, Schemas, property_from_data +class _ResponseSource(TypedDict): + """What data should be pulled from the httpx Response object""" + + attribute: str + return_type: str + + +JSON_SOURCE = _ResponseSource(attribute="response.json()", return_type="Any") +BYTES_SOURCE = _ResponseSource(attribute="response.content", return_type="bytes") +TEXT_SOURCE = _ResponseSource(attribute="response.text", return_type="str") +NONE_SOURCE = _ResponseSource(attribute="None", return_type="None") + + @define class Response: """Describes a single response for an endpoint""" status_code: HTTPStatus prop: Property - source: str + source: _ResponseSource + +def _source_by_content_type(content_type: str) -> Optional[_ResponseSource]: + parsed_content_type = utils.get_content_type(content_type) + if parsed_content_type is None: + return None -def _source_by_content_type(content_type: str) -> Optional[str]: - content_type = utils.get_content_type(content_type) + if parsed_content_type.startswith("text/"): + return TEXT_SOURCE known_content_types = { - "application/json": "response.json()", - "application/octet-stream": "response.content", - "text/html": "response.text", + "application/json": JSON_SOURCE, + "application/octet-stream": BYTES_SOURCE, } - source = known_content_types.get(content_type) - if source is None and content_type.endswith("+json"): + source = known_content_types.get(parsed_content_type) + if source is None and parsed_content_type.endswith("+json"): # Implements https://www.rfc-editor.org/rfc/rfc6838#section-4.2.8 for the +json suffix - source = "response.json()" + source = JSON_SOURCE return source def empty_response( - *, status_code: HTTPStatus, response_name: str, config: Config, description: Optional[str] + *, + status_code: HTTPStatus, + response_name: str, + config: Config, + description: Optional[str], ) -> Response: """Return an untyped response, for when no response type is defined""" return Response( @@ -53,7 +74,7 @@ def empty_response( description=description, example=None, ), - source="None", + source=NONE_SOURCE, ) @@ -70,7 +91,12 @@ def response_from_data( response_name = f"response_{status_code}" if isinstance(data, oai.Reference): return ( - empty_response(status_code=status_code, response_name=response_name, config=config, description=None), + empty_response( + status_code=status_code, + response_name=response_name, + config=config, + description=None, + ), schemas, ) @@ -78,7 +104,10 @@ def response_from_data( if not content: return ( empty_response( - status_code=status_code, response_name=response_name, config=config, description=data.description + status_code=status_code, + response_name=response_name, + config=config, + description=data.description, ), schemas, ) @@ -89,12 +118,18 @@ def response_from_data( schema_data = 
media_type.media_type_schema break else: - return ParseError(data=data, detail=f"Unsupported content_type {content}"), schemas + return ( + ParseError(data=data, detail=f"Unsupported content_type {content}"), + schemas, + ) if schema_data is None: return ( empty_response( - status_code=status_code, response_name=response_name, config=config, description=data.description + status_code=status_code, + response_name=response_name, + config=config, + description=data.description, ), schemas, ) diff --git a/openapi_python_client/templates/endpoint_module.py.jinja b/openapi_python_client/templates/endpoint_module.py.jinja index c2b738ced..6a9921e8a 100644 --- a/openapi_python_client/templates/endpoint_module.py.jinja +++ b/openapi_python_client/templates/endpoint_module.py.jinja @@ -65,9 +65,11 @@ def _parse_response(*, client: Union[AuthenticatedClient, Client], response: htt if response.status_code == HTTPStatus.{{ response.status_code.name }}: {% if parsed_responses %}{% import "property_templates/" + response.prop.template as prop_template %} {% if prop_template.construct %} - {{ prop_template.construct(response.prop, response.source) | indent(8) }} + {{ prop_template.construct(response.prop, response.source.attribute) | indent(8) }} + {% elif response.source.return_type == response.prop.get_type_string() %} + {{ response.prop.python_name }} = {{ response.source.attribute }} {% else %} - {{ response.prop.python_name }} = cast({{ response.prop.get_type_string() }}, {{ response.source }}) + {{ response.prop.python_name }} = cast({{ response.prop.get_type_string() }}, {{ response.source.attribute }}) {% endif %} return {{ response.prop.python_name }} {% else %} diff --git a/openapi_python_client/utils.py b/openapi_python_client/utils.py index 8d54de096..ea19622c4 100644 --- a/openapi_python_client/utils.py +++ b/openapi_python_client/utils.py @@ -1,8 +1,10 @@ +from __future__ import annotations + import builtins import re from email.message import Message from keyword import iskeyword -from typing import Any, List +from typing import Any DELIMITERS = r"\. 
_-" @@ -10,21 +12,21 @@ class PythonIdentifier(str): """A snake_case string which has been validated / transformed into a valid identifier for Python""" - def __new__(cls, value: str, prefix: str) -> "PythonIdentifier": + def __new__(cls, value: str, prefix: str) -> PythonIdentifier: new_value = fix_reserved_words(snake_case(sanitize(value))) if not new_value.isidentifier() or value.startswith("_"): new_value = f"{prefix}{new_value}" return str.__new__(cls, new_value) - def __deepcopy__(self, _: Any) -> "PythonIdentifier": + def __deepcopy__(self, _: Any) -> PythonIdentifier: return self class ClassName(str): """A PascalCase string which has been validated / transformed into a valid class name for Python""" - def __new__(cls, value: str, prefix: str) -> "ClassName": + def __new__(cls, value: str, prefix: str) -> ClassName: new_value = fix_reserved_words(pascal_case(sanitize(value))) if not new_value.isidentifier(): @@ -32,7 +34,7 @@ def __new__(cls, value: str, prefix: str) -> "ClassName": new_value = fix_reserved_words(pascal_case(sanitize(value))) return str.__new__(cls, new_value) - def __deepcopy__(self, _: Any) -> "ClassName": + def __deepcopy__(self, _: Any) -> ClassName: return self @@ -41,7 +43,7 @@ def sanitize(value: str) -> str: return re.sub(rf"[^\w{DELIMITERS}]+", "", value) -def split_words(value: str) -> List[str]: +def split_words(value: str) -> list[str]: """Split a string on words and known delimiters""" # We can't guess words if there is no capital letter if any(c.isupper() for c in value): @@ -49,7 +51,10 @@ def split_words(value: str) -> List[str]: return re.findall(rf"[^{DELIMITERS}]+", value) -RESERVED_WORDS = (set(dir(builtins)) | {"self", "true", "false", "datetime"}) - {"type", "id"} +RESERVED_WORDS = (set(dir(builtins)) | {"self", "true", "false", "datetime"}) - { + "type", + "id", +} def fix_reserved_words(value: str) -> str: @@ -97,13 +102,16 @@ def remove_string_escapes(value: str) -> str: return value.replace('"', r"\"") -def get_content_type(content_type: str) -> str: +def get_content_type(content_type: str) -> str | None: """ Given a string representing a content type with optional parameters, returns the content type only """ message = Message() message.add_header("Content-Type", content_type) - content_type = message.get_content_type() + parsed_content_type = message.get_content_type() + if not content_type.startswith(parsed_content_type): + # Always defaults to `text/plain` if it's not recognized. We want to return an error, not default. + return None - return content_type + return parsed_content_type
fix: Use source "response.text" for any mime-type starting with text/ Responses with `Content-Type: text/csv` or `Content-Type: text/xml` ought to be treated the same as `text/html`
2023-12-04T21:15:46
0.0
[]
[]