| in_source_id | issue | before_files | after_files | pr_diff |
|---|---|---|---|---|
kserve__kserve-2478 | Update ModelMesh version to `v0.10.0`
#### Proposed Changes
Update to the latest ModelMesh version (`v0.10.0`)
> If your changes should also be in the most recent release, add the corresponding "cherrypick-0.X"
label to the original PR; for example, "cherrypick-0.10".
#### TODOs:
> - [ ] Best practice is to open a PR for the cherry-pick yourself after your original PR has been merged
into the main branch.
> - [ ] After the cherry-pick PR has merged, remove the cherry-pick label from the original PR.
**Type of changes**
- [x] This change requires a documentation update --> https://github.com/kserve/website/pull/214
**Special notes for your reviewer**:
1. Please confirm that if this PR changes any image versions, then that's the sole change this PR makes.
YES
**Checklist**:
- [x] Have you made corresponding changes to the documentation?
**Release note**:
<!-- Write your release note:
1. Enter your extended release note in the block below. If the PR requires additional action from users switching to the new release, include the string "action required".
2. If no release note is required, just write "NONE".
-->
```release-note
???
```
Not sure; maybe:
- "Upgrade to ModelMesh v0.10.0"
or
- point to ModelMesh release notes here
https://github.com/kserve/modelmesh-serving/releases/tag/v0.10.0
---
/cc @yuzisun @rachitchauhan43 @njhill
| [
{
"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n",
"path": "python/kserve/setup.py"
}
] | [
{
"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy',\n 'portforward',\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n",
"path": "python/kserve/setup.py"
}
] | diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml
index 7f56710651e..1529ca65444 100644
--- a/.github/workflows/e2e-test.yml
+++ b/.github/workflows/e2e-test.yml
@@ -47,17 +47,17 @@ jobs:
./test/scripts/gh-actions/build-server-runtimes.sh predictor,transformer
docker image ls
cat ./config/overlays/test/configmap/inferenceservice.yaml
-
+
- name: Upload sklearn server image
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/sklearnserver:${{ github.sha }}"
-
+
- name: Upload xgb server image
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/xgbserver:${{ github.sha }}"
-
+
- name: Upload lgb server image
uses: ishworkh/docker-image-artifact-upload@v1
with:
@@ -67,7 +67,7 @@ jobs:
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/pmmlserver:${{ github.sha }}"
-
+
- name: Upload paddle image
uses: ishworkh/docker-image-artifact-upload@v1
with:
@@ -77,7 +77,7 @@ jobs:
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/image-transformer:${{ github.sha }}"
-
+
explainer-runtime-build:
runs-on: ubuntu-latest
steps:
@@ -93,17 +93,17 @@ jobs:
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/aix-explainer:${{ github.sha }}"
-
+
- name: Upload alibi image
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/alibi-explainer:${{ github.sha }}"
-
+
- name: Upload art explainer image
uses: ishworkh/docker-image-artifact-upload@v1
with:
image: "kserve/art-explainer:${{ github.sha }}"
-
+
test-fast:
runs-on: ubuntu-latest
needs: [kserve-image-build, predictor-runtime-build, explainer-runtime-build]
@@ -114,17 +114,17 @@ jobs:
go-version: '1.17.3'
- uses: ./.github/actions/minikube-setup
- uses: ./.github/actions/base-download
-
+
- name: Download sklearn server image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/sklearnserver:${{ github.sha }}"
-
+
- name: Download xgb server image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/xgbserver:${{ github.sha }}"
-
+
- name: Download lgb server image
uses: ishworkh/docker-image-artifact-download@v1
with:
@@ -133,8 +133,8 @@ jobs:
- name: Download pmml image
uses: ishworkh/docker-image-artifact-download@v1
with:
- image: "kserve/pmmlserver:${{ github.sha }}"
-
+ image: "kserve/pmmlserver:${{ github.sha }}"
+
- name: Install KServe
run: |
./test/scripts/gh-actions/setup-kserve.sh
@@ -151,7 +151,7 @@ jobs:
if: always()
run: |
./test/scripts/gh-actions/status-check.sh
-
+
test-slow:
runs-on: ubuntu-latest
needs: [kserve-image-build, predictor-runtime-build, explainer-runtime-build]
@@ -162,22 +162,22 @@ jobs:
go-version: '1.17.3'
- uses: ./.github/actions/minikube-setup
- uses: ./.github/actions/base-download
-
+
- name: Download sklearn server image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/sklearnserver:${{ github.sha }}"
-
+
- name: Download xgb server image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/xgbserver:${{ github.sha }}"
-
+
- name: Download lgb server image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/lgbserver:${{ github.sha }}"
-
+
- name: Download paddle image
uses: ishworkh/docker-image-artifact-download@v1
with:
@@ -215,12 +215,12 @@ jobs:
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/sklearnserver:${{ github.sha }}"
-
+
- name: Download aix image
uses: ishworkh/docker-image-artifact-download@v1
with:
image: "kserve/aix-explainer:${{ github.sha }}"
-
+
- name: Download alibi image
uses: ishworkh/docker-image-artifact-download@v1
with:
@@ -256,11 +256,11 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.17.3'
-
+
- uses: ./.github/actions/minikube-setup
- uses: ./.github/actions/base-download
-
-
+
+
- name: Download sklearn server image
uses: ishworkh/docker-image-artifact-download@v1
with:
@@ -292,3 +292,49 @@ jobs:
if: always()
run: |
./test/scripts/gh-actions/status-check.sh
+
+ test-qpext:
+ runs-on: ubuntu-latest
+ needs: [kserve-image-build, predictor-runtime-build]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-go@v2
+ with:
+ go-version: '1.17.3'
+ - uses: ./.github/actions/minikube-setup
+ - uses: ./.github/actions/base-download
+ - name: Build queue proxy extension image
+ run: |
+ ./test/scripts/gh-actions/build-qpext-image.sh
+ docker image ls
+ - name: Upload qpext image
+ uses: ishworkh/docker-image-artifact-upload@v1
+ with:
+ image: "kserve/qpext:${{ github.sha }}"
+ - name: Download qpext image
+ uses: ishworkh/docker-image-artifact-download@v1
+ with:
+ image: "kserve/qpext:${{ github.sha }}"
+ - name: Download sklearn server image
+ uses: ishworkh/docker-image-artifact-download@v1
+ with:
+ image: "kserve/sklearnserver:${{ github.sha }}"
+ - name: Install KServe
+ run: |
+ ./test/scripts/gh-actions/setup-kserve.sh
+
+ kubectl get pods -n kserve
+ kubectl describe pods -n kserve
+ - name: Patch qpext image
+ run : |
+ kubectl patch configmaps -n knative-serving config-deployment --patch '{"data": {"queueSidecarImage": "kserve/qpext:${{ github.sha }}"}}'
+ kubectl describe configmaps -n knative-serving config-deployment
+ - name: Run queue proxy extension E2E tests
+ timeout-minutes: 30
+ run: |
+ ./test/scripts/gh-actions/run-qpext-test.sh
+ kubectl get pods -n kserve
+ - name: Check system status
+ if: always()
+ run: |
+ ./test/scripts/gh-actions/status-check.sh
diff --git a/.github/workflows/qpext-docker-publish.yml b/.github/workflows/qpext-docker-publish.yml
new file mode 100644
index 00000000000..144ac152dbc
--- /dev/null
+++ b/.github/workflows/qpext-docker-publish.yml
@@ -0,0 +1,79 @@
+name: Queue Proxy Extension Docker Publisher
+
+on:
+ push:
+ # Publish `master` as Docker `latest` image.
+ branches:
+ - master
+
+ # Publish `v1.2.3` tags as releases.
+ tags:
+ - v*
+
+ # Run tests for any PRs.
+ pull_request:
+
+env:
+ IMAGE_NAME: qpext
+
+jobs:
+ # Run tests.
+ # See also https://docs.docker.com/docker-hub/builds/automated-testing/
+ test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Run tests
+ run: |
+ if [ -f docker-compose.test.yml ]; then
+ docker-compose --file docker-compose.test.yml build
+ docker-compose --file docker-compose.test.yml run sut
+ else
+ cd qpext
+ docker build . --file qpext.Dockerfile
+ fi
+
+ # Push image to GitHub Packages.
+ # See also https://docs.docker.com/docker-hub/builds/
+ push:
+ # Ensure test job passes before pushing image.
+ needs: test
+
+ runs-on: ubuntu-latest
+ if: github.event_name == 'push'
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Build image
+ run: |
+ cd qpext
+ docker build . --file qpext.Dockerfile --tag $IMAGE_NAME
+
+ - name: Log into registry
+ run: docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Push image
+ run: |
+ IMAGE_ID=kserve/$IMAGE_NAME
+
+ # Change all uppercase to lowercase
+ IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
+
+ # Strip git ref prefix from version
+ VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')
+
+ # Strip "v" prefix from tag name
+ # [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//')
+
+ # Use Docker `latest` tag convention
+ [ "$VERSION" == "master" ] && VERSION=latest
+
+ echo IMAGE_ID=$IMAGE_ID
+ echo VERSION=$VERSION
+
+ docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
+ docker push $IMAGE_ID:$VERSION
+
diff --git a/Makefile b/Makefile
index 0568faecd09..dc87d1e97c0 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,7 @@ PADDLE_IMG ?= paddleserver
ALIBI_IMG ?= alibi-explainer
AIX_IMG ?= aix-explainer
STORAGE_INIT_IMG ?= storage-initializer
+QPEXT_IMG ?= qpext
CRD_OPTIONS ?= "crd:maxDescLen=0"
KSERVE_ENABLE_SELF_SIGNED_CA ?= false
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
@@ -263,6 +264,12 @@ docker-build-storageInitializer:
docker-push-storageInitializer: docker-build-storageInitializer
docker push ${KO_DOCKER_REPO}/${STORAGE_INIT_IMG}
+docker-build-qpext:
+ cd qpext && docker build -t ${KO_DOCKER_REPO}/${QPEXT_IMG} -f qpext.Dockerfile .
+
+docker-build-push-qpext: docker-build-qpext
+ docker push ${KO_DOCKER_REPO}/${QPEXT_IMG}
+
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
diff --git a/charts/kserve/templates/configmap.yaml b/charts/kserve/templates/configmap.yaml
index 2f4cab0b1af..94e4a1faab1 100644
--- a/charts/kserve/templates/configmap.yaml
+++ b/charts/kserve/templates/configmap.yaml
@@ -200,6 +200,11 @@ data:
transformers: |-
{
}
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": {{ .Values.kserve.metricsaggregator.enableMetricAggregation }},
+ "enablePrometheusScraping" : {{ .Values.kserve.metricsaggregator.enablePrometheusScraping }}
+ }
kind: ConfigMap
metadata:
name: inferenceservice-config
diff --git a/charts/kserve/values.yaml b/charts/kserve/values.yaml
index 4ac31db49f0..8b83f349cfa 100644
--- a/charts/kserve/values.yaml
+++ b/charts/kserve/values.yaml
@@ -10,6 +10,9 @@ kserve:
s3:
accessKeyIdName: AWS_ACCESS_KEY_ID
secretAccessKeyName: AWS_SECRET_ACCESS_KEY
+ metricsaggregator:
+ enableMetricAggregation: "false"
+ enablePrometheusScraping: "false"
controller:
deploymentMode: "Serverless"
gateway:
diff --git a/config/configmap/inferenceservice.yaml b/config/configmap/inferenceservice.yaml
index 70e605b950a..99343afd543 100644
--- a/config/configmap/inferenceservice.yaml
+++ b/config/configmap/inferenceservice.yaml
@@ -102,3 +102,8 @@ data:
{
"defaultDeploymentMode": "Serverless"
}
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": "false",
+ "enablePrometheusScraping" : "false"
+ }
diff --git a/config/overlays/test/configmap/inferenceservice.yaml b/config/overlays/test/configmap/inferenceservice.yaml
index 71f2c5fb57f..ea3bfa41e4e 100644
--- a/config/overlays/test/configmap/inferenceservice.yaml
+++ b/config/overlays/test/configmap/inferenceservice.yaml
@@ -71,3 +71,8 @@ data:
"cpuRequest": "100m",
"cpuLimit": "300m"
}
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": "false",
+ "enablePrometheusScraping" : "false"
+ }
diff --git a/docs/samples/v1beta1/tensorflow/tensorflow.yaml b/docs/samples/v1beta1/tensorflow/tensorflow.yaml
index c2de3a591b6..08ec9bfe782 100644
--- a/docs/samples/v1beta1/tensorflow/tensorflow.yaml
+++ b/docs/samples/v1beta1/tensorflow/tensorflow.yaml
@@ -2,6 +2,9 @@ apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
name: "flower-sample"
+ annotations:
+ serving.kserve.io/enable-metric-aggregation: "true"
+ serving.kserve.io/enable-prometheus-scraping: "true"
spec:
predictor:
tensorflow:
diff --git a/go.mod b/go.mod
index 8a40a1d0ff2..c2d8ae09a1b 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/google/go-cmp v0.5.8
+ github.com/google/martian v2.1.0+incompatible
github.com/google/uuid v1.3.0
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720
github.com/json-iterator/go v1.1.12
@@ -25,7 +26,9 @@ require (
github.com/stretchr/testify v1.7.0
github.com/tidwall/gjson v1.14.1
go.uber.org/zap v1.19.1
+ gomodules.xyz/jsonpatch/v2 v2.2.0
google.golang.org/api v0.93.0
+ google.golang.org/protobuf v1.28.1
istio.io/api v0.0.0-20200715212100-dbf5277541ef
istio.io/client-go v0.0.0-20201005161859-d8818315d678
k8s.io/api v0.23.9
@@ -105,11 +108,9 @@ require (
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/tools v0.1.9 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
- gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959 // indirect
google.golang.org/grpc v1.48.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/hack/image_patch_dev.sh b/hack/image_patch_dev.sh
index da1a0a8a694..8ccfd887a41 100755
--- a/hack/image_patch_dev.sh
+++ b/hack/image_patch_dev.sh
@@ -77,4 +77,9 @@ data:
"cpuRequest": "100m",
"cpuLimit": "100m"
}
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": "false",
+ "enablePrometheusScraping" : "false"
+ }
EOF
diff --git a/pkg/apis/serving/v1beta1/configmap.go b/pkg/apis/serving/v1beta1/configmap.go
index 5ce04350834..fa6c82fe48e 100644
--- a/pkg/apis/serving/v1beta1/configmap.go
+++ b/pkg/apis/serving/v1beta1/configmap.go
@@ -108,11 +108,11 @@ func NewIngressConfig(cli client.Client) (*IngressConfig, error) {
if ingress, ok := configMap.Data[IngressConfigKeyName]; ok {
err := json.Unmarshal([]byte(ingress), &ingressConfig)
if err != nil {
- return nil, fmt.Errorf("Unable to parse ingress config json: %v", err)
+ return nil, fmt.Errorf("unable to parse ingress config json: %v", err)
}
if ingressConfig.IngressGateway == "" || ingressConfig.IngressServiceName == "" {
- return nil, fmt.Errorf("Invalid ingress config, ingressGateway, ingressService are required.")
+ return nil, fmt.Errorf("invalid ingress config - ingressGateway and ingressService are required")
}
}
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index 215242fa0b5..ffb732c0e4f 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -78,13 +78,14 @@ var (
MaxScaleAnnotationKey = KnativeAutoscalingAPIGroupName + "/maxScale"
RollOutDurationAnnotationKey = KnativeServingAPIGroupName + "/rollout-duration"
EnableMetricAggregation = KServeAPIGroupName + "/enable-metric-aggregation"
- SetPrometheusAggregateAnnotation = KServeAPIGroupName + "/enable-prometheus-aggregate-scraping"
+ SetPrometheusAnnotation = KServeAPIGroupName + "/enable-prometheus-scraping"
KserveContainerPrometheusPortKey = "prometheus.kserve.io/port"
KServeContainerPrometheusPathKey = "prometheus.kserve.io/path"
PrometheusPortAnnotationKey = "prometheus.io/port"
PrometheusPathAnnotationKey = "prometheus.io/path"
DefaultPrometheusPath = "/metrics"
QueueProxyAggregatePrometheusMetricsPort = "9088"
+ DefaultPodPrometheusPort = "9090"
)
// InferenceService Internal Annotations
diff --git a/pkg/webhook/admission/pod/agent_injector.go b/pkg/webhook/admission/pod/agent_injector.go
index a9224b778d9..a411a6afcf6 100644
--- a/pkg/webhook/admission/pod/agent_injector.go
+++ b/pkg/webhook/admission/pod/agent_injector.go
@@ -71,10 +71,11 @@ func getAgentConfigs(configMap *v1.ConfigMap) (*AgentConfig, error) {
if agentConfigValue, ok := configMap.Data[constants.AgentConfigMapKeyName]; ok {
err := json.Unmarshal([]byte(agentConfigValue), &agentConfig)
if err != nil {
- panic(fmt.Errorf("Unable to unmarshall agent json string due to %v ", err))
+ panic(fmt.Errorf("unable to unmarshall agent json string due to %v", err))
}
}
- //Ensure that we set proper values for CPU/Memory Limit/Request
+
+ //Ensure that we set proper values
resourceDefaults := []string{agentConfig.MemoryRequest,
agentConfig.MemoryLimit,
agentConfig.CpuRequest,
@@ -82,7 +83,7 @@ func getAgentConfigs(configMap *v1.ConfigMap) (*AgentConfig, error) {
for _, key := range resourceDefaults {
_, err := resource.ParseQuantity(key)
if err != nil {
- return agentConfig, fmt.Errorf("Failed to parse resource configuration for %q: %q",
+ return agentConfig, fmt.Errorf("failed to parse resource configuration for %q: %s",
constants.AgentConfigMapKeyName, err.Error())
}
}
diff --git a/pkg/webhook/admission/pod/metrics_aggregate_injector.go b/pkg/webhook/admission/pod/metrics_aggregate_injector.go
index ddc707a06f0..3a5d6fa3569 100644
--- a/pkg/webhook/admission/pod/metrics_aggregate_injector.go
+++ b/pkg/webhook/admission/pod/metrics_aggregate_injector.go
@@ -17,46 +17,94 @@ limitations under the License.
package pod
import (
+ "encoding/json"
+ "fmt"
"github.com/kserve/kserve/pkg/constants"
v1 "k8s.io/api/core/v1"
)
-const defaultPrometheusPort = "8080"
+const (
+ defaultKserveContainerPrometheusPort = "8080"
+ MetricsAggregatorConfigMapKeyName = "metricsAggregator"
+)
-// InjectMetricsAggregator looks for the annotations to enable aggregate kserve-container and queue-proxy metrics and
-// if specified, sets port-related EnvVars in queue-proxy and the aggregate prometheus annotation.
-func InjectMetricsAggregator(pod *v1.Pod) error {
+type MetricsAggregator struct {
+ EnableMetricAggregation string `json:"enableMetricAggregation"`
+ EnablePrometheusScraping string `json:"enablePrometheusScraping"`
+}
+
+func newMetricsAggregator(configMap *v1.ConfigMap) (*MetricsAggregator, error) {
+ ma := &MetricsAggregator{}
+
+ if maConfigVal, ok := configMap.Data[MetricsAggregatorConfigMapKeyName]; ok {
+ err := json.Unmarshal([]byte(maConfigVal), &ma)
+ if err != nil {
+ panic(fmt.Errorf("Unable to unmarshall %v json string due to %v ", MetricsAggregatorConfigMapKeyName, err))
+ }
+ }
+
+ return ma, nil
+}
+
+func setMetricAggregationEnvVars(pod *v1.Pod) {
for i, container := range pod.Spec.Containers {
if container.Name == "queue-proxy" {
- if enableMetricAgg, ok := pod.ObjectMeta.Annotations[constants.EnableMetricAggregation]; ok && enableMetricAgg == "true" {
- // The kserve-container prometheus port/path is inherited from the ClusterServingRuntime YAML.
- // If no port is defined (transformer using python SDK), use the default port/path for the kserve-container.
- kserveContainerPromPort := defaultPrometheusPort
- if port, ok := pod.ObjectMeta.Annotations[constants.KserveContainerPrometheusPortKey]; ok {
- kserveContainerPromPort = port
- }
-
- kserveContainerPromPath := constants.DefaultPrometheusPath
- if path, ok := pod.ObjectMeta.Annotations[constants.KServeContainerPrometheusPathKey]; ok {
- kserveContainerPromPath = path
- }
-
- // The kserve container port/path is set as an EnvVar in the queue-proxy container
- // so that it knows which port/path to scrape from the kserve-container.
- pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.KServeContainerPrometheusMetricsPortEnvVarKey, Value: kserveContainerPromPort})
- pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.KServeContainerPrometheusMetricsPathEnvVarKey, Value: kserveContainerPromPath})
-
- // Set the port that queue-proxy will use to expose the aggregate metrics.
- pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.QueueProxyAggregatePrometheusMetricsPortEnvVarKey, Value: constants.QueueProxyAggregatePrometheusMetricsPort})
-
- // If SetPrometheusAggregateAnnotation is true, the pod annotations for prometheus port and path will be set. The scrape annotation is not set,
- // that is left for the user to configure.
- if setPromAnnotation, ok := pod.ObjectMeta.Annotations[constants.SetPrometheusAggregateAnnotation]; ok && setPromAnnotation == "true" {
- pod.ObjectMeta.Annotations[constants.PrometheusPortAnnotationKey] = constants.QueueProxyAggregatePrometheusMetricsPort
- pod.ObjectMeta.Annotations[constants.PrometheusPathAnnotationKey] = constants.DefaultPrometheusPath
- }
+ // The kserve-container prometheus port/path is inherited from the ClusterServingRuntime YAML.
+ // If no port is defined (transformer using python SDK), use the default port/path for the kserve-container.
+ kserveContainerPromPort := defaultKserveContainerPrometheusPort
+ if port, ok := pod.ObjectMeta.Annotations[constants.KserveContainerPrometheusPortKey]; ok {
+ kserveContainerPromPort = port
}
+
+ kserveContainerPromPath := constants.DefaultPrometheusPath
+ if path, ok := pod.ObjectMeta.Annotations[constants.KServeContainerPrometheusPathKey]; ok {
+ kserveContainerPromPath = path
+ }
+
+ // The kserve container port/path is set as an EnvVar in the queue-proxy container
+ // so that it knows which port/path to scrape from the kserve-container.
+ pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.KServeContainerPrometheusMetricsPortEnvVarKey, Value: kserveContainerPromPort})
+ pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.KServeContainerPrometheusMetricsPathEnvVarKey, Value: kserveContainerPromPath})
+
+ // Set the port that queue-proxy will use to expose the aggregate metrics.
+ pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, v1.EnvVar{Name: constants.QueueProxyAggregatePrometheusMetricsPortEnvVarKey, Value: constants.QueueProxyAggregatePrometheusMetricsPort})
+
+ }
+ }
+}
+
+// InjectMetricsAggregator looks for the annotations to enable aggregate kserve-container and queue-proxy metrics and
+// if specified, sets port-related EnvVars in queue-proxy and the aggregate prometheus annotation.
+func (ma *MetricsAggregator) InjectMetricsAggregator(pod *v1.Pod) error {
+ //Only set metric configs if the required annotations are set
+ enableMetricAggregation, ok := pod.ObjectMeta.Annotations[constants.EnableMetricAggregation]
+ if !ok {
+ if pod.ObjectMeta.Annotations == nil {
+ pod.ObjectMeta.Annotations = make(map[string]string)
}
+ pod.ObjectMeta.Annotations[constants.EnableMetricAggregation] = ma.EnableMetricAggregation
+ enableMetricAggregation = ma.EnableMetricAggregation
+ }
+ if enableMetricAggregation == "true" {
+ setMetricAggregationEnvVars(pod)
+ }
+
+ // Handle setting the pod prometheus annotations
+ setPromAnnotation, ok := pod.ObjectMeta.Annotations[constants.SetPrometheusAnnotation]
+ if !ok {
+ pod.ObjectMeta.Annotations[constants.SetPrometheusAnnotation] = ma.EnablePrometheusScraping
+ setPromAnnotation = ma.EnablePrometheusScraping
}
+ if setPromAnnotation == "true" {
+ // Set prometheus port to default queue proxy prometheus metrics port.
+ // If enableMetricAggregation is true, set it as the queue proxy metrics aggregation port.
+ podPromPort := constants.DefaultPodPrometheusPort
+ if enableMetricAggregation == "true" {
+ podPromPort = constants.QueueProxyAggregatePrometheusMetricsPort
+ }
+ pod.ObjectMeta.Annotations[constants.PrometheusPortAnnotationKey] = podPromPort
+ pod.ObjectMeta.Annotations[constants.PrometheusPathAnnotationKey] = constants.DefaultPrometheusPath
+ }
+
return nil
}
diff --git a/pkg/webhook/admission/pod/metrics_aggregate_injector_test.go b/pkg/webhook/admission/pod/metrics_aggregate_injector_test.go
index 72b266b502a..e1a0a3b99f0 100644
--- a/pkg/webhook/admission/pod/metrics_aggregate_injector_test.go
+++ b/pkg/webhook/admission/pod/metrics_aggregate_injector_test.go
@@ -95,7 +95,7 @@ func TestInjectMetricsAggregator(t *testing.T) {
Name: "deployment",
Namespace: "default",
Annotations: map[string]string{
- constants.EnableMetricAggregation: "true",
+ constants.EnableMetricAggregation: "false",
},
},
Spec: v1.PodSpec{
@@ -147,14 +147,14 @@ func TestInjectMetricsAggregator(t *testing.T) {
},
},
},
- "setPromAnnotationTrue": {
+ "setPromAnnotationTrueWithAggTrue": {
original: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "deployment",
Namespace: "default",
Annotations: map[string]string{
- constants.EnableMetricAggregation: "true",
- constants.SetPrometheusAggregateAnnotation: "true",
+ constants.EnableMetricAggregation: "true",
+ constants.SetPrometheusAnnotation: "true",
},
},
Spec: v1.PodSpec{
@@ -172,10 +172,10 @@ func TestInjectMetricsAggregator(t *testing.T) {
Name: "deployment",
Namespace: "default",
Annotations: map[string]string{
- constants.EnableMetricAggregation: "true",
- constants.SetPrometheusAggregateAnnotation: "true",
- constants.PrometheusPortAnnotationKey: constants.QueueProxyAggregatePrometheusMetricsPort,
- constants.PrometheusPathAnnotationKey: constants.DefaultPrometheusPath,
+ constants.EnableMetricAggregation: "true",
+ constants.SetPrometheusAnnotation: "true",
+ constants.PrometheusPortAnnotationKey: constants.QueueProxyAggregatePrometheusMetricsPort,
+ constants.PrometheusPathAnnotationKey: constants.DefaultPrometheusPath,
},
},
Spec: v1.PodSpec{
@@ -194,14 +194,56 @@ func TestInjectMetricsAggregator(t *testing.T) {
},
},
},
+ "setPromAnnotationTrueWithAggFalse": {
+ original: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "deployment",
+ Namespace: "default",
+ Annotations: map[string]string{
+ constants.EnableMetricAggregation: "false",
+ constants.SetPrometheusAnnotation: "true",
+ },
+ },
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
+ Name: "sklearn",
+ },
+ {
+ Name: "queue-proxy",
+ },
+ },
+ },
+ },
+ expected: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "deployment",
+ Namespace: "default",
+ Annotations: map[string]string{
+ constants.EnableMetricAggregation: "false",
+ constants.SetPrometheusAnnotation: "true",
+ constants.PrometheusPortAnnotationKey: constants.DefaultPodPrometheusPort,
+ constants.PrometheusPathAnnotationKey: constants.DefaultPrometheusPath,
+ },
+ },
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{
+ Name: "sklearn",
+ },
+ {
+ Name: "queue-proxy",
+ },
+ },
+ },
+ },
+ },
"SetPromAnnotationFalse": {
original: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "deployment",
Namespace: "default",
Annotations: map[string]string{
- constants.EnableMetricAggregation: "true",
- constants.SetPrometheusAggregateAnnotation: "false",
+ constants.EnableMetricAggregation: "true",
+ constants.SetPrometheusAnnotation: "false",
},
},
Spec: v1.PodSpec{
@@ -219,8 +261,8 @@ func TestInjectMetricsAggregator(t *testing.T) {
Name: "deployment",
Namespace: "default",
Annotations: map[string]string{
- constants.EnableMetricAggregation: "true",
- constants.SetPrometheusAggregateAnnotation: "false",
+ constants.EnableMetricAggregation: "true",
+ constants.SetPrometheusAnnotation: "false",
},
},
Spec: v1.PodSpec{
@@ -241,11 +283,14 @@ func TestInjectMetricsAggregator(t *testing.T) {
},
}
+ cfgMap := v1.ConfigMap{Data: map[string]string{"enableMetricAggregation": "false", "enablePrometheusScraping": "false"}}
+ ma, err := newMetricsAggregator(&cfgMap)
+ if err != nil {
+ t.Errorf("Error creating the metrics aggregator %v", err)
+ }
+
for name, scenario := range scenarios {
- err := InjectMetricsAggregator(scenario.original)
- if err != nil {
- t.Errorf("Test %q unexpected error %e", name, err)
- }
+ ma.InjectMetricsAggregator(scenario.original)
if diff, _ := kmp.SafeDiff(scenario.expected.Spec, scenario.original.Spec); diff != "" {
t.Errorf("Test %q unexpected result (-want +got): %v", name, diff)
}
diff --git a/pkg/webhook/admission/pod/mutator.go b/pkg/webhook/admission/pod/mutator.go
index d5352c4a2eb..c320967f3c1 100644
--- a/pkg/webhook/admission/pod/mutator.go
+++ b/pkg/webhook/admission/pod/mutator.go
@@ -111,11 +111,16 @@ func (mutator *Mutator) mutate(pod *v1.Pod, configMap *v1.ConfigMap) error {
batcherConfig: batcherConfig,
}
+ metricsAggregator, err := newMetricsAggregator(configMap)
+ if err != nil {
+ return err
+ }
+
mutators := []func(pod *v1.Pod) error{
InjectGKEAcceleratorSelector,
storageInitializer.InjectStorageInitializer,
agentInjector.InjectAgent,
- InjectMetricsAggregator,
+ metricsAggregator.InjectMetricsAggregator,
}
for _, mutator := range mutators {
diff --git a/pkg/webhook/admission/pod/mutator_test.go b/pkg/webhook/admission/pod/mutator_test.go
index fe3b58a65a1..0a7d8066499 100644
--- a/pkg/webhook/admission/pod/mutator_test.go
+++ b/pkg/webhook/admission/pod/mutator_test.go
@@ -15,6 +15,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+ "sort"
"testing"
)
@@ -255,8 +256,16 @@ func TestMutator_Handle(t *testing.T) {
},
},
},
- matcher: gomega.Equal(admission.Response{
+ matcher: gomega.BeEquivalentTo(admission.Response{
Patches: []jsonpatch.JsonPatchOperation{
+ {
+ Operation: "add",
+ Path: "/metadata/annotations",
+ Value: map[string]interface{}{
+ "serving.kserve.io/enable-metric-aggregation": "",
+ "serving.kserve.io/enable-prometheus-scraping": "",
+ },
+ },
{
Operation: "add",
Path: "/metadata/namespace",
@@ -287,11 +296,21 @@ func TestMutator_Handle(t *testing.T) {
}
tc.request.Object.Raw = byteData
res := mutator.Handle(context.TODO(), tc.request)
+ sortPatches(res.Patches)
g.Expect(res).Should(tc.matcher)
if err := c.Delete(context.TODO(), &tc.configMap); err != nil {
t.Errorf("failed to delete configmap %v", err)
}
})
}
+}
+// sortPatches sorts the slice of patches by Path so that the comparison works
+// when there are > 1 patches. Note: make sure the matcher Patches are sorted.
+func sortPatches(patches []jsonpatch.JsonPatchOperation) {
+ if len(patches) > 1 {
+ sort.Slice(patches, func(i, j int) bool {
+ return patches[i].Path < patches[j].Path
+ })
+ }
}
diff --git a/python/kserve/requirements.txt b/python/kserve/requirements.txt
index 8a2f9521d4e..216c63dc0a1 100644
--- a/python/kserve/requirements.txt
+++ b/python/kserve/requirements.txt
@@ -22,4 +22,4 @@ tritonclient==2.18.0
protobuf~=3.19.0
prometheus-client>=0.13.1
orjson>=3.8.0
-httpx>=0.23.0
\ No newline at end of file
+httpx>=0.23.0
diff --git a/python/kserve/setup.py b/python/kserve/setup.py
index 933fbccdef6..9837a1b9c67 100644
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -21,7 +21,8 @@
'pytest-cov',
'pytest-asyncio',
'pytest-tornasync',
- 'mypy'
+ 'mypy',
+ 'portforward',
]
with open('requirements.txt') as f:
diff --git a/qpext/README.md b/qpext/README.md
new file mode 100644
index 00000000000..3f977aa4692
--- /dev/null
+++ b/qpext/README.md
@@ -0,0 +1,197 @@
+# Queue Proxy Extension
+
+## What qpext does
+This directory handles extending the Knative queue-proxy sidecar container.
+
+The qpext adds a new port to the queue-proxy container that serves an aggregate Prometheus metrics endpoint.
+When this endpoint is hit, the response contains metrics scraped from both `queue-proxy` and `kserve-container`, so both containers' metrics are emitted on a single endpoint.
+
+## Why qpext is needed
+If an InferenceService uses Knative, then it has at least two containers in one pod, `queue-proxy` and `kserve-container`. A limitation of using Prometheus is that it supports scraping only one endpoint in the pod.
+When there are multiple containers in a pod that emit Prometheus metrics, this becomes an issue (see [Prometheus for multiple port annotations issue #3756](https://github.com/prometheus/prometheus/issues/3756) for the
+full discussion on this topic). To provide an easy-to-use solution, the queue-proxy is extended to handle this use case.
+
+
+See also: [KServe Issue #2465](https://github.com/kserve/kserve/issues/2465).
+
+## How to use
+
+Save this file as qpext_image_patch.yaml, update the tag if needed.
+```yaml
+data:
+ queueSidecarImage: kserve/qpext:latest
+```
+
+Run the following command to patch the deployment config in the appropriate knative namespace.
+```shell
+kubectl patch configmaps -n knative-serving config-deployment --patch-file qpext_image_patch.yaml
+```
+
+## Configs
+
+The qpext relies on pod annotations to be set in the InferenceService YAML. If these annotations are set to true, then environment variables will be added to the queue-proxy container.
+The qpext uses the environment variables to configure which port/path to expose metrics on and which port/path to scrape metrics from in `queue-proxy` and `kserve-container`.
+
+| Annotation | Default | Description |
+|------------------------------------------------------|---------|-------------|
+| serving.kserve.io/enable-metric-aggregation | false | If true, enables metric aggregation in queue-proxy by setting env vars in the queue proxy container to configure scraping ports. |
+| serving.kserve.io/enable-prometheus-scraping | false | If true, sets the prometheus annotations in the pod. If true and "serving.kserve.io/enable-metric-aggregation" is false, the prometheus port will be set as the default queue-proxy port. If both are true, the prometheus port annotation will be set as the aggregate metric port. |
+
+
+| Queue Proxy Env Vars | Default | Description |
+|------------------------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| AGGREGATE_PROMETHEUS_METRICS_PORT | 9088 | The metrics aggregation port in queue-proxy that is added in the qpext. |
+| KSERVE_CONTAINER_PROMETHEUS_METRICS_PORT | 8080 | The default metrics port for the `kserve-container`. If present, the default ClusterServingRuntime overrides this value with each runtime's default prometheus port. |
+| KSERVE_CONTAINER_PROMETHEUS_METRICS_PATH | /metrics | The default metrics path for the `kserve-container`. If present, the default ClusterServingRuntime annotation overrides this value with each runtime's default prometheus path. |
+
+To implement this feature, configure the InferenceService YAML annotations.
+
+```yaml
+apiVersion: "serving.kserve.io/v1beta1"
+kind: "InferenceService"
+metadata:
+ name: "sklearn-irisv2"
+ annotations:
+ serving.kserve.io/enable-metric-aggregation: "true"
+ serving.kserve.io/enable-prometheus-scraping: "true"
+spec:
+ predictor:
+ sklearn:
+ protocolVersion: v2
+ storageUri: "gs://seldon-models/sklearn/iris"
+```
+
+To view the runtime specific defaults for the `kserve-container` prometheus port and path, view the spec annotations in `kserve/config/runtimes`.
+These values can be overridden in the InferenceService YAML annotations.
+
+```yaml
+apiVersion: "serving.kserve.io/v1beta1"
+kind: "InferenceService"
+metadata:
+ name: "sklearn-irisv2"
+ annotations:
+ serving.kserve.io/enable-metric-aggregation: "true"
+ serving.kserve.io/enable-prometheus-scraping: "true"
+ prometheus.kserve.io/port: '8081'
+ prometheus.kserve.io/path: "/other/metrics"
+spec:
+ predictor:
+ sklearn:
+ protocolVersion: v2
+ storageUri: "gs://seldon-models/sklearn/iris"
+```
+The default port for sklearn runtime is `8080`, and the default path is `/metrics`.
+By setting the annotations in the InferenceService YAML, the default runtime configurations are overridden.
+
+**KServe Developer's Note:** If the qpext is implemented in the cluster and you wish to set the default annotation values to `true`,
+the defaults in the configMap can be overridden via patching the configMap or setting up a webhook to override the values.
+To check the default values in your cluster, run
+
+```shell
+kubectl get configmaps inferenceservice-config -n kserve -oyaml
+```
+
+The values appear in the YAML output like:
+
+```yaml
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": "false",
+ "enablePrometheusScraping" : "false"
+ }
+```
+
+If these values are overridden to default to `true`
+
+```yaml
+ metricsAggregator: |-
+ {
+ "enableMetricAggregation": "true",
+ "enablePrometheusScraping" : "true"
+ }
+```
+then the annotations should be inserted into the YAML with `false` values when
+an InferenceService does not want to aggregate metrics and/or set the prometheus
+scraping port annotation.
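+
+To flip these defaults cluster-wide, one option is a merge patch (a sketch, not part of this PR; note that the value of `metricsAggregator` must remain a string-encoded JSON object, as shown above):
+
+```shell
+# Patch the inferenceservice-config ConfigMap so both settings default to "true".
+kubectl patch configmaps -n kserve inferenceservice-config --type merge \
+  -p '{"data":{"metricsAggregator":"{\"enableMetricAggregation\": \"true\", \"enablePrometheusScraping\": \"true\"}"}}'
+```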
+
+## Developer's guide
+
+Changes can be made in the qpext and tested via unit tests, e2e tests, and interactively in a cluster.
+
+### Note on dependencies
+
+The controller reads the `serving.kserve.io/enable-metric-aggregation` and `serving.kserve.io/enable-prometheus-scraping`
+annotations and then adds prometheus annotations to the pod and/or environment variables to the queue-proxy container if specified.
+This code is found in `kserve/pkg/webhook/admission/pod/metrics_aggregate_injector.go`.
+
+The specific runtime default configurations are annotations in the YAML files in `kserve/config/runtimes`.
+
+### Test
+
+In kserve/qpext, run `go test -v ./... -cover` to run the unit tests and get the total coverage.
+The e2e tests are defined in `kserve/test/qpext`. To add an e2e test, create a python test in this directory.
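+
+They can be run with the same script the `test-qpext` CI job in this PR invokes (this assumes KServe and the patched queue-proxy image are already installed in the cluster):
+
+```shell
+# Run the qpext e2e tests, then inspect the resulting pods.
+./test/scripts/gh-actions/run-qpext-test.sh
+kubectl get pods -n kserve
+```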
+
+### Build
+The qpext code can be interactively tested by building the image with any changes,
+pushing the image to dockerhub/container registry, and patching the knative deploy config to use
+the test image. The pods will then pick up the new configuration.
+
+
+(1) To build the qpext image in the kserve/qpext directory (as an example, `some_docker_repo` in dockerhub), run
+```shell
+make docker-build-push-qpext
+```
+
+Alternatively, build and push the image step by step yourself.
+```shell
+cd kserve/qpext
+export QPEXT_IMG={some_docker_repo}/qpext
+docker build -t ${QPEXT_IMG} -f qpext.Dockerfile .
+```
+
+Next push the image to a container registry,
+```shell
+docker push {some_docker_repo}/qpext:latest
+```
+
+(2) Save this file as qpext_image_patch.yaml, update the tag if needed.
+```yaml
+data:
+ queueSidecarImage: kserve/qpext:latest
+```
+
+(3) Run the following command to patch the deployment config in the appropriate knative namespace.
+```shell
+kubectl patch configmaps -n knative-serving config-deployment --patch-file qpext_image_patch.yaml
+```
+
+(4) Confirm the config-deployment updated
+```shell
+kubectl get configmaps -n knative-serving config-deployment -oyaml
+```
+
+(5) Deploy an InferenceService and check that the change works.
+
+For example, using the sklearn example above saved as `sklearn.yaml`
+```shell
+kubectl apply -f sklearn.yaml
+```
+
+To check that the configs were applied as env vars in the queue-proxy container
+and annotations on the pod, check the Pod output.
+```shell
+kubectl get pod {name_of_pod} -oyaml
+```
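+
+For example (hypothetical pod name; the env var names are the ones listed in the table above):
+
+```shell
+# Verify the queue-proxy container received the metric aggregation env vars.
+kubectl get pod sklearn-irisv2-predictor-default-xxx -oyaml | grep PROMETHEUS_METRICS
+```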
+
+To check that the metrics are aggregated, use the KServe [Getting Started](https://kserve.github.io/website/0.9/get_started/first_isvc/#4-determine-the-ingress-ip-and-ports)
+documentation as a guide to send a request to the pod. Next, send a request to the metrics endpoint.
+
+For example, port-forward the pod prometheus aggregate metrics port to localhost.
+```shell
+kubectl port-forward pods/{pod_name} 9088:9088
+```
+Next, cURL the port to see the metrics output.
+```shell
+curl localhost:9088
+```
diff --git a/qpext/cmd/qpext/main.go b/qpext/cmd/qpext/main.go
new file mode 100644
index 00000000000..27463e76394
--- /dev/null
+++ b/qpext/cmd/qpext/main.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2022 The KServe Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "github.com/hashicorp/go-multierror"
+ logger "github.com/kserve/kserve/qpext"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
+ "go.uber.org/zap"
+ "io"
+ "mime"
+ "net"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ "knative.dev/serving/pkg/queue/sharedmain"
+)
+
+var (
+ promRegistry *prometheus.Registry
+)
+
+const (
+ // aggregate scraping env vars from kserve/pkg/constants
+ KServeContainerPrometheusMetricsPortEnvVarKey = "KSERVE_CONTAINER_PROMETHEUS_METRICS_PORT"
+ KServeContainerPrometheusMetricsPathEnvVarKey = "KSERVE_CONTAINER_PROMETHEUS_METRICS_PATH"
+ QueueProxyAggregatePrometheusMetricsPortEnvVarKey = "AGGREGATE_PROMETHEUS_METRICS_PORT"
+ QueueProxyMetricsPort = "9091"
+ DefaultQueueProxyMetricsPath = "/metrics"
+ prometheusTimeoutHeader = "X-Prometheus-Scrape-Timeout-Seconds"
+)
+
+type ScrapeConfigurations struct {
+ logger *zap.Logger
+ QueueProxyPath string `json:"path"`
+ QueueProxyPort string `json:"port"`
+ AppPort string
+ AppPath string
+}
+
+func getURL(port string, path string) string {
+ return fmt.Sprintf("http://localhost:%s%s", port, path)
+}
+
+// getHeaderTimeout parse a string like (1.234) representing number of seconds
+func getHeaderTimeout(timeout string) (time.Duration, error) {
+ timeoutSeconds, err := strconv.ParseFloat(timeout, 64)
+ if err != nil {
+ return 0 * time.Second, err
+ }
+
+ return time.Duration(timeoutSeconds * 1e9), nil
+}
+
+func applyHeaders(into http.Header, from http.Header, keys ...string) {
+ for _, key := range keys {
+ val := from.Get(key)
+ if val != "" {
+ into.Set(key, val)
+ }
+ }
+}
+
+func negotiateMetricsFormat(contentType string) expfmt.Format {
+ mediaType, _, err := mime.ParseMediaType(contentType)
+ if err == nil && mediaType == expfmt.OpenMetricsType {
+ return expfmt.FmtOpenMetrics
+ }
+ return expfmt.FmtText
+}
+
+// scrapeAndWriteAgentMetrics gathers a slice of prometheus metric families and encodes the metrics.
+func scrapeAndWriteAgentMetrics(w io.Writer) error {
+ mfs, err := promRegistry.Gather()
+ if err != nil {
+ return err
+ }
+ enc := expfmt.NewEncoder(w, expfmt.FmtText)
+ var errs error
+ for _, mf := range mfs {
+ if err = enc.Encode(mf); err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ }
+ return errs
+}
+
+// scrape sends a request to the provided url to scrape metrics from
+// This will attempt to mimic some of Prometheus functionality by passing some headers through
+// scrape returns the scraped metrics reader as well as the response's "Content-Type" header to determine the metrics format
+func scrape(url string, header http.Header, logger *zap.Logger) (io.ReadCloser, context.CancelFunc, string, error) {
+ var cancel context.CancelFunc
+ ctx := context.Background()
+ if timeoutString := header.Get(prometheusTimeoutHeader); timeoutString != "" {
+ timeout, err := getHeaderTimeout(timeoutString)
+ if err != nil {
+ logger.Error("Failed to parse timeout header", zap.Error(err), zap.String("timeout", timeoutString))
+ } else {
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ if err != nil {
+ return nil, cancel, "", err
+ }
+
+ applyHeaders(req.Header, header, "Accept",
+ "User-Agent",
+ prometheusTimeoutHeader,
+ )
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, cancel, "", fmt.Errorf("error scraping %s: %v", url, err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, cancel, "", fmt.Errorf("error scraping %s, status code: %v", url, resp.StatusCode)
+ }
+ format := resp.Header.Get("Content-Type")
+ return resp.Body, cancel, format, nil
+}
+
+func NewScrapeConfigs(logger *zap.Logger, queueProxyPort string, appPort string, appPath string) *ScrapeConfigurations {
+ return &ScrapeConfigurations{
+ logger: logger,
+ QueueProxyPath: DefaultQueueProxyMetricsPath,
+ QueueProxyPort: queueProxyPort,
+ AppPort: appPort,
+ AppPath: appPath,
+ }
+}
+
+func (sc *ScrapeConfigurations) handleStats(w http.ResponseWriter, r *http.Request) {
+ var err error
+ var queueProxy, application io.ReadCloser
+ var queueProxyCancel, appCancel context.CancelFunc
+
+ defer func() {
+ if application != nil {
+ application.Close()
+ }
+ if queueProxyCancel != nil {
+ queueProxyCancel()
+ }
+ if appCancel != nil {
+ appCancel()
+ }
+ }()
+
+ // Gather all the metrics we will merge
+ if sc.QueueProxyPort != "" {
+ queueProxyURL := getURL(sc.QueueProxyPort, sc.QueueProxyPath)
+ if queueProxy, queueProxyCancel, _, err = scrape(queueProxyURL, r.Header, sc.logger); err != nil {
+ sc.logger.Error("failed scraping envoy metrics", zap.Error(err))
+ }
+ }
+
+ // Scrape app metrics if defined and capture their format
+ var format expfmt.Format
+ if sc.AppPort != "" {
+ kserveContainerURL := getURL(sc.AppPort, sc.AppPath)
+ var contentType string
+ if application, appCancel, contentType, err = scrape(kserveContainerURL, r.Header, sc.logger); err != nil {
+ sc.logger.Error("failed scraping application metrics", zap.Error(err))
+ }
+ format = negotiateMetricsFormat(contentType)
+ } else {
+ // Without app metrics format use a default
+ format = expfmt.FmtText
+ }
+
+ w.Header().Set("Content-Type", string(format))
+
+ // Write out the metrics
+ if err = scrapeAndWriteAgentMetrics(io.Writer(w)); err != nil {
+ sc.logger.Error("failed scraping and writing agent metrics", zap.Error(err))
+ }
+
+ if queueProxy != nil {
+ _, err = io.Copy(w, queueProxy)
+ if err != nil {
+ sc.logger.Error("failed to scraping and writing queue proxy metrics", zap.Error(err))
+ }
+ }
+
+ // App metrics must go last because if they are FmtOpenMetrics,
+ // they will have a trailing "# EOF" which terminates the full exposition
+ if application != nil {
+ _, err = io.Copy(w, application)
+ if err != nil {
+ sc.logger.Error("failed to scraping and writing application metrics", zap.Error(err))
+ }
+ }
+}
+
+func main() {
+ zapLogger := logger.InitializeLogger()
+ promRegistry = prometheus.NewRegistry()
+ mux := http.NewServeMux()
+ ctx := context.Background()
+ sc := NewScrapeConfigs(
+ zapLogger,
+ QueueProxyMetricsPort,
+ os.Getenv(KServeContainerPrometheusMetricsPortEnvVarKey),
+ os.Getenv(KServeContainerPrometheusMetricsPathEnvVarKey),
+ )
+ mux.HandleFunc(`/metrics`, sc.handleStats)
+ l, err := net.Listen("tcp", fmt.Sprintf(":%v", os.Getenv(QueueProxyAggregatePrometheusMetricsPortEnvVarKey)))
+ if err != nil {
+ zapLogger.Error("error listening on status port", zap.Error(err))
+ return
+ }
+
+ defer l.Close()
+
+ go func() {
+ if err = http.Serve(l, mux); err != nil {
+ zapLogger.Error("error serving aggregate metrics", zap.Error(err))
+ fmt.Println(err)
+ select {
+ case <-ctx.Done():
+ // We are shutting down already, don't trigger SIGTERM
+ return
+ default:
+ }
+ }
+ }()
+ zapLogger.Info("Stats server has successfully started")
+ if sharedmain.Main() != nil {
+ os.Exit(1)
+ }
+ // Wait for the agent to be shut down.
+ <-ctx.Done()
+ zapLogger.Info("Stats server has successfully terminated")
+}
diff --git a/qpext/cmd/qpext/main_test.go b/qpext/cmd/qpext/main_test.go
new file mode 100644
index 00000000000..485844707bd
--- /dev/null
+++ b/qpext/cmd/qpext/main_test.go
@@ -0,0 +1,233 @@
+/*
+Copyright 2022 The KServe Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ logger "github.com/kserve/kserve/qpext"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
+ "github.com/stretchr/testify/assert"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestGetHeaderTimeout(t *testing.T) {
+ inputs := []string{"1.23", "100", "notvalid", "12.wrong"}
+ errIsNil := []bool{true, true, false, false}
+
+ for i, input := range inputs {
+ _, err := getHeaderTimeout(input)
+ if errIsNil[i] == true {
+ assert.NoError(t, err)
+ } else {
+ assert.Error(t, err)
+ }
+ }
+}
+
+func TestNegotiateMetricsFormat(t *testing.T) {
+ contentTypes := []string{"", "random", "text/plain;version=0.0.4;q=0.5,*/*;q=0.1", `application/openmetrics-text; version=1.0.0; charset=utf-8`}
+ expected := []expfmt.Format{expfmt.FmtText, expfmt.FmtText, expfmt.FmtText, expfmt.FmtOpenMetrics}
+
+ for i, contentType := range contentTypes {
+ result := negotiateMetricsFormat(contentType)
+ assert.Equal(t, expected[i], result)
+ }
+}
+
+func TestScrapeHeaders(t *testing.T) {
+ metricExample := `# TYPE my_metric counter
+ my_metric{} 0
+ `
+ timeoutHeader := "X-Prometheus-Scrape-Timeout-Seconds"
+ tests := []struct {
+ name string
+ headerVal string
+ expectNilCancel bool
+ }{
+ {
+ name: "timeout header parses",
+ headerVal: "10",
+ },
+ {
+ name: "timeout header invalid",
+ headerVal: "invalid",
+ expectNilCancel: true,
+ },
+ }
+
+ zapLogger := logger.InitializeLogger()
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ promRegistry = prometheus.NewRegistry()
+ qp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write([]byte(metricExample))
+ assert.NoError(t, err)
+ }))
+ defer qp.Close()
+
+ url := getURL(strings.Split(qp.URL, ":")[2], "/metrics")
+
+ req := &http.Request{
+ Header: map[string][]string{timeoutHeader: {test.headerVal}},
+ }
+ queueProxy, queueProxyCancel, _, err := scrape(url, req.Header, zapLogger)
+ assert.NoError(t, err)
+ assert.NotNil(t, queueProxy)
+ if test.expectNilCancel {
+ assert.Nil(t, queueProxyCancel)
+ } else {
+ assert.NotNil(t, queueProxyCancel)
+ }
+ })
+ }
+}
+
+func TestScrapeErr(t *testing.T) {
+ metricExample := `# TYPE my_metric counter
+ my_metric{} 0
+ `
+ zapLogger := logger.InitializeLogger()
+ promRegistry = prometheus.NewRegistry()
+ qp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write([]byte(metricExample))
+ assert.NoError(t, err)
+ }))
+ defer qp.Close()
+
+ url := "not-a-real-url"
+
+ req := &http.Request{}
+ queueProxy, _, _, err := scrape(url, req.Header, zapLogger)
+ assert.Error(t, err)
+ assert.Nil(t, queueProxy)
+}
+
+func TestHandleStats(t *testing.T) {
+ metricExample := `# TYPE my_metric counter
+ my_metric{} 0
+ `
+ otherMetricExample := `# TYPE my_other_metric counter
+ my_other_metric{} 0
+ `
+ tests := []struct {
+ name string
+ queueproxy string
+ app string
+ output string
+ expectParseError bool
+ }{
+ {
+ name: "queueproxy metric only",
+ queueproxy: metricExample,
+ output: metricExample,
+ },
+ {
+ name: "app metric only",
+ app: metricExample,
+ output: metricExample,
+ },
+ {
+ name: "multiple metric",
+ queueproxy: metricExample,
+ app: otherMetricExample,
+ output: metricExample + otherMetricExample,
+ },
+ // when app and queueproxy share a metric, Prometheus will fail.
+ {
+ name: "conflict metric",
+ queueproxy: metricExample + otherMetricExample,
+ app: metricExample,
+ output: metricExample + otherMetricExample + metricExample,
+ expectParseError: true,
+ },
+ }
+
+ zapLogger := logger.InitializeLogger()
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ promRegistry = prometheus.NewRegistry()
+ rec := httptest.NewRecorder()
+ qp := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write([]byte(test.queueproxy))
+ assert.NoError(t, err)
+ }))
+ defer qp.Close()
+
+ app := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, err := w.Write([]byte(test.app))
+ assert.NoError(t, err)
+ }))
+ defer app.Close()
+
+ psc := &ScrapeConfigurations{
+ logger: zapLogger,
+ QueueProxyPort: strings.Split(qp.URL, ":")[2],
+ AppPort: strings.Split(app.URL, ":")[2],
+ }
+ req := &http.Request{}
+ psc.handleStats(rec, req)
+			assert.Equal(t, 200, rec.Code)
+ assert.Contains(t, rec.Body.String(), test.output)
+
+ parser := expfmt.TextParser{}
+ mfMap, err := parser.TextToMetricFamilies(strings.NewReader(rec.Body.String()))
+			if test.expectParseError {
+				assert.Errorf(t, err, "expected a parse error, got %+v", mfMap)
+			} else {
+				assert.NoErrorf(t, err, "failed to parse metrics: %v", err)
+			}
+ })
+ }
+}
+
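+// TestHandleStatsErr verifies that handleStats still responds with HTTP 200 even
+// when one or both upstream metrics endpoints fail.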
+func TestHandleStatsErr(t *testing.T) {
+ zapLogger := logger.InitializeLogger()
+ fail := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ }))
+ defer fail.Close()
+ pass := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer pass.Close()
+ failPort := strings.Split(fail.URL, ":")[2]
+ passPort := strings.Split(pass.URL, ":")[2]
+
+ tests := []struct {
+ name string
+ queueproxy string
+ app string
+ }{
+ {"both pass", passPort, passPort},
+ {"queue proxy pass", passPort, failPort},
+ {"app pass", failPort, passPort},
+ {"both fail", failPort, failPort},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ sc := NewScrapeConfigs(zapLogger, test.queueproxy, test.app, DefaultQueueProxyMetricsPath)
+ req := &http.Request{}
+ rec := httptest.NewRecorder()
+ sc.handleStats(rec, req)
+ assert.Equal(t, 200, rec.Code)
+ })
+ }
+}
diff --git a/qpext/go.mod b/qpext/go.mod
new file mode 100644
index 00000000000..69c46f51371
--- /dev/null
+++ b/qpext/go.mod
@@ -0,0 +1,79 @@
+module github.com/kserve/kserve/qpext
+
+go 1.18
+
+require (
+ github.com/hashicorp/go-multierror v1.1.1
+ github.com/prometheus/client_golang v1.13.0
+ github.com/prometheus/common v0.37.0
+ github.com/stretchr/testify v1.7.0
+ go.uber.org/zap v1.19.1
+ knative.dev/serving v0.34.2
+)
+
+require (
+ contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
+ contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
+ contrib.go.opencensus.io/exporter/zipkin v0.1.2 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blendle/zapdriver v1.3.1 // indirect
+ github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+ github.com/go-kit/log v0.2.0 // indirect
+ github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/go-logr/logr v1.2.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/google/go-cmp v0.5.8 // indirect
+ github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+ github.com/hashicorp/errwrap v1.0.0 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kelseyhightower/envconfig v1.4.0 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/openzipkin/zipkin-go v0.3.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/prometheus/statsd_exporter v0.21.0 // indirect
+ go.opencensus.io v0.23.0 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/automaxprocs v1.4.0 // indirect
+ go.uber.org/multierr v1.6.0 // indirect
+ golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
+ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+ golang.org/x/text v0.3.7 // indirect
+ golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+ google.golang.org/api v0.70.0 // indirect
+ google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/genproto v0.0.0-20220301145929-1ac2ace0dbf7 // indirect
+ google.golang.org/grpc v1.44.0 // indirect
+ google.golang.org/protobuf v1.28.1 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ k8s.io/api v0.23.9 // indirect
+ k8s.io/apimachinery v0.23.9 // indirect
+ k8s.io/client-go v0.23.9 // indirect
+ k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f // indirect
+ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
+ knative.dev/control-protocol v0.0.0-20220818153549-f18dbde7d9bd // indirect
+ knative.dev/networking v0.0.0-20220818010248-e51df7cdf571 // indirect
+ knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 // indirect
+ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+ sigs.k8s.io/yaml v1.3.0 // indirect
+)
diff --git a/qpext/go.sum b/qpext/go.sum
new file mode 100644
index 00000000000..7d1dda53946
--- /dev/null
+++ b/qpext/go.sum
@@ -0,0 +1,954 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI=
+contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
+contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs=
+contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0=
+contrib.go.opencensus.io/exporter/zipkin v0.1.2 h1:YqE293IZrKtqPnpwDPH/lOqTWD/s3Iwabycam74JV3g=
+contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
+github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
+github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387 h1:GWICy4b02s8EA1M9H5krRQ48BKpIHO5LtBBm2BQLhx0=
+github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387/go.mod h1:eTLvLZaEe2FoQsb25t7BLxQQryyrwHTzFfwxN87mhAw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
+github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ=
+github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8=
+github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
+github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
+go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
+gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0 h1:67zQnAE0T2rB0A3CwLSas0K+SbVzSxP+zTLkQLexeiw=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220301145929-1ac2ace0dbf7 h1:6fWaU2vz6/23eNdMU1V00ZPO5AZFQ2SLsO6r/z0Z2H0=
+google.golang.org/genproto v0.0.0-20220301145929-1ac2ace0dbf7/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.23.9 h1:v7Ee2CZuyb6rVm1q4bUe7ZonWleLsrvgcOTxPGjQVa4=
+k8s.io/api v0.23.9/go.mod h1:r4g0GrGdLgwSYB90qgO4tBrbKtALBhUfut+oFt4ikCc=
+k8s.io/apimachinery v0.23.9 h1:u9Pu7Ffe+9+QJUemtNjuCwvHSnOUeYEwgSHV+88Ne0g=
+k8s.io/apimachinery v0.23.9/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
+k8s.io/client-go v0.23.9 h1:OKxNCL+nhw7UBB5b01OVuAV4Db/AdBdaV6/GYpucuOw=
+k8s.io/client-go v0.23.9/go.mod h1:sNo0X0MZqo4Uu0qDY5Fl5Y60cJFinBDWWUBOAM5JUCM=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f h1:dltw7bAn8bCrQ2CmzzhgoieUZEbWqrvIGVdHGioP5nY=
+k8s.io/klog/v2 v2.70.2-0.20220707122935-0990e81f1a8f/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
+k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+knative.dev/control-protocol v0.0.0-20220818153549-f18dbde7d9bd h1:sco6+LdPEq1iYm5Fo1Sva2LuiRjS1tqMqZFAVb7Yfd8=
+knative.dev/control-protocol v0.0.0-20220818153549-f18dbde7d9bd/go.mod h1:vO3Xc0k0h6fFVsVG9kNMUMcVKG7MAx7jMbZDvgSuzwI=
+knative.dev/networking v0.0.0-20220818010248-e51df7cdf571 h1:Lu/TsJjxg1p+2CMr2LNHEdEFBNHYjDoZv2f1QZoM8jg=
+knative.dev/networking v0.0.0-20220818010248-e51df7cdf571/go.mod h1:m3ataWRwmbHjOY9sCFvcDWRNLVITxVl0fH0RxdCa4jE=
+knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 h1:GNmzHVaUo3zoi/wtIN71LPQaWy6DdoYzmb+GIq2s4fw=
+knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15/go.mod h1:YLjXbkQLlGHok+u0FLfMbBHFzY9WGu3GHhnrptoAy8I=
+knative.dev/serving v0.34.2 h1:bFamUUcc+Rh73mss2ICwmoK+dBKK/MdbgEsfdIK/Mis=
+knative.dev/serving v0.34.2/go.mod h1:IyfedOBq3KzcD5dZONjbix2BfS0jOwDq5td8UE9CjCk=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
+sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/qpext/logger.go b/qpext/logger.go
new file mode 100644
index 00000000000..90e8993ab4b
--- /dev/null
+++ b/qpext/logger.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2022 The KServe Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logger
+
+import (
+ "go.uber.org/zap"
+)
+
+type Logger = zap.Logger
+
+func InitializeLogger() *Logger {
+ zaplogger, _ := zap.NewProduction()
+ return zaplogger
+}
diff --git a/qpext/qpext.Dockerfile b/qpext/qpext.Dockerfile
new file mode 100644
index 00000000000..34beea04639
--- /dev/null
+++ b/qpext/qpext.Dockerfile
@@ -0,0 +1,20 @@
+# Build the inference qpext binary
+FROM golang:1.18 as builder
+
+# Copy in the go src
+WORKDIR /go/src/github.com/kserve/kserve/qpext
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+RUN go mod download
+
+COPY cmd/qpext cmd/qpext
+COPY logger.go logger.go
+
+# Build
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o qpext ./cmd/qpext
+
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /ko-app
+COPY --from=builder /go/src/github.com/kserve/kserve/qpext /ko-app/
+ENTRYPOINT ["/ko-app/qpext"]
diff --git a/test/e2e/qpext/__init__.py b/test/e2e/qpext/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/test/e2e/qpext/test_qpext.py b/test/e2e/qpext/test_qpext.py
new file mode 100644
index 00000000000..fbb4b64afcd
--- /dev/null
+++ b/test/e2e/qpext/test_qpext.py
@@ -0,0 +1,94 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+import time
+import requests
+import portforward
+from kubernetes import client
+from kserve import (
+ constants,
+ KServeClient,
+ V1beta1InferenceService,
+ V1beta1InferenceServiceSpec,
+ V1beta1PredictorSpec,
+ V1beta1SKLearnSpec,
+)
+from kubernetes.client import V1ResourceRequirements
+from ..common.utils import KSERVE_TEST_NAMESPACE
+from ..common.utils import predict
+
+logging.basicConfig(level=logging.INFO)
+
+ENABLE_METRIC_AGG = "serving.kserve.io/enable-metric-aggregation"
+METRICS_AGG_PORT = 9088
+METRICS_PATH = "/metrics"
+
+
+def test_qpext_kserve():
+ # test the qpext using the sklearn predictor
+ service_name = "isvc-sklearn-v2"
+ predictor = V1beta1PredictorSpec(
+ min_replicas=1,
+ sklearn=V1beta1SKLearnSpec(
+ storage_uri="gs://seldon-models/sklearn/mms/lr_model",
+ protocol_version="v2",
+ resources=V1ResourceRequirements(
+ requests={"cpu": "50m", "memory": "128Mi"},
+ limits={"cpu": "100m", "memory": "512Mi"},
+ ),
+ ),
+ )
+
+ isvc = V1beta1InferenceService(
+ api_version=constants.KSERVE_V1BETA1,
+ kind=constants.KSERVE_KIND,
+ metadata=client.V1ObjectMeta(
+ name=service_name, namespace=KSERVE_TEST_NAMESPACE,
+ # set the metric aggregation annotation to true
+ annotations={ENABLE_METRIC_AGG: "true"},
+ ),
+ spec=V1beta1InferenceServiceSpec(predictor=predictor),
+ )
+
+ kserve_client = KServeClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
+ kserve_client.create(isvc)
+ kserve_client.wait_isvc_ready(service_name, namespace=KSERVE_TEST_NAMESPACE)
+
+ res = predict(service_name, "./data/iris_input_v2.json", protocol_version="v2")
+ assert res["outputs"][0]["data"] == [1, 1]
+
+ send_metrics_request(kserve_client, service_name)
+ kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
+
+
+def send_metrics_request(kserve_client, service_name):
+ time.sleep(10)
+ pods = kserve_client.core_api.list_namespaced_pod(KSERVE_TEST_NAMESPACE,
+ label_selector='serving.kserve.io/inferenceservice={}'.
+ format(service_name))
+ pod_name = ""
+ for pod in pods.items:
+ # get a pod name
+ pod_name = pod.metadata.name
+ break
+
+    url = f"http://localhost:{METRICS_AGG_PORT}{METRICS_PATH}"
+ with portforward.forward(KSERVE_TEST_NAMESPACE, pod_name, METRICS_AGG_PORT, METRICS_AGG_PORT):
+ response = requests.get(url)
+ logging.info(f"response: {response}, content: {response.content}")
+ logging.info("Got response code %s, content %s", response.status_code, response.content)
+
+ assert response.status_code == 200
+ assert len(response.content) > 0
diff --git a/test/scripts/gh-actions/build-qpext-image.sh b/test/scripts/gh-actions/build-qpext-image.sh
new file mode 100755
index 00000000000..0cfd8d30a31
--- /dev/null
+++ b/test/scripts/gh-actions/build-qpext-image.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2022 The KServe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script builds the queue-proxy extension image.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+echo "Github SHA ${GITHUB_SHA}"
+export QPEXT_IMG=kserve/qpext:${GITHUB_SHA}
+
+pushd qpext >/dev/null
+echo "Building queue proxy extension image"
+docker build -t ${QPEXT_IMG} -f qpext.Dockerfile .
+popd
+echo "Done building image"
diff --git a/test/scripts/gh-actions/run-e2e-tests.sh b/test/scripts/gh-actions/run-e2e-tests.sh
index 8a74af7587b..c557a7773ba 100755
--- a/test/scripts/gh-actions/run-e2e-tests.sh
+++ b/test/scripts/gh-actions/run-e2e-tests.sh
@@ -22,5 +22,5 @@ set -o pipefail
echo "Starting E2E functional tests ..."
pushd test/e2e >/dev/null
- pytest -m "$1"
+ pytest -m "$1" --ignore=qpext
popd
diff --git a/test/scripts/gh-actions/run-qpext-test.sh b/test/scripts/gh-actions/run-qpext-test.sh
new file mode 100755
index 00000000000..4506c414883
--- /dev/null
+++ b/test/scripts/gh-actions/run-qpext-test.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Copyright 2022 The KServe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs the queue proxy extension e2e tests.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+echo "Starting E2E queue proxy extension tests ..."
+pushd test/e2e >/dev/null
+ pytest qpext
+popd
|
horovod__horovod-2262 | [Horovod on Ray] Failed on example of tensorflow2_mnist_ray.py
**Environment:**
1. Framework: TensorFlow 1.15.0
2. Horovod version: 0.20.0
**Your question:**
Hi @tgaddair @richardliaw, I tried the Horovod on Ray example on CPU nodes, but it failed.
The Ray cluster itself is fine.
Using TensorFlow backend.
(pid=262081) Using TensorFlow backend.
(pid=262071) Using TensorFlow backend.
Traceback (most recent call last):
File "tensorflow2_mnist_ray.py", line 94, in <module>
executor.run(train, kwargs=dict(num_epochs=1))
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/horovod/ray/runner.py", line 426, in run
lambda w: fn(*args, **kwargs)) for worker in self.workers])
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/ray/worker.py", line 1516, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(RecursionError): ray::BaseHorovodWorker.execute() (pid=262081, ip=10.0.0.131)
File "python/ray/_raylet.pyx", line 414, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 431, in ray._raylet.execute_task
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/ray/serialization.py", line 317, in deserialize_objects
self._deserialize_object(data, metadata, object_id))
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/ray/serialization.py", line 257, in _deserialize_object
return self._deserialize_msgpack_data(data, metadata)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/ray/serialization.py", line 238, in _deserialize_msgpack_data
python_objects = self._deserialize_pickle5_data(pickle5_data)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/ray/serialization.py", line 226, in _deserialize_pickle5_data
obj = pickle.loads(in_band)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
.......
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 50, in __getattr__
module = self._load()
File "/home/xianyang/sw/miniconda3/envs/test/lib/python3.7/site-packages/tensorflow/__init__.py", line 44, in _load
module = _importlib.import_module(self.__name__)
RecursionError: maximum recursion depth exceeded while calling a Python object
| [
{
"content": "import tensorflow as tf\nimport horovod.tensorflow.keras as hvd\nimport horovod.keras as hvd\n\nimport ray\nfrom horovod.ray import RayExecutor\n\n\ndef train(num_epochs):\n # Horovod: initialize Horovod.\n hvd.init()\n\n # Horovod: pin GPU to be used to process local rank (one GPU per process)\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n if gpus:\n tf.config.experimental.set_visible_devices(\n gpus[hvd.local_rank()], 'GPU')\n\n (mnist_images, mnist_labels), _ = \\\n tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())\n\n dataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),\n tf.cast(mnist_labels, tf.int64))\n )\n dataset = dataset.repeat().shuffle(10000).batch(128)\n\n mnist_model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),\n tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n\n # Horovod: adjust learning rate based on number of GPUs.\n scaled_lr = 0.001 * hvd.size()\n opt = tf.optimizers.Adam(scaled_lr)\n\n # Horovod: add Horovod DistributedOptimizer.\n opt = hvd.DistributedOptimizer(opt)\n\n # Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow\n # uses hvd.DistributedOptimizer() to compute gradients.\n mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),\n optimizer=opt,\n metrics=['accuracy'],\n experimental_run_tf_function=False)\n\n callbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n\n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard or other metrics-based callbacks.\n hvd.callbacks.MetricAverageCallback(),\n\n # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final\n # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during\n # the first three epochs. See https://arxiv.org/abs/1706.02677 for details.\n hvd.callbacks.LearningRateWarmupCallback(\n warmup_epochs=3, initial_lr=scaled_lr, verbose=1),\n ]\n\n # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.\n if hvd.rank() == 0:\n callbacks.append(tf.keras.callbacks.ModelCheckpoint(\n './checkpoint-{epoch}.h5'))\n\n # Horovod: write logs on worker 0.\n verbose = 1 if hvd.rank() == 0 else 0\n\n # Train the model.\n # Horovod: adjust number of steps based on number of GPUs.\n mnist_model.fit(dataset, steps_per_epoch=500 // hvd.size(),\n callbacks=callbacks, epochs=num_epochs, verbose=verbose)\n\n\nray.init()\nsettings = RayExecutor.create_settings(timeout_s=30)\nexecutor = RayExecutor(settings, num_hosts=1, num_slots=2, use_gpu=False)\nexecutor.start()\nexecutor.run(train, kwargs=dict(num_epochs=1))\nexecutor.shutdown()\n",
"path": "examples/tensorflow2_mnist_ray.py"
}
] | [
{
"content": "import tensorflow as tf\nimport horovod.tensorflow.keras as hvd\n\nimport ray\nfrom horovod.ray import RayExecutor\n\n\ndef train(num_epochs):\n # Horovod: initialize Horovod.\n hvd.init()\n\n # Horovod: pin GPU to be used to process local rank (one GPU per process)\n gpus = tf.config.experimental.list_physical_devices('GPU')\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n if gpus:\n tf.config.experimental.set_visible_devices(\n gpus[hvd.local_rank()], 'GPU')\n\n (mnist_images, mnist_labels), _ = \\\n tf.keras.datasets.mnist.load_data(path='mnist-%d.npz' % hvd.rank())\n\n dataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(mnist_images[..., tf.newaxis] / 255.0, tf.float32),\n tf.cast(mnist_labels, tf.int64))\n )\n dataset = dataset.repeat().shuffle(10000).batch(128)\n\n mnist_model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, [3, 3], activation='relu'),\n tf.keras.layers.Conv2D(64, [3, 3], activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Dropout(0.25),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n\n # Horovod: adjust learning rate based on number of GPUs.\n scaled_lr = 0.001 * hvd.size()\n opt = tf.optimizers.Adam(scaled_lr)\n\n # Horovod: add Horovod DistributedOptimizer.\n opt = hvd.DistributedOptimizer(opt)\n\n # Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow\n # uses hvd.DistributedOptimizer() to compute gradients.\n mnist_model.compile(loss=tf.losses.SparseCategoricalCrossentropy(),\n optimizer=opt,\n metrics=['accuracy'],\n experimental_run_tf_function=False)\n\n callbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n\n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard or other metrics-based callbacks.\n hvd.callbacks.MetricAverageCallback(),\n\n # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final\n # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during\n # the first three epochs. See https://arxiv.org/abs/1706.02677 for details.\n hvd.callbacks.LearningRateWarmupCallback(\n warmup_epochs=3, initial_lr=scaled_lr, verbose=1),\n ]\n\n # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.\n if hvd.rank() == 0:\n callbacks.append(tf.keras.callbacks.ModelCheckpoint(\n './checkpoint-{epoch}.h5'))\n\n # Horovod: write logs on worker 0.\n verbose = 1 if hvd.rank() == 0 else 0\n\n # Train the model.\n # Horovod: adjust number of steps based on number of GPUs.\n mnist_model.fit(dataset, steps_per_epoch=500 // hvd.size(),\n callbacks=callbacks, epochs=num_epochs, verbose=verbose)\n\n\nray.init()\nsettings = RayExecutor.create_settings(timeout_s=30)\nexecutor = RayExecutor(settings, num_hosts=1, num_slots=2, use_gpu=False)\nexecutor.start()\nexecutor.run(train, kwargs=dict(num_epochs=1))\nexecutor.shutdown()\n",
"path": "examples/tensorflow2_mnist_ray.py"
}
] | diff --git a/examples/tensorflow2_mnist_ray.py b/examples/tensorflow2_mnist_ray.py
index f44905221b..1e446d931c 100644
--- a/examples/tensorflow2_mnist_ray.py
+++ b/examples/tensorflow2_mnist_ray.py
@@ -1,6 +1,5 @@
import tensorflow as tf
import horovod.tensorflow.keras as hvd
-import horovod.keras as hvd
import ray
from horovod.ray import RayExecutor
|
PaddlePaddle__PaddleSpeech-1311 | speech_recognition example fails to run
Here is the source code:
import paddle
from paddlespeech.cli import ASRExecutor
asr_executor = ASRExecutor()
text = asr_executor(
model='conformer_wenetspeech',
lang='zh',
sample_rate=16000,
config=None, # Set `config` and `ckpt_path` to None to use pretrained model.
ckpt_path=None,
audio_file='input.wav',
force_yes=False,
device=paddle.get_device())
print('ASR Result: \n{}'.format(text))
After running it, the following error is shown:
Traceback (most recent call last):
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\demos\speech_recognition\speech_recognition.py", line 5, in <module>
text = asr_executor(
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\paddlespeech\cli\asr\infer.py", line 449, in __call__
self._init_from_path(model, lang, sample_rate, config, ckpt_path)
File "D:\BaiduNetdiskDownload\PaddleSpeech-r0.1.0\paddlespeech\cli\asr\infer.py", line 166, in _init_from_path
self.config.merge_from_file(self.cfg_path)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 212, in merge_from_file
cfg = self.load_cfg(f)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 349, in load_cfg
return cls._load_cfg_from_file(cfg_file_obj_or_str)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 358, in _load_cfg_from_file
return cls._load_cfg_from_yaml_str(file_obj.read())
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 371, in _load_cfg_from_yaml_str
return cls(cfg_as_dict)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 126, in _create_config_tree_from_dict
dic[k] = cls(v, key_list=key_list + [k])
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 126, in _create_config_tree_from_dict
dic[k] = cls(v, key_list=key_list + [k])
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 86, in __init__
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 129, in _create_config_tree_from_dict
_assert_with_logging(
File "D:\SoftWare\Anaconda\lib\site-packages\yacs\config.py", line 521, in _assert_with_logging
assert cond, msg
AssertionError: Key model.model_conf.ctc_grad_norm_type with value <class 'NoneType'> is not a valid type; valid types: {<class 'list'>, <class 'tuple'>, <class 'str'>, <class 'int'>, <class 'float'>, <class 'bool'>}
I installed paddlepaddle on Windows following the documentation, then downloaded the PaddleSpeech-r0.1.0 source and ran this test directly. How can I resolve this problem? Thanks.
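For reference, the assertion comes from yacs rejecting `None` as a config leaf value; the fix below pins `yacs~=0.1.8`, which suggests that installing a compatible yacs release resolves it. A minimal repro sketch, independent of PaddleSpeech (the config key is taken from the traceback above):

```
# With a yacs release that rejects None leaves, this raises the same
# AssertionError as above; with yacs~=0.1.8 it should construct cleanly.
from yacs.config import CfgNode

cfg = CfgNode({"model": {"model_conf": {"ctc_grad_norm_type": None}}})
print(cfg.model.model_conf.ctc_grad_norm_type)
```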
| [
{
"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"nltk\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'paddlespeech/s2t/decoders/ctcdecoder/swig'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n 
shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.0',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"nltk\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'paddlespeech/s2t/decoders/ctcdecoder/swig'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n 
shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.0',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n",
"path": "setup.py"
}
] | diff --git a/requirements.txt b/requirements.txt
index e567dfa7099..c6889318398 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -43,5 +43,5 @@ typeguard
unidecode
visualdl
webrtcvad
-yacs
+yacs~=0.1.8
yq
diff --git a/setup.py b/setup.py
index 75b3fe5c809..5d4ff80f7bc 100644
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@
"typeguard",
"visualdl",
"webrtcvad",
- "yacs",
+ "yacs~=0.1.8",
],
"develop": [
"ConfigArgParse",
|
pyodide__pyodide-4018 | Add check for WASM magic number in .so files
> `pyodide build` now replaces native `.so` slugs with Emscripten
> slugs. Usually `.so`s in the generated wheels are actually Emscripten `.so`s
> so this is good. If they are actually native `.so`s then there is a problem
> either way.
Not very critical, but should we actually check that the `.so` files are Emscripten `.so`s rather than native `.so`s that ended up there by mistake? For instance, we could check for the [WASM magic number](https://openhome.cc/eGossip/WebAssembly/Module.html) in the first 4 bytes. It's supposed to be `0061 736d`, though I get the same bytes in a different order when I try:
```
$ hexdump -n 8 numpy/core/_multiarray_umath.cpython-311-wasm32-emscripten.so
0000000 6100 6d73 0001 0000
```
but maybe I'm using hexdump wrong (**Edit:** yes — without options hexdump prints little-endian 16-bit words, which swaps the byte pairs; with `-C` the bytes appear in file order)
_Originally posted by @rth in https://github.com/pyodide/pyodide/issues/3927#issuecomment-1599511454_
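The helper eventually added in the files below, `check_wasm_magic_number`, does exactly this four-byte comparison. A minimal standalone sketch:

```
from pathlib import Path

WASM_BINARY_MAGIC = b"\0asm"  # bytes 0x00 0x61 0x73 0x6d on disk

def is_wasm_shared_object(path: Path) -> bool:
    """True if the file starts with the WebAssembly magic number."""
    with path.open("rb") as f:
        return f.read(4) == WASM_BINARY_MAGIC
```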
| [
{
"content": "# Common functions shared by other modules.\n# Notes for contributors:\n# This module should not import any other modules from pyodide-build except logger to avoid circular imports.\n\nimport contextlib\nimport hashlib\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\nimport zipfile\nfrom collections import deque\nfrom collections.abc import Generator, Iterable, Iterator, Mapping\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, NoReturn\nfrom zipfile import ZipFile\n\nfrom packaging.tags import Tag\nfrom packaging.utils import canonicalize_name as canonicalize_package_name\nfrom packaging.utils import parse_wheel_filename\n\nfrom .logger import logger\n\n\ndef find_matching_wheels(\n wheel_paths: Iterable[Path], supported_tags: Iterator[Tag]\n) -> Iterator[Path]:\n \"\"\"\n Returns the sequence wheels whose tags match the Pyodide interpreter.\n\n Parameters\n ----------\n wheel_paths\n A list of paths to wheels\n supported_tags\n A list of tags that the environment supports\n\n Returns\n -------\n The subset of wheel_paths that have tags that match the Pyodide interpreter.\n \"\"\"\n wheel_paths = list(wheel_paths)\n wheel_tags_list: list[frozenset[Tag]] = []\n\n for wheel in wheel_paths:\n _, _, _, tags = parse_wheel_filename(wheel.name)\n wheel_tags_list.append(tags)\n\n for supported_tag in supported_tags:\n for wheel_path, wheel_tags in zip(wheel_paths, wheel_tags_list, strict=True):\n if supported_tag in wheel_tags:\n yield wheel_path\n\n\ndef parse_top_level_import_name(whlfile: Path) -> list[str] | None:\n \"\"\"\n Parse the top-level import names from a wheel file.\n \"\"\"\n\n if not whlfile.name.endswith(\".whl\"):\n raise RuntimeError(f\"{whlfile} is not a wheel file.\")\n\n whlzip = zipfile.Path(whlfile)\n\n def _valid_package_name(dirname: str) -> bool:\n return all([invalid_chr not in dirname for invalid_chr in \".- \"])\n\n def _has_python_file(subdir: zipfile.Path) -> bool:\n queue = deque([subdir])\n while queue:\n nested_subdir = queue.pop()\n for subfile in nested_subdir.iterdir():\n if subfile.is_file() and subfile.name.endswith(\".py\"):\n return True\n elif subfile.is_dir() and _valid_package_name(subfile.name):\n queue.append(subfile)\n\n return False\n\n # If there is no top_level.txt file, we will find top level imports by\n # 1) a python file on a top-level directory\n # 2) a sub directory with __init__.py\n # following: https://github.com/pypa/setuptools/blob/d680efc8b4cd9aa388d07d3e298b870d26e9e04b/setuptools/discovery.py#L122\n top_level_imports = []\n for subdir in whlzip.iterdir():\n if subdir.is_file() and subdir.name.endswith(\".py\"):\n top_level_imports.append(subdir.name[:-3])\n elif subdir.is_dir() and _valid_package_name(subdir.name):\n if _has_python_file(subdir):\n top_level_imports.append(subdir.name)\n\n if not top_level_imports:\n logger.warning(\n f\"WARNING: failed to parse top level import name from {whlfile}.\"\n )\n return None\n\n return top_level_imports\n\n\ndef _environment_substitute_str(string: str, env: dict[str, str] | None = None) -> str:\n \"\"\"\n Substitute $(VAR) in string with the value of the environment variable VAR.\n\n Parameters\n ----------\n string\n A string\n\n env\n A dictionary of environment variables. 
If None, use os.environ.\n\n Returns\n -------\n A string with the substitutions applied.\n \"\"\"\n if env is None:\n env = dict(os.environ)\n\n for e_name, e_value in env.items():\n string = string.replace(f\"$({e_name})\", e_value)\n\n return string\n\n\ndef environment_substitute_args(\n args: dict[str, str], env: dict[str, str] | None = None\n) -> dict[str, Any]:\n \"\"\"\n Substitute $(VAR) in args with the value of the environment variable VAR.\n\n Parameters\n ----------\n args\n A dictionary of arguments\n\n env\n A dictionary of environment variables. If None, use os.environ.\n\n Returns\n -------\n A dictionary of arguments with the substitutions applied.\n \"\"\"\n if env is None:\n env = dict(os.environ)\n subbed_args = {}\n for arg, value in args.items():\n if isinstance(value, str):\n value = _environment_substitute_str(value, env)\n subbed_args[arg] = value\n return subbed_args\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]) -> Generator[None, None, None]:\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef exit_with_stdio(result: subprocess.CompletedProcess[str]) -> NoReturn:\n if result.stdout:\n logger.error(\" stdout:\")\n logger.error(textwrap.indent(result.stdout, \" \"))\n if result.stderr:\n logger.error(\" stderr:\")\n logger.error(textwrap.indent(result.stderr, \" \"))\n raise SystemExit(result.returncode)\n\n\ndef find_missing_executables(executables: list[str]) -> list[str]:\n return list(filter(lambda exe: shutil.which(exe) is None, executables))\n\n\n@contextmanager\ndef chdir(new_dir: Path) -> Generator[None, None, None]:\n orig_dir = Path.cwd()\n try:\n os.chdir(new_dir)\n yield\n finally:\n os.chdir(orig_dir)\n\n\ndef get_num_cores() -> int:\n \"\"\"\n Return the number of CPUs the current process can use.\n If the number of CPUs cannot be determined, return 1.\n \"\"\"\n import loky\n\n return loky.cpu_count()\n\n\ndef make_zip_archive(\n archive_path: Path,\n input_dir: Path,\n compression_level: int = 6,\n) -> None:\n \"\"\"Create a zip archive out of a input folder\n\n Parameters\n ----------\n archive_path\n Path to the zip file that will be created\n input_dir\n input dir to compress\n compression_level\n compression level of the resulting zip file.\n \"\"\"\n if compression_level > 0:\n compression = zipfile.ZIP_DEFLATED\n else:\n compression = zipfile.ZIP_STORED\n\n with zipfile.ZipFile(\n archive_path, \"w\", compression=compression, compresslevel=compression_level\n ) as zf:\n for file in input_dir.rglob(\"*\"):\n zf.write(file, file.relative_to(input_dir))\n\n\ndef repack_zip_archive(archive_path: Path, compression_level: int = 6) -> None:\n \"\"\"Repack zip archive with a different compression level\"\"\"\n if compression_level > 0:\n compression = zipfile.ZIP_DEFLATED\n else:\n compression = zipfile.ZIP_STORED\n\n with TemporaryDirectory() as temp_dir:\n input_path = Path(temp_dir) / archive_path.name\n shutil.move(archive_path, input_path)\n with zipfile.ZipFile(input_path) as fh_zip_in, zipfile.ZipFile(\n archive_path, \"w\", compression=compression, compresslevel=compression_level\n ) as fh_zip_out:\n for name in fh_zip_in.namelist():\n fh_zip_out.writestr(name, fh_zip_in.read(name))\n\n\ndef _get_sha256_checksum(archive: Path) -> str:\n \"\"\"Compute the sha256 checksum of a file\n\n Parameters\n ----------\n archive\n the path to the archive we wish to checksum\n\n Returns\n -------\n checksum\n 
sha256 checksum of the archive\n \"\"\"\n CHUNK_SIZE = 1 << 16\n h = hashlib.sha256()\n with open(archive, \"rb\") as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n return h.hexdigest()\n\n\ndef unpack_wheel(wheel_path: Path, target_dir: Path | None = None) -> None:\n if target_dir is None:\n target_dir = wheel_path.parent\n result = subprocess.run(\n [sys.executable, \"-m\", \"wheel\", \"unpack\", wheel_path, \"-d\", target_dir],\n check=False,\n encoding=\"utf-8\",\n )\n if result.returncode != 0:\n logger.error(f\"ERROR: Unpacking wheel {wheel_path.name} failed\")\n exit_with_stdio(result)\n\n\ndef pack_wheel(wheel_dir: Path, target_dir: Path | None = None) -> None:\n if target_dir is None:\n target_dir = wheel_dir.parent\n result = subprocess.run(\n [sys.executable, \"-m\", \"wheel\", \"pack\", wheel_dir, \"-d\", target_dir],\n check=False,\n encoding=\"utf-8\",\n )\n if result.returncode != 0:\n logger.error(f\"ERROR: Packing wheel {wheel_dir} failed\")\n exit_with_stdio(result)\n\n\n@contextmanager\ndef modify_wheel(wheel: Path) -> Iterator[Path]:\n \"\"\"Unpacks the wheel into a temp directory and yields the path to the\n unpacked directory.\n\n The body of the with block is expected to inspect the wheel contents and\n possibly change it. If the body of the \"with\" block is successful, on\n exiting the with block the wheel contents are replaced with the updated\n contents of unpacked directory. If an exception is raised, then the original\n wheel is left unchanged.\n \"\"\"\n with TemporaryDirectory() as temp_dir:\n unpack_wheel(wheel, Path(temp_dir))\n name, ver, _ = wheel.name.split(\"-\", 2)\n wheel_dir_name = f\"{name}-{ver}\"\n wheel_dir = Path(temp_dir) / wheel_dir_name\n yield wheel_dir\n wheel.unlink()\n pack_wheel(wheel_dir, wheel.parent)\n\n\ndef extract_wheel_metadata_file(wheel_path: Path, output_path: Path) -> None:\n \"\"\"Extracts the METADATA file from the given wheel and writes it to the\n output path.\n\n Raises an exception if the METADATA file does not exist.\n\n For a wheel called \"NAME-VERSION-...\", the METADATA file is expected to be\n found in a directory inside the wheel archive, whose name starts with NAME\n and ends with \".dist-info\". 
See:\n https://packaging.python.org/en/latest/specifications/binary-distribution-format/#file-contents\n \"\"\"\n with ZipFile(wheel_path, mode=\"r\") as wheel:\n pkg_name = wheel_path.name.split(\"-\", 1)[0]\n dist_info_dir = get_wheel_dist_info_dir(wheel, pkg_name)\n metadata_path = f\"{dist_info_dir}/METADATA\"\n try:\n wheel.getinfo(metadata_path).filename = output_path.name\n wheel.extract(metadata_path, output_path.parent)\n except KeyError as err:\n raise Exception(f\"METADATA file not found for {pkg_name}\") from err\n\n\ndef get_wheel_dist_info_dir(wheel: ZipFile, pkg_name: str) -> str:\n \"\"\"Returns the path of the contained .dist-info directory.\n\n Raises an Exception if the directory is not found, more than\n one is found, or it does not match the provided `pkg_name`.\n\n Adapted from:\n https://github.com/pypa/pip/blob/ea727e4d6ab598f34f97c50a22350febc1214a97/src/pip/_internal/utils/wheel.py#L38\n \"\"\"\n\n # Zip file path separators must be /\n subdirs = {name.split(\"/\", 1)[0] for name in wheel.namelist()}\n info_dirs = [subdir for subdir in subdirs if subdir.endswith(\".dist-info\")]\n\n if len(info_dirs) == 0:\n raise Exception(f\".dist-info directory not found for {pkg_name}\")\n\n if len(info_dirs) > 1:\n raise Exception(\n f\"multiple .dist-info directories found for {pkg_name}: {', '.join(info_dirs)}\"\n )\n\n (info_dir,) = info_dirs\n\n info_dir_name = canonicalize_package_name(info_dir)\n canonical_name = canonicalize_package_name(pkg_name)\n\n if not info_dir_name.startswith(canonical_name):\n raise Exception(\n f\".dist-info directory {info_dir!r} does not start with {canonical_name!r}\"\n )\n\n return info_dir\n",
"path": "pyodide-build/pyodide_build/common.py"
}
] | [
{
"content": "# Common functions shared by other modules.\n# Notes for contributors:\n# This module should not import any other modules from pyodide-build except logger to avoid circular imports.\n\nimport contextlib\nimport hashlib\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\nimport zipfile\nfrom collections import deque\nfrom collections.abc import Generator, Iterable, Iterator, Mapping\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, NoReturn\nfrom zipfile import ZipFile\n\nfrom packaging.tags import Tag\nfrom packaging.utils import canonicalize_name as canonicalize_package_name\nfrom packaging.utils import parse_wheel_filename\n\nfrom .logger import logger\n\n\ndef find_matching_wheels(\n wheel_paths: Iterable[Path], supported_tags: Iterator[Tag]\n) -> Iterator[Path]:\n \"\"\"\n Returns the sequence wheels whose tags match the Pyodide interpreter.\n\n Parameters\n ----------\n wheel_paths\n A list of paths to wheels\n supported_tags\n A list of tags that the environment supports\n\n Returns\n -------\n The subset of wheel_paths that have tags that match the Pyodide interpreter.\n \"\"\"\n wheel_paths = list(wheel_paths)\n wheel_tags_list: list[frozenset[Tag]] = []\n\n for wheel in wheel_paths:\n _, _, _, tags = parse_wheel_filename(wheel.name)\n wheel_tags_list.append(tags)\n\n for supported_tag in supported_tags:\n for wheel_path, wheel_tags in zip(wheel_paths, wheel_tags_list, strict=True):\n if supported_tag in wheel_tags:\n yield wheel_path\n\n\ndef parse_top_level_import_name(whlfile: Path) -> list[str] | None:\n \"\"\"\n Parse the top-level import names from a wheel file.\n \"\"\"\n\n if not whlfile.name.endswith(\".whl\"):\n raise RuntimeError(f\"{whlfile} is not a wheel file.\")\n\n whlzip = zipfile.Path(whlfile)\n\n def _valid_package_name(dirname: str) -> bool:\n return all([invalid_chr not in dirname for invalid_chr in \".- \"])\n\n def _has_python_file(subdir: zipfile.Path) -> bool:\n queue = deque([subdir])\n while queue:\n nested_subdir = queue.pop()\n for subfile in nested_subdir.iterdir():\n if subfile.is_file() and subfile.name.endswith(\".py\"):\n return True\n elif subfile.is_dir() and _valid_package_name(subfile.name):\n queue.append(subfile)\n\n return False\n\n # If there is no top_level.txt file, we will find top level imports by\n # 1) a python file on a top-level directory\n # 2) a sub directory with __init__.py\n # following: https://github.com/pypa/setuptools/blob/d680efc8b4cd9aa388d07d3e298b870d26e9e04b/setuptools/discovery.py#L122\n top_level_imports = []\n for subdir in whlzip.iterdir():\n if subdir.is_file() and subdir.name.endswith(\".py\"):\n top_level_imports.append(subdir.name[:-3])\n elif subdir.is_dir() and _valid_package_name(subdir.name):\n if _has_python_file(subdir):\n top_level_imports.append(subdir.name)\n\n if not top_level_imports:\n logger.warning(\n f\"WARNING: failed to parse top level import name from {whlfile}.\"\n )\n return None\n\n return top_level_imports\n\n\ndef _environment_substitute_str(string: str, env: dict[str, str] | None = None) -> str:\n \"\"\"\n Substitute $(VAR) in string with the value of the environment variable VAR.\n\n Parameters\n ----------\n string\n A string\n\n env\n A dictionary of environment variables. 
If None, use os.environ.\n\n Returns\n -------\n A string with the substitutions applied.\n \"\"\"\n if env is None:\n env = dict(os.environ)\n\n for e_name, e_value in env.items():\n string = string.replace(f\"$({e_name})\", e_value)\n\n return string\n\n\ndef environment_substitute_args(\n args: dict[str, str], env: dict[str, str] | None = None\n) -> dict[str, Any]:\n \"\"\"\n Substitute $(VAR) in args with the value of the environment variable VAR.\n\n Parameters\n ----------\n args\n A dictionary of arguments\n\n env\n A dictionary of environment variables. If None, use os.environ.\n\n Returns\n -------\n A dictionary of arguments with the substitutions applied.\n \"\"\"\n if env is None:\n env = dict(os.environ)\n subbed_args = {}\n for arg, value in args.items():\n if isinstance(value, str):\n value = _environment_substitute_str(value, env)\n subbed_args[arg] = value\n return subbed_args\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]) -> Generator[None, None, None]:\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef exit_with_stdio(result: subprocess.CompletedProcess[str]) -> NoReturn:\n if result.stdout:\n logger.error(\" stdout:\")\n logger.error(textwrap.indent(result.stdout, \" \"))\n if result.stderr:\n logger.error(\" stderr:\")\n logger.error(textwrap.indent(result.stderr, \" \"))\n raise SystemExit(result.returncode)\n\n\ndef find_missing_executables(executables: list[str]) -> list[str]:\n return list(filter(lambda exe: shutil.which(exe) is None, executables))\n\n\n@contextmanager\ndef chdir(new_dir: Path) -> Generator[None, None, None]:\n orig_dir = Path.cwd()\n try:\n os.chdir(new_dir)\n yield\n finally:\n os.chdir(orig_dir)\n\n\ndef get_num_cores() -> int:\n \"\"\"\n Return the number of CPUs the current process can use.\n If the number of CPUs cannot be determined, return 1.\n \"\"\"\n import loky\n\n return loky.cpu_count()\n\n\ndef make_zip_archive(\n archive_path: Path,\n input_dir: Path,\n compression_level: int = 6,\n) -> None:\n \"\"\"Create a zip archive out of a input folder\n\n Parameters\n ----------\n archive_path\n Path to the zip file that will be created\n input_dir\n input dir to compress\n compression_level\n compression level of the resulting zip file.\n \"\"\"\n if compression_level > 0:\n compression = zipfile.ZIP_DEFLATED\n else:\n compression = zipfile.ZIP_STORED\n\n with zipfile.ZipFile(\n archive_path, \"w\", compression=compression, compresslevel=compression_level\n ) as zf:\n for file in input_dir.rglob(\"*\"):\n zf.write(file, file.relative_to(input_dir))\n\n\ndef repack_zip_archive(archive_path: Path, compression_level: int = 6) -> None:\n \"\"\"Repack zip archive with a different compression level\"\"\"\n if compression_level > 0:\n compression = zipfile.ZIP_DEFLATED\n else:\n compression = zipfile.ZIP_STORED\n\n with TemporaryDirectory() as temp_dir:\n input_path = Path(temp_dir) / archive_path.name\n shutil.move(archive_path, input_path)\n with zipfile.ZipFile(input_path) as fh_zip_in, zipfile.ZipFile(\n archive_path, \"w\", compression=compression, compresslevel=compression_level\n ) as fh_zip_out:\n for name in fh_zip_in.namelist():\n fh_zip_out.writestr(name, fh_zip_in.read(name))\n\n\ndef _get_sha256_checksum(archive: Path) -> str:\n \"\"\"Compute the sha256 checksum of a file\n\n Parameters\n ----------\n archive\n the path to the archive we wish to checksum\n\n Returns\n -------\n checksum\n 
sha256 checksum of the archive\n \"\"\"\n CHUNK_SIZE = 1 << 16\n h = hashlib.sha256()\n with open(archive, \"rb\") as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n return h.hexdigest()\n\n\ndef unpack_wheel(wheel_path: Path, target_dir: Path | None = None) -> None:\n if target_dir is None:\n target_dir = wheel_path.parent\n result = subprocess.run(\n [sys.executable, \"-m\", \"wheel\", \"unpack\", wheel_path, \"-d\", target_dir],\n check=False,\n encoding=\"utf-8\",\n )\n if result.returncode != 0:\n logger.error(f\"ERROR: Unpacking wheel {wheel_path.name} failed\")\n exit_with_stdio(result)\n\n\ndef pack_wheel(wheel_dir: Path, target_dir: Path | None = None) -> None:\n if target_dir is None:\n target_dir = wheel_dir.parent\n result = subprocess.run(\n [sys.executable, \"-m\", \"wheel\", \"pack\", wheel_dir, \"-d\", target_dir],\n check=False,\n encoding=\"utf-8\",\n )\n if result.returncode != 0:\n logger.error(f\"ERROR: Packing wheel {wheel_dir} failed\")\n exit_with_stdio(result)\n\n\n@contextmanager\ndef modify_wheel(wheel: Path) -> Iterator[Path]:\n \"\"\"Unpacks the wheel into a temp directory and yields the path to the\n unpacked directory.\n\n The body of the with block is expected to inspect the wheel contents and\n possibly change it. If the body of the \"with\" block is successful, on\n exiting the with block the wheel contents are replaced with the updated\n contents of unpacked directory. If an exception is raised, then the original\n wheel is left unchanged.\n \"\"\"\n with TemporaryDirectory() as temp_dir:\n unpack_wheel(wheel, Path(temp_dir))\n name, ver, _ = wheel.name.split(\"-\", 2)\n wheel_dir_name = f\"{name}-{ver}\"\n wheel_dir = Path(temp_dir) / wheel_dir_name\n yield wheel_dir\n wheel.unlink()\n pack_wheel(wheel_dir, wheel.parent)\n\n\ndef extract_wheel_metadata_file(wheel_path: Path, output_path: Path) -> None:\n \"\"\"Extracts the METADATA file from the given wheel and writes it to the\n output path.\n\n Raises an exception if the METADATA file does not exist.\n\n For a wheel called \"NAME-VERSION-...\", the METADATA file is expected to be\n found in a directory inside the wheel archive, whose name starts with NAME\n and ends with \".dist-info\". 
See:\n https://packaging.python.org/en/latest/specifications/binary-distribution-format/#file-contents\n \"\"\"\n with ZipFile(wheel_path, mode=\"r\") as wheel:\n pkg_name = wheel_path.name.split(\"-\", 1)[0]\n dist_info_dir = get_wheel_dist_info_dir(wheel, pkg_name)\n metadata_path = f\"{dist_info_dir}/METADATA\"\n try:\n wheel.getinfo(metadata_path).filename = output_path.name\n wheel.extract(metadata_path, output_path.parent)\n except KeyError as err:\n raise Exception(f\"METADATA file not found for {pkg_name}\") from err\n\n\ndef get_wheel_dist_info_dir(wheel: ZipFile, pkg_name: str) -> str:\n \"\"\"Returns the path of the contained .dist-info directory.\n\n Raises an Exception if the directory is not found, more than\n one is found, or it does not match the provided `pkg_name`.\n\n Adapted from:\n https://github.com/pypa/pip/blob/ea727e4d6ab598f34f97c50a22350febc1214a97/src/pip/_internal/utils/wheel.py#L38\n \"\"\"\n\n # Zip file path separators must be /\n subdirs = {name.split(\"/\", 1)[0] for name in wheel.namelist()}\n info_dirs = [subdir for subdir in subdirs if subdir.endswith(\".dist-info\")]\n\n if len(info_dirs) == 0:\n raise Exception(f\".dist-info directory not found for {pkg_name}\")\n\n if len(info_dirs) > 1:\n raise Exception(\n f\"multiple .dist-info directories found for {pkg_name}: {', '.join(info_dirs)}\"\n )\n\n (info_dir,) = info_dirs\n\n info_dir_name = canonicalize_package_name(info_dir)\n canonical_name = canonicalize_package_name(pkg_name)\n\n if not info_dir_name.startswith(canonical_name):\n raise Exception(\n f\".dist-info directory {info_dir!r} does not start with {canonical_name!r}\"\n )\n\n return info_dir\n\n\ndef check_wasm_magic_number(file_path: Path) -> bool:\n WASM_BINARY_MAGIC = b\"\\0asm\"\n with file_path.open(mode=\"rb\") as file:\n return file.read(4) == WASM_BINARY_MAGIC\n",
"path": "pyodide-build/pyodide_build/common.py"
}
] | diff --git a/docs/project/changelog.md b/docs/project/changelog.md
index e7f1b7e0c71..8fa3bc9267a 100644
--- a/docs/project/changelog.md
+++ b/docs/project/changelog.md
@@ -15,6 +15,10 @@ myst:
## Unreleased
+- {{ Enhancement }} Adds `check_wasm_magic_number` function to validate `.so`
+ files for WebAssembly (WASM) compatibility.
+ {pr}`4018`
+
- {{ Enhancement }} Add an example for `loadPyodide` and `pyodide.runPython
{pr}`4012`, {pr}`4011`
diff --git a/pyodide-build/pyodide_build/common.py b/pyodide-build/pyodide_build/common.py
index 534b559204f..f638a49159d 100644
--- a/pyodide-build/pyodide_build/common.py
+++ b/pyodide-build/pyodide_build/common.py
@@ -371,3 +371,9 @@ def get_wheel_dist_info_dir(wheel: ZipFile, pkg_name: str) -> str:
)
return info_dir
+
+
+def check_wasm_magic_number(file_path: Path) -> bool:
+ WASM_BINARY_MAGIC = b"\0asm"
+ with file_path.open(mode="rb") as file:
+ return file.read(4) == WASM_BINARY_MAGIC
diff --git a/pyodide-build/pyodide_build/tests/test_common.py b/pyodide-build/pyodide_build/tests/test_common.py
index 2dd8fffddbc..5a512d2baf2 100644
--- a/pyodide-build/pyodide_build/tests/test_common.py
+++ b/pyodide-build/pyodide_build/tests/test_common.py
@@ -3,6 +3,7 @@
import pytest
from pyodide_build.common import (
+ check_wasm_magic_number,
environment_substitute_args,
extract_wheel_metadata_file,
find_missing_executables,
@@ -152,3 +153,14 @@ def test_extract_wheel_metadata_file(tmp_path):
with pytest.raises(Exception):
extract_wheel_metadata_file(input_path_empty, output_path_empty)
+
+
+def test_check_wasm_magic_number(tmp_path):
+ wasm_magic_number = b"\x00asm\x01\x00\x00\x00\x00\x11"
+ not_wasm_magic_number = b"\x7fELF\x02\x01\x01\x00\x00\x00"
+
+ (tmp_path / "goodfile.so").write_bytes(wasm_magic_number)
+ assert check_wasm_magic_number(tmp_path / "goodfile.so") is True
+
+ (tmp_path / "badfile.so").write_bytes(not_wasm_magic_number)
+ assert check_wasm_magic_number(tmp_path / "badfile.so") is False
|
ivy-llc__ivy-13218 | iscomplex
Marked as closed in #10862, yet it remains unimplemented.
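For reference, NumPy (and `jax.numpy`) define `iscomplex` element-wise as "has a nonzero imaginary part", i.e. the logical negation of `isreal` — which is exactly how the fix below implements it in the frontend (`ivy.bitwise_invert(ivy.isreal(x))`). A quick sketch of the expected behaviour:

```
import numpy as np

x = np.array([1 + 1j, 1 + 0j, 4.5, 3, 2, 2j])
print(np.iscomplex(x))  # [ True False False False False  True]
print(~np.isreal(x))    # identical result: iscomplex == NOT isreal
```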
| [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.functional.frontends.jax.numpy import (\n promote_types_of_jax_inputs as promote_jax_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef array_equal(a1, a2, equal_nan: bool) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n if ivy.shape(a1) != ivy.shape(a2):\n return False\n eq = ivy.asarray(a1 == a2)\n if equal_nan:\n eq = ivy.logical_or(eq, ivy.logical_and(ivy.isnan(a1), ivy.isnan(a2)))\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef array_equiv(a1, a2) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n try:\n eq = ivy.equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef isneginf(x, out=None):\n return ivy.isneginf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef isposinf(x, out=None):\n return ivy.isposinf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.not_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isnan(x, out=None):\n return ivy.isnan(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef all(a, axis=None, out=None, keepdims=False, *, where=False):\n return ivy.all(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef any(a, axis=None, out=None, keepdims=False, *, where=None):\n # TODO: Out not supported\n ret = ivy.any(a, axis=axis, keepdims=keepdims)\n if ivy.is_array(where):\n where = ivy.array(where, dtype=ivy.bool)\n ret = ivy.where(where, ret, ivy.default(None, ivy.zeros_like(ret)))\n return ret\n\n\nalltrue = all\n\n\nsometrue = any\n\n\n@to_ivy_arrays_and_back\n# known issue in jnp's documentation of arguments\n# https://github.com/google/jax/issues/9119\ndef logical_and(x1, x2, /):\n if x1.dtype == \"complex128\" or x2.dtype == \"complex128\":\n x1 = ivy.astype(x1, ivy.complex128)\n x2 = ivy.astype(x2, ivy.complex128)\n else:\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef invert(x, /):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef isfinite(x, /):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef isinf(x, /):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef isclose(a, b, 
rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, /):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isscalar(x, /):\n return ivy.isscalar(x)\n\n\n@to_ivy_arrays_and_back\ndef left_shift(x1, x2):\n return ivy.isscalar(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isreal(x, out=None):\n return ivy.isreal(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef right_shift(x1, x2, /):\n return ivy.bitwise_right_shift(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isrealobj(x: any):\n return not ivy.is_complex_dtype(ivy.dtype(x))\n\n",
"path": "ivy/functional/frontends/jax/numpy/logic.py"
}
] | [
{
"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.functional.frontends.jax.numpy import (\n promote_types_of_jax_inputs as promote_jax_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef array_equal(a1, a2, equal_nan: bool) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n if ivy.shape(a1) != ivy.shape(a2):\n return False\n eq = ivy.asarray(a1 == a2)\n if equal_nan:\n eq = ivy.logical_or(eq, ivy.logical_and(ivy.isnan(a1), ivy.isnan(a2)))\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef array_equiv(a1, a2) -> bool:\n a1, a2 = promote_jax_arrays(a1, a2)\n try:\n eq = ivy.equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return ivy.all(eq)\n\n\n@to_ivy_arrays_and_back\ndef isneginf(x, out=None):\n return ivy.isneginf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef isposinf(x, out=None):\n return ivy.isposinf(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef not_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.not_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef less_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.less_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef greater_equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.greater_equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isnan(x, out=None):\n return ivy.isnan(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef equal(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.equal(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef all(a, axis=None, out=None, keepdims=False, *, where=False):\n return ivy.all(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_and(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_not(x):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_or(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef bitwise_xor(x1, x2):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.bitwise_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef any(a, axis=None, out=None, keepdims=False, *, where=None):\n # TODO: Out not supported\n ret = ivy.any(a, axis=axis, keepdims=keepdims)\n if ivy.is_array(where):\n where = ivy.array(where, dtype=ivy.bool)\n ret = ivy.where(where, ret, ivy.default(None, ivy.zeros_like(ret)))\n return ret\n\n\nalltrue = all\n\n\nsometrue = any\n\n\n@to_ivy_arrays_and_back\n# known issue in jnp's documentation of arguments\n# https://github.com/google/jax/issues/9119\ndef logical_and(x1, x2, /):\n if x1.dtype == \"complex128\" or x2.dtype == \"complex128\":\n x1 = ivy.astype(x1, ivy.complex128)\n x2 = ivy.astype(x2, ivy.complex128)\n else:\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_and(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef invert(x, /):\n return ivy.bitwise_invert(x)\n\n\n@to_ivy_arrays_and_back\ndef isfinite(x, /):\n return ivy.isfinite(x)\n\n\n@to_ivy_arrays_and_back\ndef isinf(x, /):\n return ivy.isinf(x)\n\n\n@to_ivy_arrays_and_back\ndef isclose(a, b, 
rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = promote_jax_arrays(a, b)\n return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n\n@to_ivy_arrays_and_back\ndef logical_not(x, /):\n return ivy.logical_not(x)\n\n\n@to_ivy_arrays_and_back\ndef logical_or(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_or(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isscalar(x, /):\n return ivy.isscalar(x)\n\n\n@to_ivy_arrays_and_back\ndef left_shift(x1, x2):\n return ivy.isscalar(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isreal(x, out=None):\n return ivy.isreal(x, out=out)\n\n\n@to_ivy_arrays_and_back\ndef logical_xor(x1, x2, /):\n x1, x2 = promote_jax_arrays(x1, x2)\n return ivy.logical_xor(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef right_shift(x1, x2, /):\n return ivy.bitwise_right_shift(x1, x2)\n\n\n@to_ivy_arrays_and_back\ndef isrealobj(x: any):\n return not ivy.is_complex_dtype(ivy.dtype(x))\n\n\n@to_ivy_arrays_and_back\ndef iscomplex(x: any):\n return ivy.bitwise_invert(ivy.isreal(x))\n",
"path": "ivy/functional/frontends/jax/numpy/logic.py"
}
] | diff --git a/ivy/functional/frontends/jax/numpy/logic.py b/ivy/functional/frontends/jax/numpy/logic.py
index e2c82f2458170..ff1b7db94a9f7 100644
--- a/ivy/functional/frontends/jax/numpy/logic.py
+++ b/ivy/functional/frontends/jax/numpy/logic.py
@@ -205,3 +205,7 @@ def right_shift(x1, x2, /):
def isrealobj(x: any):
return not ivy.is_complex_dtype(ivy.dtype(x))
+
+@to_ivy_arrays_and_back
+def iscomplex(x: any):
+ return ivy.bitwise_invert(ivy.isreal(x))
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
index d2cf5b2710fc9..e7d73071a572d 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_jax_numpy_logic.py
@@ -887,6 +887,33 @@ def test_jax_numpy_right_shift(
)
+# iscomplex
+@handle_frontend_test(
+ fn_tree="jax.numpy.iscomplex",
+ dtype_and_x=helpers.dtype_and_values(
+ available_dtypes=helpers.get_dtypes("real_and_complex"), min_num_dims=1
+ ),
+ test_with_out=st.just(False),
+)
+def test_jax_numpy_iscomplex(
+ dtype_and_x,
+ frontend,
+ on_device,
+ *,
+ fn_tree,
+ test_flags,
+):
+ input_dtype, x = dtype_and_x
+ helpers.test_frontend_function(
+ input_dtypes=input_dtype,
+ frontend=frontend,
+ test_flags=test_flags,
+ fn_tree=fn_tree,
+ on_device=on_device,
+ x=x[0],
+ )
+
+
@handle_frontend_test(
fn_tree="jax.numpy.isrealobj",
dtype_and_x=helpers.dtype_and_values(
|
cupy__cupy-2615 | cupy.where fails for complex arrays
The function cupy.where does not work for complex arrays (numpy.where does):
```
import cupy as cp
a = cp.arange(5).astype(cp.complex128)
b = cp.arange(5).astype(cp.complex128)
c = cp.where(a==b,a,b)
```
fails with the error message
> TypeError: Wrong type ((<class 'numpy.bool_'>, <class 'numpy.complex128'>, <class 'numpy.complex128'>)) of arguments for cupy_where
For `cp.float64`, everything works fine.
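Until `cupy_where` gains complex type signatures (the eventual fix registers `'?FF->F'` and `'?DD->D'`), one workaround is to select on the real and imaginary parts separately, which only exercises the existing float paths:

```
import cupy as cp

a = cp.arange(5).astype(cp.complex128)
b = (cp.arange(5) + 1).astype(cp.complex128)
cond = a == b

# Assemble the complex result from two float64 selections.
c = cp.where(cond, a.real, b.real) + 1j * cp.where(cond, a.imag, b.imag)
```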
CuPy Version : 6.4.0
CUDA Root : /usr
CUDA Build Version : 8000
CUDA Driver Version : 10020
CUDA Runtime Version : 8000
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2213
NCCL Runtime Version : (unknown)
| [
{
"content": "from cupy import core\nfrom cupy.core import fusion\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\ndef nonzero(a):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of a,\n containing the indices of the non-zero elements in that dimension.\n\n Args:\n a (cupy.ndarray): array\n\n Returns:\n tuple of arrays: Indices of elements that are non-zero.\n\n .. seealso:: :func:`numpy.nonzero`\n\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.nonzero()\n\n\ndef flatnonzero(a):\n \"\"\"Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to a.ravel().nonzero()[0].\n\n Args:\n a (cupy.ndarray): input array\n\n Returns:\n cupy.ndarray: Output array,\n containing the indices of the elements of a.ravel() that are non-zero.\n\n .. seealso:: :func:`numpy.flatnonzero`\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.ravel().nonzero()[0]\n\n\n_where_ufunc = core.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n '?dd->d'),\n 'out0 = in0 ? in1 : in2')\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n If only condition is given, return ``condition.nonzero()``.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``. If only\n ``condition`` is given, return the tuple ``condition.nonzero()``,\n the indices where ``condition`` is True.\n\n .. 
seealso:: :func:`numpy.where`\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError('Must provide both \\'x\\' and \\'y\\' or neither.')\n if missing == 2:\n return nonzero(condition)\n\n if fusion._is_fusing():\n return fusion._call_ufunc(_where_ufunc, condition, x, y)\n return _where_ufunc(condition.astype('?'), x, y)\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n",
"path": "cupy/sorting/search.py"
}
] | [
{
"content": "from cupy import core\nfrom cupy.core import fusion\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\ndef nonzero(a):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of a,\n containing the indices of the non-zero elements in that dimension.\n\n Args:\n a (cupy.ndarray): array\n\n Returns:\n tuple of arrays: Indices of elements that are non-zero.\n\n .. seealso:: :func:`numpy.nonzero`\n\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.nonzero()\n\n\ndef flatnonzero(a):\n \"\"\"Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to a.ravel().nonzero()[0].\n\n Args:\n a (cupy.ndarray): input array\n\n Returns:\n cupy.ndarray: Output array,\n containing the indices of the elements of a.ravel() that are non-zero.\n\n .. seealso:: :func:`numpy.flatnonzero`\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.ravel().nonzero()[0]\n\n\n_where_ufunc = core.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n '?dd->d', '?FF->F', '?DD->D'),\n 'out0 = in0 ? in1 : in2')\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n If only condition is given, return ``condition.nonzero()``.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``. If only\n ``condition`` is given, return the tuple ``condition.nonzero()``,\n the indices where ``condition`` is True.\n\n .. 
seealso:: :func:`numpy.where`\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError('Must provide both \\'x\\' and \\'y\\' or neither.')\n if missing == 2:\n return nonzero(condition)\n\n if fusion._is_fusing():\n return fusion._call_ufunc(_where_ufunc, condition, x, y)\n return _where_ufunc(condition.astype('?'), x, y)\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n",
"path": "cupy/sorting/search.py"
}
] | diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py
index 4abc58a5377..cc8d395f1b0 100644
--- a/cupy/sorting/search.py
+++ b/cupy/sorting/search.py
@@ -100,7 +100,7 @@ def flatnonzero(a):
# works).
# See issue #551.
'?hd->d', '?Hd->d',
- '?dd->d'),
+ '?dd->d', '?FF->F', '?DD->D'),
'out0 = in0 ? in1 : in2')
diff --git a/tests/cupy_tests/sorting_tests/test_search.py b/tests/cupy_tests/sorting_tests/test_search.py
index 9bdd0744013..b27971ed771 100644
--- a/tests/cupy_tests/sorting_tests/test_search.py
+++ b/tests/cupy_tests/sorting_tests/test_search.py
@@ -163,7 +163,7 @@ def test_argmin_zero_size_axis1(self, xp, dtype):
class TestWhereTwoArrays(unittest.TestCase):
@testing.for_all_dtypes_combination(
- names=['cond_type', 'x_type', 'y_type'], no_complex=True)
+ names=['cond_type', 'x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
@@ -184,7 +184,7 @@ def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
@testing.gpu
class TestWhereCond(unittest.TestCase):
- @testing.for_all_dtypes(no_complex=True)
+ @testing.for_all_dtypes()
@testing.numpy_cupy_array_list_equal()
def test_where_cond(self, xp, dtype):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
|
lutris__lutris-2561 | Can't use any installers
When I try to install Epic Games Store, for example https://lutris.net/games/epic-games-store/
the installer eventually fails on this:
```
2019-12-29 10:35:48,329: Error while completing task <bound method CommandsMixin.execute of <lutris.installer.interpreter.ScriptInterpreter object at 0x7f3f726e59a0>>: sequence item 1: expected str instance, list found
<class 'TypeError'> sequence item 1: expected str instance, list found
File "/usr/lib/python3.8/site-packages/lutris/util/jobs.py", line 30, in target
result = self.function(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/lutris/installer/commands.py", line 152, in execute
command.start()
File "/usr/lib/python3.8/site-packages/lutris/command.py", line 116, in start
logger.debug("Running %s", " ".join(self.wrapper_command))
```
It seems to affect all installers though, not just this one.
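The traceback points at `" ".join(self.wrapper_command)`: when no title is given, `MonitoredCommand` falls back to the whole command *list* instead of a string, so the join hits a nested list. A minimal sketch of the failure and of the one-token fix from the diff below (names simplified from `lutris/command.py`):

```python
WRAPPER_SCRIPT = "lutris-wrapper"
command = ["wine", "installer.exe"]

# Buggy fallback: the title is the whole list, so wrapper_command nests a list.
title = command
wrapper_command = [WRAPPER_SCRIPT, title, "0", "0"] + command
# " ".join(wrapper_command)
# -> TypeError: sequence item 1: expected str instance, list found

# Fixed fallback: use the first token of the command as the title.
title = command[0]
wrapper_command = [WRAPPER_SCRIPT, title, "0", "0"] + command
print(" ".join(wrapper_command))
# lutris-wrapper wine 0 0 wine installer.exe
```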
| [
{
"content": "\"\"\"Threading module, used to launch games while monitoring them.\"\"\"\n\nimport io\nimport os\nimport sys\nimport fcntl\nimport shlex\nimport subprocess\nimport contextlib\nfrom textwrap import dedent\n\nfrom gi.repository import GLib\n\nfrom lutris import settings\nfrom lutris import runtime\nfrom lutris.util.log import logger\nfrom lutris.util import system\n\nWRAPPER_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"lutris-wrapper\")\n\n\nclass MonitoredCommand:\n \"\"\"Exexcutes a commmand while keeping track of its state\"\"\"\n\n fallback_cwd = \"/tmp\"\n\n def __init__(\n self,\n command,\n runner=None,\n env=None,\n term=None,\n cwd=None,\n include_processes=None,\n exclude_processes=None,\n log_buffer=None,\n title=None,\n ): # pylint: disable=too-many-arguments\n self.ready_state = True\n self.env = self.get_environment(env)\n\n self.command = command\n self.runner = runner\n self.stop_func = lambda: True\n self.game_process = None\n self.prevent_on_stop = False\n self.return_code = None\n self.terminal = system.find_executable(term)\n self.is_running = True\n self.error = None\n self.log_handlers = [\n self.log_handler_stdout,\n self.log_handler_console_output,\n ]\n self.set_log_buffer(log_buffer)\n self.stdout_monitor = None\n self.include_processes = include_processes or []\n self.exclude_processes = exclude_processes or []\n\n self.cwd = self.get_cwd(cwd)\n\n self._stdout = io.StringIO()\n\n self._title = title if title else command\n\n @property\n def stdout(self):\n return self._stdout.getvalue()\n\n @property\n def wrapper_command(self):\n \"\"\"Return launch arguments for the wrapper script\"\"\"\n\n return [\n WRAPPER_SCRIPT,\n self._title,\n str(len(self.include_processes)),\n str(len(self.exclude_processes)),\n ] + self.include_processes + self.exclude_processes + self.command\n\n def set_log_buffer(self, log_buffer):\n \"\"\"Attach a TextBuffer to this command enables the buffer handler\"\"\"\n if not log_buffer:\n return\n self.log_buffer = log_buffer\n if self.log_handler_buffer not in self.log_handlers:\n self.log_handlers.append(self.log_handler_buffer)\n\n def get_cwd(self, cwd):\n \"\"\"Return the current working dir of the game\"\"\"\n if not cwd:\n cwd = self.runner.working_dir if self.runner else None\n return os.path.expanduser(cwd or \"~\")\n\n @staticmethod\n def get_environment(user_env):\n \"\"\"Process the user provided environment variables for use as self.env\"\"\"\n env = user_env or {}\n # not clear why this needs to be added, the path is already added in\n # the wrappper script.\n env['PYTHONPATH'] = ':'.join(sys.path)\n # Drop bad values of environment keys, those will confuse the Python\n # interpreter.\n return {\n key: value for key, value in env.items() if \"=\" not in key\n }\n\n def get_child_environment(self):\n \"\"\"Returns the calculated environment for the child process.\"\"\"\n env = os.environ.copy()\n env.update(self.env)\n return env\n\n def start(self):\n \"\"\"Run the thread.\"\"\"\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\n for key, value in self.env.items():\n logger.debug(\"ENV: %s=\\\"%s\\\"\", key, value)\n\n if self.terminal:\n self.game_process = self.run_in_terminal()\n else:\n env = self.get_child_environment()\n self.game_process = self.execute_process(self.wrapper_command, env)\n\n if not self.game_process:\n logger.warning(\"No game process available\")\n return\n\n GLib.child_watch_add(self.game_process.pid, self.on_stop)\n\n # make stdout nonblocking.\n 
fileno = self.game_process.stdout.fileno()\n fcntl.fcntl(\n fileno,\n fcntl.F_SETFL,\n fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK\n )\n\n self.stdout_monitor = GLib.io_add_watch(\n self.game_process.stdout,\n GLib.IO_IN | GLib.IO_HUP,\n self.on_stdout_output,\n )\n\n def log_handler_stdout(self, line):\n \"\"\"Add the line to this command's stdout attribute\"\"\"\n self._stdout.write(line)\n\n def log_handler_buffer(self, line):\n \"\"\"Add the line to the associated LogBuffer object\"\"\"\n self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)\n\n def log_handler_console_output(self, line): # pylint: disable=no-self-use\n \"\"\"Print the line to stdout\"\"\"\n with contextlib.suppress(BlockingIOError):\n sys.stdout.write(line)\n sys.stdout.flush()\n\n def on_stop(self, _pid, returncode):\n \"\"\"Callback registered on game process termination\"\"\"\n if self.prevent_on_stop: # stop() already in progress\n return False\n\n logger.debug(\"The process has terminated with code %s\", returncode)\n self.is_running = False\n self.return_code = returncode\n\n resume_stop = self.stop()\n if not resume_stop:\n logger.info(\"Full shutdown prevented\")\n return False\n\n return False\n\n def on_stdout_output(self, stdout, condition):\n \"\"\"Called by the stdout monitor to dispatch output to log handlers\"\"\"\n if condition == GLib.IO_HUP:\n self.stdout_monitor = None\n return False\n if not self.is_running:\n return False\n try:\n line = stdout.read(262144).decode(\"utf-8\", errors=\"ignore\")\n except ValueError:\n # file_desc might be closed\n return True\n if \"winemenubuilder.exe\" in line:\n return True\n for log_handler in self.log_handlers:\n log_handler(line)\n return True\n\n def run_in_terminal(self):\n \"\"\"Write command in a script file and run it.\n\n Running it from a file is likely the only way to set env vars only\n for the command (not for the terminal app).\n It's also the only reliable way to keep the term open when the\n game is quit.\n \"\"\"\n script_path = os.path.join(settings.CACHE_DIR, \"run_in_term.sh\")\n exported_environment = \"\\n\".join(\n 'export %s=\"%s\" ' % (key, value)\n for key, value in self.env.items()\n )\n command = \" \".join(['\"%s\"' % token for token in self.wrapper_command])\n with open(script_path, \"w\") as script_file:\n script_file.write(dedent(\n \"\"\"#!/bin/sh\n cd \"%s\"\n %s\n exec %s\n \"\"\" % (self.cwd, exported_environment, command)\n ))\n os.chmod(script_path, 0o744)\n return self.execute_process([self.terminal, \"-e\", script_path])\n\n def execute_process(self, command, env=None):\n \"\"\"Execute and return a subprocess\"\"\"\n if self.cwd and not system.path_exists(self.cwd):\n try:\n os.makedirs(self.cwd)\n except OSError:\n logger.error(\"Failed to create working directory, falling back to %s\",\n self.fallback_cwd)\n self.cwd = \"/tmp\"\n try:\n\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self.cwd,\n env=env,\n )\n except OSError as ex:\n logger.exception(\"Failed to execute %s: %s\", \" \".join(command), ex)\n self.error = ex.strerror\n\n def stop(self):\n \"\"\"Stops the current game process and cleans up the instance\"\"\"\n # Prevent stop() being called again by the process exiting\n self.prevent_on_stop = True\n\n try:\n self.game_process.terminate()\n except ProcessLookupError: # process already dead.\n logger.debug(\"Management process looks dead already.\")\n\n if hasattr(self, \"stop_func\"):\n resume_stop = self.stop_func()\n if not resume_stop:\n 
return False\n\n if self.stdout_monitor:\n logger.debug(\"Detaching logger\")\n GLib.source_remove(self.stdout_monitor)\n self.stdout_monitor = None\n else:\n logger.debug(\"logger already detached\")\n\n self.is_running = False\n self.ready_state = False\n return True\n\n\ndef exec_command(command):\n \"\"\"Execute arbitrary command in a MonitoredCommand\n\n Used by the --exec command line flag.\n \"\"\"\n command = MonitoredCommand(shlex.split(command), env=runtime.get_env())\n command.start()\n return command\n",
"path": "lutris/command.py"
}
] | [
{
"content": "\"\"\"Threading module, used to launch games while monitoring them.\"\"\"\n\nimport io\nimport os\nimport sys\nimport fcntl\nimport shlex\nimport subprocess\nimport contextlib\nfrom textwrap import dedent\n\nfrom gi.repository import GLib\n\nfrom lutris import settings\nfrom lutris import runtime\nfrom lutris.util.log import logger\nfrom lutris.util import system\n\nWRAPPER_SCRIPT = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"lutris-wrapper\")\n\n\nclass MonitoredCommand:\n \"\"\"Exexcutes a commmand while keeping track of its state\"\"\"\n\n fallback_cwd = \"/tmp\"\n\n def __init__(\n self,\n command,\n runner=None,\n env=None,\n term=None,\n cwd=None,\n include_processes=None,\n exclude_processes=None,\n log_buffer=None,\n title=None,\n ): # pylint: disable=too-many-arguments\n self.ready_state = True\n self.env = self.get_environment(env)\n\n self.command = command\n self.runner = runner\n self.stop_func = lambda: True\n self.game_process = None\n self.prevent_on_stop = False\n self.return_code = None\n self.terminal = system.find_executable(term)\n self.is_running = True\n self.error = None\n self.log_handlers = [\n self.log_handler_stdout,\n self.log_handler_console_output,\n ]\n self.set_log_buffer(log_buffer)\n self.stdout_monitor = None\n self.include_processes = include_processes or []\n self.exclude_processes = exclude_processes or []\n\n self.cwd = self.get_cwd(cwd)\n\n self._stdout = io.StringIO()\n\n self._title = title if title else command[0]\n\n @property\n def stdout(self):\n return self._stdout.getvalue()\n\n @property\n def wrapper_command(self):\n \"\"\"Return launch arguments for the wrapper script\"\"\"\n\n return [\n WRAPPER_SCRIPT,\n self._title,\n str(len(self.include_processes)),\n str(len(self.exclude_processes)),\n ] + self.include_processes + self.exclude_processes + self.command\n\n def set_log_buffer(self, log_buffer):\n \"\"\"Attach a TextBuffer to this command enables the buffer handler\"\"\"\n if not log_buffer:\n return\n self.log_buffer = log_buffer\n if self.log_handler_buffer not in self.log_handlers:\n self.log_handlers.append(self.log_handler_buffer)\n\n def get_cwd(self, cwd):\n \"\"\"Return the current working dir of the game\"\"\"\n if not cwd:\n cwd = self.runner.working_dir if self.runner else None\n return os.path.expanduser(cwd or \"~\")\n\n @staticmethod\n def get_environment(user_env):\n \"\"\"Process the user provided environment variables for use as self.env\"\"\"\n env = user_env or {}\n # not clear why this needs to be added, the path is already added in\n # the wrappper script.\n env['PYTHONPATH'] = ':'.join(sys.path)\n # Drop bad values of environment keys, those will confuse the Python\n # interpreter.\n return {\n key: value for key, value in env.items() if \"=\" not in key\n }\n\n def get_child_environment(self):\n \"\"\"Returns the calculated environment for the child process.\"\"\"\n env = os.environ.copy()\n env.update(self.env)\n return env\n\n def start(self):\n \"\"\"Run the thread.\"\"\"\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\n for key, value in self.env.items():\n logger.debug(\"ENV: %s=\\\"%s\\\"\", key, value)\n\n if self.terminal:\n self.game_process = self.run_in_terminal()\n else:\n env = self.get_child_environment()\n self.game_process = self.execute_process(self.wrapper_command, env)\n\n if not self.game_process:\n logger.warning(\"No game process available\")\n return\n\n GLib.child_watch_add(self.game_process.pid, self.on_stop)\n\n # make stdout nonblocking.\n 
fileno = self.game_process.stdout.fileno()\n fcntl.fcntl(\n fileno,\n fcntl.F_SETFL,\n fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK\n )\n\n self.stdout_monitor = GLib.io_add_watch(\n self.game_process.stdout,\n GLib.IO_IN | GLib.IO_HUP,\n self.on_stdout_output,\n )\n\n def log_handler_stdout(self, line):\n \"\"\"Add the line to this command's stdout attribute\"\"\"\n self._stdout.write(line)\n\n def log_handler_buffer(self, line):\n \"\"\"Add the line to the associated LogBuffer object\"\"\"\n self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)\n\n def log_handler_console_output(self, line): # pylint: disable=no-self-use\n \"\"\"Print the line to stdout\"\"\"\n with contextlib.suppress(BlockingIOError):\n sys.stdout.write(line)\n sys.stdout.flush()\n\n def on_stop(self, _pid, returncode):\n \"\"\"Callback registered on game process termination\"\"\"\n if self.prevent_on_stop: # stop() already in progress\n return False\n\n logger.debug(\"The process has terminated with code %s\", returncode)\n self.is_running = False\n self.return_code = returncode\n\n resume_stop = self.stop()\n if not resume_stop:\n logger.info(\"Full shutdown prevented\")\n return False\n\n return False\n\n def on_stdout_output(self, stdout, condition):\n \"\"\"Called by the stdout monitor to dispatch output to log handlers\"\"\"\n if condition == GLib.IO_HUP:\n self.stdout_monitor = None\n return False\n if not self.is_running:\n return False\n try:\n line = stdout.read(262144).decode(\"utf-8\", errors=\"ignore\")\n except ValueError:\n # file_desc might be closed\n return True\n if \"winemenubuilder.exe\" in line:\n return True\n for log_handler in self.log_handlers:\n log_handler(line)\n return True\n\n def run_in_terminal(self):\n \"\"\"Write command in a script file and run it.\n\n Running it from a file is likely the only way to set env vars only\n for the command (not for the terminal app).\n It's also the only reliable way to keep the term open when the\n game is quit.\n \"\"\"\n script_path = os.path.join(settings.CACHE_DIR, \"run_in_term.sh\")\n exported_environment = \"\\n\".join(\n 'export %s=\"%s\" ' % (key, value)\n for key, value in self.env.items()\n )\n command = \" \".join(['\"%s\"' % token for token in self.wrapper_command])\n with open(script_path, \"w\") as script_file:\n script_file.write(dedent(\n \"\"\"#!/bin/sh\n cd \"%s\"\n %s\n exec %s\n \"\"\" % (self.cwd, exported_environment, command)\n ))\n os.chmod(script_path, 0o744)\n return self.execute_process([self.terminal, \"-e\", script_path])\n\n def execute_process(self, command, env=None):\n \"\"\"Execute and return a subprocess\"\"\"\n if self.cwd and not system.path_exists(self.cwd):\n try:\n os.makedirs(self.cwd)\n except OSError:\n logger.error(\"Failed to create working directory, falling back to %s\",\n self.fallback_cwd)\n self.cwd = \"/tmp\"\n try:\n\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self.cwd,\n env=env,\n )\n except OSError as ex:\n logger.exception(\"Failed to execute %s: %s\", \" \".join(command), ex)\n self.error = ex.strerror\n\n def stop(self):\n \"\"\"Stops the current game process and cleans up the instance\"\"\"\n # Prevent stop() being called again by the process exiting\n self.prevent_on_stop = True\n\n try:\n self.game_process.terminate()\n except ProcessLookupError: # process already dead.\n logger.debug(\"Management process looks dead already.\")\n\n if hasattr(self, \"stop_func\"):\n resume_stop = self.stop_func()\n if not resume_stop:\n 
return False\n\n if self.stdout_monitor:\n logger.debug(\"Detaching logger\")\n GLib.source_remove(self.stdout_monitor)\n self.stdout_monitor = None\n else:\n logger.debug(\"logger already detached\")\n\n self.is_running = False\n self.ready_state = False\n return True\n\n\ndef exec_command(command):\n \"\"\"Execute arbitrary command in a MonitoredCommand\n\n Used by the --exec command line flag.\n \"\"\"\n command = MonitoredCommand(shlex.split(command), env=runtime.get_env())\n command.start()\n return command\n",
"path": "lutris/command.py"
}
] | diff --git a/lutris/command.py b/lutris/command.py
index bba04808c9..24ad8a9bfa 100644
--- a/lutris/command.py
+++ b/lutris/command.py
@@ -61,7 +61,7 @@ def __init__(
self._stdout = io.StringIO()
- self._title = title if title else command
+ self._title = title if title else command[0]
@property
def stdout(self):
|
pymedusa__Medusa-6525 | Medusa not working with pythonw on Windows
Hi,
Today I upgraded to the new 0.3.0 and also upgraded to Python 3.7.2 (from 2.7), but now Medusa won't work using `pythonw` anymore. When I launch via pythonw I see the pythonw process appear and almost immediately shut down again. Since it's pythonw there is no output, and nothing appears in the application log either. I've tried to debug a bit but I'm stuck. As soon as I put in `start.py` the line:
```
sys.stderr = open('stderr.txt', 'a')
```
To try and catch any errors, Medusa actually magically starts working, but the log file remains empty (I've verified I am opening the correct file by writing something to it myself; when I do that, my write appears in the log file but still nothing else).
My guess is that something in the code tries to access `sys.stderr`, but since that's not available when running with `pythonw`, the code breaks and the process exits. However, because I'm not a Python developer, I don't have proper tools like step-debugging to find out exactly where it fails :(.
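That guess matches the fix: `lib/unrar2/windows.py` calls `faulthandler.enable()` at import time, and `faulthandler` writes to stderr. Under `pythonw.exe`, `sys.stderr` is `None`, so the call raises before any logging is configured and the process exits silently. A sketch of the guarded version from the diff below:

```python
import sys

# Under pythonw.exe there is no console, so sys.stderr is None and
# faulthandler.enable() (which writes tracebacks to stderr) would raise.
if sys.version_info > (3, 3) and sys.stderr is not None:
    import faulthandler
    faulthandler.enable()
```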
| [
{
"content": "# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Low level interface - see UnRARDLL\\UNRARDLL.TXT\n\nfrom __future__ import generators\n\nimport ctypes\nimport ctypes.wintypes\nimport os\nimport os.path\nimport re\nimport time\nimport sys\n\nfrom .rar_exceptions import *\n\nif sys.version_info > (3,3):\n import faulthandler\n faulthandler.enable()\n\nif sys.version_info[0] >= 3:\n def string_from_bytes(s):\n return s.decode(sys.getdefaultencoding())\n def bytes_from_string(s):\n return s.encode(sys.getdefaultencoding())\nelse:\n def string_from_bytes(s):\n return s\n def bytes_from_string(s):\n return s\n\nERAR_END_ARCHIVE = 10\nERAR_NO_MEMORY = 11\nERAR_BAD_DATA = 12\nERAR_BAD_ARCHIVE = 13\nERAR_UNKNOWN_FORMAT = 14\nERAR_EOPEN = 15\nERAR_ECREATE = 16\nERAR_ECLOSE = 17\nERAR_EREAD = 18\nERAR_EWRITE = 19\nERAR_SMALL_BUF = 20\nERAR_UNKNOWN = 21\nERAR_MISSING_PASSWORD = 22\n\nRAR_OM_LIST = 0\nRAR_OM_EXTRACT = 1\n\nRAR_SKIP = 0\nRAR_TEST = 1\nRAR_EXTRACT = 2\n\nRAR_VOL_ASK = 0\nRAR_VOL_NOTIFY = 1\n\nRAR_DLL_VERSION = 3\n\n# enum UNRARCALLBACK_MESSAGES\nUCM_CHANGEVOLUME = 0\nUCM_PROCESSDATA = 1\nUCM_NEEDPASSWORD = 2\n\narchitecture_bits = ctypes.sizeof(ctypes.c_voidp) * 8\ndll_name = \"unrar.dll\"\nif architecture_bits == 64:\n dll_name = \"x64\\\\unrar64.dll\"\n\nvolume_naming1 = re.compile(\"[.]r([0-9]{2})$\")\nvolume_naming2 = re.compile(\"[.]([0-9]{3})[.]rar$\")\nvolume_naming3 = re.compile(\"[.]part([0-9]+)[.]rar$\")\n\ntry:\n dll_filename = os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name)\n if sys.version_info[:3] == (2, 7, 13):\n # http://bugs.python.org/issue29082\n dll_filename = str(dll_filename)\n unrar = ctypes.WinDLL(dll_filename)\n\nexcept WindowsError:\n dll_filename = dll_name\n if sys.version_info[:3] == (2, 7, 13):\n # http://bugs.python.org/issue29082\n dll_filename = str(dll_filename)\n unrar = ctypes.WinDLL(dll_filename)\n\nclass RAROpenArchiveDataEx(ctypes.Structure):\n def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):\n self.CmtBuf = ctypes.c_buffer(64 * 1024)\n ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW,\n OpenMode=OpenMode,\n _CmtBuf=ctypes.addressof(self.CmtBuf),\n CmtBufSize=ctypes.sizeof(self.CmtBuf))\n\n _fields_ = [\n ('ArcName', ctypes.c_char_p),\n ('ArcNameW', ctypes.c_wchar_p),\n ('OpenMode', ctypes.c_uint),\n ('OpenResult', ctypes.c_uint),\n ('_CmtBuf', ctypes.c_voidp),\n ('CmtBufSize', ctypes.c_uint),\n 
('CmtSize', ctypes.c_uint),\n ('CmtState', ctypes.c_uint),\n ('Flags', ctypes.c_uint),\n ('Reserved', ctypes.c_uint * 32),\n ]\n\n\nclass RARHeaderDataEx(ctypes.Structure):\n def __init__(self):\n self.CmtBuf = ctypes.c_buffer(64 * 1024)\n ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf),\n CmtBufSize=ctypes.sizeof(self.CmtBuf))\n\n _fields_ = [\n ('ArcName', ctypes.c_char * 1024),\n ('ArcNameW', ctypes.c_wchar * 1024),\n ('FileName', ctypes.c_char * 1024),\n ('FileNameW', ctypes.c_wchar * 1024),\n ('Flags', ctypes.c_uint),\n ('PackSize', ctypes.c_uint),\n ('PackSizeHigh', ctypes.c_uint),\n ('UnpSize', ctypes.c_uint),\n ('UnpSizeHigh', ctypes.c_uint),\n ('HostOS', ctypes.c_uint),\n ('FileCRC', ctypes.c_uint),\n ('FileTime', ctypes.c_uint),\n ('UnpVer', ctypes.c_uint),\n ('Method', ctypes.c_uint),\n ('FileAttr', ctypes.c_uint),\n ('_CmtBuf', ctypes.c_voidp),\n ('CmtBufSize', ctypes.c_uint),\n ('CmtSize', ctypes.c_uint),\n ('CmtState', ctypes.c_uint),\n ('Reserved', ctypes.c_uint * 1024),\n ]\n\n\ndef DosDateTimeToTimeTuple(dosDateTime):\n \"\"\"Convert an MS-DOS format date time to a Python time tuple.\n \"\"\"\n dos_date = dosDateTime >> 16\n dos_time = dosDateTime & 0xffff\n day = dos_date & 0x1f\n month = (dos_date >> 5) & 0xf\n year = 1980 + (dos_date >> 9)\n second = 2 * (dos_time & 0x1f)\n minute = (dos_time >> 5) & 0x3f\n hour = dos_time >> 11\n return time.localtime(\n time.mktime((year, month, day, hour, minute, second, 0, 1, -1)))\n\n\ndef _wrap(restype, func, argtypes):\n result = func\n result.argtypes = argtypes\n result.restype = restype\n return result\n\n\nRARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])\n\nRAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx,\n [ctypes.POINTER(RAROpenArchiveDataEx)])\n\nRARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx,\n [ctypes.wintypes.HANDLE,\n ctypes.POINTER(RARHeaderDataEx)])\n\n_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword,\n [ctypes.wintypes.HANDLE, ctypes.c_char_p])\n\n\ndef RARSetPassword(handle, password):\n _RARSetPassword(handle, password)\n\n\nRARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile,\n [ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p,\n ctypes.c_char_p])\n\nRARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive,\n [ctypes.wintypes.HANDLE])\n\n# The author of the UnRAR library uses \"long\" as the types of all the parameters,\n# even if some of them are pointers *facepalm*\nUNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_voidp, ctypes.c_voidp,\n ctypes.c_voidp, ctypes.c_voidp)\nRARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback,\n [ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])\n\nRARExceptions = {\n ERAR_NO_MEMORY: MemoryError,\n ERAR_BAD_DATA: ArchiveHeaderBroken,\n ERAR_BAD_ARCHIVE: InvalidRARArchive,\n ERAR_EOPEN: FileOpenError,\n}\n\nclass PassiveReader:\n \"\"\"Used for reading files to memory\"\"\"\n\n def __init__(self, usercallback=None):\n self.buf = []\n self.ucb = usercallback\n\n def _callback(self, msg, UserData, P1, P2):\n if msg == UCM_PROCESSDATA:\n data = (ctypes.c_char * P2).from_address(P1).raw\n if self.ucb is not None:\n self.ucb(data)\n else:\n self.buf.append(data)\n return 1\n\n def get_result(self):\n return b''.join(self.buf)\n\n\nclass RarInfoIterator(object):\n def __init__(self, arc):\n self.arc = arc\n self.index = 0\n self.headerData = RARHeaderDataEx()\n self.res = RARReadHeaderEx(self.arc._handle,\n ctypes.byref(self.headerData))\n if self.res in [ERAR_BAD_DATA, 
ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n self.arc.lockStatus = \"locked\"\n self.arc.needskip = False\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index > 0:\n if self.arc.needskip:\n RARProcessFile(self.arc._handle, RAR_SKIP, None, None)\n self.res = RARReadHeaderEx(self.arc._handle,\n ctypes.byref(self.headerData))\n\n if self.res:\n raise StopIteration\n self.arc.needskip = True\n\n data = {\n 'index': self.index, 'filename': self.headerData.FileNameW,\n 'datetime': DosDateTimeToTimeTuple(self.headerData.FileTime),\n 'isdir': ((self.headerData.Flags & 0xE0) == 0xE0),\n 'size': self.headerData.UnpSize + (\n self.headerData.UnpSizeHigh << 32)\n }\n if self.headerData.CmtState == 1:\n data['comment'] = string_from_bytes(self.headerData.CmtBuf.value.decode)\n else:\n data['comment'] = None\n self.index += 1\n return data\n\n next = __next__ # Python 2\n\n def __del__(self):\n self.arc.lockStatus = \"finished\"\n\n\ndef generate_password_provider(password):\n def password_provider_callback(msg, UserData, P1, P2):\n if msg == UCM_NEEDPASSWORD and password is not None:\n (ctypes.c_char * P2).from_address(P1).value = password\n return 1\n\n return password_provider_callback\n\n\nclass RarFileImplementation(object):\n def init(self, password=None):\n self.password = password\n archive_data = RAROpenArchiveDataEx(ArcNameW=self.archiveName,\n OpenMode=RAR_OM_EXTRACT)\n self._handle = RAROpenArchiveEx(ctypes.byref(archive_data))\n self.c_callback = UNRARCALLBACK(\n generate_password_provider(self.password))\n RARSetCallback(self._handle, self.c_callback, 1)\n\n if archive_data.OpenResult != 0:\n raise RARExceptions[archive_data.OpenResult]\n\n if archive_data.CmtState == 1:\n self.comment = string_from_bytes(archive_data.CmtBuf.value)\n else:\n self.comment = None\n\n if password:\n RARSetPassword(self._handle, bytes_from_string(password))\n\n self.lockStatus = \"ready\"\n\n self.isVolume = archive_data.Flags & 1\n\n def destruct(self):\n if self._handle and RARCloseArchive:\n RARCloseArchive(self._handle)\n\n def make_sure_ready(self):\n if self.lockStatus == \"locked\":\n raise InvalidRARArchiveUsage(\n \"cannot execute infoiter() without finishing previous one\")\n if self.lockStatus == \"finished\":\n self.destruct()\n self.init(self.password)\n\n def infoiter(self):\n self.make_sure_ready()\n return RarInfoIterator(self)\n\n def read_files(self, checker):\n res = []\n for info in self.infoiter():\n if checker(info) and not info.isdir:\n reader = PassiveReader()\n c_callback = UNRARCALLBACK(reader._callback)\n RARSetCallback(self._handle, c_callback, 1)\n tmpres = RARProcessFile(self._handle, RAR_TEST, None, None)\n if tmpres in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n self.needskip = False\n res.append((info, reader.get_result()))\n return res\n\n def extract(self, checker, path, withSubpath, overwrite):\n res = []\n for info in self.infoiter():\n checkres = checker(info)\n if checkres is not False and not info.isdir:\n if checkres:\n fn = info.filename\n if not withSubpath:\n fn = os.path.split(fn)[-1]\n target = os.path.join(path, fn)\n else:\n raise DeprecationWarning(\"Condition callbacks returning strings are deprecated\"\n \" and only supported in Windows\")\n if overwrite or (not os.path.exists(target)):\n tmpres = RARProcessFile(self._handle, RAR_EXTRACT, None,\n target.encode(sys.getdefaultencoding()))\n if tmpres in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n\n self.needskip = False\n 
res.append(info)\n return res\n\n def get_volume(self):\n if not self.isVolume:\n return None\n header_data = RARHeaderDataEx()\n res = RARReadHeaderEx(self._handle, ctypes.byref(header_data))\n arc_name = header_data.ArcNameW\n match3 = volume_naming3.search(arc_name)\n if match3 is not None:\n return int(match3.group(1)) - 1\n match2 = volume_naming3.search(arc_name)\n if match2 is not None:\n return int(match2.group(1))\n match1 = volume_naming1.search(arc_name)\n if match1 is not None:\n return int(match1.group(1)) + 1\n return 0\n",
"path": "lib/unrar2/windows.py"
}
] | [
{
"content": "# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Low level interface - see UnRARDLL\\UNRARDLL.TXT\n\nfrom __future__ import generators\n\nimport ctypes\nimport ctypes.wintypes\nimport os\nimport os.path\nimport re\nimport time\nimport sys\n\nfrom .rar_exceptions import *\n\nif sys.version_info > (3,3) and sys.stderr is not None:\n import faulthandler\n faulthandler.enable()\n\nif sys.version_info[0] >= 3:\n def string_from_bytes(s):\n return s.decode(sys.getdefaultencoding())\n def bytes_from_string(s):\n return s.encode(sys.getdefaultencoding())\nelse:\n def string_from_bytes(s):\n return s\n def bytes_from_string(s):\n return s\n\nERAR_END_ARCHIVE = 10\nERAR_NO_MEMORY = 11\nERAR_BAD_DATA = 12\nERAR_BAD_ARCHIVE = 13\nERAR_UNKNOWN_FORMAT = 14\nERAR_EOPEN = 15\nERAR_ECREATE = 16\nERAR_ECLOSE = 17\nERAR_EREAD = 18\nERAR_EWRITE = 19\nERAR_SMALL_BUF = 20\nERAR_UNKNOWN = 21\nERAR_MISSING_PASSWORD = 22\n\nRAR_OM_LIST = 0\nRAR_OM_EXTRACT = 1\n\nRAR_SKIP = 0\nRAR_TEST = 1\nRAR_EXTRACT = 2\n\nRAR_VOL_ASK = 0\nRAR_VOL_NOTIFY = 1\n\nRAR_DLL_VERSION = 3\n\n# enum UNRARCALLBACK_MESSAGES\nUCM_CHANGEVOLUME = 0\nUCM_PROCESSDATA = 1\nUCM_NEEDPASSWORD = 2\n\narchitecture_bits = ctypes.sizeof(ctypes.c_voidp) * 8\ndll_name = \"unrar.dll\"\nif architecture_bits == 64:\n dll_name = \"x64\\\\unrar64.dll\"\n\nvolume_naming1 = re.compile(\"[.]r([0-9]{2})$\")\nvolume_naming2 = re.compile(\"[.]([0-9]{3})[.]rar$\")\nvolume_naming3 = re.compile(\"[.]part([0-9]+)[.]rar$\")\n\ntry:\n dll_filename = os.path.join(os.path.split(__file__)[0], 'UnRARDLL', dll_name)\n if sys.version_info[:3] == (2, 7, 13):\n # http://bugs.python.org/issue29082\n dll_filename = str(dll_filename)\n unrar = ctypes.WinDLL(dll_filename)\n\nexcept WindowsError:\n dll_filename = dll_name\n if sys.version_info[:3] == (2, 7, 13):\n # http://bugs.python.org/issue29082\n dll_filename = str(dll_filename)\n unrar = ctypes.WinDLL(dll_filename)\n\nclass RAROpenArchiveDataEx(ctypes.Structure):\n def __init__(self, ArcName=None, ArcNameW=u'', OpenMode=RAR_OM_LIST):\n self.CmtBuf = ctypes.c_buffer(64 * 1024)\n ctypes.Structure.__init__(self, ArcName=ArcName, ArcNameW=ArcNameW,\n OpenMode=OpenMode,\n _CmtBuf=ctypes.addressof(self.CmtBuf),\n CmtBufSize=ctypes.sizeof(self.CmtBuf))\n\n _fields_ = [\n ('ArcName', ctypes.c_char_p),\n ('ArcNameW', ctypes.c_wchar_p),\n ('OpenMode', ctypes.c_uint),\n ('OpenResult', ctypes.c_uint),\n ('_CmtBuf', ctypes.c_voidp),\n 
('CmtBufSize', ctypes.c_uint),\n ('CmtSize', ctypes.c_uint),\n ('CmtState', ctypes.c_uint),\n ('Flags', ctypes.c_uint),\n ('Reserved', ctypes.c_uint * 32),\n ]\n\n\nclass RARHeaderDataEx(ctypes.Structure):\n def __init__(self):\n self.CmtBuf = ctypes.c_buffer(64 * 1024)\n ctypes.Structure.__init__(self, _CmtBuf=ctypes.addressof(self.CmtBuf),\n CmtBufSize=ctypes.sizeof(self.CmtBuf))\n\n _fields_ = [\n ('ArcName', ctypes.c_char * 1024),\n ('ArcNameW', ctypes.c_wchar * 1024),\n ('FileName', ctypes.c_char * 1024),\n ('FileNameW', ctypes.c_wchar * 1024),\n ('Flags', ctypes.c_uint),\n ('PackSize', ctypes.c_uint),\n ('PackSizeHigh', ctypes.c_uint),\n ('UnpSize', ctypes.c_uint),\n ('UnpSizeHigh', ctypes.c_uint),\n ('HostOS', ctypes.c_uint),\n ('FileCRC', ctypes.c_uint),\n ('FileTime', ctypes.c_uint),\n ('UnpVer', ctypes.c_uint),\n ('Method', ctypes.c_uint),\n ('FileAttr', ctypes.c_uint),\n ('_CmtBuf', ctypes.c_voidp),\n ('CmtBufSize', ctypes.c_uint),\n ('CmtSize', ctypes.c_uint),\n ('CmtState', ctypes.c_uint),\n ('Reserved', ctypes.c_uint * 1024),\n ]\n\n\ndef DosDateTimeToTimeTuple(dosDateTime):\n \"\"\"Convert an MS-DOS format date time to a Python time tuple.\n \"\"\"\n dos_date = dosDateTime >> 16\n dos_time = dosDateTime & 0xffff\n day = dos_date & 0x1f\n month = (dos_date >> 5) & 0xf\n year = 1980 + (dos_date >> 9)\n second = 2 * (dos_time & 0x1f)\n minute = (dos_time >> 5) & 0x3f\n hour = dos_time >> 11\n return time.localtime(\n time.mktime((year, month, day, hour, minute, second, 0, 1, -1)))\n\n\ndef _wrap(restype, func, argtypes):\n result = func\n result.argtypes = argtypes\n result.restype = restype\n return result\n\n\nRARGetDllVersion = _wrap(ctypes.c_int, unrar.RARGetDllVersion, [])\n\nRAROpenArchiveEx = _wrap(ctypes.wintypes.HANDLE, unrar.RAROpenArchiveEx,\n [ctypes.POINTER(RAROpenArchiveDataEx)])\n\nRARReadHeaderEx = _wrap(ctypes.c_int, unrar.RARReadHeaderEx,\n [ctypes.wintypes.HANDLE,\n ctypes.POINTER(RARHeaderDataEx)])\n\n_RARSetPassword = _wrap(ctypes.c_int, unrar.RARSetPassword,\n [ctypes.wintypes.HANDLE, ctypes.c_char_p])\n\n\ndef RARSetPassword(handle, password):\n _RARSetPassword(handle, password)\n\n\nRARProcessFile = _wrap(ctypes.c_int, unrar.RARProcessFile,\n [ctypes.wintypes.HANDLE, ctypes.c_int, ctypes.c_char_p,\n ctypes.c_char_p])\n\nRARCloseArchive = _wrap(ctypes.c_int, unrar.RARCloseArchive,\n [ctypes.wintypes.HANDLE])\n\n# The author of the UnRAR library uses \"long\" as the types of all the parameters,\n# even if some of them are pointers *facepalm*\nUNRARCALLBACK = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_voidp, ctypes.c_voidp,\n ctypes.c_voidp, ctypes.c_voidp)\nRARSetCallback = _wrap(ctypes.c_int, unrar.RARSetCallback,\n [ctypes.wintypes.HANDLE, UNRARCALLBACK, ctypes.c_long])\n\nRARExceptions = {\n ERAR_NO_MEMORY: MemoryError,\n ERAR_BAD_DATA: ArchiveHeaderBroken,\n ERAR_BAD_ARCHIVE: InvalidRARArchive,\n ERAR_EOPEN: FileOpenError,\n}\n\nclass PassiveReader:\n \"\"\"Used for reading files to memory\"\"\"\n\n def __init__(self, usercallback=None):\n self.buf = []\n self.ucb = usercallback\n\n def _callback(self, msg, UserData, P1, P2):\n if msg == UCM_PROCESSDATA:\n data = (ctypes.c_char * P2).from_address(P1).raw\n if self.ucb is not None:\n self.ucb(data)\n else:\n self.buf.append(data)\n return 1\n\n def get_result(self):\n return b''.join(self.buf)\n\n\nclass RarInfoIterator(object):\n def __init__(self, arc):\n self.arc = arc\n self.index = 0\n self.headerData = RARHeaderDataEx()\n self.res = RARReadHeaderEx(self.arc._handle,\n 
ctypes.byref(self.headerData))\n if self.res in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n self.arc.lockStatus = \"locked\"\n self.arc.needskip = False\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index > 0:\n if self.arc.needskip:\n RARProcessFile(self.arc._handle, RAR_SKIP, None, None)\n self.res = RARReadHeaderEx(self.arc._handle,\n ctypes.byref(self.headerData))\n\n if self.res:\n raise StopIteration\n self.arc.needskip = True\n\n data = {\n 'index': self.index, 'filename': self.headerData.FileNameW,\n 'datetime': DosDateTimeToTimeTuple(self.headerData.FileTime),\n 'isdir': ((self.headerData.Flags & 0xE0) == 0xE0),\n 'size': self.headerData.UnpSize + (\n self.headerData.UnpSizeHigh << 32)\n }\n if self.headerData.CmtState == 1:\n data['comment'] = string_from_bytes(self.headerData.CmtBuf.value.decode)\n else:\n data['comment'] = None\n self.index += 1\n return data\n\n next = __next__ # Python 2\n\n def __del__(self):\n self.arc.lockStatus = \"finished\"\n\n\ndef generate_password_provider(password):\n def password_provider_callback(msg, UserData, P1, P2):\n if msg == UCM_NEEDPASSWORD and password is not None:\n (ctypes.c_char * P2).from_address(P1).value = password\n return 1\n\n return password_provider_callback\n\n\nclass RarFileImplementation(object):\n def init(self, password=None):\n self.password = password\n archive_data = RAROpenArchiveDataEx(ArcNameW=self.archiveName,\n OpenMode=RAR_OM_EXTRACT)\n self._handle = RAROpenArchiveEx(ctypes.byref(archive_data))\n self.c_callback = UNRARCALLBACK(\n generate_password_provider(self.password))\n RARSetCallback(self._handle, self.c_callback, 1)\n\n if archive_data.OpenResult != 0:\n raise RARExceptions[archive_data.OpenResult]\n\n if archive_data.CmtState == 1:\n self.comment = string_from_bytes(archive_data.CmtBuf.value)\n else:\n self.comment = None\n\n if password:\n RARSetPassword(self._handle, bytes_from_string(password))\n\n self.lockStatus = \"ready\"\n\n self.isVolume = archive_data.Flags & 1\n\n def destruct(self):\n if self._handle and RARCloseArchive:\n RARCloseArchive(self._handle)\n\n def make_sure_ready(self):\n if self.lockStatus == \"locked\":\n raise InvalidRARArchiveUsage(\n \"cannot execute infoiter() without finishing previous one\")\n if self.lockStatus == \"finished\":\n self.destruct()\n self.init(self.password)\n\n def infoiter(self):\n self.make_sure_ready()\n return RarInfoIterator(self)\n\n def read_files(self, checker):\n res = []\n for info in self.infoiter():\n if checker(info) and not info.isdir:\n reader = PassiveReader()\n c_callback = UNRARCALLBACK(reader._callback)\n RARSetCallback(self._handle, c_callback, 1)\n tmpres = RARProcessFile(self._handle, RAR_TEST, None, None)\n if tmpres in [ERAR_BAD_DATA, ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n self.needskip = False\n res.append((info, reader.get_result()))\n return res\n\n def extract(self, checker, path, withSubpath, overwrite):\n res = []\n for info in self.infoiter():\n checkres = checker(info)\n if checkres is not False and not info.isdir:\n if checkres:\n fn = info.filename\n if not withSubpath:\n fn = os.path.split(fn)[-1]\n target = os.path.join(path, fn)\n else:\n raise DeprecationWarning(\"Condition callbacks returning strings are deprecated\"\n \" and only supported in Windows\")\n if overwrite or (not os.path.exists(target)):\n tmpres = RARProcessFile(self._handle, RAR_EXTRACT, None,\n target.encode(sys.getdefaultencoding()))\n if tmpres in [ERAR_BAD_DATA, 
ERAR_MISSING_PASSWORD]:\n raise IncorrectRARPassword\n\n self.needskip = False\n res.append(info)\n return res\n\n def get_volume(self):\n if not self.isVolume:\n return None\n header_data = RARHeaderDataEx()\n res = RARReadHeaderEx(self._handle, ctypes.byref(header_data))\n arc_name = header_data.ArcNameW\n match3 = volume_naming3.search(arc_name)\n if match3 is not None:\n return int(match3.group(1)) - 1\n match2 = volume_naming3.search(arc_name)\n if match2 is not None:\n return int(match2.group(1))\n match1 = volume_naming1.search(arc_name)\n if match1 is not None:\n return int(match1.group(1)) + 1\n return 0\n",
"path": "lib/unrar2/windows.py"
}
] | diff --git a/lib/unrar2/windows.py b/lib/unrar2/windows.py
index 8a06a2ed8d..b4de3b2ce7 100644
--- a/lib/unrar2/windows.py
+++ b/lib/unrar2/windows.py
@@ -34,7 +34,7 @@
from .rar_exceptions import *
-if sys.version_info > (3,3):
+if sys.version_info > (3,3) and sys.stderr is not None:
import faulthandler
faulthandler.enable()
|
pandas-dev__pandas-12013 | Msgpack - ValueError: buffer source array is read-only
I get a `ValueError` when processing data using pandas. I followed these steps:
1. convert to msgpack format with the compress flag
2. subsequently read the file back into a DataFrame
3. push to a SQL table with `to_sql`
On the third step i get ValueError: buffer source array is read-only.
This problem does not arise if I wrap the read_msgpack call inside a pandas.concat
Example
``` python
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
eng = create_engine("sqlite:///:memory:")
df1 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : 'foo' })
df1.to_msgpack('test.msgpack', compress='zlib')
df2 = pd.read_msgpack('test.msgpack')
df2.to_sql('test', eng, if_exists='append', chunksize=1000) # throws value error
df2 = pd.concat([pd.read_msgpack('test.msgpack')])
df2.to_sql('test', eng, if_exists='append', chunksize=1000) # works
```
This happens with both blosc and zlib compression. While I have found a workaround, this behaviour seems very odd, and for very large files the workaround carries a small performance hit.
edit: @TomAugspurger changed the sql engine to sqlite
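A hedged note on workarounds: the `pd.concat` wrapper below is the one verified in this report; forcing an explicit copy is an assumption based on `.copy()` allocating fresh, writable arrays where the decompressed msgpack buffer is read-only:

```python
# Verified workaround from this report: concat materializes new blocks.
df2 = pd.concat([pd.read_msgpack('test.msgpack')])

# Assumed equivalent: an explicit copy should also yield writable arrays.
df2 = pd.read_msgpack('test.msgpack').copy()

df2.to_sql('test', eng, if_exists='append', chunksize=1000)
```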
| [
{
"content": "\"\"\"\n\nprovide a generic structure to support window functions,\nsimilar to how we have a Groupby object\n\n\n\"\"\"\nfrom __future__ import division\n\nimport warnings\nimport numpy as np\nfrom functools import wraps\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom pandas.lib import isscalar\nfrom pandas.core.base import PandasObject, SelectionMixin, AbstractMethodError\nimport pandas.core.common as com\nimport pandas.algos as algos\nfrom pandas import compat\nfrom pandas.util.decorators import Substitution, Appender\nfrom textwrap import dedent\n\n_shared_docs = dict()\n_doc_template = \"\"\"\n\nReturns\n-------\nsame type as input\n\nSee also\n--------\npandas.Series.%(name)s\npandas.DataFrame.%(name)s\n\"\"\"\n\nclass _Window(PandasObject, SelectionMixin):\n _attributes = ['window','min_periods','freq','center','win_type','axis']\n exclusions = set()\n\n def __init__(self, obj, window=None, min_periods=None, freq=None, center=False,\n win_type=None, axis=0):\n\n if freq is not None:\n warnings.warn(\"The freq kw is deprecated and will be removed in a future version. You can resample prior \"\n \"to passing to a window function\",\n FutureWarning, stacklevel=3)\n\n self.blocks = []\n self.obj = obj\n self.window = window\n self.min_periods = min_periods\n self.freq = freq\n self.center = center\n self.win_type = win_type\n self.axis = axis\n self._setup()\n\n @property\n def _constructor(self):\n return Window\n\n def _setup(self):\n pass\n\n def _convert_freq(self, how=None):\n \"\"\" resample according to the how, return a new object \"\"\"\n\n obj = self._selected_obj\n if self.freq is not None and isinstance(obj, (com.ABCSeries, com.ABCDataFrame)):\n if how is not None:\n warnings.warn(\"The how kw argument is deprecated and removed in a future version. 
You can resample prior \"\n \"to passing to a window function\",\n FutureWarning, stacklevel=6)\n\n obj = obj.resample(self.freq, how=how)\n return obj\n\n def _create_blocks(self, how):\n \"\"\" split data into blocks & return conformed data \"\"\"\n\n obj = self._convert_freq(how)\n return obj.as_blocks(copy=False).values(), obj\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n self = self._shallow_copy(subset)\n self._reset_cache()\n if subset.ndim==2:\n if isscalar(key) and key in subset or com.is_list_like(key):\n self._selection = key\n return self\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def _get_window(self, other=None):\n return self.window\n\n def __unicode__(self):\n \"\"\" provide a nice str repr of our rolling object \"\"\"\n\n attrs = [ \"{k}={v}\".format(k=k,v=getattr(self,k)) \\\n for k in self._attributes if getattr(self,k,None) is not None ]\n return \"{klass} [{attrs}]\".format(klass=self.__class__.__name__,\n attrs=','.join(attrs))\n\n def _shallow_copy(self, obj=None, **kwargs):\n \"\"\" return a new object with the replacement attributes \"\"\"\n if obj is None:\n obj = self._selected_obj.copy()\n if isinstance(obj, self.__class__):\n obj = obj.obj\n for attr in self._attributes:\n if attr not in kwargs:\n kwargs[attr] = getattr(self,attr)\n return self._constructor(obj, **kwargs)\n\n def _prep_values(self, values=None, kill_inf=True, how=None):\n\n if values is None:\n values = getattr(self._selected_obj,'values',self._selected_obj)\n\n # coerce dtypes as appropriate\n if com.is_float_dtype(values.dtype):\n pass\n elif com.is_integer_dtype(values.dtype):\n values = values.astype(float)\n elif com.is_timedelta64_dtype(values.dtype):\n values = values.view('i8').astype(float)\n else:\n try:\n values = values.astype(float)\n except (ValueError, TypeError):\n raise TypeError(\"cannot handle this type -> {0}\".format(values.dtype))\n\n if kill_inf:\n values = values.copy()\n values[np.isinf(values)] = np.NaN\n\n return values\n\n def _wrap_result(self, result, block=None, obj=None):\n \"\"\" wrap a single result \"\"\"\n\n if obj is None:\n obj = self._selected_obj\n if isinstance(result, np.ndarray):\n\n # coerce if necessary\n if block is not None:\n if com.is_timedelta64_dtype(block.values.dtype):\n result = pd.to_timedelta(result.ravel(),unit='ns').values.reshape(result.shape)\n\n if result.ndim == 1:\n from pandas import Series\n return Series(result, obj.index, name=obj.name)\n\n return type(obj)(result,\n index=obj.index,\n columns=block.columns)\n return result\n\n def _wrap_results(self, results, blocks, obj):\n \"\"\"\n wrap the results\n\n Paramters\n ---------\n results : list of ndarrays\n blocks : list of blocks\n obj : conformed data (may be resampled)\n \"\"\"\n\n final = []\n for result, block in zip(results, blocks):\n\n result = self._wrap_result(result, block=block, obj=obj)\n if result.ndim == 1:\n return result\n final.append(result)\n\n if not len(final):\n return 
obj.astype('float64')\n return pd.concat(final,axis=1).reindex(columns=obj.columns)\n\n def _center_window(self, result, window):\n \"\"\" center the result in the window \"\"\"\n if self.axis > result.ndim-1:\n raise ValueError(\"Requested axis is larger then no. of argument \"\n \"dimensions\")\n\n from pandas import Series, DataFrame\n offset = _offset(window, True)\n if offset > 0:\n if isinstance(result, (Series, DataFrame)):\n result = result.slice_shift(-offset, axis=self.axis)\n else:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n return self.apply(arg, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n _shared_docs['sum'] = dedent(\"\"\"\n %(name)s sum\n\n Parameters\n ----------\n how : string, default None (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n\n _shared_docs['mean'] = dedent(\"\"\"\n %(name)s mean\n\n Parameters\n ----------\n how : string, default None (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n\nclass Window(_Window):\n \"\"\"\n Provides rolling transformations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n window : int\n Size of the moving window. This is the number of observations used for\n calculating the statistic.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n prove a window type, see the notes below\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n\n The recognized window types are:\n\n * ``boxcar``\n * ``triang``\n * ``blackman``\n * ``hamming``\n * ``bartlett``\n * ``parzen``\n * ``bohman``\n * ``blackmanharris``\n * ``nuttall``\n * ``barthann``\n * ``kaiser`` (needs beta)\n * ``gaussian`` (needs std)\n * ``general_gaussian`` (needs power, width)\n * ``slepian`` (needs width).\n\"\"\"\n\n def _prep_window(self, **kwargs):\n \"\"\" provide validation for our window type, return the window \"\"\"\n window = self._get_window()\n\n if isinstance(window, (list, tuple, np.ndarray)):\n return com._asarray_tuplesafe(window).astype(float)\n elif com.is_integer(window):\n try:\n import scipy.signal as sig\n except ImportError:\n raise ImportError('Please install scipy to generate window weight')\n win_type = _validate_win_type(self.win_type, kwargs) # may pop from kwargs\n return sig.get_window(win_type, window).astype(float)\n\n raise ValueError('Invalid window %s' % str(window))\n\n def _apply_window(self, mean=True, how=None, **kwargs):\n \"\"\"\n Applies a moving window of type ``window_type`` on the data.\n\n Parameters\n ----------\n mean : boolean, default True\n If True computes weighted mean, else weighted sum\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input argument\n\n \"\"\"\n window = self._prep_window(**kwargs)\n center = self.center\n\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, len(window))\n return algos.roll_window(np.concatenate((arg, additional_nans)) if center else arg,\n window, minp, avg=mean)\n\n result = np.apply_along_axis(f, self.axis, values)\n\n if center:\n result = self._center_window(result, window)\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n @Substitution(name='rolling')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n\n # these must apply directly\n result = arg(self)\n\n return result\n\n agg = aggregate\n\n @Substitution(name='window')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return self._apply_window(mean=False, **kwargs)\n\n @Substitution(name='window')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return self._apply_window(mean=True, **kwargs)\n\nclass _Rolling(_Window):\n\n @property\n def _constructor(self):\n return Rolling\n\n def _apply(self, func, window=None, center=None, check_minp=None, how=None, **kwargs):\n \"\"\"\n Rolling statistical measure using supplied function. 
Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : string/callable to apply\n window : int/array, default to _get_window()\n center : boolean, default to self.center\n check_minp : function, default to _use_window\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input\n \"\"\"\n if center is None:\n center = self.center\n if window is None:\n window = self._get_window()\n\n if check_minp is None:\n check_minp = _use_window\n\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n if not hasattr(algos, func):\n raise ValueError(\"we do not support this function algos.{0}\".format(func))\n\n cfunc = getattr(algos, func)\n def func(arg, window, min_periods=None):\n minp = check_minp(min_periods, window)\n return cfunc(arg, window, minp, **kwargs)\n\n # calculation function\n if center:\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n def calc(x):\n return func(np.concatenate((x, additional_nans)),\n window, min_periods=self.min_periods)\n else:\n def calc(x):\n return func(x,window, min_periods=self.min_periods)\n\n if values.ndim > 1:\n result = np.apply_along_axis(calc, self.axis, values)\n else:\n result = calc(values)\n\n if center:\n result = self._center_window(result, window)\n\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\nclass _Rolling_and_Expanding(_Rolling):\n\n _shared_docs['count'] = \"\"\"%(name)s count of number of non-NaN observations inside provided window.\"\"\"\n def count(self):\n obj = self._convert_freq()\n window = self._get_window()\n window = min(window, len(obj)) if not self.center else window\n try:\n converted = np.isfinite(obj).astype(float)\n except TypeError:\n converted = np.isfinite(obj.astype(float)).astype(float)\n result = self._constructor(converted,\n window=window,\n min_periods=0,\n center=self.center).sum()\n\n result[result.isnull()] = 0\n return result\n\n _shared_docs['apply'] = dedent(\"\"\"\n %(name)s function apply\n\n Parameters\n ----------\n func : function\n Must produce a single value from an ndarray input\n *args and **kwargs are passed to the function\"\"\")\n\n def apply(self, func, args=(), kwargs={}):\n _level = kwargs.pop('_level',None)\n window = self._get_window()\n offset = _offset(window, self.center)\n def f(arg, window, min_periods):\n minp = _use_window(min_periods, window)\n return algos.roll_generic(arg, window, minp, offset, func, args, kwargs)\n\n return self._apply(f, center=False)\n\n def sum(self, **kwargs):\n return self._apply('roll_sum', **kwargs)\n\n _shared_docs['max'] = dedent(\"\"\"\n %(name)s maximum\n\n Parameters\n ----------\n how : string, default 'max' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def max(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'max'\n return self._apply('roll_max', how=how, **kwargs)\n\n _shared_docs['min'] = dedent(\"\"\"\n %(name)s minimum\n\n Parameters\n ----------\n how : string, default 'min' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def min(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'min'\n return self._apply('roll_min', 
how=how, **kwargs)\n\n def mean(self, **kwargs):\n return self._apply('roll_mean', **kwargs)\n\n _shared_docs['median'] = dedent(\"\"\"\n %(name)s median\n\n Parameters\n ----------\n how : string, default 'median' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def median(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'median'\n return self._apply('roll_median_c', how=how, **kwargs)\n\n _shared_docs['std'] = dedent(\"\"\"\n %(name)s standard deviation\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def std(self, ddof=1, **kwargs):\n window = self._get_window()\n def f(arg, *args, **kwargs):\n minp = _require_min_periods(1)(self.min_periods, window)\n return _zsqrt(algos.roll_var(arg, window, minp, ddof))\n\n return self._apply(f, check_minp=_require_min_periods(1), **kwargs)\n\n _shared_docs['var'] = dedent(\"\"\"\n %(name)s variance\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def var(self, ddof=1, **kwargs):\n return self._apply('roll_var',\n check_minp=_require_min_periods(1),\n ddof=ddof,\n **kwargs)\n\n _shared_docs['skew'] = \"\"\"Unbiased %(name)s skewness\"\"\"\n def skew(self, **kwargs):\n return self._apply('roll_skew',\n check_minp=_require_min_periods(3),\n **kwargs)\n\n _shared_docs['kurt'] = \"\"\"Unbiased %(name)s kurtosis\"\"\"\n def kurt(self, **kwargs):\n return self._apply('roll_kurt',\n check_minp=_require_min_periods(4),\n **kwargs)\n\n _shared_docs['quantile'] = dedent(\"\"\"\n %(name)s quantile\n\n Parameters\n ----------\n quantile : float\n 0 <= quantile <= 1\"\"\")\n\n def quantile(self, quantile, **kwargs):\n window = self._get_window()\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, window)\n return algos.roll_quantile(arg, window, minp, quantile)\n\n return self._apply(f, **kwargs)\n\n _shared_docs['cov'] = dedent(\"\"\"\n %(name)s sample covariance\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\n pairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_cov(X, Y):\n mean = lambda x: x.rolling(window, self.min_periods, center=self.center).mean(**kwargs)\n count = (X+Y).rolling(window=window, center=self.center).count(**kwargs)\n bias_adj = count / (count - ddof)\n return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))\n\n _shared_docs['corr'] = dedent(\"\"\"\n %(name)s sample correlation\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\n pairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\"\"\")\n\n def corr(self, other=None, pairwise=None, **kwargs):\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_corr(a, b):\n a = a.rolling(window=window,\n min_periods=self.min_periods,\n freq=self.freq,\n center=self.center)\n b = b.rolling(window=window,\n min_periods=self.min_periods,\n freq=self.freq,\n center=self.center)\n\n return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))\n\nclass Rolling(_Rolling_and_Expanding):\n \"\"\"\n Provides rolling window calculcations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n window : int\n Size of the moving window. This is the number of observations used for\n calculating the statistic.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n \"\"\"\n\n @Substitution(name='rolling')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(Rolling, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['count'])\n def count(self):\n return super(Rolling, self).count()\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, args=(), kwargs={}):\n return super(Rolling, self).apply(func, args=args, kwargs=kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return super(Rolling, self).sum(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, **kwargs):\n return super(Rolling, self).max(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['min'])\n def min(self, **kwargs):\n return super(Rolling, self).min(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return super(Rolling, self).mean(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['median'])\n def median(self, **kwargs):\n return super(Rolling, self).median(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, **kwargs):\n return super(Rolling, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, **kwargs):\n return super(Rolling, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Rolling, self).skew(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Rolling, self).kurt(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, **kwargs):\n return super(Rolling, self).quantile(quantile=quantile, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Rolling, self).cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Rolling, self).corr(other=other, pairwise=pairwise, **kwargs)\n\nclass Expanding(_Rolling_and_Expanding):\n \"\"\"\n Provides expanding transformations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. 
Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. using the `mean`).\n \"\"\"\n\n _attributes = ['min_periods','freq','center','axis']\n\n def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0, **kwargs):\n return super(Expanding, self).__init__(obj=obj, min_periods=min_periods, freq=freq, center=center, axis=axis)\n\n @property\n def _constructor(self):\n return Expanding\n\n def _get_window(self, other=None):\n obj = self._selected_obj\n if other is None:\n return max(len(obj), self.min_periods) if self.min_periods else len(obj)\n return max((len(obj) + len(obj)), self.min_periods) if self.min_periods else (len(obj) + len(obj))\n\n @Substitution(name='expanding')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(Expanding, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['count'])\n def count(self, **kwargs):\n return super(Expanding, self).count(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, args=(), kwargs={}):\n return super(Expanding, self).apply(func, args=args, kwargs=kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return super(Expanding, self).sum(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, **kwargs):\n return super(Expanding, self).max(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['min'])\n def min(self, **kwargs):\n return super(Expanding, self).min(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return super(Expanding, self).mean(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['median'])\n def median(self, **kwargs):\n return super(Expanding, self).median(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, **kwargs):\n return super(Expanding, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, **kwargs):\n return super(Expanding, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Expanding, self).skew(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Expanding, self).kurt(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, **kwargs):\n 
return super(Expanding, self).quantile(quantile=quantile, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Expanding, self).cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Expanding, self).corr(other=other, pairwise=pairwise, **kwargs)\n\n_bias_template = \"\"\"\n\nParameters\n----------\nbias : boolean, default False\n Use a standard estimation bias correction\n\"\"\"\n\n_pairwise_template = \"\"\"\n\nParameters\n----------\nother : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\npairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\nbias : boolean, default False\n Use a standard estimation bias correction\n\"\"\"\n\nclass EWM(_Rolling):\n r\"\"\"\n Provides exponential weighted functions\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n com : float. optional\n Center of mass: :math:`\\alpha = 1 / (1 + com)`,\n span : float, optional\n Specify decay in terms of span, :math:`\\alpha = 2 / (span + 1)`\n halflife : float, optional\n Specify decay in terms of halflife, :math:`\\alpha = 1 - exp(log(0.5) / halflife)`\n min_periods : int, default 0\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : None or string alias / date offset object, default=None (DEPRECATED)\n Frequency to conform to before computing statistic\n adjust : boolean, default True\n Divide by decaying adjustment factor in beginning periods to account for\n imbalance in relative weightings (viewing EWMA as a moving average)\n ignore_na : boolean, default False\n Ignore missing values when calculating weights;\n specify True to reproduce pre-0.15.0 behavior\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n Either center of mass, span or halflife must be specified\n\n EWMA is sometimes specified using a \"span\" parameter `s`, we have that the\n decay parameter :math:`\\alpha` is related to the span as\n :math:`\\alpha = 2 / (s + 1) = 1 / (1 + c)`\n\n where `c` is the center of mass. Given a span, the associated center of mass is\n :math:`c = (s - 1) / 2`\n\n So a \"20-day EWMA\" would have center 9.5.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n\n When adjust is True (default), weighted averages are calculated using weights\n (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.\n\n When adjust is False, weighted averages are calculated recursively as:\n weighted_average[0] = arg[0];\n weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].\n\n When ignore_na is False (default), weights are based on absolute positions.\n For example, the weights of x and y used in calculating the final weighted\n average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and\n (1-alpha)**2 and alpha (if adjust is False).\n\n When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on\n relative positions. For example, the weights of x and y used in calculating\n the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is\n True), and 1-alpha and alpha (if adjust is False).\n\n More details can be found at\n http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions\n \"\"\"\n _attributes = ['com','min_periods','freq','adjust','ignore_na','axis']\n\n def __init__(self, obj, com=None, span=None, halflife=None, min_periods=0, freq=None,\n adjust=True, ignore_na=False, axis=0):\n self.obj = obj\n self.com = _get_center_of_mass(com, span, halflife)\n self.min_periods = min_periods\n self.freq = freq\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.axis = axis\n\n @property\n def _constructor(self):\n return EWM\n\n @Substitution(name='ewm')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(EWM, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n def _apply(self, func, how=None, **kwargs):\n \"\"\"Rolling statistical measure using supplied function. 
Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : string/callable to apply\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input argument\n\n \"\"\"\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n if not hasattr(algos, func):\n raise ValueError(\"we do not support this function algos.{0}\".format(func))\n\n cfunc = getattr(algos, func)\n def func(arg):\n return cfunc(arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods))\n\n results.append(np.apply_along_axis(func, self.axis, values))\n\n return self._wrap_results(results, blocks, obj)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n def mean(self, **kwargs):\n \"\"\"exponential weighted moving average\"\"\"\n return self._apply('ewma', **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def std(self, bias=False, **kwargs):\n \"\"\"exponential weighted moving stddev\"\"\"\n return _zsqrt(self.var(bias=bias, **kwargs))\n vol=std\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def var(self, bias=False, **kwargs):\n \"\"\"exponential weighted moving variance\"\"\"\n def f(arg):\n return algos.ewmcov(arg,\n arg,\n self.com,\n int(self.adjust),\n int(self.ignore_na),\n int(self.min_periods),\n int(bias))\n\n return self._apply(f, **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def cov(self, other=None, pairwise=None, bias=False, **kwargs):\n \"\"\"exponential weighted sample covariance\"\"\"\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n\n def _get_cov(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n cov = algos.ewmcov(X._prep_values(),\n Y._prep_values(),\n self.com,\n int(self.adjust),\n int(self.ignore_na),\n int(self.min_periods),\n int(bias))\n return X._wrap_result(cov)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def corr(self, other=None, pairwise=None, **kwargs):\n \"\"\"exponential weighted sample correlation\"\"\"\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n\n def _get_corr(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n def _cov(x, y):\n return algos.ewmcov(x, y, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), 1)\n\n x_values = X._prep_values()\n y_values = Y._prep_values()\n cov = _cov(x_values, y_values)\n x_var = _cov(x_values, x_values)\n y_var = _cov(y_values, y_values)\n corr = cov / _zsqrt(x_var * y_var)\n return X._wrap_result(corr)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))\n\n########################\n##### Helper Funcs #####\n########################\n\ndef _flex_binary_moment(arg1, arg2, f, pairwise=False):\n from pandas import Series, DataFrame, Panel\n if 
not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and\n isinstance(arg2,(np.ndarray, Series, DataFrame))):\n raise TypeError(\"arguments to moment function must be of type \"\n \"np.ndarray/Series/DataFrame\")\n\n if isinstance(arg1, (np.ndarray, Series)) and \\\n isinstance(arg2, (np.ndarray,Series)):\n X, Y = _prep_binary(arg1, arg2)\n return f(X, Y)\n\n elif isinstance(arg1, DataFrame):\n def dataframe_from_int_dict(data, frame_template):\n result = DataFrame(data, index=frame_template.index)\n if len(result.columns) > 0:\n result.columns = frame_template.columns[result.columns]\n return result\n\n results = {}\n if isinstance(arg2, DataFrame):\n if pairwise is False:\n if arg1 is arg2:\n # special case in order to handle duplicate column names\n for i, col in enumerate(arg1.columns):\n results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])\n return dataframe_from_int_dict(results, arg1)\n else:\n if not arg1.columns.is_unique:\n raise ValueError(\"'arg1' columns are not unique\")\n if not arg2.columns.is_unique:\n raise ValueError(\"'arg2' columns are not unique\")\n X, Y = arg1.align(arg2, join='outer')\n X = X + 0 * Y\n Y = Y + 0 * X\n res_columns = arg1.columns.union(arg2.columns)\n for col in res_columns:\n if col in X and col in Y:\n results[col] = f(X[col], Y[col])\n return DataFrame(results, index=X.index, columns=res_columns)\n elif pairwise is True:\n results = defaultdict(dict)\n for i, k1 in enumerate(arg1.columns):\n for j, k2 in enumerate(arg2.columns):\n if j<i and arg2 is arg1:\n # Symmetric case\n results[i][j] = results[j][i]\n else:\n results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))\n p = Panel.from_dict(results).swapaxes('items', 'major')\n if len(p.major_axis) > 0:\n p.major_axis = arg1.columns[p.major_axis]\n if len(p.minor_axis) > 0:\n p.minor_axis = arg2.columns[p.minor_axis]\n return p\n else:\n raise ValueError(\"'pairwise' is not True/False\")\n else:\n results = {}\n for i, col in enumerate(arg1.columns):\n results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))\n return dataframe_from_int_dict(results, arg1)\n\n else:\n return _flex_binary_moment(arg2, arg1, f)\n\ndef _get_center_of_mass(com, span, halflife):\n valid_count = len([x for x in [com, span, halflife] if x is not None])\n if valid_count > 1:\n raise Exception(\"com, span, and halflife are mutually exclusive\")\n\n if span is not None:\n # convert span to center of mass\n com = (span - 1) / 2.\n elif halflife is not None:\n # convert halflife to center of mass\n decay = 1 - np.exp(np.log(0.5) / halflife)\n com = 1 / decay - 1\n elif com is None:\n raise Exception(\"Must pass one of com, span, or halflife\")\n\n return float(com)\n\ndef _offset(window, center):\n if not com.is_integer(window):\n window = len(window)\n offset = (window - 1) / 2. 
if center else 0\n try:\n return int(offset)\n except:\n return offset.astype(int)\n\ndef _require_min_periods(p):\n def _check_func(minp, window):\n if minp is None:\n return window\n else:\n return max(p, minp)\n return _check_func\n\ndef _use_window(minp, window):\n if minp is None:\n return window\n else:\n return minp\n\ndef _zsqrt(x):\n result = np.sqrt(x)\n mask = x < 0\n\n from pandas import DataFrame\n if isinstance(x, DataFrame):\n if mask.values.any():\n result[mask] = 0\n else:\n if mask.any():\n result[mask] = 0\n\n return result\n\ndef _prep_binary(arg1, arg2):\n if not isinstance(arg2, type(arg1)):\n raise Exception('Input arrays must be of the same type!')\n\n # mask out values, this also makes a common index...\n X = arg1 + 0 * arg2\n Y = arg2 + 0 * arg1\n\n return X, Y\n\ndef _validate_win_type(win_type, kwargs):\n # may pop from kwargs\n arg_map = {'kaiser': ['beta'],\n 'gaussian': ['std'],\n 'general_gaussian': ['power', 'width'],\n 'slepian': ['width']}\n if win_type in arg_map:\n return tuple([win_type] +\n _pop_args(win_type, arg_map[win_type], kwargs))\n return win_type\n\n\ndef _pop_args(win_type, arg_names, kwargs):\n msg = '%s window requires %%s' % win_type\n all_args = []\n for n in arg_names:\n if n not in kwargs:\n raise ValueError(msg % n)\n all_args.append(kwargs.pop(n))\n return all_args\n\n#############################\n##### top-level exports #####\n#############################\n\ndef rolling(obj, win_type=None, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n if win_type is not None:\n return Window(obj, win_type=win_type, **kwds)\n\n return Rolling(obj, **kwds)\nrolling.__doc__ = Window.__doc__\n\ndef expanding(obj, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return Expanding(obj, **kwds)\nexpanding.__doc__ = Expanding.__doc__\n\ndef ewm(obj, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return EWM(obj, **kwds)\newm.__doc__ = EWM.__doc__\n",
"path": "pandas/core/window.py"
}
] | [
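The before/after file contents in this record implement pandas' `.rolling()`, `.expanding()`, and `.ewm()` window objects (added in 0.18.0, per the `.. versionadded::` notes in the docstrings above). As an illustrative aside, not part of the record itself, the minimal sketch below exercises that public API; the series values are made up for demonstration:

```python
# Minimal sketch of the window API that pandas/core/window.py provides.
# Assumes pandas >= 0.18.0, where .rolling()/.expanding()/.ewm() replaced
# the older pd.rolling_* / pd.expanding_* / pd.ewm* module functions.
import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, np.nan, 4.0, 5.0])

# Rolling mean over a 3-observation window; min_periods=1 keeps the
# early labels (and the one spanning the NaN) from being all-NaN.
print(s.rolling(window=3, min_periods=1).mean())

# Expanding sum: the window grows to include all prior observations.
print(s.expanding(min_periods=1).sum())

# Exponentially weighted mean; per the EWM docstring above, span s
# corresponds to a decay of alpha = 2 / (s + 1).
print(s.ewm(span=3).mean())
```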
{
"content": "\"\"\"\n\nprovide a generic structure to support window functions,\nsimilar to how we have a Groupby object\n\n\n\"\"\"\nfrom __future__ import division\n\nimport warnings\nimport numpy as np\nfrom functools import wraps\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom pandas.lib import isscalar\nfrom pandas.core.base import PandasObject, SelectionMixin, AbstractMethodError\nimport pandas.core.common as com\nimport pandas.algos as algos\nfrom pandas import compat\nfrom pandas.util.decorators import Substitution, Appender\nfrom textwrap import dedent\n\n_shared_docs = dict()\n_doc_template = \"\"\"\n\nReturns\n-------\nsame type as input\n\nSee also\n--------\npandas.Series.%(name)s\npandas.DataFrame.%(name)s\n\"\"\"\n\nclass _Window(PandasObject, SelectionMixin):\n _attributes = ['window','min_periods','freq','center','win_type','axis']\n exclusions = set()\n\n def __init__(self, obj, window=None, min_periods=None, freq=None, center=False,\n win_type=None, axis=0):\n\n if freq is not None:\n warnings.warn(\"The freq kw is deprecated and will be removed in a future version. You can resample prior \"\n \"to passing to a window function\",\n FutureWarning, stacklevel=3)\n\n self.blocks = []\n self.obj = obj\n self.window = window\n self.min_periods = min_periods\n self.freq = freq\n self.center = center\n self.win_type = win_type\n self.axis = axis\n self._setup()\n\n @property\n def _constructor(self):\n return Window\n\n def _setup(self):\n pass\n\n def _convert_freq(self, how=None):\n \"\"\" resample according to the how, return a new object \"\"\"\n\n obj = self._selected_obj\n if self.freq is not None and isinstance(obj, (com.ABCSeries, com.ABCDataFrame)):\n if how is not None:\n warnings.warn(\"The how kw argument is deprecated and removed in a future version. 
You can resample prior \"\n \"to passing to a window function\",\n FutureWarning, stacklevel=6)\n\n obj = obj.resample(self.freq, how=how)\n return obj\n\n def _create_blocks(self, how):\n \"\"\" split data into blocks & return conformed data \"\"\"\n\n obj = self._convert_freq(how)\n return obj.as_blocks(copy=False).values(), obj\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n sub-classes to define\n return a sliced object\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n self = self._shallow_copy(subset)\n self._reset_cache()\n if subset.ndim==2:\n if isscalar(key) and key in subset or com.is_list_like(key):\n self._selection = key\n return self\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def _get_window(self, other=None):\n return self.window\n\n def __unicode__(self):\n \"\"\" provide a nice str repr of our rolling object \"\"\"\n\n attrs = [ \"{k}={v}\".format(k=k,v=getattr(self,k)) \\\n for k in self._attributes if getattr(self,k,None) is not None ]\n return \"{klass} [{attrs}]\".format(klass=self.__class__.__name__,\n attrs=','.join(attrs))\n\n def _shallow_copy(self, obj=None, **kwargs):\n \"\"\" return a new object with the replacement attributes \"\"\"\n if obj is None:\n obj = self._selected_obj.copy()\n if isinstance(obj, self.__class__):\n obj = obj.obj\n for attr in self._attributes:\n if attr not in kwargs:\n kwargs[attr] = getattr(self,attr)\n return self._constructor(obj, **kwargs)\n\n def _prep_values(self, values=None, kill_inf=True, how=None):\n\n if values is None:\n values = getattr(self._selected_obj,'values',self._selected_obj)\n\n # coerce dtypes as appropriate\n if com.is_float_dtype(values.dtype):\n pass\n elif com.is_integer_dtype(values.dtype):\n values = values.astype(float)\n elif com.is_timedelta64_dtype(values.dtype):\n values = values.view('i8').astype(float)\n else:\n try:\n values = values.astype(float)\n except (ValueError, TypeError):\n raise TypeError(\"cannot handle this type -> {0}\".format(values.dtype))\n\n if kill_inf:\n values = values.copy()\n values[np.isinf(values)] = np.NaN\n\n return values\n\n def _wrap_result(self, result, block=None, obj=None):\n \"\"\" wrap a single result \"\"\"\n\n if obj is None:\n obj = self._selected_obj\n if isinstance(result, np.ndarray):\n\n # coerce if necessary\n if block is not None:\n if com.is_timedelta64_dtype(block.values.dtype):\n result = pd.to_timedelta(result.ravel(),unit='ns').values.reshape(result.shape)\n\n if result.ndim == 1:\n from pandas import Series\n return Series(result, obj.index, name=obj.name)\n\n return type(obj)(result,\n index=obj.index,\n columns=block.columns)\n return result\n\n def _wrap_results(self, results, blocks, obj):\n \"\"\"\n wrap the results\n\n Paramters\n ---------\n results : list of ndarrays\n blocks : list of blocks\n obj : conformed data (may be resampled)\n \"\"\"\n\n final = []\n for result, block in zip(results, blocks):\n\n result = self._wrap_result(result, block=block, obj=obj)\n if result.ndim == 1:\n return result\n final.append(result)\n\n if not len(final):\n return 
obj.astype('float64')\n return pd.concat(final,axis=1).reindex(columns=obj.columns)\n\n def _center_window(self, result, window):\n \"\"\" center the result in the window \"\"\"\n if self.axis > result.ndim-1:\n raise ValueError(\"Requested axis is larger then no. of argument \"\n \"dimensions\")\n\n from pandas import Series, DataFrame\n offset = _offset(window, True)\n if offset > 0:\n if isinstance(result, (Series, DataFrame)):\n result = result.slice_shift(-offset, axis=self.axis)\n else:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n return self.apply(arg, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n _shared_docs['sum'] = dedent(\"\"\"\n %(name)s sum\n\n Parameters\n ----------\n how : string, default None (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n\n _shared_docs['mean'] = dedent(\"\"\"\n %(name)s mean\n\n Parameters\n ----------\n how : string, default None (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n\nclass Window(_Window):\n \"\"\"\n Provides rolling transformations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n window : int\n Size of the moving window. This is the number of observations used for\n calculating the statistic.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n win_type : string, default None\n prove a window type, see the notes below\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n\n The recognized window types are:\n\n * ``boxcar``\n * ``triang``\n * ``blackman``\n * ``hamming``\n * ``bartlett``\n * ``parzen``\n * ``bohman``\n * ``blackmanharris``\n * ``nuttall``\n * ``barthann``\n * ``kaiser`` (needs beta)\n * ``gaussian`` (needs std)\n * ``general_gaussian`` (needs power, width)\n * ``slepian`` (needs width).\n\"\"\"\n\n def _prep_window(self, **kwargs):\n \"\"\" provide validation for our window type, return the window \"\"\"\n window = self._get_window()\n\n if isinstance(window, (list, tuple, np.ndarray)):\n return com._asarray_tuplesafe(window).astype(float)\n elif com.is_integer(window):\n try:\n import scipy.signal as sig\n except ImportError:\n raise ImportError('Please install scipy to generate window weight')\n win_type = _validate_win_type(self.win_type, kwargs) # may pop from kwargs\n return sig.get_window(win_type, window).astype(float)\n\n raise ValueError('Invalid window %s' % str(window))\n\n def _apply_window(self, mean=True, how=None, **kwargs):\n \"\"\"\n Applies a moving window of type ``window_type`` on the data.\n\n Parameters\n ----------\n mean : boolean, default True\n If True computes weighted mean, else weighted sum\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input argument\n\n \"\"\"\n window = self._prep_window(**kwargs)\n center = self.center\n\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, len(window))\n return algos.roll_window(np.concatenate((arg, additional_nans)) if center else arg,\n window, minp, avg=mean)\n\n result = np.apply_along_axis(f, self.axis, values)\n\n if center:\n result = self._center_window(result, window)\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\n @Substitution(name='rolling')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n result, how = self._aggregate(arg, *args, **kwargs)\n if result is None:\n\n # these must apply directly\n result = arg(self)\n\n return result\n\n agg = aggregate\n\n @Substitution(name='window')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return self._apply_window(mean=False, **kwargs)\n\n @Substitution(name='window')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return self._apply_window(mean=True, **kwargs)\n\nclass _Rolling(_Window):\n\n @property\n def _constructor(self):\n return Rolling\n\n def _apply(self, func, window=None, center=None, check_minp=None, how=None, **kwargs):\n \"\"\"\n Rolling statistical measure using supplied function. 
Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : string/callable to apply\n window : int/array, default to _get_window()\n center : boolean, default to self.center\n check_minp : function, default to _use_window\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input\n \"\"\"\n if center is None:\n center = self.center\n if window is None:\n window = self._get_window()\n\n if check_minp is None:\n check_minp = _use_window\n\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n if not hasattr(algos, func):\n raise ValueError(\"we do not support this function algos.{0}\".format(func))\n\n cfunc = getattr(algos, func)\n def func(arg, window, min_periods=None):\n minp = check_minp(min_periods, window)\n return cfunc(arg, window, minp, **kwargs)\n\n # calculation function\n if center:\n offset = _offset(window, center)\n additional_nans = np.array([np.NaN] * offset)\n def calc(x):\n return func(np.concatenate((x, additional_nans)),\n window, min_periods=self.min_periods)\n else:\n def calc(x):\n return func(x,window, min_periods=self.min_periods)\n\n if values.ndim > 1:\n result = np.apply_along_axis(calc, self.axis, values)\n else:\n result = calc(values)\n\n if center:\n result = self._center_window(result, window)\n\n results.append(result)\n\n return self._wrap_results(results, blocks, obj)\n\nclass _Rolling_and_Expanding(_Rolling):\n\n _shared_docs['count'] = \"\"\"%(name)s count of number of non-NaN observations inside provided window.\"\"\"\n def count(self):\n obj = self._convert_freq()\n window = self._get_window()\n window = min(window, len(obj)) if not self.center else window\n try:\n converted = np.isfinite(obj).astype(float)\n except TypeError:\n converted = np.isfinite(obj.astype(float)).astype(float)\n result = self._constructor(converted,\n window=window,\n min_periods=0,\n center=self.center).sum()\n\n result[result.isnull()] = 0\n return result\n\n _shared_docs['apply'] = dedent(\"\"\"\n %(name)s function apply\n\n Parameters\n ----------\n func : function\n Must produce a single value from an ndarray input\n *args and **kwargs are passed to the function\"\"\")\n\n def apply(self, func, args=(), kwargs={}):\n _level = kwargs.pop('_level',None)\n window = self._get_window()\n offset = _offset(window, self.center)\n def f(arg, window, min_periods):\n minp = _use_window(min_periods, window)\n return algos.roll_generic(arg, window, minp, offset, func, args, kwargs)\n\n return self._apply(f, center=False)\n\n def sum(self, **kwargs):\n return self._apply('roll_sum', **kwargs)\n\n _shared_docs['max'] = dedent(\"\"\"\n %(name)s maximum\n\n Parameters\n ----------\n how : string, default 'max' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def max(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'max'\n return self._apply('roll_max', how=how, **kwargs)\n\n _shared_docs['min'] = dedent(\"\"\"\n %(name)s minimum\n\n Parameters\n ----------\n how : string, default 'min' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def min(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'min'\n return self._apply('roll_min', 
how=how, **kwargs)\n\n def mean(self, **kwargs):\n return self._apply('roll_mean', **kwargs)\n\n _shared_docs['median'] = dedent(\"\"\"\n %(name)s median\n\n Parameters\n ----------\n how : string, default 'median' (DEPRECATED)\n Method for down- or re-sampling\"\"\")\n def median(self, how=None, **kwargs):\n if self.freq is not None and how is None:\n how = 'median'\n return self._apply('roll_median_c', how=how, **kwargs)\n\n _shared_docs['std'] = dedent(\"\"\"\n %(name)s standard deviation\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def std(self, ddof=1, **kwargs):\n window = self._get_window()\n def f(arg, *args, **kwargs):\n minp = _require_min_periods(1)(self.min_periods, window)\n return _zsqrt(algos.roll_var(arg, window, minp, ddof))\n\n return self._apply(f, check_minp=_require_min_periods(1), **kwargs)\n\n _shared_docs['var'] = dedent(\"\"\"\n %(name)s variance\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def var(self, ddof=1, **kwargs):\n return self._apply('roll_var',\n check_minp=_require_min_periods(1),\n ddof=ddof,\n **kwargs)\n\n _shared_docs['skew'] = \"\"\"Unbiased %(name)s skewness\"\"\"\n def skew(self, **kwargs):\n return self._apply('roll_skew',\n check_minp=_require_min_periods(3),\n **kwargs)\n\n _shared_docs['kurt'] = \"\"\"Unbiased %(name)s kurtosis\"\"\"\n def kurt(self, **kwargs):\n return self._apply('roll_kurt',\n check_minp=_require_min_periods(4),\n **kwargs)\n\n _shared_docs['quantile'] = dedent(\"\"\"\n %(name)s quantile\n\n Parameters\n ----------\n quantile : float\n 0 <= quantile <= 1\"\"\")\n\n def quantile(self, quantile, **kwargs):\n window = self._get_window()\n def f(arg, *args, **kwargs):\n minp = _use_window(self.min_periods, window)\n return algos.roll_quantile(arg, window, minp, quantile)\n\n return self._apply(f, **kwargs)\n\n _shared_docs['cov'] = dedent(\"\"\"\n %(name)s sample covariance\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\n pairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\"\"\")\n\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_cov(X, Y):\n mean = lambda x: x.rolling(window, self.min_periods, center=self.center).mean(**kwargs)\n count = (X+Y).rolling(window=window, center=self.center).count(**kwargs)\n bias_adj = count / (count - ddof)\n return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))\n\n _shared_docs['corr'] = dedent(\"\"\"\n %(name)s sample correlation\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\n pairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\"\"\")\n\n def corr(self, other=None, pairwise=None, **kwargs):\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n window = self._get_window(other)\n\n def _get_corr(a, b):\n a = a.rolling(window=window,\n min_periods=self.min_periods,\n freq=self.freq,\n center=self.center)\n b = b.rolling(window=window,\n min_periods=self.min_periods,\n freq=self.freq,\n center=self.center)\n\n return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))\n\nclass Rolling(_Rolling_and_Expanding):\n \"\"\"\n Provides rolling window calculcations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n window : int\n Size of the moving window. This is the number of observations used for\n calculating the statistic.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n \"\"\"\n\n @Substitution(name='rolling')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(Rolling, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['count'])\n def count(self):\n return super(Rolling, self).count()\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, args=(), kwargs={}):\n return super(Rolling, self).apply(func, args=args, kwargs=kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return super(Rolling, self).sum(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, **kwargs):\n return super(Rolling, self).max(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['min'])\n def min(self, **kwargs):\n return super(Rolling, self).min(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return super(Rolling, self).mean(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['median'])\n def median(self, **kwargs):\n return super(Rolling, self).median(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, **kwargs):\n return super(Rolling, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, **kwargs):\n return super(Rolling, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Rolling, self).skew(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Rolling, self).kurt(**kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, **kwargs):\n return super(Rolling, self).quantile(quantile=quantile, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Rolling, self).cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name='rolling')\n @Appender(_doc_template)\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Rolling, self).corr(other=other, pairwise=pairwise, **kwargs)\n\nclass Expanding(_Rolling_and_Expanding):\n \"\"\"\n Provides expanding transformations.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : string or DateOffset object, optional (default None) (DEPRECATED)\n Frequency to conform the data to before computing the statistic. 
Specified\n as a frequency string or DateOffset object.\n center : boolean, default False\n Set the labels at the center of the window.\n axis : int, default 0\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n By default, the result is set to the right edge of the window. This can be\n changed to the center of the window by setting ``center=True``.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. using the `mean`).\n \"\"\"\n\n _attributes = ['min_periods','freq','center','axis']\n\n def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0, **kwargs):\n return super(Expanding, self).__init__(obj=obj, min_periods=min_periods, freq=freq, center=center, axis=axis)\n\n @property\n def _constructor(self):\n return Expanding\n\n def _get_window(self, other=None):\n obj = self._selected_obj\n if other is None:\n return max(len(obj), self.min_periods) if self.min_periods else len(obj)\n return max((len(obj) + len(obj)), self.min_periods) if self.min_periods else (len(obj) + len(obj))\n\n @Substitution(name='expanding')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(Expanding, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['count'])\n def count(self, **kwargs):\n return super(Expanding, self).count(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['apply'])\n def apply(self, func, args=(), kwargs={}):\n return super(Expanding, self).apply(func, args=args, kwargs=kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['sum'])\n def sum(self, **kwargs):\n return super(Expanding, self).sum(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['max'])\n def max(self, **kwargs):\n return super(Expanding, self).max(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['min'])\n def min(self, **kwargs):\n return super(Expanding, self).min(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['mean'])\n def mean(self, **kwargs):\n return super(Expanding, self).mean(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['median'])\n def median(self, **kwargs):\n return super(Expanding, self).median(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['std'])\n def std(self, ddof=1, **kwargs):\n return super(Expanding, self).std(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['var'])\n def var(self, ddof=1, **kwargs):\n return super(Expanding, self).var(ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['skew'])\n def skew(self, **kwargs):\n return super(Expanding, self).skew(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['kurt'])\n def kurt(self, **kwargs):\n return super(Expanding, self).kurt(**kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['quantile'])\n def quantile(self, quantile, **kwargs):\n 
return super(Expanding, self).quantile(quantile=quantile, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['cov'])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super(Expanding, self).cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name='expanding')\n @Appender(_doc_template)\n @Appender(_shared_docs['corr'])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super(Expanding, self).corr(other=other, pairwise=pairwise, **kwargs)\n\n_bias_template = \"\"\"\n\nParameters\n----------\nbias : boolean, default False\n Use a standard estimation bias correction\n\"\"\"\n\n_pairwise_template = \"\"\"\n\nParameters\n----------\nother : Series, DataFrame, or ndarray, optional\n if not supplied then will default to self and produce pairwise output\npairwise : bool, default None\n If False then only matching columns between self and other will be used and\n the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the output\n will be a Panel in the case of DataFrame inputs. In the case of missing\n elements, only complete pairwise observations will be used.\nbias : boolean, default False\n Use a standard estimation bias correction\n\"\"\"\n\n\nclass EWM(_Rolling):\n r\"\"\"\n Provides exponential weighted functions\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n com : float. optional\n Center of mass: :math:`\\alpha = 1 / (1 + com)`,\n span : float, optional\n Specify decay in terms of span, :math:`\\alpha = 2 / (span + 1)`\n halflife : float, optional\n Specify decay in terms of halflife, :math:`\\alpha = 1 - exp(log(0.5) / halflife)`\n min_periods : int, default 0\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n freq : None or string alias / date offset object, default=None (DEPRECATED)\n Frequency to conform to before computing statistic\n adjust : boolean, default True\n Divide by decaying adjustment factor in beginning periods to account for\n imbalance in relative weightings (viewing EWMA as a moving average)\n ignore_na : boolean, default False\n Ignore missing values when calculating weights;\n specify True to reproduce pre-0.15.0 behavior\n\n Returns\n -------\n a Window sub-classed for the particular operation\n\n Notes\n -----\n Either center of mass, span or halflife must be specified\n\n EWMA is sometimes specified using a \"span\" parameter `s`, we have that the\n decay parameter :math:`\\alpha` is related to the span as\n :math:`\\alpha = 2 / (s + 1) = 1 / (1 + c)`\n\n where `c` is the center of mass. Given a span, the associated center of mass is\n :math:`c = (s - 1) / 2`\n\n So a \"20-day EWMA\" would have center 9.5.\n\n The `freq` keyword is used to conform time series data to a specified\n frequency by resampling the data. This is done with the default parameters\n of :meth:`~pandas.Series.resample` (i.e. 
using the `mean`).\n\n When adjust is True (default), weighted averages are calculated using weights\n (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.\n\n When adjust is False, weighted averages are calculated recursively as:\n weighted_average[0] = arg[0];\n weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].\n\n When ignore_na is False (default), weights are based on absolute positions.\n For example, the weights of x and y used in calculating the final weighted\n average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and\n (1-alpha)**2 and alpha (if adjust is False).\n\n When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on\n relative positions. For example, the weights of x and y used in calculating\n the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is\n True), and 1-alpha and alpha (if adjust is False).\n\n More details can be found at\n http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions\n \"\"\"\n _attributes = ['com','min_periods','freq','adjust','ignore_na','axis']\n\n def __init__(self, obj, com=None, span=None, halflife=None, min_periods=0, freq=None,\n adjust=True, ignore_na=False, axis=0):\n self.obj = obj\n self.com = _get_center_of_mass(com, span, halflife)\n self.min_periods = min_periods\n self.freq = freq\n self.adjust = adjust\n self.ignore_na = ignore_na\n self.axis = axis\n\n @property\n def _constructor(self):\n return EWM\n\n @Substitution(name='ewm')\n @Appender(SelectionMixin._see_also_template)\n @Appender(SelectionMixin._agg_doc)\n def aggregate(self, arg, *args, **kwargs):\n return super(EWM, self).aggregate(arg, *args, **kwargs)\n\n agg = aggregate\n\n def _apply(self, func, how=None, **kwargs):\n \"\"\"Rolling statistical measure using supplied function. 
Designed to be\n used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : string/callable to apply\n how : string, default to None (DEPRECATED)\n how to resample\n\n Returns\n -------\n y : type of input argument\n\n \"\"\"\n blocks, obj = self._create_blocks(how=how)\n results = []\n for b in blocks:\n try:\n values = self._prep_values(b.values)\n except TypeError:\n results.append(b.values.copy())\n continue\n\n if values.size == 0:\n results.append(values.copy())\n continue\n\n # if we have a string function name, wrap it\n if isinstance(func, compat.string_types):\n if not hasattr(algos, func):\n raise ValueError(\"we do not support this function algos.{0}\".format(func))\n\n cfunc = getattr(algos, func)\n def func(arg):\n return cfunc(arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods))\n\n results.append(np.apply_along_axis(func, self.axis, values))\n\n return self._wrap_results(results, blocks, obj)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n def mean(self, **kwargs):\n \"\"\"exponential weighted moving average\"\"\"\n return self._apply('ewma', **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def std(self, bias=False, **kwargs):\n \"\"\"exponential weighted moving stddev\"\"\"\n return _zsqrt(self.var(bias=bias, **kwargs))\n vol=std\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_bias_template)\n def var(self, bias=False, **kwargs):\n \"\"\"exponential weighted moving variance\"\"\"\n def f(arg):\n return algos.ewmcov(arg,\n arg,\n self.com,\n int(self.adjust),\n int(self.ignore_na),\n int(self.min_periods),\n int(bias))\n\n return self._apply(f, **kwargs)\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def cov(self, other=None, pairwise=None, bias=False, **kwargs):\n \"\"\"exponential weighted sample covariance\"\"\"\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n\n def _get_cov(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n cov = algos.ewmcov(X._prep_values(),\n Y._prep_values(),\n self.com,\n int(self.adjust),\n int(self.ignore_na),\n int(self.min_periods),\n int(bias))\n return X._wrap_result(cov)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))\n\n @Substitution(name='ewm')\n @Appender(_doc_template)\n @Appender(_pairwise_template)\n def corr(self, other=None, pairwise=None, **kwargs):\n \"\"\"exponential weighted sample correlation\"\"\"\n if other is None:\n other = self._selected_obj\n pairwise = True if pairwise is None else pairwise # only default unset\n other = self._shallow_copy(other)\n\n def _get_corr(X, Y):\n X = self._shallow_copy(X)\n Y = self._shallow_copy(Y)\n def _cov(x, y):\n return algos.ewmcov(x, y, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), 1)\n\n x_values = X._prep_values()\n y_values = Y._prep_values()\n cov = _cov(x_values, y_values)\n x_var = _cov(x_values, x_values)\n y_var = _cov(y_values, y_values)\n corr = cov / _zsqrt(x_var * y_var)\n return X._wrap_result(corr)\n\n return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))\n\n########################\n##### Helper Funcs #####\n########################\n\ndef _flex_binary_moment(arg1, arg2, f, pairwise=False):\n from pandas import Series, DataFrame, Panel\n if 
not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and\n isinstance(arg2,(np.ndarray, Series, DataFrame))):\n raise TypeError(\"arguments to moment function must be of type \"\n \"np.ndarray/Series/DataFrame\")\n\n if isinstance(arg1, (np.ndarray, Series)) and \\\n isinstance(arg2, (np.ndarray,Series)):\n X, Y = _prep_binary(arg1, arg2)\n return f(X, Y)\n\n elif isinstance(arg1, DataFrame):\n def dataframe_from_int_dict(data, frame_template):\n result = DataFrame(data, index=frame_template.index)\n if len(result.columns) > 0:\n result.columns = frame_template.columns[result.columns]\n return result\n\n results = {}\n if isinstance(arg2, DataFrame):\n if pairwise is False:\n if arg1 is arg2:\n # special case in order to handle duplicate column names\n for i, col in enumerate(arg1.columns):\n results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])\n return dataframe_from_int_dict(results, arg1)\n else:\n if not arg1.columns.is_unique:\n raise ValueError(\"'arg1' columns are not unique\")\n if not arg2.columns.is_unique:\n raise ValueError(\"'arg2' columns are not unique\")\n X, Y = arg1.align(arg2, join='outer')\n X = X + 0 * Y\n Y = Y + 0 * X\n res_columns = arg1.columns.union(arg2.columns)\n for col in res_columns:\n if col in X and col in Y:\n results[col] = f(X[col], Y[col])\n return DataFrame(results, index=X.index, columns=res_columns)\n elif pairwise is True:\n results = defaultdict(dict)\n for i, k1 in enumerate(arg1.columns):\n for j, k2 in enumerate(arg2.columns):\n if j<i and arg2 is arg1:\n # Symmetric case\n results[i][j] = results[j][i]\n else:\n results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))\n p = Panel.from_dict(results).swapaxes('items', 'major')\n if len(p.major_axis) > 0:\n p.major_axis = arg1.columns[p.major_axis]\n if len(p.minor_axis) > 0:\n p.minor_axis = arg2.columns[p.minor_axis]\n return p\n else:\n raise ValueError(\"'pairwise' is not True/False\")\n else:\n results = {}\n for i, col in enumerate(arg1.columns):\n results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))\n return dataframe_from_int_dict(results, arg1)\n\n else:\n return _flex_binary_moment(arg2, arg1, f)\n\ndef _get_center_of_mass(com, span, halflife):\n valid_count = len([x for x in [com, span, halflife] if x is not None])\n if valid_count > 1:\n raise Exception(\"com, span, and halflife are mutually exclusive\")\n\n if span is not None:\n # convert span to center of mass\n com = (span - 1) / 2.\n elif halflife is not None:\n # convert halflife to center of mass\n decay = 1 - np.exp(np.log(0.5) / halflife)\n com = 1 / decay - 1\n elif com is None:\n raise Exception(\"Must pass one of com, span, or halflife\")\n\n return float(com)\n\ndef _offset(window, center):\n if not com.is_integer(window):\n window = len(window)\n offset = (window - 1) / 2. 
if center else 0\n try:\n return int(offset)\n except:\n return offset.astype(int)\n\ndef _require_min_periods(p):\n def _check_func(minp, window):\n if minp is None:\n return window\n else:\n return max(p, minp)\n return _check_func\n\ndef _use_window(minp, window):\n if minp is None:\n return window\n else:\n return minp\n\ndef _zsqrt(x):\n result = np.sqrt(x)\n mask = x < 0\n\n from pandas import DataFrame\n if isinstance(x, DataFrame):\n if mask.values.any():\n result[mask] = 0\n else:\n if mask.any():\n result[mask] = 0\n\n return result\n\ndef _prep_binary(arg1, arg2):\n if not isinstance(arg2, type(arg1)):\n raise Exception('Input arrays must be of the same type!')\n\n # mask out values, this also makes a common index...\n X = arg1 + 0 * arg2\n Y = arg2 + 0 * arg1\n\n return X, Y\n\ndef _validate_win_type(win_type, kwargs):\n # may pop from kwargs\n arg_map = {'kaiser': ['beta'],\n 'gaussian': ['std'],\n 'general_gaussian': ['power', 'width'],\n 'slepian': ['width']}\n if win_type in arg_map:\n return tuple([win_type] +\n _pop_args(win_type, arg_map[win_type], kwargs))\n return win_type\n\n\ndef _pop_args(win_type, arg_names, kwargs):\n msg = '%s window requires %%s' % win_type\n all_args = []\n for n in arg_names:\n if n not in kwargs:\n raise ValueError(msg % n)\n all_args.append(kwargs.pop(n))\n return all_args\n\n#############################\n##### top-level exports #####\n#############################\n\ndef rolling(obj, win_type=None, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n if win_type is not None:\n return Window(obj, win_type=win_type, **kwds)\n\n return Rolling(obj, **kwds)\nrolling.__doc__ = Window.__doc__\n\ndef expanding(obj, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return Expanding(obj, **kwds)\nexpanding.__doc__ = Expanding.__doc__\n\ndef ewm(obj, **kwds):\n from pandas import Series, DataFrame\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError('invalid type: %s' % type(obj))\n\n return EWM(obj, **kwds)\newm.__doc__ = EWM.__doc__\n",
"path": "pandas/core/window.py"
}
] | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 4ce2ce5b69cb4..3496e9eea834c 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -463,6 +463,7 @@ Bug Fixes
- Bug in ``pd.read_clipboard`` and ``pd.to_clipboard`` functions not supporting Unicode; upgrade included ``pyperclip`` to v1.5.15 (:issue:`9263`)
- Bug in ``DataFrame.query`` containing an assignment (:issue:`8664`)
+- Bug in ``from_msgpack`` where ``__contains__()`` fails for columns of the unpacked ``DataFrame``, if the ``DataFrame`` has object columns. (:issue: `11880`)
- Bug in timezone info lost when broadcasting scalar datetime to ``DataFrame`` (:issue:`11682`)
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 1e5816e898baa..ce8fda9e932bc 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -965,6 +965,7 @@ def corr(self, other=None, pairwise=None, **kwargs):
Use a standard estimation bias correction
"""
+
class EWM(_Rolling):
r"""
Provides exponential weighted functions
diff --git a/pandas/hashtable.pyx b/pandas/hashtable.pyx
index 58e9d64921e0d..a5fcbd3f2d0f1 100644
--- a/pandas/hashtable.pyx
+++ b/pandas/hashtable.pyx
@@ -342,7 +342,7 @@ cdef class Int64HashTable(HashTable):
self.table.vals[k] = <Py_ssize_t> values[i]
@cython.boundscheck(False)
- def map_locations(self, int64_t[:] values):
+ def map_locations(self, ndarray[int64_t, ndim=1] values):
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
@@ -570,7 +570,7 @@ cdef class Float64HashTable(HashTable):
return np.asarray(labels)
@cython.boundscheck(False)
- def map_locations(self, float64_t[:] values):
+ def map_locations(self, ndarray[float64_t, ndim=1] values):
cdef:
Py_ssize_t i, n = len(values)
int ret = 0
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index d6a9feb1bd8f4..61b24c858b60d 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -9,8 +9,8 @@
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
- date_range, period_range, Index, SparseSeries, SparseDataFrame,
- SparsePanel)
+ date_range, period_range, Index)
+from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean, assert_index_equal,
assert_series_equal,
@@ -23,7 +23,19 @@
nan = np.nan
-from pandas.io.packers import to_msgpack, read_msgpack
+try:
+ import blosc # NOQA
+except ImportError:
+ _BLOSC_INSTALLED = False
+else:
+ _BLOSC_INSTALLED = True
+
+try:
+ import zlib # NOQA
+except ImportError:
+ _ZLIB_INSTALLED = False
+else:
+ _ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
@@ -483,6 +495,14 @@ class TestCompression(TestPackers):
"""
def setUp(self):
+ try:
+ from sqlalchemy import create_engine
+ self._create_sql_engine = create_engine
+ except ImportError:
+ self._SQLALCHEMY_INSTALLED = False
+ else:
+ self._SQLALCHEMY_INSTALLED = True
+
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
@@ -508,14 +528,56 @@ def test_compression_zlib(self):
assert_frame_equal(self.frame[k], i_rec[k])
def test_compression_blosc(self):
- try:
- import blosc
- except ImportError:
+ if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
i_rec = self.encode_decode(self.frame, compress='blosc')
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
+ def test_readonly_axis_blosc(self):
+ # GH11880
+ if not _BLOSC_INSTALLED:
+ raise nose.SkipTest('no blosc')
+ df1 = DataFrame({'A': list('abcd')})
+ df2 = DataFrame(df1, index=[1., 2., 3., 4.])
+ self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
+ self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
+
+ def test_readonly_axis_zlib(self):
+ # GH11880
+ df1 = DataFrame({'A': list('abcd')})
+ df2 = DataFrame(df1, index=[1., 2., 3., 4.])
+ self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
+ self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
+
+ def test_readonly_axis_blosc_to_sql(self):
+ # GH11880
+ if not _BLOSC_INSTALLED:
+ raise nose.SkipTest('no blosc')
+ if not self._SQLALCHEMY_INSTALLED:
+ raise nose.SkipTest('no sqlalchemy')
+ expected = DataFrame({'A': list('abcd')})
+ df = self.encode_decode(expected, compress='blosc')
+ eng = self._create_sql_engine("sqlite:///:memory:")
+ df.to_sql('test', eng, if_exists='append')
+ result = pandas.read_sql_table('test', eng, index_col='index')
+ result.index.names = [None]
+ assert_frame_equal(expected, result)
+
+ def test_readonly_axis_zlib_to_sql(self):
+ # GH11880
+ if not _ZLIB_INSTALLED:
+ raise nose.SkipTest('no zlib')
+ if not self._SQLALCHEMY_INSTALLED:
+ raise nose.SkipTest('no sqlalchemy')
+ expected = DataFrame({'A': list('abcd')})
+ df = self.encode_decode(expected, compress='zlib')
+ eng = self._create_sql_engine("sqlite:///:memory:")
+ df.to_sql('test', eng, if_exists='append')
+ result = pandas.read_sql_table('test', eng, index_col='index')
+ result.index.names = [None]
+ assert_frame_equal(expected, result)
+
class TestEncoding(TestPackers):
def setUp(self):
|
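For context on the `hashtable.pyx` change in the diff above: Cython typed memoryviews such as `int64_t[:]` request a writable buffer, so the read-only arrays that zero-copy msgpack/blosc decompression can produce (e.g. via `np.frombuffer` on a bytes payload) are rejected, and the diff relaxes those signatures to the older `ndarray` buffer form. Below is a minimal pure-NumPy sketch of the failure mode; the Cython call itself is only described in comments, since exercising it needs a compiled extension:
```
import numpy as np

values = np.arange(4, dtype=np.int64)
values.flags.writeable = False  # mimic a zero-copy, read-only decompression buffer

print(memoryview(values).readonly)  # True

# Passing `values` to a Cython function declared as
#     def map_locations(self, int64_t[:] values): ...
# fails with: ValueError: buffer source array is read-only
# which is why the diff changes the signature to
#     def map_locations(self, ndarray[int64_t, ndim=1] values): ...
```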
encode__uvicorn-1099 | PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [X] The bug is reproducible against the latest release and/or `master`.
- [X] There are no similar issues or pull requests to fix it yet.
### Describe the bug
When adding the uvicorn package with Poetry, the following warning is raised:
PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
because the constraint `>=7.*` violates PEP 440 syntax. It should be either `>=7.0` or `==7.*`.
Because of this, the `click` dependency is not installed and uvicorn may not work.
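A minimal sketch of the distinction, using the `packaging` library (an assumption for illustration; Poetry enforces the same PEP 440 rule): the `.*` wildcard is only valid with the `==` and `!=` operators, so `>=7.*` is rejected while `>=7.0` and `==7.*` parse cleanly.
```
from packaging.specifiers import InvalidSpecifier, Specifier

for spec in (">=7.0", "==7.*", ">=7.*"):
    try:
        Specifier(spec)
        print(f"{spec!r}: valid PEP 440 specifier")
    except InvalidSpecifier:
        print(f"{spec!r}: rejected (wildcards only combine with == and !=)")
```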
### To reproduce
Just execute `poetry add uvicorn` in any environment.
### Expected behavior
To install `uvicorn` correctly together with all its dependencies.
### Actual behavior
The `click` dependency is not installed and uvicorn may not work.
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.*\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 1763c7764..c34317bb5 100755
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ def get_packages(package):
minimal_requirements = [
"asgiref>=3.4.0",
- "click>=7.*",
+ "click>=7.0",
"h11>=0.8",
"typing-extensions;" + env_marker_below_38,
]
|
open-telemetry__opentelemetry-python-3650 | Non-executable files with shebangs in the repository
**Describe your environment**
(Nothing relevant to describe)
**Steps to reproduce**
```
$ rg -l '^#!' | xargs ls -l
-rwxr-xr-x. 1 ben ben 1420 Jul 5 2023 docs/examples/django/manage.py
-rw-r--r--. 1 ben ben 1300 Jul 5 2023 docs/examples/opencensus-exporter-tracer/collector.py
-rwxr-xr-x. 1 ben ben 1485 Jul 5 2023 docs/examples/opentracing/main.py
-rwxr-xr-x. 1 ben ben 853 Jul 13 2023 scripts/build.sh
-rwxr-xr-x. 1 ben ben 1163 Jan 22 10:06 scripts/coverage.sh
-rwxr-xr-x. 1 ben ben 20741 Jul 13 2023 scripts/eachdist.py
-rwxr-xr-x. 1 ben ben 215 Jul 5 2023 scripts/generate_website_docs.sh
-rwxr-xr-x. 1 ben ben 2377 Jan 22 10:06 scripts/proto_codegen.sh
-rwxr-xr-x. 1 ben ben 1928 Jan 22 10:06 scripts/semconv/generate.sh
-rwxr-xr-x. 1 ben ben 945 Jul 5 2023 scripts/tracecontext-integration-test.sh
-rw-r--r--. 1 ben ben 2519 Jan 22 11:43 tests/w3c_tracecontext_validation_server.py
```
Note that two files have shebang lines (`#!`) but do not have the executable bit set, which makes the shebang lines useless.
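A minimal sketch (a hypothetical helper, not part of the repository) that reproduces this check in pure Python, flagging regular files whose first bytes are `#!` but whose owner-executable bit is unset:
```
import stat
from pathlib import Path

for path in sorted(Path(".").rglob("*")):
    if path.is_symlink() or not path.is_file():
        continue
    with path.open("rb") as fh:
        has_shebang = fh.readline().startswith(b"#!")
    is_executable = bool(path.stat().st_mode & stat.S_IXUSR)
    if has_shebang and not is_executable:
        print(path)  # shebang present, but the file is not executable
```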
**What is the expected behavior?**
Files should either be non-executable and have no shebang line, or be executable and have a shebang line.
**What is the actual behavior?**
The following files are not executable and have useless shebang lines:
- `docs/examples/opencensus-exporter-tracer/collector.py`
- `tests/w3c_tracecontext_validation_server.py`
**Additional context**
This is a trivial thing, but I would like to fix it in a PR – either by setting the executable bit on these two files, or by removing the useless shebang lines. Both files are “script-like,” i.e. they have `if __name__ == "__main__"` or have useful side effects. Which approach would you prefer?
| [
{
"content": "#!/usr/bin/env python3\n#\n# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.opencensus.trace_exporter import (\n OpenCensusSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nexporter = OpenCensusSpanExporter(endpoint=\"localhost:55678\")\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer(__name__)\nspan_processor = BatchSpanProcessor(exporter)\n\ntrace.get_tracer_provider().add_span_processor(span_processor)\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n",
"path": "docs/examples/opencensus-exporter-tracer/collector.py"
}
] | [
{
"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.opencensus.trace_exporter import (\n OpenCensusSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nexporter = OpenCensusSpanExporter(endpoint=\"localhost:55678\")\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer(__name__)\nspan_processor = BatchSpanProcessor(exporter)\n\ntrace.get_tracer_provider().add_span_processor(span_processor)\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n",
"path": "docs/examples/opencensus-exporter-tracer/collector.py"
}
] | diff --git a/docs/examples/opencensus-exporter-tracer/collector.py b/docs/examples/opencensus-exporter-tracer/collector.py
index 5c98cc4ce9f..cd33c89617b 100644
--- a/docs/examples/opencensus-exporter-tracer/collector.py
+++ b/docs/examples/opencensus-exporter-tracer/collector.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-#
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/w3c_tracecontext_validation_server.py b/tests/w3c_tracecontext_validation_server.py
index d6c468025e4..5c47708ee1c 100644
--- a/tests/w3c_tracecontext_validation_server.py
+++ b/tests/w3c_tracecontext_validation_server.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-#
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
|
dotkom__onlineweb4-1931 | SSO base template should extend the base template
## What kind of an issue is this?
- [x] Bug report
## What is the expected behaviour?
The template for the SSO app should extend the base template so we don't have to maintain multiple base templates.
## What is the current behaviour?
It's a custom template that looks copy-pasted from the base template.
| [
{
"content": "# -*- encoding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom oauth2_provider.views.base import AuthorizationView as DefaultAuthorizationView # flake8: noqa\nfrom oauth2_provider.views.base import RevokeTokenView, TokenView\n\n_log = logging.getLogger('SSO')\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main SSO view\n \"\"\"\n\n context = {}\n\n return render(request, 'sso/index.html', context)\n\n\nclass AuthorizationView(DefaultAuthorizationView):\n template_name = 'sso/authorize.html'\n",
"path": "apps/sso/views.py"
}
] | [
{
"content": "# -*- encoding: utf-8 -*-\n\nimport logging\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom oauth2_provider.views.base import AuthorizationView as DefaultAuthorizationView # flake8: noqa\nfrom oauth2_provider.views.base import RevokeTokenView, TokenView\n\n_log = logging.getLogger('SSO')\n\n\n@login_required\ndef index(request):\n \"\"\"\n This is the main SSO view\n \"\"\"\n\n context = {}\n\n return render(request, 'sso/authorize.html', context)\n\n\nclass AuthorizationView(DefaultAuthorizationView):\n template_name = 'sso/authorize.html'\n",
"path": "apps/sso/views.py"
}
] | diff --git a/apps/sso/views.py b/apps/sso/views.py
index 74494925b..b7d7fdf55 100644
--- a/apps/sso/views.py
+++ b/apps/sso/views.py
@@ -18,7 +18,7 @@ def index(request):
context = {}
- return render(request, 'sso/index.html', context)
+ return render(request, 'sso/authorize.html', context)
class AuthorizationView(DefaultAuthorizationView):
diff --git a/templates/base.html b/templates/base.html
index 96ced3995..7f2a7a961 100755
--- a/templates/base.html
+++ b/templates/base.html
@@ -7,7 +7,7 @@
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
- <title>{% block title %}{% endblock %}</title>
+ <title>{% block title %}Online, linjeforeningen for informatikk ved NTNU{% endblock %}</title>
<meta name="description" content="Online er linjeforeningen for informatikkstudentene ved NTNU. Våre studenter går Bachelor- eller Mastergradstudium i informatikk.">
<link rel="icon" href="{{ STATIC_URL }}img/favicon.png">
<link rel="icon" type="image/png" href="{{ STATIC_URL }}mimg/favicon-32x32.png" sizes="32x32">
diff --git a/templates/frontpage.html b/templates/frontpage.html
index 5d014b8d8..aca1cd01c 100755
--- a/templates/frontpage.html
+++ b/templates/frontpage.html
@@ -4,8 +4,6 @@
{% load render_bundle from webpack_loader %}
-{% block title %}Online, linjeforeningen for informatikk ved NTNU{% endblock title %}
-
{% block styles %}
{{ block.super }}
{% render_bundle 'frontpage' 'css' %}
diff --git a/templates/oidc_provider/authorize.html b/templates/oidc_provider/authorize.html
index f5fc0e99b..e53464246 100644
--- a/templates/oidc_provider/authorize.html
+++ b/templates/oidc_provider/authorize.html
@@ -1,42 +1,45 @@
-{% extends "sso/index.html" %}
+{% extends "sso/authorize.html" %}
+
{% block content %}
-<div id="sso-panel">
-{% if not error %}
- <div id="sso-panel-header">
- <h3>Ekstern autentisering</h3>
- </div>
- <div id="sso-panel-body">
- <h4>Tjenesten {{ client.name }} ønsker tilgang til denne informasjonen om deg.</h4>
- <form id="sso-authorization-form" method="post" action="{% url 'oidc_provider:authorize' %}">
- {% csrf_token %}
+<section id="sso-container">
+ <div id="sso-panel">
+ {% if not error %}
+ <div id="sso-panel-header">
+ <h3>Ekstern autentisering</h3>
+ </div>
+ <div id="sso-panel-body">
+ <h4>Tjenesten {{ client.name }} ønsker tilgang til denne informasjonen om deg.</h4>
+ <form id="sso-authorization-form" method="post" action="{% url 'oidc_provider:authorize' %}">
+ {% csrf_token %}
- {{ hidden_inputs }}
+ {{ hidden_inputs }}
- <p>Tjenesten behøver følgende tilganger:</p>
- <ul class="sso-permissions">
- {% for scope in scopes %}
- <li>
- <strong>{{ scope.name }}</strong>
+ <p>Tjenesten behøver følgende tilganger:</p>
+ <ul class="sso-permissions">
+ {% for scope in scopes %}
+ <li>
+ <strong>{{ scope.name }}</strong>
+ <br />
+ <i>{{ scope.description }}</i>
+ </li>
+ {% endfor %}
+ </ul>
<br />
- <i>{{ scope.description }}</i>
- </li>
- {% endfor %}
- </ul>
- <br />
- <div id="sso-control-buttons">
- <input type="submit" class="btn btn-large btn-success" name="allow" value="Godta" />
- <input type="submit" class="btn btn-large btn-danger pull-right" value="Avslå" />
- </div>
- </form>
- </div>
- <p class="text-center"><small><i class="glyphicon glyphicon-lock"></i> Ekstern autentisering benytter OpenID Connect og OAuth 2.0 gjennom TLS</small></p>
-{% else %}
- <div id="sso-panel-header">
- <h3>En feil skjedde</h3>
- </div>
- <div id="sso-panel-body">
- {{ error.description }}
+ <div id="sso-control-buttons">
+ <input type="submit" class="btn btn-large btn-success" name="allow" value="Godta" />
+ <input type="submit" class="btn btn-large btn-danger pull-right" value="Avslå" />
+ </div>
+ </form>
+ </div>
+ <p class="text-center"><small><i class="glyphicon glyphicon-lock"></i> Ekstern autentisering benytter OpenID Connect og OAuth 2.0 gjennom TLS</small></p>
+ {% else %}
+ <div id="sso-panel-header">
+ <h3>En feil skjedde</h3>
+ </div>
+ <div id="sso-panel-body">
+ {{ error.description }}
+ </div>
+ {% endif %}
</div>
-{% endif %}
-</div>
+</section>
{% endblock %}
diff --git a/templates/sso/authorize.html b/templates/sso/authorize.html
index f69fbda82..df8b471d0 100644
--- a/templates/sso/authorize.html
+++ b/templates/sso/authorize.html
@@ -1,43 +1,60 @@
-{% extends "sso/index.html" %}
+{% extends 'base.html' %}
+
+{% load chunks %}
+
+{% load render_bundle from webpack_loader %}
+
+{% block styles %}
+ {{ block.super }}
+ {% render_bundle 'sso' 'css' %}
+{% endblock %}
+
+{% block js %}
+ {{ block.super }}
+ {% render_bundle 'sso' 'js' %}
+{% endblock %}
+
{% block content %}
-<div id="sso-panel">
-{% if not error %}
- <div id="sso-panel-header">
- <h3>Ekstern autentisering</h3>
- </div>
- <div id="sso-panel-body">
- <h4>Tjenesten {{ application.name }} ønsker innloggingsinformasjon.</h4>
- <form id="sso-authorization-form" method="post">
- {% csrf_token %}
- {% for field in form %}
- {% if field.is_hidden %}
- {{ field }}
- {% endif %}
- {% endfor %}
- <p>Tjenesten behøver følgende tilganger:</p>
- <ul class="sso-permissions">
- {% for scope in scopes_descriptions %}
- <li>{{ scope }}</li>
+<section id="sso-container">
+ <div id="sso-panel">
+ {% if not error %}
+ <div id="sso-panel-header">
+ <h3>Ekstern autentisering</h3>
+ </div>
+ <div id="sso-panel-body">
+ <h4>Tjenesten {{ application.name }} ønsker innloggingsinformasjon.</h4>
+ <form id="sso-authorization-form" method="post">
+ {% csrf_token %}
+ {% for field in form %}
+ {% if field.is_hidden %}
+ {{ field }}
+ {% endif %}
{% endfor %}
- </ul>
- <br />
- <div id="sso-control-buttons">
- <input type="submit" class="btn btn-large btn-success" name="allow" value="Godta" />
- <input type="submit" class="btn btn-large btn-danger pull-right" value="Avslå" />
- </div>
- </form>
- </div>
- <p class="text-center"><small><i class="fa fa-lock"></i> Ekstern autentisering benytter OAuth 2.0 gjennom TLS</small></p>
-{% else %}
- <div id="sso-panel-header">
- <h3>En feil skjedde</h3>
- </div>
- <div id="sso-panel-body">
- {{ error.description }}
+ <p>Tjenesten behøver følgende tilganger:</p>
+ <ul class="sso-permissions">
+ {% for scope in scopes_descriptions %}
+ <li>{{ scope }}</li>
+ {% endfor %}
+ </ul>
+ <br />
+ <div id="sso-control-buttons">
+ <input type="submit" class="btn btn-large btn-success" name="allow" value="Godta" />
+ <input type="submit" class="btn btn-large btn-danger pull-right" value="Avslå" />
+ </div>
+ </form>
+ </div>
+ <p class="text-center"><small><i class="fa fa-lock"></i> Ekstern autentisering benytter OAuth 2.0 gjennom TLS</small></p>
+ {% else %}
+ <div id="sso-panel-header">
+ <h3>En feil skjedde</h3>
+ </div>
+ <div id="sso-panel-body">
+ {{ error.description }}
+ </div>
+ {% endif %}
</div>
-{% endif %}
-</div>
-<br />
-<br />
-<p class="text-center">Les mer om bruken av <i><a href="/wiki/">Ekstern Autentisering</a></i> på wikien.</p>
+ <br />
+ <br />
+ <p class="text-center">Les mer om bruken av <i><a href="/wiki/">Ekstern Autentisering</a></i> på wikien.</p>
+</section>
{% endblock %}
diff --git a/templates/sso/index.html b/templates/sso/index.html
deleted file mode 100644
index 09777a664..000000000
--- a/templates/sso/index.html
+++ /dev/null
@@ -1,198 +0,0 @@
-{% load render_bundle from webpack_loader %}
-<!DOCTYPE html>
-<html>
-<head>
- <meta charset="utf-8">
- <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
- <title>{% block title %}{% endblock %}</title>
- <meta name="description" content="Online er linjeforeningen for informatikkstudentene ved NTNU. Våre studenter går Bachelor- eller Mastergradstudium i informatikk.">
- <link rel="icon" href="{{ STATIC_URL }}img/favicon.png">
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- {% block styles %}
- {% render_bundle 'common' 'css' %}
- {% render_bundle 'core' 'css' %}
- {% render_bundle 'sso' 'css' %}
- {% endblock %}
-</head>
-<body>
- <nav id="mainnav">
- <div class="container">
- <ul class="mn-collapse">
- <li>
- <button id="mainnav-button">
- <svg x="0px" y="0px" width="100%" viewBox="0 0 96 96" class="mn-svg" enable-background="new 0 0 96 96">
- <rect width="32" height="4" x="32" y="46" class="mn-svg-rect-top"></rect>
- </svg>
- <svg x="0px" y="0px" width="100%" viewBox="0 0 96 96" class="mn-svg" enable-background="new 0 0 96 96">
- <rect width="32" height="4" x="32" y="46" class="mn-svg-rect-bottom"></rect>
- </svg>
- </button>
- </li>
- <li>
- <a href="{% url 'home' %}">
- <img src="{{ STATIC_URL }}img/online_logo.svg" alt="Online" class="online-logo">
- </a>
- </li>
- </ul>
- <ul class="mn-nav">
- <li><a class="online-logo-link" href="{% url 'home' %}"><img src="{{ STATIC_URL }}img/online_logo.svg" alt="Online"></a></li>
- <li><a href="{% url 'events_index' %}">Arkiv</a></li>
- <li><a href="{% url 'careeropportunity_index' %}">Karriere</a></li>
- <li><a href="{% url 'resourcecenter_index' %}">Ressurser</a></li>
- <li><a href="/wiki/">Wiki</a></li>
- </ul>
- <ul class="mn-user-nav">
- {% if user.is_authenticated %}
- <li>
- <a href="#login_menu" class="dropdown-toggle dropdown-signin login glyphicon glyphicon-user" data-toggle="dropdown"></a>
- <span class="username_menu hidden-xs hidden-sm hidden-md">{{ user.username }}</span>
-
- <ul class="dropdown-menu login-box" role="menu">
-
- {% if user.is_staff %}
- <li><a href="/admin/">Administrasjon</a></li>
- {% endif %}
- <li><a href="{% url 'profiles' %}">Min side<span class="hidden-lg">: {{ user.username }}</span></a></li>
- <li><a href="{% url 'profiles_user_search' %}">Finn brukere</a></li>
- <li class="divider"></li>
- <li><a href="{% url 'auth_logout' %}">Logg ut</a></li>
- </ul>
- </li>
- {% else %}
- <li>
- <a href="#login_menu" class="dropdown-toggle dropdown-signin login glyphicon glyphicon-lock" data-toggle="dropdown"></a>
- <ul class="dropdown-menu login-box" role="menu">
- <li>
- <form class="navbar-form" method="POST" action="{% url 'auth_login' %}">
- {% csrf_token %}
- <fieldset class="textbox">
- <div class="input-group">
- <label for="id_username">Brukernavn</label>
- <input type="text" name="username" id="id_username" class="form-control">
- </div>
- <div class="input-group">
- <label for="id_password">Passord</label>
- <input type="password" name="password" id="id_password" class="form-control">
- </div>
- </fieldset>
- <input type="hidden" name="next" value="{{ request.get_full_path }}" />
- <button type="submit" class="btn btn-primary">Logg inn</button>
- </form>
- <div id="login-form-btn-group">
- <a class="btn btn-default" href="{% url 'auth_register' %}">Registrer</a>
- <a class="btn btn-default" href="{% url 'auth_recover' %}">Glemt passord</a>
- </div>
- </li>
- </ul>
- </li>
- {% endif %}
- </ul>
-
- <div id="main-sponsor">
- <a href="http://www.soprasteria.no/">
- <img class="ms-img" src="{{ STATIC_URL }}img/soprasteria.svg" alt="Hovedsamarbeidspartner - Sopra Steria">
- </a>
- <span class="ms-span">Hovedsamarbeidspartner</span>
- </div>
- </ul>
- </div>
- </nav>
- <section id="sso-container">
- {% block content %}{% endblock %}
- </section>
- <section id="footer">
- <div class="container">
- <div class="row">
- <div class="col-xs-12 col-sm-6 col-md-4">
- <div class="row row-space">
- <div class="col-md-12 footer-social">
- <a href="http://facebook.com/LinjeforeningenOnline" class="socialIcon-link">
- <img src="{{ STATIC_URL }}img/social/facebook.svg" alt="Facebook">
- </a>
- <a href="http://twitter.com/Online_NTNU" class="socialIcon-link">
- <img src="{{ STATIC_URL }}img/social/twitter.svg" alt="Twitter">
- </a>
- <a href="https://www.instagram.com/online_ntnu/" class="socialIcon-link">
- <img src="{{ STATIC_URL }}img/social/instagram.svg" alt="Instagram">
- </a>
- <a href="https://www.github.com/dotkom/" class="socialIcon-link">
- <img src="{{ STATIC_URL }}img/social/github.svg" alt="Github">
- </a>
- <a href="https://plus.google.com/107294836198591872251" class="socialIcon-link">
- <img src="{{ STATIC_URL }}img/social/gpluss.svg" alt="Google Plus">
- </a>
- </div>
- </div>
- <div class="row">
- <div class="col-md-12">
- <p>Har du funnet en feil på nettsiden?<br>Ta kontakt med <a href="mailto:[email protected]">Utviklingsteamet</a></p>
- </div>
- </div>
- </div>
- <div class="col-xs-12 col-sm-6 col-md-4">
- <div class="row">
- <div class="col-md-12">
- <div class="contact-info">
- <div class="contact-item">
- <span class="glyphicon glyphicon-briefcase"></span>992 548 045 (OrgNr)
- </div>
- <div class="contact-item">
- <span class="glyphicon glyphicon-envelope"></span>[email protected]
- </div>
- <div class="contact-item">
- <span class="glyphicon glyphicon-phone"></span>73 59 64 89
- </div>
- </div>
- </div>
- </div>
- <div class="row">
- <div class="col-xs-6 col-sm-6 col-md-6">
- <div class="address">
- <div class="address-heading">Besøksadresse <span class="glyphicon glyphicon-map-marker"></span></div>
- <div class="address-info">
- Rom 303<br>
- Høgskoleringen 3<br>
- NTNU Gløshaugen
- </div>
- </div>
- </div>
- <div class="col-xs-6 col-sm-6 col-md-6">
- <div class="address">
- <div class="address-heading">Postadresse <span class="glyphicon glyphicon-map-marker"></span></div>
- <div class="address-info">
- Online Linjeforening<br>
- Sem Sælandsv. 9<br>
- 7491 Trondheim<br>
- </div>
- </div>
- </div>
- </div>
- </div>
- <div class="col-xs-12 col-md-4">
- <div id="footer-map"></div>
- </div>
- </div>
- </div>
- </section>
-
- {% block js %}
- <script src="https://maps.googleapis.com/maps/api/js?v=3&sensor=true"></script>
- {% render_bundle 'common' 'js' %}
- {% render_bundle 'core' 'js' %}
- {% render_bundle 'sso' 'js' %}
- {% endblock %}
-
- {% if GOOGLE_ANALYTICS_KEY %}
- <script type="text/javascript">
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', '{{ GOOGLE_ANALYTICS_KEY }}']);
- _gaq.push(['_trackPageview']);
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
- </script>
- {% endif %}
- </body>
-</html>
|
dotkom__onlineweb4-1281 | Separate requirements in develop, testing and production
We should separate requirements for the various environments. What sparked this was that installing all requirements currently requires some PostgreSQL magic (`psycopg2`), and e.g. moonshine does not want Postgres just for tests to pass (or run, for that matter).
Requirements that should be removed from `requirements.txt` and added to `requirements-<env>.txt`:
#### Production requirements [src](https://github.com/dotKom/onlineweb4/blob/develop/requirements.txt#L9):
- `psycopg2` (yes, some people use Postgres in development; however, `example-local.py` uses SQLite, so most people will not need this package. *1)
#### Testing requirements [src](https://github.com/dotKom/onlineweb4/blob/develop/requirements.txt#L38):
- `cov-core`
- `coverage`
- `django-nose`
- `factory-boy`
- `lettuce`
- `nose` *2
- `nose-cov`
- `teamcity-messages`
#### Development requirements (these can be/should be fully optional)
- `django-debug-toolbar` (it's not in `requirements.txt` as of now, but maybe it should be? [wiki](https://github.com/dotKom/onlineweb4/wiki/Django-debug-toolbar))
*1: It also fails to install, and stops the `requirements.txt` installation, if people don't have the PostgreSQL dev headers ([ex.](https://online.ntnu.no/tc/viewLog.html?tab=buildLog&buildTypeId=Onlineweb4_Build&buildId=29))
*2: If we remove this one, we have to change the default runner class in the base settings, as the current test runner is nose.
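A minimal sketch of the layout this proposes (the file name and exact split are assumptions based on the lists above); pip's `-r` include keeps the environment-specific files additive on top of the base file:
```
# requirements-testing.txt (hypothetical)
-r requirements.txt
cov-core
coverage
django-nose
factory-boy
lettuce
nose
nose-cov
teamcity-messages
```
If nose is dropped instead (see *2), the base settings would also switch back to Django's built-in runner, e.g. `TEST_RUNNER = "django.test.runner.DiscoverRunner"`.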
| [
{
"content": "# -*- coding: utf8 -*-\nimport os\nimport sys\n\nimport wiki\nfrom django.contrib.messages import constants as messages\n\n# Directory that contains this file.\nPROJECT_SETTINGS_DIRECTORY = os.path.dirname(globals()['__file__'])\n# Root directory. Contains manage.py\nPROJECT_ROOT_DIRECTORY = os.path.join(PROJECT_SETTINGS_DIRECTORY, '..', '..')\n\nTEST_RUNNER = \"django_nose.NoseTestSuiteRunner\"\n\nNOSE_ARGS = ['--with-coverage', '--cover-package=apps', '--cover-html-dir=coverage', '--cover-xml', '--cover-html']\n\nDEBUG = False\n\nADMINS = (\n ('dotKom', '[email protected]'),\n)\nMANAGERS = ADMINS\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\nEMAIL_ARRKOM = '[email protected]'\nEMAIL_BEDKOM = '[email protected]'\nEMAIL_DOTKOM = '[email protected]'\nEMAIL_EKSKOM = '[email protected]'\nEMAIL_FAGKOM = '[email protected]'\nEMAIL_PROKOM = '[email protected]'\nEMAIL_TRIKOM = '[email protected]'\n\n# We will receive errors and other django messages from this email\nSERVER_EMAIL = '[email protected]'\n\nTIME_ZONE = 'Europe/Oslo'\n\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'nb'\nLANGUAGES = (\n ('nb', 'Norwegian'),\n ('en_US', 'English'),\n )\nLOCALE_PATHS = [\n os.path.join(PROJECT_ROOT_DIRECTORY, 'locale'),\n]\n\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDATETIME_FORMAT = 'N j, Y, H:i'\nSECRET_KEY = 'override-this-in-local.py'\n\n# Session cookie expires after one year\nSESSION_COOKIE_AGE = 31540000\n\n# Override this in local if you need to :)\nBASE_URL = 'https://online.ntnu.no'\n\nAUTH_USER_MODEL = 'authentication.OnlineUser'\nLOGIN_URL = '/auth/login/'\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'uploaded_media') # Override this in local.py in prod.\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'static')\nSTATIC_URL = '/static/'\n\n# Prefix for default profile picture\nDEFAULT_PROFILE_PICTURE_PREFIX = os.path.join(STATIC_URL, \"img\", \"profile_default\")\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT_DIRECTORY, 'files/static'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# Including django-wiki static files so we can import the less files.\nDJANGO_WIKI_STATIC = os.path.join(os.path.dirname(wiki.__file__), 'static')\n\nCOMPRESS_FILES = True\nCOMPRESS_OUTPUT_DIR = 'cache'\nCOMPRESS_PRECOMPILERS = (\n ('text/less', 'lessc --include-path=%s {infile} {outfile}' % DJANGO_WIKI_STATIC),\n)\n\nCOMPRESS_CSS_FILTERS = [\n 'compressor.filters.css_default.CssAbsoluteFilter',\n # We want this later on, but it breaks production so disabling for now.\n #'compressor-filters.cssmin.CSSMinFilter',\n]\nCOMPRESS_JS_FILTERS = [\n 'compressor.filters.jsmin.JSMinFilter',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': [\n os.path.join(PROJECT_ROOT_DIRECTORY, 'templates/')\n ],\n 'OPTIONS': {\n 'context_processors': [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n 
\"django.contrib.messages.context_processors.messages\",\n \"sekizai.context_processors.sekizai\", # Wiki\n \"onlineweb4.context_processors.analytics\",\n ],\n 'debug': DEBUG,\n }\n }\n]\n\nMIDDLEWARE_CLASSES = (\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'middleware.http.Http403Middleware',\n 'reversion.middleware.RevisionMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend', # this is default\n 'guardian.backends.ObjectPermissionBackend',\n)\n\nROOT_URLCONF = 'onlineweb4.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'onlineweb4.wsgi.application'\n\n# Pizzasystem settings\nPIZZA_GROUP = 'dotkom'\nPIZZA_ADMIN_GROUP = 'pizzaadmin'\n\n# Grappelli settings\nGRAPPELLI_ADMIN_TITLE = 'Onlineweb'\n\n# Guardian settings\nANONYMOUS_USER_ID = -1\nGUARDIAN_RENDER_403 = True\n\n# Django-Taggit settings\nTAGGIT_CASE_INSENSITIVE = True\n\n# List of usergroups that should be listed under \"Finn brukere\" in user profile\nUSER_SEARCH_GROUPS = [\n 16, # appKom\n 1, # arrKom\n 2, # banKom\n 3, # bedKom\n 4, # dotKom\n 5, # eksKom\n 14, # Eldsteradet\n 6, # fagKom\n 11, # Hovedstyret\n 19, # jubKom\n 10, # pangKom\n 7, # proKom\n 18, # seniorKom\n 8, # triKom\n 9, # velKom\n 24, # itex\n]\n\n#List of mailing lists, used in update_sympa_memcache_from_sql.py\nPUBLIC_LISTS = [\n \"foreninger\",\n \"linjeforeninger\",\n \"gloshaugen\",\n \"dragvoll\",\n \"masterforeninger\",\n \"kjellere\",\n \"linjeledere\",\n \"linjeredaksjoner\",\n \"glosfaddere\",\n \"sr-samarbeid\",\n \"ivt-samarbeid\",\n \"linjekor\",\n \"studentdemokratiet\"\n]\n\nINSTALLED_APPS = (\n # Third party dependencies\n 'django.contrib.humanize',\n 'django_nose',\n 'django_nyt', # Wiki\n 'mptt', # Wiki\n 'sekizai', # Wiki\n 'sorl.thumbnail', # Wiki\n 'grappelli',\n 'filebrowser',\n 'chunks',\n 'crispy_forms',\n 'django_extensions',\n 'django_dynamic_fixture',\n 'oauth2_provider',\n 'captcha',\n 'compressor',\n 'pdfdocument',\n 'watson',\n 'gunicorn',\n 'markdown_deux',\n 'djangoformsetjs',\n 'reversion',\n 'guardian',\n 'stripe',\n 'rest_framework',\n 'django_filters',\n 'taggit',\n 'taggit_serializer',\n 'corsheaders',\n 'datetimewidget',\n\n # Django apps\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n # Onlineweb 4 apps\n 'apps.api',\n 'apps.approval',\n 'apps.article',\n 'apps.authentication',\n 'apps.autoconfig',\n 'apps.careeropportunity',\n 'apps.companyprofile',\n 'apps.dashboard',\n 'apps.gallery',\n 'apps.events',\n 'apps.marks',\n 'apps.offline',\n 'apps.feedback',\n 'apps.mommy',\n 'apps.profiles',\n 'apps.genfors',\n 'apps.resourcecenter',\n 'apps.mailinglists',\n 'apps.inventory',\n 'apps.payment',\n 'apps.posters',\n 'apps.sso',\n 'apps.splash',\n 'apps.shop',\n 'apps.webshop',\n 'scripts',\n\n #External apps\n 'feedme',\n 'redwine',\n\n #Wiki\n 'wiki',\n 'wiki.plugins.attachments',\n 'wiki.plugins.images',\n 'wiki.plugins.macros',\n 'wiki.plugins.help',\n 'wiki.plugins.links',\n\n)\n\n\n# SSO 
/ OAuth2 settings\nif 'apps.sso' in INSTALLED_APPS:\n from apps.sso.settings import OAUTH2_SCOPES\n OAUTH2_PROVIDER = {\n 'SCOPES': OAUTH2_SCOPES,\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 3600,\n 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,\n }\n OAUTH2_PROVIDER_APPLICATION_MODEL = 'sso.Client'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console':{\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard'\n }\n },\n 'loggers': {\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'feedback': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'syncer': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\n# crispy forms settings\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# bootstrap messages classes\nMESSAGE_TAGS = {messages.DEBUG: 'alert-debug',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-error'}\n\n\n# Not really sure what this does.\n# Has something to do with django-dynamic-fixture bumped from 1.6.4 to 1.6.5 in order to run a syncdb with mysql/postgres (OptimusCrime)\nIMPORT_DDF_MODELS = False\n\n# Django REST framework\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication', # Allows users to be logged in to browsable API\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n 'rest_framework.parsers.FileUploadParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework.renderers.AdminRenderer',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 10\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/v1/.*$' # Enables CORS on /api/v1/ endpoints only\n\n# Remember to keep 'local' last, so it can override any setting.\nfor settings_module in ['filebrowser', 'django_wiki', 'local']: # local last\n if not os.path.exists(os.path.join(PROJECT_SETTINGS_DIRECTORY,\n settings_module + \".py\")):\n sys.stderr.write(\"Could not find settings module '%s'.\\n\" %\n settings_module)\n if settings_module == 'local':\n sys.stderr.write(\"You need to copy the settings file \"\n \"'onlineweb4/settings/example-local.py' to \"\n 
\"'onlineweb4/settings/local.py'.\\n\")\n sys.exit(1)\n try:\n exec('from .%s import *' % settings_module)\n except ImportError as e:\n print(\"Could not import settings for '%s' : %s\" % (settings_module,\n str(e)))\n",
"path": "onlineweb4/settings/base.py"
}
] | [
{
"content": "# -*- coding: utf8 -*-\nimport os\nimport sys\n\nimport wiki\nfrom django.contrib.messages import constants as messages\n\n# Directory that contains this file.\nPROJECT_SETTINGS_DIRECTORY = os.path.dirname(globals()['__file__'])\n# Root directory. Contains manage.py\nPROJECT_ROOT_DIRECTORY = os.path.join(PROJECT_SETTINGS_DIRECTORY, '..', '..')\n\nTEST_RUNNER = \"django_nose.NoseTestSuiteRunner\"\n\nNOSE_ARGS = ['--with-coverage', '--cover-package=apps', '--cover-html-dir=coverage', '--cover-xml', '--cover-html']\n\nDEBUG = False\n\nADMINS = (\n ('dotKom', '[email protected]'),\n)\nMANAGERS = ADMINS\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\nEMAIL_ARRKOM = '[email protected]'\nEMAIL_BEDKOM = '[email protected]'\nEMAIL_DOTKOM = '[email protected]'\nEMAIL_EKSKOM = '[email protected]'\nEMAIL_FAGKOM = '[email protected]'\nEMAIL_PROKOM = '[email protected]'\nEMAIL_TRIKOM = '[email protected]'\n\n# We will receive errors and other django messages from this email\nSERVER_EMAIL = '[email protected]'\n\nTIME_ZONE = 'Europe/Oslo'\n\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'nb'\nLANGUAGES = (\n ('nb', 'Norwegian'),\n ('en_US', 'English'),\n )\nLOCALE_PATHS = [\n os.path.join(PROJECT_ROOT_DIRECTORY, 'locale'),\n]\n\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDATETIME_FORMAT = 'N j, Y, H:i'\nSECRET_KEY = 'override-this-in-local.py'\n\n# Session cookie expires after one year\nSESSION_COOKIE_AGE = 31540000\n\n# Override this in local if you need to :)\nBASE_URL = 'https://online.ntnu.no'\n\nAUTH_USER_MODEL = 'authentication.OnlineUser'\nLOGIN_URL = '/auth/login/'\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'uploaded_media') # Override this in local.py in prod.\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'static')\nSTATIC_URL = '/static/'\n\n# Prefix for default profile picture\nDEFAULT_PROFILE_PICTURE_PREFIX = os.path.join(STATIC_URL, \"img\", \"profile_default\")\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT_DIRECTORY, 'files/static'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# Including django-wiki static files so we can import the less files.\nDJANGO_WIKI_STATIC = os.path.join(os.path.dirname(wiki.__file__), 'static')\n\nCOMPRESS_FILES = True\nCOMPRESS_OUTPUT_DIR = 'cache'\nCOMPRESS_PRECOMPILERS = (\n ('text/less', 'lessc --include-path=%s {infile} {outfile}' % DJANGO_WIKI_STATIC),\n)\n\nCOMPRESS_CSS_FILTERS = [\n 'compressor.filters.css_default.CssAbsoluteFilter',\n # We want this later on, but it breaks production so disabling for now.\n #'compressor-filters.cssmin.CSSMinFilter',\n]\nCOMPRESS_JS_FILTERS = [\n 'compressor.filters.jsmin.JSMinFilter',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'DIRS': [\n os.path.join(PROJECT_ROOT_DIRECTORY, 'templates/')\n ],\n 'OPTIONS': {\n 'context_processors': [\n \"django.contrib.auth.context_processors.auth\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.i18n\",\n \"django.template.context_processors.media\",\n \"django.template.context_processors.request\",\n \"django.template.context_processors.static\",\n \"django.template.context_processors.tz\",\n 
\"django.contrib.messages.context_processors.messages\",\n \"sekizai.context_processors.sekizai\", # Wiki\n \"onlineweb4.context_processors.analytics\",\n ],\n 'debug': DEBUG,\n }\n }\n]\n\nMIDDLEWARE_CLASSES = (\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'middleware.http.Http403Middleware',\n 'reversion.middleware.RevisionMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend', # this is default\n 'guardian.backends.ObjectPermissionBackend',\n)\n\nROOT_URLCONF = 'onlineweb4.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'onlineweb4.wsgi.application'\n\n# Pizzasystem settings\nPIZZA_GROUP = 'dotkom'\nPIZZA_ADMIN_GROUP = 'pizzaadmin'\n\n# Grappelli settings\nGRAPPELLI_ADMIN_TITLE = 'Onlineweb'\n\n# Guardian settings\nANONYMOUS_USER_ID = -1\nGUARDIAN_RENDER_403 = True\n\n# Django-Taggit settings\nTAGGIT_CASE_INSENSITIVE = True\n\n# List of usergroups that should be listed under \"Finn brukere\" in user profile\nUSER_SEARCH_GROUPS = [\n 16, # appKom\n 1, # arrKom\n 2, # banKom\n 3, # bedKom\n 4, # dotKom\n 5, # eksKom\n 14, # Eldsteradet\n 6, # fagKom\n 11, # Hovedstyret\n 19, # jubKom\n 10, # pangKom\n 7, # proKom\n 18, # seniorKom\n 8, # triKom\n 9, # velKom\n 24, # itex\n]\n\n#List of mailing lists, used in update_sympa_memcache_from_sql.py\nPUBLIC_LISTS = [\n \"foreninger\",\n \"linjeforeninger\",\n \"gloshaugen\",\n \"dragvoll\",\n \"masterforeninger\",\n \"kjellere\",\n \"linjeledere\",\n \"linjeredaksjoner\",\n \"glosfaddere\",\n \"sr-samarbeid\",\n \"ivt-samarbeid\",\n \"linjekor\",\n \"studentdemokratiet\"\n]\n\nINSTALLED_APPS = (\n # Third party dependencies\n 'django.contrib.humanize',\n 'django_nose',\n 'django_nyt', # Wiki\n 'mptt', # Wiki\n 'sekizai', # Wiki\n 'sorl.thumbnail', # Wiki\n 'grappelli',\n 'filebrowser',\n 'chunks',\n 'crispy_forms',\n 'django_extensions',\n 'django_dynamic_fixture',\n 'oauth2_provider',\n 'captcha',\n 'compressor',\n 'pdfdocument',\n 'watson',\n 'markdown_deux',\n 'djangoformsetjs',\n 'reversion',\n 'guardian',\n 'stripe',\n 'rest_framework',\n 'django_filters',\n 'taggit',\n 'taggit_serializer',\n 'corsheaders',\n 'datetimewidget',\n\n # Django apps\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n # Onlineweb 4 apps\n 'apps.api',\n 'apps.approval',\n 'apps.article',\n 'apps.authentication',\n 'apps.autoconfig',\n 'apps.careeropportunity',\n 'apps.companyprofile',\n 'apps.dashboard',\n 'apps.gallery',\n 'apps.events',\n 'apps.marks',\n 'apps.offline',\n 'apps.feedback',\n 'apps.mommy',\n 'apps.profiles',\n 'apps.genfors',\n 'apps.resourcecenter',\n 'apps.mailinglists',\n 'apps.inventory',\n 'apps.payment',\n 'apps.posters',\n 'apps.sso',\n 'apps.splash',\n 'apps.shop',\n 'apps.webshop',\n 'scripts',\n\n #External apps\n 'feedme',\n 'redwine',\n\n #Wiki\n 'wiki',\n 'wiki.plugins.attachments',\n 'wiki.plugins.images',\n 'wiki.plugins.macros',\n 'wiki.plugins.help',\n 'wiki.plugins.links',\n\n)\n\n\n# SSO / OAuth2 
settings\nif 'apps.sso' in INSTALLED_APPS:\n from apps.sso.settings import OAUTH2_SCOPES\n OAUTH2_PROVIDER = {\n 'SCOPES': OAUTH2_SCOPES,\n 'ACCESS_TOKEN_EXPIRE_SECONDS': 3600,\n 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,\n }\n OAUTH2_PROVIDER_APPLICATION_MODEL = 'sso.Client'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'logging.NullHandler',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console':{\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard'\n }\n },\n 'loggers': {\n 'django.security.DisallowedHost': {\n 'handlers': ['null'],\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'feedback': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n 'syncer': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n '': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n\n# crispy forms settings\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# bootstrap messages classes\nMESSAGE_TAGS = {messages.DEBUG: 'alert-debug',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-error'}\n\n\n# Not really sure what this does.\n# Has something to do with django-dynamic-fixture bumped from 1.6.4 to 1.6.5 in order to run a syncdb with mysql/postgres (OptimusCrime)\nIMPORT_DDF_MODELS = False\n\n# Django REST framework\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.SessionAuthentication', # Allows users to be logged in to browsable API\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n 'rest_framework.parsers.FileUploadParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework.renderers.AdminRenderer',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',\n 'PAGE_SIZE': 10\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/v1/.*$' # Enables CORS on /api/v1/ endpoints only\n\n# Remember to keep 'local' last, so it can override any setting.\nfor settings_module in ['filebrowser', 'django_wiki', 'local']: # local last\n if not os.path.exists(os.path.join(PROJECT_SETTINGS_DIRECTORY,\n settings_module + \".py\")):\n sys.stderr.write(\"Could not find settings module '%s'.\\n\" %\n settings_module)\n if settings_module == 'local':\n sys.stderr.write(\"You need to copy the settings file \"\n \"'onlineweb4/settings/example-local.py' to \"\n \"'onlineweb4/settings/local.py'.\\n\")\n 
sys.exit(1)\n try:\n exec('from .%s import *' % settings_module)\n except ImportError as e:\n print(\"Could not import settings for '%s' : %s\" % (settings_module,\n str(e)))\n",
"path": "onlineweb4/settings/base.py"
}
] | diff --git a/onlineweb4/settings/base.py b/onlineweb4/settings/base.py
index 565de0a3c..f7b3961b5 100644
--- a/onlineweb4/settings/base.py
+++ b/onlineweb4/settings/base.py
@@ -219,7 +219,6 @@
'compressor',
'pdfdocument',
'watson',
- 'gunicorn',
'markdown_deux',
'djangoformsetjs',
'reversion',
diff --git a/requirements-prod.txt b/requirements-prod.txt
new file mode 100644
index 000000000..e116f5559
--- /dev/null
+++ b/requirements-prod.txt
@@ -0,0 +1,2 @@
+gunicorn==19.0 # Used as a http server in production
+psycopg2 # Postgres adapter
diff --git a/requirements-testing.txt b/requirements-testing.txt
new file mode 100644
index 000000000..18c7c3290
--- /dev/null
+++ b/requirements-testing.txt
@@ -0,0 +1,8 @@
+cov-core>=1.7
+coverage>=4.0.1
+factory-boy>=1.1.3
+lettuce>=0.2.20
+nose-cov>=1.6
+teamcity-messages>=1.16
+flake8>=2.4.1
+pepper8>=1.0.4 # Transforms flake8 output to HTML report + TC messages
diff --git a/requirements.txt b/requirements.txt
index 8195d3af7..4c25ac940 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,9 +7,7 @@ django-filter==0.11.0 # Filtering for DRF
# Upstream is missing Python 3 patches
git+https://github.com/JelteF/python-memcached@patch-1#egg=python-memcached==1.58
markdown2==2.3.0 # textarea text formatting
-psycopg2 # Postgres adapter
-pytz # Timezone lib. Obsolete?
-gunicorn==19.0
+pytz # Timezone lib
icalendar==3.8.4
stripe==1.20.2
@@ -39,21 +37,12 @@ pdfdocument==3.1
Unidecode==0.4.17 # Translates every unicode symbol to the closest ascii. online_mail_usernames
django-markdown-deux==1.0.5
django-formset-js==0.4.3
-werkzeug==0.10.4
django-cors-headers==1.1.0 # Enable CORS for Nibble
django-datetime-widget==0.9.3 # Datetime widget for forms
#test tools
-cov-core==1.7
-coverage==4.0.1
-django-nose>=1.4,<1.5
-factory-boy==1.1.3
-lettuce==0.2.20
-nose==1.3.4
-nose-cov==1.6
-teamcity-messages==1.16
-flake8==2.4.1
-pepper8>=1.0.4 # Transforms flake8 output to HTML report + TC messages
+django-nose>=1.4,<1.5 # We use this test runner locally
+nose==1.3.4 # We use this test runner locally
# Frigg
requests[security]==2.8.0
@@ -71,4 +60,3 @@ django-sekizai>=0.9
# imagemagick # From package repository. Tested with version == 8:6.7.7.10.
# libjpeg-dev # From package repository.
# libpq-dev # From package repository.
-
diff --git a/tox.ini b/tox.ini
index bf7efaf24..ba0dc21bf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,7 +15,9 @@ commands =
coverage: coverage xml
[testenv:tests]
-deps = -r{toxinidir}/requirements.txt
+deps =
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/requirements-testing.txt
whitelist_externals =
/bin/cp
setenv =
|
apache__airflow-22886 | Databricks SQL fails on Python 3.10
### Apache Airflow Provider(s)
databricks
### Versions of Apache Airflow Providers
The Databricks SQL provider does not work on Python 3.10 due to `from collections import Iterable` in `databricks-sql-connector`:
* https://pypi.org/project/databricks-sql-connector/
Details of this issue are discussed in https://github.com/apache/airflow/pull/22050.
For now we will likely just exclude the tests (and mark the databricks provider as not Python 3.10 compatible). But once this is fixed (in either the 1.0.2 or the upcoming 2.0.0 version of the library), we will restore it.
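
(For reference, the after-files below show the route eventually taken: once `databricks-sql-connector` 1.0.2 shipped the fix, the provider's lower bound in `setup.py` was simply raised. A short excerpt of that pin, as it appears in the after-files; the version choice is taken from there, not independently verified:)

```python
# Excerpt of the databricks extra in setup.py after the bump
# (see the after-files below); 1.0.2 is the lower bound chosen
# in the fix, <2.0.0 keeps the next major release out.
databricks = [
    'requests>=2.26.0, <3',
    'databricks-sql-connector>=1.0.2, <2.0.0',
]
```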
### Apache Airflow version
main (development)
### Operating System
All
### Deployment
Other
### Deployment details
Just Breeze with Python 3.10
### What happened
The tests are failing:
```
self = <databricks.sql.common.ParamEscaper object at 0x7fe81c6dd6c0>
item = ['file1', 'file2', 'file3']

    def escape_item(self, item):
        if item is None:
            return 'NULL'
        elif isinstance(item, (int, float)):
            return self.escape_number(item)
        elif isinstance(item, basestring):
            return self.escape_string(item)
>       elif isinstance(item, collections.Iterable):
E       AttributeError: module 'collections' has no attribute 'Iterable'
```
https://github.com/apache/airflow/runs/5523057543?check_suite_focus=true#step:8:16781
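
For background: `Iterable` has lived in `collections.abc` since Python 3.3, and the deprecated top-level alias `collections.Iterable` was removed in Python 3.10, which is exactly what the traceback above trips over. A minimal sketch of the compatible pattern follows; the function name and quoting here are illustrative only, not the connector's actual code:

```python
# Works on Python 3.3 through 3.10+; the bare `collections.Iterable`
# alias used by databricks-sql-connector <1.0.2 does not.
from collections.abc import Iterable


def escape_item_sketch(item):
    # Strings are Iterable too, so check them before the generic case,
    # mirroring the order in ParamEscaper.escape_item above.
    if item is None:
        return 'NULL'
    if isinstance(item, str):
        return f"'{item}'"  # naive quoting, for illustration only
    if isinstance(item, Iterable):
        return '(' + ','.join(escape_item_sketch(i) for i in item) + ')'
    return str(item)


print(escape_item_sketch(['file1', 'file2', 'file3']))
# ('file1','file2','file3')
```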
### What you expected to happen
Tests succeed :)
### How to reproduce
Run `TestDatabricksSqlCopyIntoOperator` in a Python 3.10 environment.
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
| [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Setuptools patches this import to point to a vendored copy instead of the\n# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.\nfrom distutils import log # isort: skip\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.3.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python 
setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. 
\".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\npandas_requirement = 'pandas>=0.17.1'\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0',\n # watchtower 3 has been released end Jan and introduced breaking change across the board that might\n # change logging behaviour:\n # https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26\n # TODO: update to watchtower >3\n 'watchtower~=2.0.1',\n 'jsonpath_ng>=1.5.3',\n 'redshift_connector>=2.0.888',\n 'sqlalchemy_redshift>=0.8.6',\n pandas_requirement,\n 'mypy-boto3-rds>=1.21.0',\n 'mypy-boto3-redshift-data>=1.21.0',\n]\napache_beam = [\n 'apache-beam>=2.33.0',\n]\narangodb = ['python-arango>=7.3.2']\nasana = ['asana>=0.10']\nasync_packages = [\n 'eventlet>=0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=4.0.0',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault-secrets>=4.1.0,<5.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n # Azure integration uses old librarires and the limits below reflect that\n # TODO: upgrade to newer versions of all the below libraries\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n # limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9\n 'azure-storage-blob>=12.7.0,<12.9.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0',\n]\ncelery = [\n # The Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Celery for CeleryExecutor, and we also know that Kubernetes Python client follows SemVer\n # (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).\n # This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only\n # deliberately bump the version when we tested it, and we know it can be bumped.\n # Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'celery>=5.2.3,<6',\n 'flower>=1.0.0',\n]\ncgroups = [\n # Cgroupspy 0.2.2 added Python 3.10 compatibility\n 'cgroupspy>=0.2.2',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n # Dask support is limited, we need Dask team to upgrade support for dask if we were to continue\n # Supporting it in the future\n 'cloudpickle>=1.4.1',\n 'dask>=2.9.0',\n 'distributed>=2.11.1',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n 'databricks-sql-connector>=1.0.0, <2.0.0',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=8.0',\n 'sphinx>=4.4.0',\n # Docutils 0.17.0 converts generated <div class=\"section\"> into <section> and breaks our doc formatting\n # By adding a lot of whitespace separation. 
This limit can be lifted when we update our doc to handle\n # <section> tags for sections\n 'docutils<0.17.0',\n # Without this, Sphinx goes in to a _very_ large backtrack on Python 3.7,\n # even though Sphinx 4.4.0 has this but with python_version<3.10.\n 'importlib-metadata>=4.4; python_version < \"3.8\"',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi>=1.8.0',\n 'sphinx-copybutton',\n 'sphinx-jinja>=2.0',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling>=7.3',\n]\ndocker = [\n 'docker>=5.0.3',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = ['pyexasol>=0.5.1', pandas_requirement]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngithub = [\n 'pygithub',\n]\ngoogle = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n # Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries\n # Introduced breaking changes across the board. Those libraries should be upgraded soon\n # TODO: Upgrade all Google libraries that are limited to <2.0.0\n 'PyOpenSSL',\n # The Google Ads 14.0.1 breaks PIP and eager upgrade as it requires\n # google-api-core>=2.0.0 which cannot be used yet (see below comment)\n # and https://github.com/apache/airflow/issues/18705#issuecomment-933746150\n 'google-ads>=12.0.0,<14.0.1',\n 'google-api-core>=2.7.0,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-aiplatform>=1.7.1,<2.0.0',\n 'google-cloud-automl>=2.1.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-build>=3.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0',\n 'google-cloud-dataplex>=0.1.0',\n 'google-cloud-dataproc>=3.1.0',\n 'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0',\n 'google-cloud-os-login>=2.0.0',\n 'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',\n 'google-cloud-pubsub>=2.0.0',\n 'google-cloud-redis>=2.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch>=0.2',\n 'looker-sdk>=22.2.0',\n 'pandas-gbq',\n pandas_requirement,\n 'sqlalchemy-bigquery>=1.2.1',\n]\ngrpc = [\n # Google has very clear rules on what dependencies should be used. 
All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac>=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n # in case of Python 3.9 sasl library needs to be installed with version higher or equal than\n # 0.3.1 because only that version supports Python 3.9. For other Python version pyhive[hive] pulls\n # the sasl library anyway (and there sasl library version is not relevant)\n 'sasl>=0.3.1; python_version>=\"3.9\"',\n 'thrift>=0.9.2',\n pandas_requirement,\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\ninfluxdb = [\n 'influxdb-client>=1.19.0',\n pandas_requirement,\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n # The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer\n # (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow\n # So we should limit it to the next MAJOR version and only deliberately bump the version when we\n # tested it, and we know it can be bumped. 
Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'kubernetes>=21.7.0,<24',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel; platform_machine != \"aarch64\"']\nmongo = [\n 'dnspython>=1.13.0',\n # pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0\n # TODO: Upgrade to pymongo 4.0.0+\n 'pymongo>=3.6.0,<4.0.0',\n]\nmssql = [\n 'pymssql>=2.1.5; platform_machine != \"aarch64\"',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11; platform_machine != \"aarch64\"',\n 'mysqlclient>=1.3.6; platform_machine != \"aarch64\"',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\nopsgenie = [\n 'opsgenie-sdk>=2.1.5',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2',\n]\npandas = [\n pandas_requirement,\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = [\n # The limit to Presto 0.8 for unknown reason\n # TODO: Remove the limit\n 'presto-python-client>=0.7.0,<0.8',\n pandas_requirement,\n]\npsrp = [\n 'pypsrp>=0.8',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n # Redis 4 introduced a number of changes that likely need testing including mixins in redis commands\n # as well as unquoting URLS with `urllib.parse.unquote`:\n # https://github.com/redis/redis-py/blob/master/CHANGES\n # TODO: upgrade to support redis package >=4\n 'redis~=3.2',\n]\nsalesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.3.2',\n]\nstatsd = [\n 'statsd>=3.3.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot>=13.0',\n]\ntrino = [\n 'trino>=0.301.0',\n pandas_requirement,\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm>=0.4',\n]\nyandex = [\n 'yandexcloud>=0.146.0',\n]\nzendesk = [\n 'zenpy>=2.0.24',\n]\n# End dependencies group\n\n# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them\n# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\n# for details. 
Wy want to install them explicitly because we want to eventually move to\n# mypyd which does not support installing the types dynamically with --install-types\nmypy_dependencies = [\n # TODO: upgrade to newer versions of MyPy continuously as they are released\n 'mypy==0.910',\n 'types-boto',\n 'types-certifi',\n 'types-croniter',\n 'types-Deprecated',\n 'types-docutils',\n 'types-freezegun',\n 'types-paramiko',\n 'types-protobuf',\n 'types-python-dateutil',\n 'types-python-slugify',\n 'types-pytz',\n 'types-redis',\n 'types-requests',\n 'types-setuptools',\n 'types-termcolor',\n 'types-tabulate',\n 'types-toml',\n 'types-Markdown',\n 'types-PyMySQL',\n 'types-PyYAML',\n]\n\n# Dependencies needed for development only\ndevel_only = [\n 'aws_xray_sdk',\n 'beautifulsoup4>=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=8.0',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n # Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0\n # Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without\n # getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version\n # similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there\n # are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 an 3.1.2) pip enters into backtrack loop and fails\n # to resolve that github3 3.0.0 is the right version to use.\n # This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when\n # pip resolution is improved to handle the case. The issue which describes this PIP behaviour\n # and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924\n 'github3.py<3.1.0',\n 'gitpython',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto>=3.1.0',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n # Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit\n # It contains a number of potential breaking changes but none of them looks breaking our use\n # https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03\n # TODO: upgrade it and remove the limit\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n # We should attempt to remove the limit when we upgrade Pytest\n # TODO: remove the limit when we upgrade pytest\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'rich_click',\n 'semver',\n 'twine',\n 'wheel',\n 'yamllint',\n]\n\ndevel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password\ndevel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'arangodb': arangodb,\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': 
kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dbt.cloud': http_provider,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'github': github,\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'influxdb': influxdb,\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': opsgenie,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the CeleryExecutor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 
- deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. {\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-provider-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'arangodb',\n 'cloudant',\n 'databricks',\n 'exasol',\n 'influxdb',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel # devel already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n The dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"Returns packages excluded for the current python version.\"\"\"\n return []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def __init__(self, attrs=None):\n super().__init__(attrs)\n self.install_requires = None\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if 
os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes its dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n For K8s and Celery which are both \"Core executors\" and \"Providers\" we have to\n add the base dependencies to core as well, in order to mitigate problems where\n newer version of provider will have less strict limits. This should be done for both\n extras and their deprecated aliases. This is not a full protection however, the way\n extras work, this will not add \"hard\" limits for Airflow and the user who does not use\n constraints.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n else:\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None: # type: ignore\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install, # type: ignore\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs, # type: ignore\n )\n\n\nif __name__ == \"__main__\":\n do_setup() # comment\n",
"path": "setup.py"
}
] | [
{
"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Setup.py for the Airflow project.\"\"\"\nimport glob\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom copy import deepcopy\nfrom os.path import dirname, relpath\nfrom textwrap import wrap\nfrom typing import Dict, List\n\nfrom setuptools import Command, Distribution, find_namespace_packages, setup\nfrom setuptools.command.develop import develop as develop_orig\nfrom setuptools.command.install import install as install_orig\n\n# Setuptools patches this import to point to a vendored copy instead of the\n# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.\nfrom distutils import log # isort: skip\n\n# Controls whether providers are installed from packages or directly from sources\n# It is turned on by default in case of development environments such as Breeze\n# And it is particularly useful when you add a new provider and there is no\n# PyPI version to install the provider package from\nINSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'\nPY39 = sys.version_info >= (3, 9)\n\nlogger = logging.getLogger(__name__)\n\nversion = '2.3.0.dev0'\n\nmy_dir = dirname(__file__)\n\n\ndef airflow_test_suite() -> unittest.TestSuite:\n \"\"\"Test suite for Airflow tests\"\"\"\n test_loader = unittest.TestLoader()\n test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')\n return test_suite\n\n\nclass CleanCommand(Command):\n \"\"\"\n Command to tidy up the project root.\n Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.\n \"\"\"\n\n description = \"Tidy up the project root\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n @staticmethod\n def rm_all_files(files: List[str]) -> None:\n \"\"\"Remove all files from the list\"\"\"\n for file in files:\n try:\n os.remove(file)\n except Exception as e:\n logger.warning(\"Error when removing %s: %s\", file, e)\n\n def run(self) -> None:\n \"\"\"Remove temporary files and directories.\"\"\"\n os.chdir(my_dir)\n self.rm_all_files(glob.glob('./build/*'))\n self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))\n self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))\n self.rm_all_files(glob.glob('./dist/*'))\n self.rm_all_files(glob.glob('./*.egg-info'))\n self.rm_all_files(glob.glob('./docker-context-files/*.whl'))\n self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))\n\n\nclass CompileAssets(Command):\n \"\"\"\n Compile and build the frontend assets using yarn and webpack.\n Registered as cmdclass in setup() so it can be called with ``python 
setup.py compile_assets``.\n \"\"\"\n\n description = \"Compile and build the frontend assets\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"Run a command to compile and build assets.\"\"\"\n subprocess.check_call('./airflow/www/compile_assets.sh')\n\n\nclass ListExtras(Command):\n \"\"\"\n List all available extras\n Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.\n \"\"\"\n\n description = \"List available extras\"\n user_options: List[str] = []\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for options.\"\"\"\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for options.\"\"\"\n\n def run(self) -> None:\n \"\"\"List extras.\"\"\"\n print(\"\\n\".join(wrap(\", \".join(EXTRAS_REQUIREMENTS.keys()), 100)))\n\n\ndef git_version(version_: str) -> str:\n \"\"\"\n Return a version to identify the state of the underlying git repo. The version will\n indicate whether the head of the current git-backed working directory is tied to a\n release tag or not : it will indicate the former with a 'release:{version}' prefix\n and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current\n branch head. Finally, a \"dirty\" suffix is appended to indicate that uncommitted\n changes are present.\n\n :param str version_: Semver version\n :return: Found Airflow version in Git repo\n :rtype: str\n \"\"\"\n try:\n import git\n\n try:\n repo = git.Repo(os.path.join(*[my_dir, '.git']))\n except git.NoSuchPathError:\n logger.warning('.git directory not found: Cannot compute the git version')\n return ''\n except git.InvalidGitRepositoryError:\n logger.warning('Invalid .git directory not found: Cannot compute the git version')\n return ''\n except ImportError:\n logger.warning('gitpython not found: Cannot compute the git version.')\n return ''\n if repo:\n sha = repo.head.commit.hexsha\n if repo.is_dirty():\n return f'.dev0+{sha}.dirty'\n # commit is clean\n return f'.release:{version_}+{sha}'\n return 'no_git_version'\n\n\ndef write_version(filename: str = os.path.join(*[my_dir, \"airflow\", \"git_version\"])) -> None:\n \"\"\"\n Write the Semver version + git hash to file, e.g. 
\".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65\".\n\n :param str filename: Destination file to write\n \"\"\"\n text = f\"{git_version(version)}\"\n with open(filename, 'w') as file:\n file.write(text)\n\n\npandas_requirement = 'pandas>=0.17.1'\n\n# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py\n# If you change this mark you should also change ./scripts/ci/check_order_setup.py\n# Start dependencies group\nalibaba = [\n 'oss2>=2.14.0',\n]\namazon = [\n 'boto3>=1.15.0',\n # watchtower 3 has been released end Jan and introduced breaking change across the board that might\n # change logging behaviour:\n # https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26\n # TODO: update to watchtower >3\n 'watchtower~=2.0.1',\n 'jsonpath_ng>=1.5.3',\n 'redshift_connector>=2.0.888',\n 'sqlalchemy_redshift>=0.8.6',\n pandas_requirement,\n 'mypy-boto3-rds>=1.21.0',\n 'mypy-boto3-redshift-data>=1.21.0',\n]\napache_beam = [\n 'apache-beam>=2.33.0',\n]\narangodb = ['python-arango>=7.3.2']\nasana = ['asana>=0.10']\nasync_packages = [\n 'eventlet>=0.9.7',\n 'gevent>=0.13',\n 'greenlet>=0.4.9',\n]\natlas = [\n 'atlasclient>=0.1.2',\n]\nazure = [\n 'azure-batch>=8.0.0',\n 'azure-cosmos>=4.0.0',\n 'azure-datalake-store>=0.0.45',\n 'azure-identity>=1.3.1',\n 'azure-keyvault-secrets>=4.1.0,<5.0',\n 'azure-kusto-data>=0.0.43,<0.1',\n # Azure integration uses old librarires and the limits below reflect that\n # TODO: upgrade to newer versions of all the below libraries\n 'azure-mgmt-containerinstance>=1.5.0,<2.0',\n 'azure-mgmt-datafactory>=1.0.0,<2.0',\n 'azure-mgmt-datalake-store>=0.5.0',\n 'azure-mgmt-resource>=2.2.0',\n # limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9\n 'azure-storage-blob>=12.7.0,<12.9.0',\n 'azure-storage-common>=2.1.0',\n 'azure-storage-file>=2.1.0',\n]\ncassandra = [\n 'cassandra-driver>=3.13.0',\n]\ncelery = [\n # The Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Celery for CeleryExecutor, and we also know that Kubernetes Python client follows SemVer\n # (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).\n # This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only\n # deliberately bump the version when we tested it, and we know it can be bumped.\n # Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'celery>=5.2.3,<6',\n 'flower>=1.0.0',\n]\ncgroups = [\n # Cgroupspy 0.2.2 added Python 3.10 compatibility\n 'cgroupspy>=0.2.2',\n]\ncloudant = [\n 'cloudant>=2.0',\n]\ndask = [\n # Dask support is limited, we need Dask team to upgrade support for dask if we were to continue\n # Supporting it in the future\n 'cloudpickle>=1.4.1',\n 'dask>=2.9.0',\n 'distributed>=2.11.1',\n]\ndatabricks = [\n 'requests>=2.26.0, <3',\n 'databricks-sql-connector>=1.0.2, <2.0.0',\n]\ndatadog = [\n 'datadog>=0.14.0',\n]\ndeprecated_api = [\n 'requests>=2.26.0',\n]\ndoc = [\n 'click>=8.0',\n 'sphinx>=4.4.0',\n # Docutils 0.17.0 converts generated <div class=\"section\"> into <section> and breaks our doc formatting\n # By adding a lot of whitespace separation. 
This limit can be lifted when we update our doc to handle\n # <section> tags for sections\n 'docutils<0.17.0',\n # Without this, Sphinx goes in to a _very_ large backtrack on Python 3.7,\n # even though Sphinx 4.4.0 has this but with python_version<3.10.\n 'importlib-metadata>=4.4; python_version < \"3.8\"',\n 'sphinx-airflow-theme',\n 'sphinx-argparse>=0.1.13',\n 'sphinx-autoapi>=1.8.0',\n 'sphinx-copybutton',\n 'sphinx-jinja>=2.0',\n 'sphinx-rtd-theme>=0.1.6',\n 'sphinxcontrib-httpdomain>=1.7.0',\n 'sphinxcontrib-redoc>=1.6.0',\n 'sphinxcontrib-spelling>=7.3',\n]\ndocker = [\n 'docker>=5.0.3',\n]\ndrill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']\ndruid = [\n 'pydruid>=0.4.1',\n]\nelasticsearch = [\n 'elasticsearch>7',\n 'elasticsearch-dbapi',\n 'elasticsearch-dsl>=5.0.0',\n]\nexasol = ['pyexasol>=0.5.1', pandas_requirement]\nfacebook = [\n 'facebook-business>=6.0.2',\n]\nflask_appbuilder_authlib = [\n 'authlib',\n]\ngithub = [\n 'pygithub',\n]\ngoogle = [\n # Google has very clear rules on what dependencies should be used. All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n # Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries\n # Introduced breaking changes across the board. Those libraries should be upgraded soon\n # TODO: Upgrade all Google libraries that are limited to <2.0.0\n 'PyOpenSSL',\n # The Google Ads 14.0.1 breaks PIP and eager upgrade as it requires\n # google-api-core>=2.0.0 which cannot be used yet (see below comment)\n # and https://github.com/apache/airflow/issues/18705#issuecomment-933746150\n 'google-ads>=12.0.0,<14.0.1',\n 'google-api-core>=2.7.0,<3.0.0',\n 'google-api-python-client>=1.6.0,<2.0.0',\n 'google-auth>=1.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'google-cloud-aiplatform>=1.7.1,<2.0.0',\n 'google-cloud-automl>=2.1.0',\n 'google-cloud-bigquery-datatransfer>=3.0.0',\n 'google-cloud-bigtable>=1.0.0,<2.0.0',\n 'google-cloud-build>=3.0.0',\n 'google-cloud-container>=0.1.1,<2.0.0',\n 'google-cloud-datacatalog>=3.0.0',\n 'google-cloud-dataplex>=0.1.0',\n 'google-cloud-dataproc>=3.1.0',\n 'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',\n 'google-cloud-dlp>=0.11.0,<2.0.0',\n 'google-cloud-kms>=2.0.0',\n 'google-cloud-language>=1.1.1,<2.0.0',\n 'google-cloud-logging>=2.1.1',\n 'google-cloud-memcache>=0.2.0',\n 'google-cloud-monitoring>=2.0.0',\n 'google-cloud-os-login>=2.0.0',\n 'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',\n 'google-cloud-pubsub>=2.0.0',\n 'google-cloud-redis>=2.0.0',\n 'google-cloud-secret-manager>=0.2.0,<2.0.0',\n 'google-cloud-spanner>=1.10.0,<2.0.0',\n 'google-cloud-speech>=0.36.3,<2.0.0',\n 'google-cloud-storage>=1.30,<2.0.0',\n 'google-cloud-tasks>=2.0.0',\n 'google-cloud-texttospeech>=0.4.0,<2.0.0',\n 'google-cloud-translate>=1.5.0,<2.0.0',\n 'google-cloud-videointelligence>=1.7.0,<2.0.0',\n 'google-cloud-vision>=0.35.2,<2.0.0',\n 'google-cloud-workflows>=0.1.0,<2.0.0',\n 'grpcio-gcp>=0.2.2',\n 'httpx',\n 'json-merge-patch>=0.2',\n 'looker-sdk>=22.2.0',\n 'pandas-gbq',\n pandas_requirement,\n 'sqlalchemy-bigquery>=1.2.1',\n]\ngrpc = [\n # Google has very clear rules on what dependencies should be used. 
All the limits below\n # follow strict guidelines of Google Libraries as quoted here:\n # While this issue is open, dependents of google-api-core, google-cloud-core. and google-auth\n # should preserve >1, <3 pins on these packages.\n # https://github.com/googleapis/google-cloud-python/issues/10566\n 'google-auth>=1.0.0, <3.0.0',\n 'google-auth-httplib2>=0.0.1',\n 'grpcio>=1.15.0',\n]\nhashicorp = [\n 'hvac>=0.10',\n]\nhdfs = [\n 'snakebite-py3',\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nhive = [\n 'hmsclient>=0.1.0',\n 'pyhive[hive]>=0.6.0',\n # in case of Python 3.9 sasl library needs to be installed with version higher or equal than\n # 0.3.1 because only that version supports Python 3.9. For other Python version pyhive[hive] pulls\n # the sasl library anyway (and there sasl library version is not relevant)\n 'sasl>=0.3.1; python_version>=\"3.9\"',\n 'thrift>=0.9.2',\n pandas_requirement,\n]\nhttp = [\n # The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to\n # release it as a requirement for airflow\n 'requests>=2.26.0',\n]\nhttp_provider = [\n 'apache-airflow-providers-http',\n]\ninfluxdb = [\n 'influxdb-client>=1.19.0',\n pandas_requirement,\n]\njdbc = [\n 'jaydebeapi>=1.1.1',\n]\njenkins = [\n 'python-jenkins>=1.0.0',\n]\njira = [\n 'JIRA>1.0.7',\n]\nkerberos = [\n 'pykerberos>=1.1.13',\n 'requests_kerberos>=0.10.0',\n 'thrift_sasl>=0.2.0',\n]\nkubernetes = [\n 'cryptography>=2.0.0',\n # The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core\n # Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer\n # (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow\n # So we should limit it to the next MAJOR version and only deliberately bump the version when we\n # tested it, and we know it can be bumped. 
Bumping this version should also be connected with\n # limiting minimum airflow version supported in cncf.kubernetes provider, due to the\n # potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow\n # core is not hard-limited via install-requirements, only by extra).\n 'kubernetes>=21.7.0,<24',\n]\nkylin = ['kylinpy>=2.6']\nldap = [\n 'ldap3>=2.5.1',\n 'python-ldap',\n]\nleveldb = ['plyvel; platform_machine != \"aarch64\"']\nmongo = [\n 'dnspython>=1.13.0',\n # pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0\n # TODO: Upgrade to pymongo 4.0.0+\n 'pymongo>=3.6.0,<4.0.0',\n]\nmssql = [\n 'pymssql>=2.1.5; platform_machine != \"aarch64\"',\n]\nmysql = [\n 'mysql-connector-python>=8.0.11; platform_machine != \"aarch64\"',\n 'mysqlclient>=1.3.6; platform_machine != \"aarch64\"',\n]\nneo4j = ['neo4j>=4.2.1']\nodbc = [\n 'pyodbc',\n]\nopsgenie = [\n 'opsgenie-sdk>=2.1.5',\n]\noracle = [\n 'cx_Oracle>=5.1.2',\n]\npagerduty = [\n 'pdpyras>=4.1.2',\n]\npandas = [\n pandas_requirement,\n]\npapermill = [\n 'papermill[all]>=1.2.1',\n 'scrapbook[all]',\n]\npassword = [\n 'bcrypt>=2.0.0',\n 'flask-bcrypt>=0.7.1',\n]\npinot = [\n # pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it\n # causes a problem with newer versions.\n 'pinotdb>0.1.2',\n]\nplexus = [\n 'arrow>=0.16.0',\n]\npostgres = [\n 'psycopg2-binary>=2.7.4',\n]\npresto = [\n # The limit to Presto 0.8 for unknown reason\n # TODO: Remove the limit\n 'presto-python-client>=0.7.0,<0.8',\n pandas_requirement,\n]\npsrp = [\n 'pypsrp>=0.8',\n]\nqubole = [\n 'qds-sdk>=1.10.4',\n]\nrabbitmq = [\n 'amqp',\n]\nredis = [\n # Redis 4 introduced a number of changes that likely need testing including mixins in redis commands\n # as well as unquoting URLS with `urllib.parse.unquote`:\n # https://github.com/redis/redis-py/blob/master/CHANGES\n # TODO: upgrade to support redis package >=4\n 'redis~=3.2',\n]\nsalesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]\nsamba = [\n 'smbprotocol>=1.5.0',\n]\nsegment = [\n 'analytics-python>=1.2.9',\n]\nsendgrid = [\n 'sendgrid>=6.0.0',\n]\nsentry = [\n 'blinker>=1.1',\n 'sentry-sdk>=0.8.0',\n]\nsingularity = ['spython>=0.0.56']\nslack = [\n 'slack_sdk>=3.0.0',\n]\nsnowflake = [\n 'snowflake-connector-python>=2.4.1',\n 'snowflake-sqlalchemy>=1.1.0',\n]\nspark = [\n 'pyspark',\n]\nssh = [\n 'paramiko>=2.6.0',\n 'pysftp>=0.2.9',\n 'sshtunnel>=0.3.2',\n]\nstatsd = [\n 'statsd>=3.3.0',\n]\ntableau = [\n 'tableauserverclient',\n]\ntelegram = [\n 'python-telegram-bot>=13.0',\n]\ntrino = [\n 'trino>=0.301.0',\n pandas_requirement,\n]\nvertica = [\n 'vertica-python>=0.5.1',\n]\nvirtualenv = [\n 'virtualenv',\n]\nwebhdfs = [\n 'hdfs[avro,dataframe,kerberos]>=2.0.4',\n]\nwinrm = [\n 'pywinrm>=0.4',\n]\nyandex = [\n 'yandexcloud>=0.146.0',\n]\nzendesk = [\n 'zenpy>=2.0.24',\n]\n# End dependencies group\n\n# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them\n# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\n# for details. 
Wy want to install them explicitly because we want to eventually move to\n# mypyd which does not support installing the types dynamically with --install-types\nmypy_dependencies = [\n # TODO: upgrade to newer versions of MyPy continuously as they are released\n 'mypy==0.910',\n 'types-boto',\n 'types-certifi',\n 'types-croniter',\n 'types-Deprecated',\n 'types-docutils',\n 'types-freezegun',\n 'types-paramiko',\n 'types-protobuf',\n 'types-python-dateutil',\n 'types-python-slugify',\n 'types-pytz',\n 'types-redis',\n 'types-requests',\n 'types-setuptools',\n 'types-termcolor',\n 'types-tabulate',\n 'types-toml',\n 'types-Markdown',\n 'types-PyMySQL',\n 'types-PyYAML',\n]\n\n# Dependencies needed for development only\ndevel_only = [\n 'aws_xray_sdk',\n 'beautifulsoup4>=4.7.1',\n 'black',\n 'blinker',\n 'bowler',\n 'click>=8.0',\n 'coverage',\n 'filelock',\n 'flake8>=3.6.0',\n 'flake8-colors',\n 'flaky',\n 'freezegun',\n # Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0\n # Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without\n # getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version\n # similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there\n # are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 an 3.1.2) pip enters into backtrack loop and fails\n # to resolve that github3 3.0.0 is the right version to use.\n # This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when\n # pip resolution is improved to handle the case. The issue which describes this PIP behaviour\n # and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924\n 'github3.py<3.1.0',\n 'gitpython',\n 'ipdb',\n 'jira',\n 'jsondiff',\n 'mongomock',\n 'moto>=3.1.0',\n 'parameterized',\n 'paramiko',\n 'pipdeptree',\n 'pre-commit',\n 'pypsrp',\n 'pygithub',\n 'pysftp',\n # Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit\n # It contains a number of potential breaking changes but none of them looks breaking our use\n # https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03\n # TODO: upgrade it and remove the limit\n 'pytest~=6.0',\n 'pytest-asyncio',\n 'pytest-cov',\n 'pytest-instafail',\n # We should attempt to remove the limit when we upgrade Pytest\n # TODO: remove the limit when we upgrade pytest\n 'pytest-rerunfailures~=9.1',\n 'pytest-timeouts',\n 'pytest-xdist',\n 'python-jose',\n 'pywinrm',\n 'qds-sdk>=1.9.6',\n 'pytest-httpx',\n 'requests_mock',\n 'rich_click',\n 'semver',\n 'twine',\n 'wheel',\n 'yamllint',\n]\n\ndevel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password\ndevel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs\n\n# Dict of all providers which are part of the Apache Airflow repository together with their requirements\nPROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {\n 'airbyte': http_provider,\n 'alibaba': alibaba,\n 'amazon': amazon,\n 'apache.beam': apache_beam,\n 'apache.cassandra': cassandra,\n 'apache.drill': drill,\n 'apache.druid': druid,\n 'apache.hdfs': hdfs,\n 'apache.hive': hive,\n 'apache.kylin': kylin,\n 'apache.livy': http_provider,\n 'apache.pig': [],\n 'apache.pinot': pinot,\n 'apache.spark': spark,\n 'apache.sqoop': [],\n 'arangodb': arangodb,\n 'asana': asana,\n 'celery': celery,\n 'cloudant': cloudant,\n 'cncf.kubernetes': 
kubernetes,\n 'databricks': databricks,\n 'datadog': datadog,\n 'dbt.cloud': http_provider,\n 'dingding': [],\n 'discord': [],\n 'docker': docker,\n 'elasticsearch': elasticsearch,\n 'exasol': exasol,\n 'facebook': facebook,\n 'ftp': [],\n 'github': github,\n 'google': google,\n 'grpc': grpc,\n 'hashicorp': hashicorp,\n 'http': http,\n 'imap': [],\n 'influxdb': influxdb,\n 'jdbc': jdbc,\n 'jenkins': jenkins,\n 'jira': jira,\n 'microsoft.azure': azure,\n 'microsoft.mssql': mssql,\n 'microsoft.psrp': psrp,\n 'microsoft.winrm': winrm,\n 'mongo': mongo,\n 'mysql': mysql,\n 'neo4j': neo4j,\n 'odbc': odbc,\n 'openfaas': [],\n 'opsgenie': opsgenie,\n 'oracle': oracle,\n 'pagerduty': pagerduty,\n 'papermill': papermill,\n 'plexus': plexus,\n 'postgres': postgres,\n 'presto': presto,\n 'qubole': qubole,\n 'redis': redis,\n 'salesforce': salesforce,\n 'samba': samba,\n 'segment': segment,\n 'sendgrid': sendgrid,\n 'sftp': ssh,\n 'singularity': singularity,\n 'slack': slack,\n 'snowflake': snowflake,\n 'sqlite': [],\n 'ssh': ssh,\n 'tableau': tableau,\n 'telegram': telegram,\n 'trino': trino,\n 'vertica': vertica,\n 'yandex': yandex,\n 'zendesk': zendesk,\n}\n\n# Those are all additional extras which do not have their own 'providers'\n# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries\n# but they do not have separate providers (yet?), they are merely there to add extra libraries\n# That can be used in custom python/bash operators.\nADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'apache.atlas': atlas,\n 'apache.webhdfs': webhdfs,\n}\n\n\n# Those are extras that are extensions of the 'core' Airflow. They provide additional features\n# To airflow core. They do not have separate providers because they do not have any operators/hooks etc.\nCORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {\n 'async': async_packages,\n 'celery': celery, # also has provider, but it extends the core with the CeleryExecutor\n 'cgroups': cgroups,\n 'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor\n 'dask': dask,\n 'deprecated_api': deprecated_api,\n 'github_enterprise': flask_appbuilder_authlib,\n 'google_auth': flask_appbuilder_authlib,\n 'kerberos': kerberos,\n 'ldap': ldap,\n 'leveldb': leveldb,\n 'pandas': pandas,\n 'password': password,\n 'rabbitmq': rabbitmq,\n 'sentry': sentry,\n 'statsd': statsd,\n 'virtualenv': virtualenv,\n}\n\nEXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)\n\n\ndef add_extras_for_all_providers() -> None:\n \"\"\"\n Adds extras for all providers.\n By default all providers have the same extra name as provider id, for example\n 'apache.hive' extra has 'apache.hive' provider requirement.\n \"\"\"\n for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[provider_name] = provider_requirement\n\n\ndef add_additional_extras() -> None:\n \"\"\"Adds extras for all additional extras.\"\"\"\n for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():\n EXTRAS_REQUIREMENTS[extra_name] = extra_requirement\n\n\nadd_extras_for_all_providers()\nadd_additional_extras()\n\n#############################################################################################################\n# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series\n#############################################################################################################\n\n# Dictionary of aliases from 1.10 
- deprecated in Airflow 2.*\nEXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {\n 'atlas': 'apache.atlas',\n 'aws': 'amazon',\n 'azure': 'microsoft.azure',\n 'cassandra': 'apache.cassandra',\n 'crypto': '', # All crypto requirements are installation requirements of core Airflow\n 'druid': 'apache.druid',\n 'gcp': 'google',\n 'gcp_api': 'google',\n 'hdfs': 'apache.hdfs',\n 'hive': 'apache.hive',\n 'kubernetes': 'cncf.kubernetes',\n 'mssql': 'microsoft.mssql',\n 'pinot': 'apache.pinot',\n 'qds': 'qubole',\n 's3': 'amazon',\n 'spark': 'apache.spark',\n 'webhdfs': 'apache.webhdfs',\n 'winrm': 'microsoft.winrm',\n}\n\nEXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [\n \"crypto\",\n \"webhdfs\",\n]\n\n\ndef add_extras_for_all_deprecated_aliases() -> None:\n \"\"\"\n Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same\n as the extras they are replaced with.\n The requirements are not copies - those are the same lists as for the new extras. This is intended.\n Thanks to that if the original extras are later extended with providers, aliases are extended as well.\n \"\"\"\n for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():\n requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []\n if requirements is None:\n raise Exception(f\"The extra {extra} is missing for deprecated alias {alias}\")\n EXTRAS_REQUIREMENTS[alias] = requirements\n\n\ndef add_all_deprecated_provider_packages() -> None:\n \"\"\"\n For deprecated aliases that are providers, we will swap the providers requirements to instead\n be the provider itself.\n\n e.g. {\"kubernetes\": [\"kubernetes>=3.0.0, <12.0.0\", ...]} becomes\n {\"kubernetes\": [\"apache-airflow-provider-cncf-kubernetes\"]}\n \"\"\"\n for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():\n if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:\n continue\n replace_extra_requirement_with_provider_packages(alias, [provider])\n\n\nadd_extras_for_all_deprecated_aliases()\n\n#############################################################################################################\n# End of deprecated section\n#############################################################################################################\n\n# This is list of all providers. It's a shortcut for anyone who would like to easily get list of\n# All providers. It is used by pre-commits.\nALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())\n\nALL_DB_PROVIDERS = [\n 'apache.cassandra',\n 'apache.drill',\n 'apache.druid',\n 'apache.hdfs',\n 'apache.hive',\n 'apache.pinot',\n 'arangodb',\n 'cloudant',\n 'databricks',\n 'exasol',\n 'influxdb',\n 'microsoft.mssql',\n 'mongo',\n 'mysql',\n 'neo4j',\n 'postgres',\n 'presto',\n 'trino',\n 'vertica',\n]\n\n# Special requirements for all database-related providers. They are de-duplicated.\nall_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})\n\n# Requirements for all \"user\" extras (no devel). They are de-duplicated. Note that we do not need\n# to separately add providers requirements - they have been already added as 'providers' extras above\n_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})\n\n# All user extras here\nEXTRAS_REQUIREMENTS[\"all\"] = _all_requirements\n\n# All db user extras here\nEXTRAS_REQUIREMENTS[\"all_dbs\"] = all_dbs + pandas\n\n# This can be simplified to devel_hadoop + _all_requirements due to inclusions\n# but we keep it for explicit sake. 
We are de-duplicating it anyway.\ndevel_all = list(set(_all_requirements + doc + devel + devel_hadoop))\n\n# Those are packages excluded for \"all\" dependencies\nPACKAGES_EXCLUDED_FOR_ALL = []\nPACKAGES_EXCLUDED_FOR_ALL.extend(\n [\n 'snakebite',\n ]\n)\n\n\ndef is_package_excluded(package: str, exclusion_list: List[str]) -> bool:\n \"\"\"\n Checks if package should be excluded.\n\n :param package: package name (beginning of it)\n :param exclusion_list: list of excluded packages\n :return: true if package should be excluded\n \"\"\"\n return any(package.startswith(excluded_package) for excluded_package in exclusion_list)\n\n\ndevel_all = [\n package\n for package in devel_all\n if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)\n]\n\ndevel_ci = devel_all\n\n\n# Those are extras that we have to add for development purposes\n# They can be use to install some predefined set of dependencies.\nEXTRAS_REQUIREMENTS[\"doc\"] = doc\nEXTRAS_REQUIREMENTS[\"devel\"] = devel # devel already includes doc\nEXTRAS_REQUIREMENTS[\"devel_hadoop\"] = devel_hadoop # devel_hadoop already includes devel\nEXTRAS_REQUIREMENTS[\"devel_all\"] = devel_all\nEXTRAS_REQUIREMENTS[\"devel_ci\"] = devel_ci\n\n\ndef sort_extras_requirements() -> Dict[str, List[str]]:\n \"\"\"\n The dictionary order remains when keys() are retrieved.\n Sort both: extras and list of dependencies to make it easier to analyse problems\n external packages will be first, then if providers are added they are added at the end of the lists.\n \"\"\"\n sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))\n for extra_list in sorted_requirements.values():\n extra_list.sort()\n return sorted_requirements\n\n\nEXTRAS_REQUIREMENTS = sort_extras_requirements()\n\n# Those providers are pre-installed always when airflow is installed.\n# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.\n# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.\nPREINSTALLED_PROVIDERS = [\n 'ftp',\n 'http',\n 'imap',\n 'sqlite',\n]\n\n\ndef get_provider_package_from_package_id(package_id: str) -> str:\n \"\"\"\n Builds the name of provider package out of the package id provided/\n\n :param package_id: id of the package (like amazon or microsoft.azure)\n :return: full name of package in PyPI\n \"\"\"\n package_suffix = package_id.replace(\".\", \"-\")\n return f\"apache-airflow-providers-{package_suffix}\"\n\n\ndef get_excluded_providers() -> List[str]:\n \"\"\"Returns packages excluded for the current python version.\"\"\"\n return []\n\n\ndef get_all_provider_packages() -> str:\n \"\"\"Returns all provider packages configured in setup.py\"\"\"\n excluded_providers = get_excluded_providers()\n return \" \".join(\n get_provider_package_from_package_id(package)\n for package in PROVIDERS_REQUIREMENTS\n if package not in excluded_providers\n )\n\n\nclass AirflowDistribution(Distribution):\n \"\"\"The setuptools.Distribution subclass with Airflow specific behaviour\"\"\"\n\n def __init__(self, attrs=None):\n super().__init__(attrs)\n self.install_requires = None\n\n def parse_config_files(self, *args, **kwargs) -> None:\n \"\"\"\n Ensure that when we have been asked to install providers from sources\n that we don't *also* try to install those providers from PyPI.\n Also we should make sure that in this case we copy provider.yaml files so that\n Providers manager can find package information.\n \"\"\"\n super().parse_config_files(*args, **kwargs)\n if 
os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n self.install_requires = [\n req for req in self.install_requires if not req.startswith('apache-airflow-providers-')\n ]\n provider_yaml_files = glob.glob(\"airflow/providers/**/provider.yaml\", recursive=True)\n for provider_yaml_file in provider_yaml_files:\n provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, \"airflow\"))\n self.package_data['airflow'].append(provider_relative_path)\n else:\n self.install_requires.extend(\n [get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]\n )\n\n\ndef replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:\n \"\"\"\n Replaces extra requirement with provider package. The intention here is that when\n the provider is added as dependency of extra, there is no need to add the dependencies\n separately. This is not needed and even harmful, because in case of future versions of\n the provider, the requirements might change, so hard-coding requirements from the version\n that was available at the release time might cause dependency conflicts in the future.\n\n Say for example that you have salesforce provider with those deps:\n\n { 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }\n\n Initially ['salesforce'] extra has those requirements and it works like that when you install\n it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when\n the production installation is used, The dependencies are changed:\n\n { 'salesforce': ['apache-airflow-providers-salesforce'] }\n\n And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:\n ['simple-salesforce>=1.0.0', 'tableauserverclient']\n\n So transitively 'salesforce' extra has all the requirements it needs and in case the provider\n changes its dependencies, they will transitively change as well.\n\n In the constraint mechanism we save both - provider versions and it's dependencies\n version, which means that installation using constraints is repeatable.\n\n For K8s and Celery which are both \"Core executors\" and \"Providers\" we have to\n add the base dependencies to core as well, in order to mitigate problems where\n newer version of provider will have less strict limits. This should be done for both\n extras and their deprecated aliases. This is not a full protection however, the way\n extras work, this will not add \"hard\" limits for Airflow and the user who does not use\n constraints.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n else:\n EXTRAS_REQUIREMENTS[extra] = [\n get_provider_package_from_package_id(package_name) for package_name in providers\n ]\n\n\ndef add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:\n \"\"\"\n Adds provider packages as requirements to extra. This is used to add provider packages as requirements\n to the \"bulk\" kind of extras. 
Those bulk extras do not have the detailed 'extra' requirements as\n initial values, so instead of replacing them (see previous function) we can extend them.\n\n :param extra: Name of the extra to add providers to\n :param providers: list of provider ids\n \"\"\"\n EXTRAS_REQUIREMENTS[extra].extend(\n [get_provider_package_from_package_id(package_name) for package_name in providers]\n )\n\n\ndef add_all_provider_packages() -> None:\n \"\"\"\n In case of regular installation (providers installed from packages), we should add extra dependencies to\n Airflow - to get the providers automatically installed when those extras are installed.\n\n For providers installed from sources we skip that step. That helps to test and install airflow with\n all packages in CI - for example when new providers are added, otherwise the installation would fail\n as the new provider is not yet in PyPI.\n\n \"\"\"\n for provider in ALL_PROVIDERS:\n replace_extra_requirement_with_provider_packages(provider, [provider])\n add_provider_packages_to_extra_requirements(\"all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_ci\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"devel_all\", ALL_PROVIDERS)\n add_provider_packages_to_extra_requirements(\"all_dbs\", ALL_DB_PROVIDERS)\n add_provider_packages_to_extra_requirements(\n \"devel_hadoop\", [\"apache.hdfs\", \"apache.hive\", \"presto\", \"trino\"]\n )\n add_all_deprecated_provider_packages()\n\n\nclass Develop(develop_orig):\n \"\"\"Forces removal of providers in editable mode.\"\"\"\n\n def run(self) -> None: # type: ignore\n self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)\n # We need to run \"python3 -m pip\" because it might be that older PIP binary is in the path\n # And it results with an error when running pip directly (cannot import pip module)\n # also PIP does not have a stable API so we have to run subprocesses ¯\\_(ツ)_/¯\n try:\n installed_packages = (\n subprocess.check_output([\"python3\", \"-m\", \"pip\", \"freeze\"]).decode().splitlines()\n )\n airflow_provider_packages = [\n package_line.split(\"=\")[0]\n for package_line in installed_packages\n if package_line.startswith(\"apache-airflow-providers\")\n ]\n self.announce(f'Uninstalling ${airflow_provider_packages}!', level=log.INFO)\n subprocess.check_call([\"python3\", \"-m\", \"pip\", \"uninstall\", \"--yes\", *airflow_provider_packages])\n except subprocess.CalledProcessError as e:\n self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)\n super().run()\n\n\nclass Install(install_orig):\n \"\"\"Forces installation of providers from sources in editable mode.\"\"\"\n\n def run(self) -> None:\n self.announce('Standard installation. Providers are installed from packages', level=log.INFO)\n super().run()\n\n\ndef do_setup() -> None:\n \"\"\"\n Perform the Airflow package setup.\n\n Most values come from setup.cfg, only the dynamically calculated ones are passed to setup\n function call. 
See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html\n \"\"\"\n setup_kwargs = {}\n\n def include_provider_namespace_packages_when_installing_from_sources() -> None:\n \"\"\"\n When installing providers from sources we install all namespace packages found below airflow,\n including airflow and provider packages, otherwise defaults from setup.cfg control this.\n The kwargs in setup() call override those that are specified in setup.cfg.\n \"\"\"\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])\n\n include_provider_namespace_packages_when_installing_from_sources()\n if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':\n print(\"Installing providers from sources. Skip adding providers as dependencies\")\n else:\n add_all_provider_packages()\n\n write_version()\n setup(\n distclass=AirflowDistribution,\n version=version,\n extras_require=EXTRAS_REQUIREMENTS,\n download_url=('https://archive.apache.org/dist/airflow/' + version),\n cmdclass={\n 'extra_clean': CleanCommand,\n 'compile_assets': CompileAssets,\n 'list_extras': ListExtras,\n 'install': Install, # type: ignore\n 'develop': Develop,\n },\n test_suite='setup.airflow_test_suite',\n **setup_kwargs, # type: ignore\n )\n\n\nif __name__ == \"__main__\":\n do_setup() # comment\n",
"path": "setup.py"
}
] | diff --git a/airflow/providers/databricks/provider.yaml b/airflow/providers/databricks/provider.yaml
index 3aba329e4d89d..4afbc0c5357db 100644
--- a/airflow/providers/databricks/provider.yaml
+++ b/airflow/providers/databricks/provider.yaml
@@ -37,9 +37,6 @@ versions:
additional-dependencies:
- apache-airflow>=2.1.0
-excluded-python-versions:
- - "3.10"
-
integrations:
- integration-name: Databricks
external-doc-url: https://databricks.com/
diff --git a/docs/apache-airflow-providers-databricks/index.rst b/docs/apache-airflow-providers-databricks/index.rst
index 56836078c2516..51c8381f0c20c 100644
--- a/docs/apache-airflow-providers-databricks/index.rst
+++ b/docs/apache-airflow-providers-databricks/index.rst
@@ -80,7 +80,7 @@ PIP requirements
PIP package Version required
============================ ===================
``apache-airflow`` ``>=2.1.0``
-``databricks-sql-connector`` ``>=1.0.0, <2.0.0``
+``databricks-sql-connector`` ``>=1.0.2, <2.0.0``
``requests`` ``>=2.26.0, <3``
============================ ===================
diff --git a/setup.py b/setup.py
index e18cef450fd88..950285353f0ce 100644
--- a/setup.py
+++ b/setup.py
@@ -264,7 +264,7 @@ def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version
]
databricks = [
'requests>=2.26.0, <3',
- 'databricks-sql-connector>=1.0.0, <2.0.0',
+ 'databricks-sql-connector>=1.0.2, <2.0.0',
]
datadog = [
'datadog>=0.14.0',
diff --git a/tests/providers/databricks/operators/test_databricks_sql.py b/tests/providers/databricks/operators/test_databricks_sql.py
index 08e24d72d8012..6b9fb43701ae3 100644
--- a/tests/providers/databricks/operators/test_databricks_sql.py
+++ b/tests/providers/databricks/operators/test_databricks_sql.py
@@ -24,7 +24,7 @@
import pytest
from databricks.sql.types import Row
-from airflow import PY310, AirflowException
+from airflow import AirflowException
from airflow.providers.databricks.operators.databricks_sql import (
DatabricksCopyIntoOperator,
DatabricksSqlOperator,
@@ -83,12 +83,6 @@ def test_exec_write_file(self, db_mock_class):
db_mock.run.assert_called_once_with(sql, parameters=None)
[email protected](
- PY310,
- reason="Databricks SQL tests not run on Python 3.10 because there is direct Iterable import from"
- " collections in the databricks SQL library, where it should be imported from collections.abc."
- " This could be removed when https://github.com/apache/airflow/issues/22220 is solved",
-)
class TestDatabricksSqlCopyIntoOperator(unittest.TestCase):
def test_copy_with_files(self):
op = DatabricksCopyIntoOperator(
|
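The Python 3.10 exclusion dropped in the diff above (both the `excluded-python-versions` entry in `provider.yaml` and the `pytest.mark.skipif` in the test module) traces back to the reason quoted in the skip message itself: the Databricks SQL library imported `Iterable` directly from `collections`, an alias that Python 3.10 removed in favor of `collections.abc`, which is what the bump to `databricks-sql-connector>=1.0.2` resolves. A minimal sketch of that failure mode (illustrative only, not the library's actual code):

```python
# Python 3.10 removed the ABC aliases that used to be re-exported from
# `collections`, so a library that still spells the import the old way
# breaks at import time. This is the pattern the skip message describes.
try:
    from collections import Iterable  # ImportError on Python 3.10+
except ImportError:
    from collections.abc import Iterable  # portable on every Python 3 version

print(Iterable)  # <class 'collections.abc.Iterable'>
```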
zestedesavoir__zds-site-2605 | [Beta v1.8] RSS feeds no longer work => 500 error
Straight from the beta: it is impossible to get the RSS feeds by clicking the corresponding links. You get a 500 error.
This is therefore a regression, since it works in production.
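The fix that appears in the updated settings below is a single added line, `SITE_ID = 1`. With `django.contrib.sites` in `INSTALLED_APPS`, Django's syndication framework resolves the current site through `Site.objects.get_current()`, which raises `ImproperlyConfigured` when `SITE_ID` is not set, and that exception surfaces as the 500 on every feed URL. A minimal sketch of the repaired configuration (excerpt only; everything except the added line is already in the file):

```python
# settings.py (excerpt)
INSTALLED_APPS = (
    # ...
    'django.contrib.sites',  # enables the sites framework, used by RSS/Atom feeds
    # ...
)

# Without this setting, Site.objects.get_current() raises ImproperlyConfigured
# and every syndication (RSS/Atom) request is rendered as an HTTP 500.
SITE_ID = 1
```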
| [
{
"content": "# coding: utf-8\n\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'base.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = False\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nLANGUAGES = (\n ('fr', _('Français')),\n ('en', _('Anglais')),\n)\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'dist'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nFIXTURE_DIRS = (os.path.join(BASE_DIR, 'fixtures'))\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&y49uc#t=*wvi(9hnyii0z'\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n # CorsMiddleware needs to be before CommonMiddleware.\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for 
simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n 'zds.middlewares.profile.ProfileMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n # ZDS context processors\n 'zds.utils.context_processor.app_settings',\n 'zds.utils.context_processor.git_version',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'easy_thumbnails',\n 'easy_thumbnails.optimize',\n 'crispy_forms',\n 'haystack',\n 'munin',\n 'social.apps.django_app.default',\n 'rest_framework',\n 'rest_framework_swagger',\n 'corsheaders',\n 'oauth2_provider',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n },\n}\n\nREST_FRAMEWORK = {\n # If the pagination isn't specify in the API, its configuration is\n # specified here.\n 'PAGINATE_BY': 10, # Default to 10\n 'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.\n 'MAX_PAGINATE_BY': 100, # Maximum limit allowed when using `?page_size=xxx`.\n # Active OAuth2 authentication.\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'oauth2_provider.ext.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n #'rest_framework.parsers.XMLParser',\n 'rest_framework_xml.parsers.XMLParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n #'rest_framework.renderers.XMLRenderer',\n 'rest_framework_xml.renderers.XMLRenderer',\n 
'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '60/hour',\n 'user': '2000/hour'\n }\n}\n\nREST_FRAMEWORK_EXTENSIONS = {\n # If the cache isn't specify in the API, the time of the cache\n # is specified here in seconds.\n 'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15\n}\n\nSWAGGER_SETTINGS = {\n 'enabled_methods': [\n 'get',\n 'post',\n 'put',\n 'delete'\n ]\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_METHODS = (\n 'GET',\n 'POST',\n 'PUT',\n 'DELETE',\n)\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'x-data-format'\n)\n\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'link'\n)\n\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\nPANDOC_LOC = ''\nPANDOC_PDF_PARAM = \"--latex-engine=xelatex --template=../../assets/tex/template.tex -s -S -N --toc -V documentclass=scrbook -V lang=francais -V mainfont=Merriweather -V monofont=\\\"Andale Mono\\\" -V fontsize=12pt -V geometry:margin=1in \"\n# LOG PATH FOR PANDOC LOGGING\nPANDOC_LOG = './pandoc.log'\nPANDOC_LOG_STATE = False\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(BASE_DIR, 'geodata')\n\n# Fake mails (in console)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\nSDZ_TUTO_DIR = ''\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'conf/locale/'),\n)\n\nZDS_APP = {\n 'site': {\n 'name': u\"ZesteDeSavoir\",\n 'litteral_name': u\"Zeste de Savoir\",\n 'slogan': u\"Zeste de Savoir, la connaissance pour tous et sans pépins\",\n 'abbr': u\"zds\",\n 'url': u\"http://127.0.0.1:8000\",\n 'dns': u\"zestedesavoir.com\",\n 'email_contact': u\"[email protected]\",\n 'email_noreply': u\"[email protected]\",\n 'repository': 
u\"https://github.com/zestedesavoir/zds-site\",\n 'bugtracker': u\"https://github.com/zestedesavoir/zds-site/issues\",\n 'forum_feedback_users': u\"/forums/communaute/bug-suggestions/\",\n 'short_description': u\"\",\n 'long_description': u\"Zeste de Savoir est un site de partage de connaissances \"\n u\"sur lequel vous trouverez des tutoriels de tous niveaux, \"\n u\"des articles et des forums d'entraide animés par et pour \"\n u\"la communauté.\",\n 'association': {\n 'name': u\"Zeste de Savoir\",\n 'fee': u\"30 €\",\n 'email': u\"[email protected]\",\n 'email_ca': u\"[email protected]\"\n },\n 'licenses': {\n 'logo': {\n 'code': u\"CC-BY\",\n 'title': u\"Creative Commons License\",\n 'description': u\"Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - \"\n u\"Partage dans les Mêmes Conditions 4.0 International.\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\",\n 'author': u\"MaxRoyo\"\n },\n 'cookies': {\n 'code': u\"CC-BY\",\n 'title': u\"Licence Creative Commons\",\n 'description': u\"licence Creative Commons Attribution 4.0 International\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\"\n },\n 'source': {\n 'code': u\"GPL v3\",\n 'url_license': u\"http://www.gnu.org/licenses/gpl-3.0.html\",\n 'provider_name': u\"Progdupeupl\",\n 'provider_url': u\"http://pdp.microjoe.org\",\n },\n 'licence_info_title': u'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n u'ences-sur-zeste-de-savoir/',\n 'licence_info_link': u'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': u\"OVH\",\n 'address': u\"2 rue Kellermann - 59100 Roubaix - France\"\n },\n 'social': {\n 'facebook': u'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': u'https://twitter.com/ZesteDeSavoir',\n 'googleplus': u'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': u\"1771020\",\n },\n 'member': {\n 'bot_account': u\"admin\",\n 'anonymous_account': u\"anonymous\",\n 'external_account': u\"external\",\n 'bot_group': u'bot',\n 'members_per_page': 100,\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n },\n 'article': {\n 'home_number': 5,\n 'repo_path': os.path.join(BASE_DIR, 'articles-data')\n },\n 'tutorial': {\n 'repo_path': os.path.join(BASE_DIR, 'tutoriels-private'),\n 'repo_public_path': os.path.join(BASE_DIR, 'tutoriels-public'),\n 'default_license_pk': 7,\n 'home_number': 5,\n 'helps_per_page': 20\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'followed_topics_per_page': 21,\n 'beta_forum_id': 1,\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n },\n 'paginator': {\n 'folding_limit': 4\n }\n}\n\nLOGIN_REDIRECT_URL = \"/\"\n\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\n 'social.backends.google.GoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend')\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\n\nSOCIAL_AUTH_PIPELINE = (\n 'social.pipeline.social_auth.social_details',\n 'social.pipeline.social_auth.social_uid',\n 'social.pipeline.social_auth.auth_allowed',\n 'social.pipeline.social_auth.social_user',\n 'social.pipeline.user.get_username',\n 'social.pipeline.user.create_user',\n 'zds.member.models.save_profile',\n 
'social.pipeline.social_auth.associate_user',\n 'social.pipeline.social_auth.load_extra_data',\n 'social.pipeline.user.user_details'\n)\n\n# redefine for real key and secret code\nSOCIAL_AUTH_FACEBOOK_KEY = \"\"\nSOCIAL_AUTH_FACEBOOK_SECRET = \"\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \"mApWNh3stCsYHwsGuWdbZWP8\"\n\n# To remove a useless warning in Django 1.7.\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n\n",
"path": "zds/settings.py"
}
] | [
{
"content": "# coding: utf-8\n\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n# INTERNAL_IPS = ('127.0.0.1',) # debug toolbar\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'base.db'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Europe/Paris'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'fr-fr'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = False\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\nLANGUAGES = (\n ('fr', _('Français')),\n ('en', _('Anglais')),\n)\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'dist'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\nFIXTURE_DIRS = (os.path.join(BASE_DIR, 'fixtures'))\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'n!01nl+318#x75_%le8#s0=-*ysw&y49uc#t=*wvi(9hnyii0z'\n\nFILE_UPLOAD_HANDLERS = (\n \"django.core.files.uploadhandler.MemoryFileUploadHandler\",\n \"django.core.files.uploadhandler.TemporaryFileUploadHandler\",\n)\n\nMIDDLEWARE_CLASSES = (\n # CorsMiddleware needs to be before CommonMiddleware.\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for 
simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'zds.utils.ThreadLocals',\n 'zds.middlewares.SetLastVisitMiddleware.SetLastVisitMiddleware',\n 'zds.middlewares.profile.ProfileMiddleware',\n)\n\nROOT_URLCONF = 'zds.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'zds.wsgi.application'\n\nTEMPLATE_DIRS = [\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'templates')\n]\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n # Default context processors\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'social.apps.django_app.context_processors.backends',\n 'social.apps.django_app.context_processors.login_redirect',\n # ZDS context processors\n 'zds.utils.context_processor.app_settings',\n 'zds.utils.context_processor.git_version',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sitemaps',\n 'django.contrib.humanize',\n\n 'easy_thumbnails',\n 'easy_thumbnails.optimize',\n 'crispy_forms',\n 'haystack',\n 'munin',\n 'social.apps.django_app.default',\n 'rest_framework',\n 'rest_framework_swagger',\n 'corsheaders',\n 'oauth2_provider',\n\n # Apps DB tables are created in THIS order by default\n # --> Order is CRITICAL to properly handle foreign keys\n 'zds.utils',\n 'zds.pages',\n 'zds.gallery',\n 'zds.mp',\n 'zds.article',\n 'zds.forum',\n 'zds.tutorial',\n 'zds.member',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n)\n\nSITE_ID = 1\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'avatar': {'size': (60, 60), 'crop': True},\n 'avatar_mini': {'size': (24, 24), 'crop': True},\n 'tutorial_illu': {'size': (60, 60), 'crop': True},\n 'article_illu': {'size': (60, 60), 'crop': True},\n 'help_illu': {'size': (48, 48), 'crop': True},\n 'help_mini_illu': {'size': (26, 26), 'crop': True},\n 'gallery': {'size': (120, 120), 'crop': True},\n 'content': {'size': (960, 960), 'crop': False},\n },\n}\n\nREST_FRAMEWORK = {\n # If the pagination isn't specify in the API, its configuration is\n # specified here.\n 'PAGINATE_BY': 10, # Default to 10\n 'PAGINATE_BY_PARAM': 'page_size', # Allow client to override, using `?page_size=xxx`.\n 'MAX_PAGINATE_BY': 100, # Maximum limit allowed when using `?page_size=xxx`.\n # Active OAuth2 authentication.\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'oauth2_provider.ext.rest_framework.OAuth2Authentication',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n #'rest_framework.parsers.XMLParser',\n 'rest_framework_xml.parsers.XMLParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n #'rest_framework.renderers.XMLRenderer',\n 'rest_framework_xml.renderers.XMLRenderer',\n 
'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_THROTTLE_CLASSES': (\n 'rest_framework.throttling.AnonRateThrottle',\n 'rest_framework.throttling.UserRateThrottle'\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'anon': '60/hour',\n 'user': '2000/hour'\n }\n}\n\nREST_FRAMEWORK_EXTENSIONS = {\n # If the cache isn't specify in the API, the time of the cache\n # is specified here in seconds.\n 'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15\n}\n\nSWAGGER_SETTINGS = {\n 'enabled_methods': [\n 'get',\n 'post',\n 'put',\n 'delete'\n ]\n}\n\nCORS_ORIGIN_ALLOW_ALL = True\n\nCORS_ALLOW_METHODS = (\n 'GET',\n 'POST',\n 'PUT',\n 'DELETE',\n)\n\nCORS_ALLOW_HEADERS = (\n 'x-requested-with',\n 'content-type',\n 'accept',\n 'origin',\n 'authorization',\n 'x-csrftoken',\n 'x-data-format'\n)\n\nCORS_EXPOSE_HEADERS = (\n 'etag',\n 'link'\n)\n\nif (DEBUG):\n INSTALLED_APPS += (\n 'debug_toolbar',\n )\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': '127.0.0.1:11211',\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nLOGIN_URL = '/membres/connexion'\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: '/membres/voir/{0}/'.format(u.username.encode('utf-8'))\n}\n\n\n# Django fileserve settings (set to True for local dev version only)\nSERVE = False\n\nPANDOC_LOC = ''\nPANDOC_PDF_PARAM = \"--latex-engine=xelatex --template=../../assets/tex/template.tex -s -S -N --toc -V documentclass=scrbook -V lang=francais -V mainfont=Merriweather -V monofont=\\\"Andale Mono\\\" -V fontsize=12pt -V geometry:margin=1in \"\n# LOG PATH FOR PANDOC LOGGING\nPANDOC_LOG = './pandoc.log'\nPANDOC_LOG_STATE = False\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://127.0.0.1:8983/solr'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\n\nGEOIP_PATH = os.path.join(BASE_DIR, 'geodata')\n\n# Fake mails (in console)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nfrom django.contrib.messages import constants as message_constants\nMESSAGE_TAGS = {\n message_constants.DEBUG: 'debug',\n message_constants.INFO: 'info',\n message_constants.SUCCESS: 'success',\n message_constants.WARNING: 'warning',\n message_constants.ERROR: 'alert',\n}\n\nSDZ_TUTO_DIR = ''\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'conf/locale/'),\n)\n\nZDS_APP = {\n 'site': {\n 'name': u\"ZesteDeSavoir\",\n 'litteral_name': u\"Zeste de Savoir\",\n 'slogan': u\"Zeste de Savoir, la connaissance pour tous et sans pépins\",\n 'abbr': u\"zds\",\n 'url': u\"http://127.0.0.1:8000\",\n 'dns': u\"zestedesavoir.com\",\n 'email_contact': u\"[email protected]\",\n 'email_noreply': u\"[email protected]\",\n 'repository': 
u\"https://github.com/zestedesavoir/zds-site\",\n 'bugtracker': u\"https://github.com/zestedesavoir/zds-site/issues\",\n 'forum_feedback_users': u\"/forums/communaute/bug-suggestions/\",\n 'short_description': u\"\",\n 'long_description': u\"Zeste de Savoir est un site de partage de connaissances \"\n u\"sur lequel vous trouverez des tutoriels de tous niveaux, \"\n u\"des articles et des forums d'entraide animés par et pour \"\n u\"la communauté.\",\n 'association': {\n 'name': u\"Zeste de Savoir\",\n 'fee': u\"30 €\",\n 'email': u\"[email protected]\",\n 'email_ca': u\"[email protected]\"\n },\n 'licenses': {\n 'logo': {\n 'code': u\"CC-BY\",\n 'title': u\"Creative Commons License\",\n 'description': u\"Licence Creative Commons Attribution - Pas d’Utilisation Commerciale - \"\n u\"Partage dans les Mêmes Conditions 4.0 International.\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\",\n 'author': u\"MaxRoyo\"\n },\n 'cookies': {\n 'code': u\"CC-BY\",\n 'title': u\"Licence Creative Commons\",\n 'description': u\"licence Creative Commons Attribution 4.0 International\",\n 'url_image': u\"http://i.creativecommons.org/l/by-nc-sa/4.0/80x15.png\",\n 'url_license': u\"http://creativecommons.org/licenses/by-nc-sa/4.0/\"\n },\n 'source': {\n 'code': u\"GPL v3\",\n 'url_license': u\"http://www.gnu.org/licenses/gpl-3.0.html\",\n 'provider_name': u\"Progdupeupl\",\n 'provider_url': u\"http://pdp.microjoe.org\",\n },\n 'licence_info_title': u'http://zestedesavoir.com/tutoriels/281/le-droit-dauteur-creative-commons-et-les-lic'\n u'ences-sur-zeste-de-savoir/',\n 'licence_info_link': u'Le droit d\\'auteur, Creative Commons et les licences sur Zeste de Savoir'\n },\n 'hosting': {\n 'name': u\"OVH\",\n 'address': u\"2 rue Kellermann - 59100 Roubaix - France\"\n },\n 'social': {\n 'facebook': u'https://www.facebook.com/ZesteDeSavoir',\n 'twitter': u'https://twitter.com/ZesteDeSavoir',\n 'googleplus': u'https://plus.google.com/u/0/107033688356682807298'\n },\n 'cnil': u\"1771020\",\n },\n 'member': {\n 'bot_account': u\"admin\",\n 'anonymous_account': u\"anonymous\",\n 'external_account': u\"external\",\n 'bot_group': u'bot',\n 'members_per_page': 100,\n },\n 'gallery': {\n 'image_max_size': 1024 * 1024,\n },\n 'article': {\n 'home_number': 5,\n 'repo_path': os.path.join(BASE_DIR, 'articles-data')\n },\n 'tutorial': {\n 'repo_path': os.path.join(BASE_DIR, 'tutoriels-private'),\n 'repo_public_path': os.path.join(BASE_DIR, 'tutoriels-public'),\n 'default_license_pk': 7,\n 'home_number': 5,\n 'helps_per_page': 20\n },\n 'forum': {\n 'posts_per_page': 21,\n 'topics_per_page': 21,\n 'spam_limit_seconds': 60 * 15,\n 'spam_limit_participant': 2,\n 'followed_topics_per_page': 21,\n 'beta_forum_id': 1,\n 'max_post_length': 1000000,\n 'top_tag_max': 5,\n 'home_number': 5,\n },\n 'paginator': {\n 'folding_limit': 4\n }\n}\n\nLOGIN_REDIRECT_URL = \"/\"\n\nAUTHENTICATION_BACKENDS = ('social.backends.facebook.FacebookOAuth2',\n 'social.backends.google.GoogleOAuth2',\n 'django.contrib.auth.backends.ModelBackend')\nSOCIAL_AUTH_GOOGLE_OAUTH2_USE_DEPRECATED_API = True\n\nSOCIAL_AUTH_PIPELINE = (\n 'social.pipeline.social_auth.social_details',\n 'social.pipeline.social_auth.social_uid',\n 'social.pipeline.social_auth.auth_allowed',\n 'social.pipeline.social_auth.social_user',\n 'social.pipeline.user.get_username',\n 'social.pipeline.user.create_user',\n 'zds.member.models.save_profile',\n 
'social.pipeline.social_auth.associate_user',\n 'social.pipeline.social_auth.load_extra_data',\n 'social.pipeline.user.user_details'\n)\n\n# redefine for real key and secret code\nSOCIAL_AUTH_FACEBOOK_KEY = \"\"\nSOCIAL_AUTH_FACEBOOK_SECRET = \"\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\"\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = \"mApWNh3stCsYHwsGuWdbZWP8\"\n\n# To remove a useless warning in Django 1.7.\n# See http://daniel.hepper.net/blog/2014/04/fixing-1_6-w001-when-upgrading-from-django-1-5-to-1-7/\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n\n# Load the production settings, overwrite the existing ones if needed\ntry:\n from settings_prod import *\nexcept ImportError:\n pass\n\n",
"path": "zds/settings.py"
}
] | diff --git a/zds/settings.py b/zds/settings.py
index 3d94cba83f..d0d2f33de1 100644
--- a/zds/settings.py
+++ b/zds/settings.py
@@ -176,6 +176,8 @@
# 'django.contrib.admindocs',
)
+SITE_ID = 1
+
THUMBNAIL_ALIASES = {
'': {
'avatar': {'size': (60, 60), 'crop': True},
|
Gallopsled__pwntools-1811 | List comprehension in __all__ prevents Pylance from working
Thanks for contributing to Pwntools! Ideas from the community help make Pwntools an amazing tool for everybody.
If you've got an idea for a new feature, please provide information about:
* What the feature does
According to https://github.com/microsoft/pylance-release/issues/289, the list comprehension in `__all__` in https://github.com/Gallopsled/pwntools/blob/4e6ccb0da17fb91e43a4f9e95edf4fd83806ba23/pwn/toplevel.py#L85 prevents [Pylance](https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance) from working (when using `from pwn import *` instead of manually importing all modules).
The approach discussed in https://github.com/compas-dev/compas/issues/621 may be a solution that avoids listing all attributes manually; see the sketch at the end of this issue.
* Why the feature should exist
To make Pylance happy :smile:
* What tests should be included
Test in VS Code to ensure it works.
If you think you can write the feature yourself, please submit a Pull Request and we can review your changes!
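
For illustration, here is a minimal, self-contained sketch (not actual pwnlib code; `remote` and `process` are placeholder names, not the real exports) of why the comprehension defeats static analysis, plus two analyzer-friendly alternatives:

```python
# sketch.py -- illustration only; the two functions are stand-ins for
# re-exported helpers, not the real pwnlib exports.

def remote():
    """Placeholder for a re-exported helper."""
    return "remote"

def process():
    """Placeholder for another re-exported helper."""
    return "process"

# Opaque to Pylance: the list's contents are only known at runtime,
# so the analyzer cannot tell what `from sketch import *` exports.
# __all__ = [x for x in tuple(globals()) if x != '__name__']

# Alternative 1 -- transparent to Pylance: a literal list of names.
__all__ = ['remote', 'process']

# Alternative 2 -- omit __all__ entirely: `from sketch import *` then
# exports every global whose name does not start with an underscore,
# which static analyzers can also follow.
```

The accompanying patch takes the second route: it removes `__all__` and keeps the (runtime-equivalent) comprehension only as a comment.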
| [
{
"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n__all__ = [x for x in tuple(globals()) if x != '__name__']\n",
"path": "pwn/toplevel.py"
}
] | [
{
"content": "# Get all the modules from pwnlib\nimport collections\nimport logging\nimport math\nimport operator\nimport os\nimport platform\nimport re\nimport requests\nimport socks\nimport signal\nimport string\nimport struct\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom pprint import pprint\n\nimport pwnlib\nfrom pwnlib import *\nfrom pwnlib.asm import *\nfrom pwnlib.context import Thread\nfrom pwnlib.context import context, LocalContext\nfrom pwnlib.dynelf import DynELF\nfrom pwnlib.encoders import *\nfrom pwnlib.elf.corefile import Core, Corefile, Coredump\nfrom pwnlib.elf.elf import ELF, load\nfrom pwnlib.encoders import *\nfrom pwnlib.exception import PwnlibException\nfrom pwnlib.gdb import attach, debug_assembly, debug_shellcode\nfrom pwnlib.filepointer import *\nfrom pwnlib.flag import *\nfrom pwnlib.fmtstr import FmtStr, fmtstr_payload, fmtstr_split\nfrom pwnlib.log import getLogger\nfrom pwnlib.memleak import MemLeak, RelativeMemLeak\nfrom pwnlib.regsort import *\nfrom pwnlib.replacements import *\nfrom pwnlib.rop import ROP\nfrom pwnlib.rop.call import AppendedArgument\nfrom pwnlib.rop.srop import SigreturnFrame\nfrom pwnlib.rop.ret2dlresolve import Ret2dlresolvePayload\nfrom pwnlib.runner import *\nfrom pwnlib.term.readline import str_input\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.listen import listen\nfrom pwnlib.tubes.process import process, PTY, PIPE, STDOUT\nfrom pwnlib.tubes.remote import remote, tcp, udp, connect\nfrom pwnlib.tubes.serialtube import serialtube\nfrom pwnlib.tubes.server import server\nfrom pwnlib.tubes.ssh import ssh\nfrom pwnlib.tubes.tube import tube\nfrom pwnlib.ui import *\nfrom pwnlib.util import crc\nfrom pwnlib.util import iters\nfrom pwnlib.util import net\nfrom pwnlib.util import proc\nfrom pwnlib.util import safeeval\nfrom pwnlib.util.crc import BitPolynom\nfrom pwnlib.util.cyclic import *\nfrom pwnlib.util.fiddling import *\nfrom pwnlib.util.getdents import *\nfrom pwnlib.util.hashes import *\nfrom pwnlib.util.lists import *\nfrom pwnlib.util.misc import *\nfrom pwnlib.util.packing import *\nfrom pwnlib.util.proc import pidof\nfrom pwnlib.util.sh_string import sh_string, sh_prepare, sh_command_with\nfrom pwnlib.util.splash import *\nfrom pwnlib.util.web import *\n\n# Promote these modules, so that \"from pwn import *\" will let you access them\n\nfrom six.moves import cPickle as pickle, cStringIO as StringIO\nfrom six import BytesIO\n\nerror = log.error\nwarning = log.warning\nwarn = log.warning\ninfo = log.info\ndebug = log.debug\nsuccess = log.success\n\n# Equivalence with the default behavior of \"from import *\"\n# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]\n",
"path": "pwn/toplevel.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index ba2970645..4929c0906 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ The table below shows which release corresponds to each branch, and what date th
- [#1758][1758] Remove eval from cli
- [#1780][1780] Re-add Python2 to the official Dockerfile
- [#1941][1941] Disable all Android tests, `pwnlib.adb` is no longer supported in CI
+- [#1811][1811] Remove unnecessary `pwn.toplevel.__all__`
[1261]: https://github.com/Gallopsled/pwntools/pull/1261
[1695]: https://github.com/Gallopsled/pwntools/pull/1695
@@ -78,7 +79,7 @@ The table below shows which release corresponds to each branch, and what date th
[1758]: https://github.com/Gallopsled/pwntools/pull/1758
[1780]: https://github.com/Gallopsled/pwntools/pull/1780
[1941]: https://github.com/Gallopsled/pwntools/pull/1941
-[1786]: https://github.com/Gallopsled/pwntools/pull/1786
+[1811]: https://github.com/Gallopsled/pwntools/pull/1811
## 4.4.0 (`beta`)
diff --git a/pwn/toplevel.py b/pwn/toplevel.py
index 0cc3f7509..5af57bf55 100644
--- a/pwn/toplevel.py
+++ b/pwn/toplevel.py
@@ -83,4 +83,5 @@
debug = log.debug
success = log.success
-__all__ = [x for x in tuple(globals()) if x != '__name__']
+# Equivalence with the default behavior of "from import *"
+# __all__ = [x for x in tuple(globals()) if not x.startswith('_')]
|
searx__searx-1477 | New release?
Hi,
I'm the Debian maintainer of searx and we are still shipping version 0.14.0 of searx because there has not been a more recent release since February.
Unfortunately, we see a number of services that no longer work with 0.14.0 but are already fixed in git. Would it be possible to make a release soon?
The last commit to the repository was back in August. Is the project still alive? Debian plans to release its next stable version soon and we should not include searx if it's not maintained anymore.
Thanks!
cheers, josch
| [
{
"content": "# -*- coding: utf-8 -*-\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\n# version of searx\nVERSION_MAJOR = 0\nVERSION_MINOR = 14\nVERSION_BUILD = 0\n\nVERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n VERSION_MINOR,\n VERSION_BUILD)\n",
"path": "searx/version.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\n# version of searx\nVERSION_MAJOR = 0\nVERSION_MINOR = 15\nVERSION_BUILD = 0\n\nVERSION_STRING = \"{0}.{1}.{2}\".format(VERSION_MAJOR,\n VERSION_MINOR,\n VERSION_BUILD)\n",
"path": "searx/version.py"
}
] | diff --git a/AUTHORS.rst b/AUTHORS.rst
index 346f324d5d..674bfd758a 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -83,3 +83,18 @@ generally made searx better:
- Joseph Nuthalapati @josephkiranbabu
- @maiki
- Richard Didier @zeph33
+- Michael Vieria @Themimitoof
+- Richard Nespithal @rndevfx
+- Stanislas @angristan
+- @rinpatch
+- g. s. @usernameisntallowed
+- Léo Bourrel @bourrel
+- @cy8aer
+- @Popolon
+- Alice Ferrazzi @aliceinwire
+- @LiquidLemon
+- @dadosch
+- @Venca24
+- @ZEROF
+- Ivan Skytte Jørgensen @isj-privacore
+- @miicha
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 55bd33ee85..70e9e415cc 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,41 @@
+0.15.0 2019.01.06
+=================
+
+- New engines
+
+ - Acgsou (files, images, videos, music)
+ - Duden.de (general)
+ - Seznam (general)
+ - Mojeek (general)
+- New languages
+
+ - Catalan
+ - Welsh
+ - Basque
+ - Persian (Iran)
+ - Galician
+ - Dutch (Belgium)
+ - Telugu
+ - Vietnamese
+- New random answerers
+
+ - sha256
+ - uuidv4
+- New DOI resolvers
+
+ - sci-hub.tw
+- Fix Vim mode on Firefox
+- Fix custom select in Oscar theme
+- Engine fixes (duckduckgo, google news, currency convert, gigablast, google scholar, wikidata image, etymonline, google videos, startpage, bing image)
+- Minor simple theme fixes
+
+- New Youtube icon in Oscar theme
+- Get DOI rewriters from settings.yml
+- Hide page buttons when infinite scrolling is enabled
+- Update user agent versions
+- Make Oscar style configurable
+- Make suspend times of errored engines configurable
+
0.14.0 2018.02.19
=================
diff --git a/searx/settings.yml b/searx/settings.yml
index 35172bd6a6..00b001b6ce 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -742,21 +742,27 @@ locales:
en : English
ar : العَرَبِيَّة (Arabic)
bg : Български (Bulgarian)
+ ca : Català (Catalan)
cs : Čeština (Czech)
+ cy : Cymraeg (Welsh)
da : Dansk (Danish)
de : Deutsch (German)
el_GR : Ελληνικά (Greek_Greece)
eo : Esperanto (Esperanto)
es : Español (Spanish)
+ eu : Euskara (Basque)
+ fa_IR : (fārsī) فارسى (Persian)
fi : Suomi (Finnish)
fil : Wikang Filipino (Filipino)
fr : Français (French)
+ gl : Galego (Galician)
he : עברית (Hebrew)
hr : Hrvatski (Croatian)
hu : Magyar (Hungarian)
it : Italiano (Italian)
ja : 日本語 (Japanese)
nl : Nederlands (Dutch)
+ nl_BE : Vlaams (Dutch_Belgium)
pl : Polski (Polish)
pt : Português (Portuguese)
pt_BR : Português (Portuguese_Brazil)
@@ -766,8 +772,10 @@ locales:
sl : Slovenski (Slovene)
sr : српски (Serbian)
sv : Svenska (Swedish)
+ te : తెలుగు (telugu)
tr : Türkçe (Turkish)
uk : українська мова (Ukrainian)
+ vi : tiếng việt (㗂越)
zh : 中文 (Chinese)
zh_TW : 國語 (Taiwanese Mandarin)
diff --git a/searx/translations/ar/LC_MESSAGES/messages.mo b/searx/translations/ar/LC_MESSAGES/messages.mo
index b3579a1a71..052e5b5225 100644
Binary files a/searx/translations/ar/LC_MESSAGES/messages.mo and b/searx/translations/ar/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ar/LC_MESSAGES/messages.po b/searx/translations/ar/LC_MESSAGES/messages.po
index 4d7e55fe4d..645ca0ed6a 100644
--- a/searx/translations/ar/LC_MESSAGES/messages.po
+++ b/searx/translations/ar/LC_MESSAGES/messages.po
@@ -3,14 +3,15 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# ButterflyOfFire <[email protected]>, 2017-2018
+# ButterflyOfFire ButterflyOfFire, 2018
+# ButterflyOfFire, 2017-2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-01-23 17:54+0000\n"
-"Last-Translator: ButterflyOfFire <[email protected]>\n"
+"PO-Revision-Date: 2018-09-08 08:23+0000\n"
+"Last-Translator: ButterflyOfFire ButterflyOfFire\n"
"Language-Team: Arabic (http://www.transifex.com/asciimoo/searx/language/ar/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -29,7 +30,7 @@ msgstr ""
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "خلل غير مُتوقّع"
#: searx/webapp.py:136
msgid "files"
@@ -73,7 +74,7 @@ msgstr "علوم"
#: searx/webapp.py:399 searx/webapp.py:658
msgid "Invalid settings, please edit your preferences"
-msgstr ""
+msgstr "إنّ الإعدادات خاطئة، يرجى تعديل خياراتك"
#: searx/webapp.py:415
msgid "Invalid settings"
@@ -97,7 +98,7 @@ msgstr "مولّد قيمة عشوائية"
#: searx/answerers/random/answerer.py:54
msgid "Generate different random values"
-msgstr ""
+msgstr "توليد قِيم عشوائية مختلفة"
#: searx/answerers/statistics/answerer.py:53
msgid "Statistics functions"
@@ -288,7 +289,7 @@ msgstr "اللغة الإفتراضية"
#: searx/templates/pix-art/preferences.html:20
#: searx/templates/simple/preferences.html:120
msgid "Interface language"
-msgstr "لغة العرض"
+msgstr "لغة الواجهة"
#: searx/templates/courgette/preferences.html:34
#: searx/templates/legacy/preferences.html:35
@@ -371,7 +372,7 @@ msgstr "اللون"
#: searx/templates/courgette/preferences.html:86
msgid "Blue (default)"
-msgstr "أزرق )إفتراضي("
+msgstr "أزرق (إفتراضي)"
#: searx/templates/courgette/preferences.html:87
msgid "Violet"
@@ -581,13 +582,13 @@ msgstr "عرض نتائج البحث في ألسنة جديدة"
#: searx/templates/oscar/preferences.html:117
#: searx/templates/simple/preferences.html:145
msgid "On"
-msgstr ""
+msgstr "يشتغل"
#: searx/templates/legacy/preferences.html:88
#: searx/templates/oscar/preferences.html:118
#: searx/templates/simple/preferences.html:146
msgid "Off"
-msgstr ""
+msgstr "مُعطَّل"
#: searx/templates/legacy/result_templates/code.html:3
#: searx/templates/legacy/result_templates/default.html:3
@@ -626,7 +627,7 @@ msgstr "محرك بحث يحمي الخصوصية و قابل للتهكير"
#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
#: searx/templates/simple/macros.html:43
msgid "proxied"
-msgstr "يمر عبر البروكسي"
+msgstr "النفاذ عبر البروكسي"
#: searx/templates/oscar/macros.html:92
msgid "supported"
@@ -661,7 +662,7 @@ msgstr "المجيبون"
#: searx/templates/oscar/preferences.html:17
#: searx/templates/oscar/preferences.html:272
msgid "Cookies"
-msgstr "الكوكيز"
+msgstr "كعكات الكوكيز"
#: searx/templates/oscar/preferences.html:42
#: searx/templates/simple/preferences.html:48
@@ -676,12 +677,12 @@ msgstr "يقوم بتغيير لغة واجهة البحث"
#: searx/templates/oscar/preferences.html:58
#: searx/templates/simple/preferences.html:60
msgid "Find stuff as you type"
-msgstr ""
+msgstr "العثور على نتائج أثناء الكتابة"
#: searx/templates/oscar/preferences.html:69
#: searx/templates/simple/preferences.html:173
msgid "Proxying image results through searx"
-msgstr ""
+msgstr "تمرير نتائج البحث عن الصور عبر بروكسي Searx"
#: searx/templates/oscar/preferences.html:78
msgid ""
@@ -865,7 +866,7 @@ msgstr "معلومة !"
#: searx/templates/oscar/messages/no_cookies.html:4
msgid "currently, there are no cookies defined."
-msgstr ""
+msgstr "حاليا لم تقم بتحديد أي مِن كعكات الكوكيز."
#: searx/templates/oscar/messages/no_data_available.html:4
msgid "There is currently no data available. "
diff --git a/searx/translations/bg/LC_MESSAGES/messages.mo b/searx/translations/bg/LC_MESSAGES/messages.mo
index 63b303a423..f80e5afcc9 100644
Binary files a/searx/translations/bg/LC_MESSAGES/messages.mo and b/searx/translations/bg/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ca/LC_MESSAGES/messages.mo b/searx/translations/ca/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..2ec3e05030
Binary files /dev/null and b/searx/translations/ca/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ca/LC_MESSAGES/messages.po b/searx/translations/ca/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..460091cd7b
--- /dev/null
+++ b/searx/translations/ca/LC_MESSAGES/messages.po
@@ -0,0 +1,1014 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Calbasi <[email protected]>, 2018
+# jmontane, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-12-15 15:10+0000\n"
+"Last-Translator: jmontane\n"
+"Language-Team: Catalan (http://www.transifex.com/asciimoo/searx/language/ca/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: ca\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "s'ha esgotat el temps d'espera"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "Excepció en la petició"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "Fallada no esperada"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "fitxers"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "general"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "música"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "xarxes socials"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "imatges"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "vídeos"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "informàtica"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "notícies"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "mapa"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "ciència"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "La configuració no és vàlida, editeu-la"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "La configuració no és vàlida"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "error en la cerca"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "fa {minutes} minuts"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "fa {hours} hores i {minutes} minuts"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Generador de valor aleatori"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Genera valors aleatoris diferents"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Funcions estadístiques"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Calcula {functions} dels arguments"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Temps del motor (segons)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Càrrega de la pàgina (segons)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Nombre de resultats"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Valoració"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Valoració segons el resultat"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Errors"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (OBSOLET)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Aquesta entrada ha estat substituïda per"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "No hi ha resum disponible per a aquesta publicació."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "Reescriu els enllaços HTTP cap a HTTPS si és possible"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Desplaçament infinit"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Carrega automàticament la pàgina següent en desplaçar-se al final de la pàgina actual"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Reescriu l'Open Access DOI"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Evita els llocs de pagaments redirigint a versions d'accés obert de les publicacions si és possible"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Obre els enllaços de resultats en una pestanya nova"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "De forma predeterminada, els resultats s'obren en la mateixa finestra. Aquest connector canvia el comportament predeterminat i obre els enllaços en una finestra o pestanya nova. (Cal JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Cerca en la selecció de categories"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Executa la cerca immediatament si hi ha seleccionada una categoria. Desactiveu-ho per a seleccionar més d'una categoria. (Cal JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Mostra la vostra IP si la consulta és «ip» i el valor «user agent» del navegador si la consulta conté «user agent»."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Suprimeix l'URL de rastreig"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Suprimeix els arguments de rastreig de les URL retornades"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Dreceres de teclat del Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Navegació pels resultats de la cerca amb les dreceres a l'estil Vim (cal JavaScript). Pitgeu la tecla «h» en la pàgina principal o de resultats per a obtenir ajuda."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "No s'ha trobat la pàgina"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Vés a %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "pàgina de cerca"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "preferències"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "quant a"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Preferències"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Categories predeterminades"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Llengua de cerca"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Llengua predeterminada"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Llengua de la interfície"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Compleció automàtica"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Servidor intermediari d'imatges"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Activat"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Desactivat"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Mètode"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "Cerca segura"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Estricta"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Moderada"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Cap"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Temes"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Color"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Blau (predeterminat)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Violat"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Verd"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Cian"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Taronja"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Vermell"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Motors de cerca usats actualment"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Nom del motor"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Categoria"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Permet"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Bloca"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Aquesta configuració es desa en les galetes. Això ens permet no emmagatzemar les vostres dades."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Aquestes galetes només són per a la vostra conveniència. No les usem per a rastrejar-vos."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "desa"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Restaura els valors predeterminats"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "enrere"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "URL de cerca"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Baixa els resultats"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Respostes"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Suggeriments"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "pàgina anterior"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "pàgina següent"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Cerca..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Estadístiques del motor"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "context original"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Font"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Descarregador"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "enllaç magnet"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "fitxer torrent"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Feu clic en la lupa per a executar la cerca"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Resultats en pestanyes noves"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Activat"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Desactivat"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "en memòria cau"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Configuració avançada"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Tanca"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Error!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Funciona amb"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "un meta motor de cerca personalitzable i respectuós amb la privadesa"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "en servidor intermediari"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "suportat"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "no suportat"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "General"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Motorrs"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Connectat"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Resposter"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Galetes"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "En quina llengua preferiu cercar?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Canvia la llengua de la disposició"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Troba coses tal com escriu"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Envia els resultats d'imatges via el servidor intermediari del searx"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Canvia com es trameten els formularis, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">més informació sobre els mètodes de petició</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Filtra el contingut"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Canvia la disposició del searx"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Trieu un estil per a aquest tema"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Estil"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Solucionador de l'Open Access DOI"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Redirigeix cap a versions d'accés obert de les publicacions si són disponibles (cal un connector)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Drecera"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Llengua seleccionada"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Interval de temps"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Temps amitjanat"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Temps màxim"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Aquest és el llistat dels mòduls de resposta ràpida del searx."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Nom"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Paraules clau"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Descripció"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Exemples"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Aquest és el llistat de les galetes, i els seu valor, que el searx té desats en el vostre equip."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Amb aquest llistat, podeu avaluar la transparència del searx."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Nom de la galeta"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Valor"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "URL de cerca de les preferències desades actualment"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Nota: si indiqueu configuracions personalitzades en la URL de cerca podeu reduir la privadesa, amb filtració de dades, en fer clic als llocs dels resultats."
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Resultats de la cerca"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Proveu a cercar:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Els motors no poden obtenir cap resultat"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Enllaços"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Comença la cerca"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "estadístiques"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "En qualsevol moment"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Les darreres 24 hores"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "La setmana passada"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "El darrer mes"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "El darrer any"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Atenció!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Sembla que esteu usant searx per primer cop."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Informació!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "actualment, no hi ha definida cap galeta."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Actualment, no hi ha dades disponibles."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Els motors no poden obtenir cap resultat"
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Torneu-ho a intentar més tard o useu una altra instància del searx."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Disculpeu!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "no hem trobat cap resultat. Feu una consulta diferent o cerqueu en més categories."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Ben fet!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "La configuració s'ha desat correctament."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Cagundena!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Alguna cosa ha anat malament."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "mostra el contingut multimèdia"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "amaga el contingut multimèdia"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Obtén la imatge"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Mostra el codi font"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "mostra el mapa"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "amaga el mapa"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "mostra els detalls"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "amaga els detalls"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Mida del fitxer"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Bytes"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Nombre de fiters"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "mostra el vídeo"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "amaga el vídeo"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Carrega'n més..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "No s'ha trobat cap element"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Suporta la llengua seleccionada"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Interfície d'usuari"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Privadesa"
diff --git a/searx/translations/cs/LC_MESSAGES/messages.mo b/searx/translations/cs/LC_MESSAGES/messages.mo
index 3e638afa32..eded7150ad 100644
Binary files a/searx/translations/cs/LC_MESSAGES/messages.mo and b/searx/translations/cs/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/cs/LC_MESSAGES/messages.po b/searx/translations/cs/LC_MESSAGES/messages.po
index ef96271fc0..8d3ce3c3e9 100644
--- a/searx/translations/cs/LC_MESSAGES/messages.po
+++ b/searx/translations/cs/LC_MESSAGES/messages.po
@@ -4,24 +4,25 @@
#
# Translators:
# Clon <[email protected]>, 2017
+# Václav Zouzalík <[email protected]>, 2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-12-18 12:03+0000\n"
+"Last-Translator: Václav Zouzalík <[email protected]>\n"
"Language-Team: Czech (http://www.transifex.com/asciimoo/searx/language/cs/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.3.4\n"
"Language: cs\n"
-"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
+"Plural-Forms: nplurals=4; plural=(n == 1 && n % 1 == 0) ? 0 : (n >= 2 && n <= 4 && n % 1 == 0) ? 1: (n % 1 != 0 ) ? 2 : 3;\n"
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "timeout"
#: searx/search.py:144
msgid "request exception"
@@ -77,7 +78,7 @@ msgstr "Neplatné nastavení, upravte svoje předvolby"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Neplatné nastavení"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -131,7 +132,7 @@ msgstr ""
#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
msgid "Errors"
-msgstr "Chyb"
+msgstr "Chyby"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
@@ -187,13 +188,13 @@ msgstr "Hledat ve vybrané kategorii"
msgid ""
"Perform search immediately if a category selected. Disable to select "
"multiple categories. (JavaScript required)"
-msgstr ""
+msgstr "Vyhledávejte okamžitě, pokud je vybrána kategorie. Vypněte, pokud potřebujete zvolit vícero kategorií. (vyžaduje JavaScript)"
#: searx/plugins/self_info.py:20
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr ""
+msgstr "Ukáže vaši IP adresu, pokud dotaz je \"ip\", a váš user agent, pokud dotatz obsahuje \"user agent\"."
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
@@ -375,7 +376,7 @@ msgstr "Modrá (základní)"
#: searx/templates/courgette/preferences.html:87
msgid "Violet"
-msgstr ""
+msgstr "Fialová"
#: searx/templates/courgette/preferences.html:88
msgid "Green"
@@ -383,7 +384,7 @@ msgstr "Zelená"
#: searx/templates/courgette/preferences.html:89
msgid "Cyan"
-msgstr ""
+msgstr "Modrozelená"
#: searx/templates/courgette/preferences.html:90
msgid "Orange"
@@ -543,14 +544,14 @@ msgstr "původní kontext"
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Seeder"
-msgstr ""
+msgstr "Seeder"
#: searx/templates/courgette/result_templates/torrent.html:7
#: searx/templates/legacy/result_templates/torrent.html:11
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Leecher"
-msgstr ""
+msgstr "Leecher"
#: searx/templates/courgette/result_templates/torrent.html:9
#: searx/templates/legacy/result_templates/torrent.html:9
@@ -676,7 +677,7 @@ msgstr "Změnít jazyk prostředí"
#: searx/templates/oscar/preferences.html:58
#: searx/templates/simple/preferences.html:60
msgid "Find stuff as you type"
-msgstr ""
+msgstr "Vyhledávat během psaní"
#: searx/templates/oscar/preferences.html:69
#: searx/templates/simple/preferences.html:173
@@ -729,7 +730,7 @@ msgstr "Zkratka"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "Zvolený jazyk"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
@@ -853,7 +854,7 @@ msgstr "Rok"
#: searx/templates/oscar/messages/first_time.html:6
#: searx/templates/oscar/messages/no_data_available.html:3
msgid "Heads up!"
-msgstr "Hlavy vzhůru!"
+msgstr "Hlavu vzhůru!"
#: searx/templates/oscar/messages/first_time.html:7
msgid "It look like you are using searx first time."
@@ -879,7 +880,7 @@ msgstr ""
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "Zkuste prosím později, nebo na jiné instanci searxu."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
@@ -891,7 +892,7 @@ msgstr "Pardón!"
msgid ""
"we didn't find any results. Please use another query or search in more "
"categories."
-msgstr "nenašly jsme žádné výsledky. Prosím použíjte jiný dotaz nebo hledejte ve více kategoriích."
+msgstr "Nenašli jsme žádné výsledky. Použijte prosím jiný dotaz nebo hledejte ve více kategoriích."
#: searx/templates/oscar/messages/save_settings_successfull.html:7
msgid "Well done!"
@@ -1010,4 +1011,4 @@ msgstr ""
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "Soukromí"
diff --git a/searx/translations/cy/LC_MESSAGES/messages.mo b/searx/translations/cy/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..066d268e03
Binary files /dev/null and b/searx/translations/cy/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/cy/LC_MESSAGES/messages.po b/searx/translations/cy/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..3344c6d1d5
--- /dev/null
+++ b/searx/translations/cy/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Aled Powell <[email protected]>, 2019
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2019-01-05 13:50+0000\n"
+"Last-Translator: Aled Powell <[email protected]>\n"
+"Language-Team: Welsh (http://www.transifex.com/asciimoo/searx/language/cy/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: cy\n"
+"Plural-Forms: nplurals=4; plural=(n==1) ? 0 : (n==2) ? 1 : (n != 8 && n != 11) ? 2 : 3;\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr ""
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr ""
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr ""
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "ffeiliau"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "cyffredinol"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "cerddoriaeth"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "cyfryngau cymdeithasol"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "delweddau"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "fideos"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "Technoleg"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "newyddion"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "map"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "gwyddoniaeth"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Gosodiadau annilys. Addasa dy ddewisiadau."
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Gosodiadau annilys"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "gwall chwilio"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} munud yn ôl"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} awr, {minutes} munud yn ôl"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr ""
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr ""
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr ""
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr ""
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr ""
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr ""
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Nifer o ganlyniadau"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Sgoriau"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr ""
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Gwallau"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr ""
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr ""
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr ""
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr ""
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr ""
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr ""
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Agor dolenni canlyniadau mewn tabiau newydd yn y porwr"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Mae canlyniadau fel arfer yn cael eu hagor yn yr un ffenestr. Mae'r ategolyn hwn yn newid hyn fel bod dolenni yn cael eu hagor mewn tabiau/ffenestri newydd. (Angen JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr ""
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr ""
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr ""
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr ""
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr ""
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr ""
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr ""
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr ""
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Mynd i %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "tudalen chwilio"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "dewisiadau"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "ynghylch"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Dewisiadau"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Categorïau arferol"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Iaith chwilio"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Iaith arferol"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Iaith y rhyngwyneb"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Awto-gwblhau"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Galluogwyd"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Analluogwyd"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Dull"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Caeth"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Cymhedrol"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Dim"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Themâu"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Lliw"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Glas (arferol)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Fioled"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Gwyrdd"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Gwyrddlas"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Oren"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Coch"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Categori"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Caniatáu"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Rhwystro"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "cadw"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Ailosod rhagosodiadau"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "nôl"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr ""
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Lawrlwytho'r canlyniadau"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Atebion"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Awgrymiadau"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "tudalen ddiwethaf"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "tudalen nesaf"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Chwilio am..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "cyd-destun gwreiddiol"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Hadau"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Lawrlwythwyr"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "dolen magnet"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "ffeil torrent"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Cliciwch ar y chwyddwydr i berfformio chwiliad"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Canlyniadau mewn tabiau newydd"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Ymlaen"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "I ffwrdd"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr ""
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Gosodiadau uwch"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Cau"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Gwall!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Pwerwyd gan"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "cefnogir"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "ni chefnogir"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Cyffredin"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Peiriannau"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Ategolion"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Atebwyr"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cwcis"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Ym mha iaith wyt ti'n ffafrio chwilio?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Newid iaith rhyngwyneb searX"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Darganfod pethau wrth i chi deipio"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Newid cynllun searX"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Dewis arddull ar gyfer y thema hon"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Arddull"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Llwybr Byr"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Iaith a ddewiswyd"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Cyfnod amser"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Enw"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Allweddeiriau"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Disgrifiad"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Enghreifftiau"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Dyma restr y cwcis, a'u gwerthoedd, mae searX yn eu cadw ar eich dyfais."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Enw cwci"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Gwerth"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr ""
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Canlyniadau chwilio"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Rho gynnig ar chwilio am:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Ni all y peiriannau cael canlyniadau"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Dolenni"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Dechrau chwilio"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "ystadegau"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Unrhyw amser"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Y diwrnod diwethaf"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Yr wythnos diwethaf"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Y mis diwethaf"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Y flwyddyn ddiwethaf"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr ""
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Mae'n ymddangos eich bod yn defnyddio searx am y tro cyntaf."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Gwybodaeth!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Does dim data ar gael ar hyn o bryd."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Ni all y peiriannau cael canlyniadau."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Sori!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "Ni ddaethpwyd o hyd i unrhyw ganlyniadau. Defnyddiwch derm(au) chwilio gwahanol neu ehangu'r chwilio i ragor o gategorïau."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Da iawn!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Cadwyd y gosodiadau yn iawn!"
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr ""
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Aeth rhywbeth o'i le."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "dangos cyfryngau"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "cuddio cyfryngau"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Cael y ddelwedd"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Gweld y ffynhonnell"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "dangos map"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "cuddio map"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "dangos manylion"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "cuddio manylion"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Maint ffeil"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Beitiau"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Nifer o Ffeiliau"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "dangos fideo"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "cuddio fideo"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Dysgu mwy..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Ni chanfuwyd eitem"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Cefnogir yr iaith a ddewiswyd"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Rhyngwyneb defnyddiwr"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Preifatrwydd"
diff --git a/searx/translations/da/LC_MESSAGES/messages.mo b/searx/translations/da/LC_MESSAGES/messages.mo
index 21cea9ef7a..8813c0779a 100644
Binary files a/searx/translations/da/LC_MESSAGES/messages.mo and b/searx/translations/da/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/de/LC_MESSAGES/messages.mo b/searx/translations/de/LC_MESSAGES/messages.mo
index d6458785db..a525fbf1e8 100644
Binary files a/searx/translations/de/LC_MESSAGES/messages.mo and b/searx/translations/de/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/de/LC_MESSAGES/messages.po b/searx/translations/de/LC_MESSAGES/messages.po
index 2bf22911e6..64f253ebd9 100644
--- a/searx/translations/de/LC_MESSAGES/messages.po
+++ b/searx/translations/de/LC_MESSAGES/messages.po
@@ -12,6 +12,7 @@
# Max <[email protected]>, 2015
# pointhi, 2014
# rike, 2014
+# S R <[email protected]>, 2018
# stf <[email protected]>, 2014
# stf <[email protected]>, 2014
# Thomas Pointhuber, 2016-2017
@@ -21,8 +22,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-12-09 13:14+0000\n"
-"Last-Translator: Mario Siegmann <[email protected]>\n"
+"PO-Revision-Date: 2018-12-28 11:02+0000\n"
+"Last-Translator: S R <[email protected]>\n"
"Language-Team: German (http://www.transifex.com/asciimoo/searx/language/de/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -159,25 +160,25 @@ msgstr "Keine Zusammenfassung für die Veröffentlichung verfügbar."
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
-msgstr "Wandelt wenn möglich HTTP Links in HTTPS Links um"
+msgstr "Wandelt wenn möglich HTTP-Links in HTTPS-Links um"
#: searx/plugins/infinite_scroll.py:3
msgid "Infinite scroll"
-msgstr "Undendliches Scrollen"
+msgstr "Unendliches Scrollen"
#: searx/plugins/infinite_scroll.py:4
msgid "Automatically load next page when scrolling to bottom of current page"
-msgstr "Lädt automatisch die nächste Seite wenn das Ende der aktuellen Seite erreicht wurde"
+msgstr "Lädt automatisch die nächste Seite, wenn das Ende der aktuellen Seite erreicht wurde"
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr "Open Access DOI rewrite"
+msgstr "Open-Access-DOI umschreiben"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
"Avoid paywalls by redirecting to open-access versions of publications when "
"available"
-msgstr "Bezahlbeschränkungen durch die Weiterleitung zu der verfügbaren Open-Access Version vermeiden"
+msgstr "Bezahlbeschränkungen durch die Weiterleitung zu der verfügbaren Open-Access-Version vermeiden"
#: searx/plugins/open_results_on_new_tab.py:18
#: searx/templates/oscar/preferences.html:114
@@ -223,7 +224,7 @@ msgstr "An Vim angelehnte Tastenkombinationen"
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr "Navigiere in der Ergebnisseite mit Vim ähnlichen Tastataurkombinationen (es wird JavaScript benötigt).\nDrücke \"h\" auf der Start bzw. Ergebnisseite um eine Hifefenster anzuzeigen"
+msgstr "Navigiere in der Ergebnisseite mit Vim ähnlichen Tastaturkombinationen (es wird JavaScript benötigt).\nDrücke \"h\" auf der Start- bzw. Ergebnisseite, um ein Hifefenster anzuzeigen"
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
diff --git a/searx/translations/el_GR/LC_MESSAGES/messages.mo b/searx/translations/el_GR/LC_MESSAGES/messages.mo
index c9798d318b..2bc6a57a37 100644
Binary files a/searx/translations/el_GR/LC_MESSAGES/messages.mo and b/searx/translations/el_GR/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/en/LC_MESSAGES/messages.mo b/searx/translations/en/LC_MESSAGES/messages.mo
index 9632e6ca50..0c270c06dd 100644
Binary files a/searx/translations/en/LC_MESSAGES/messages.mo and b/searx/translations/en/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/eo/LC_MESSAGES/messages.mo b/searx/translations/eo/LC_MESSAGES/messages.mo
index cb0bc55702..cfad4d49ed 100644
Binary files a/searx/translations/eo/LC_MESSAGES/messages.mo and b/searx/translations/eo/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/eo/LC_MESSAGES/messages.po b/searx/translations/eo/LC_MESSAGES/messages.po
index de31c9a4bf..cd7ffe80eb 100644
--- a/searx/translations/eo/LC_MESSAGES/messages.po
+++ b/searx/translations/eo/LC_MESSAGES/messages.po
@@ -6,13 +6,14 @@
# Jack Stehn <[email protected]>, 2017
# juanda097 <[email protected]>, 2015-2016
# pizzaiolo, 2016
+# Václav Zouzalík <[email protected]>, 2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-11-22 08:33+0000\n"
+"Last-Translator: Václav Zouzalík <[email protected]>\n"
"Language-Team: Esperanto (http://www.transifex.com/asciimoo/searx/language/eo/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -23,15 +24,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "tempolimo"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "escepto de peto"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "neatendita paneo"
#: searx/webapp.py:136
msgid "files"
@@ -47,7 +48,7 @@ msgstr "muziko"
#: searx/webapp.py:139
msgid "social media"
-msgstr "sociaj retservoj"
+msgstr "sociaj retoj"
#: searx/webapp.py:140
msgid "images"
@@ -75,11 +76,11 @@ msgstr "scienco"
#: searx/webapp.py:399 searx/webapp.py:658
msgid "Invalid settings, please edit your preferences"
-msgstr ""
+msgstr "Nevalidaj agordoj, bonvolu redakti viajn agordojn"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Nevalidaj agordoj"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -95,23 +96,23 @@ msgstr "antaŭ {hours} horo(j), {minutes} minuto(j)"
#: searx/answerers/random/answerer.py:53
msgid "Random value generator"
-msgstr ""
+msgstr "Hazardvalora generilo"
#: searx/answerers/random/answerer.py:54
msgid "Generate different random values"
-msgstr ""
+msgstr "Ĝi generas diversajn hazardajn valorojn"
#: searx/answerers/statistics/answerer.py:53
msgid "Statistics functions"
-msgstr ""
+msgstr "Statistikaj funkcioj"
#: searx/answerers/statistics/answerer.py:54
msgid "Compute {functions} of the arguments"
-msgstr ""
+msgstr "Kalkulas {functions} el la argumentoj"
#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
msgid "Engine time (sec)"
-msgstr ""
+msgstr "Motora tempo (s)"
#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
msgid "Page loads (sec)"
@@ -137,19 +138,19 @@ msgstr "Eraroj"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
-msgstr ""
+msgstr "{title} (MALNOVA)"
#: searx/engines/pdbe.py:91
msgid "This entry has been superseded by"
-msgstr ""
+msgstr "Tiu ĉi enigo estis anstataŭigita per"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "Neniu resumo atingeblas por tiu ĉi eldonaĵo."
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
-msgstr "Reverki HTTP ligiloj HTTP se eble"
+msgstr "Ŝanĝi HTTP-ligilojn al HTTPS, se eblas"
#: searx/plugins/infinite_scroll.py:3
msgid "Infinite scroll"
@@ -157,17 +158,17 @@ msgstr "Senfina rulumado"
#: searx/plugins/infinite_scroll.py:4
msgid "Automatically load next page when scrolling to bottom of current page"
-msgstr "Aŭtomate ŝarĝi la sekvan paĝon kiam rulumante al la subo de la nuna paĝo"
+msgstr "Aŭtomate ŝarĝi sekvan paĝon rulumante al la subo de la nuna paĝo"
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "Malfermalira COI-ŝanĝo"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
"Avoid paywalls by redirecting to open-access versions of publications when "
"available"
-msgstr "Eviti pagomurojn alidirektante al liberaj versioj de eldonaĵoj kiam eblas"
+msgstr "Eviti pagomurojn per direkto al malfermaliraj versioj de eldonaĵoj, se eblas"
#: searx/plugins/open_results_on_new_tab.py:18
#: searx/templates/oscar/preferences.html:114
@@ -179,7 +180,7 @@ msgstr "Malfermi rezultligilojn en novaj retumilaj langetoj"
msgid ""
"Results are opened in the same window by default. This plugin overwrites the"
" default behaviour to open links on new tabs/windows. (JavaScript required)"
-msgstr "Oni malfermas rezultojn en la sama langeto defaŭlte. Ĉi tiu aldonaĵo ŝanĝas la kutima agmaniero por malfermi ligilojn en novaj langetoj/fenestroj. (ĜavaSkripto bezonata)"
+msgstr "Oni malfermas rezultojn en la sama langeto defaŭlte. Ĉi tiu aldonaĵo ŝanĝas la kutiman agmanieron por malfermi ligilojn en novaj langetoj/fenestroj. (ĜavoSkripto bezonata)"
#: searx/plugins/search_on_category_select.py:18
msgid "Search on category select"
@@ -189,7 +190,7 @@ msgstr "Serĉi en elektita kategorio"
msgid ""
"Perform search immediately if a category selected. Disable to select "
"multiple categories. (JavaScript required)"
-msgstr "Serĉi tuj se oni elektas kategorion. Malŝaltu ĝin por elekti plurajn kategoriojn (ĜavaSkripto bezonata)"
+msgstr "Serĉi tuj se oni elektas kategorion. Malŝaltu ĝin por elekti plurajn kategoriojn (ĜavoSkripto bezonata)"
#: searx/plugins/self_info.py:20
msgid ""
@@ -203,7 +204,7 @@ msgstr "Forigilo de URL-spuriloj"
#: searx/plugins/tracker_url_remover.py:27
msgid "Remove trackers arguments from the returned URL"
-msgstr "Forviŝi spuraj esprimoj de la URL"
+msgstr "Forviŝi spurajn argumentojn el la ricevita URL"
#: searx/plugins/vim_hotkeys.py:3
msgid "Vim-like hotkeys"
@@ -213,7 +214,7 @@ msgstr "Vim-ŝajnaj klavkomandoj"
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr "Tranavigi serĉrezultojn per Vim-ŝajnaj klavkomandoj (ĜavaSkripto bezonata). Premu \"h\" por helptekstaro en rezultpaĝo."
+msgstr "Tranavigi serĉrezultojn per Vim-ŝajnaj klavkomandoj (ĜavoSkripto bezonata). Premu \"h\" por helptekstaro en ĉef- aŭ rezultpaĝo."
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
@@ -242,7 +243,7 @@ msgstr "Serĉopaĝo"
#: searx/templates/oscar/preferences.html:3
#: searx/templates/pix-art/index.html:8
msgid "preferences"
-msgstr "preferoj"
+msgstr "agordoj"
#: searx/templates/courgette/index.html:11
#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
@@ -256,7 +257,7 @@ msgstr "pri"
#: searx/templates/pix-art/preferences.html:5
#: searx/templates/simple/preferences.html:26
msgid "Preferences"
-msgstr "Preferoj"
+msgstr "Agordoj"
#: searx/templates/courgette/preferences.html:9
#: searx/templates/legacy/preferences.html:9
@@ -282,7 +283,7 @@ msgstr "Serĉolingvo"
#: searx/templates/simple/languages.html:2
#: searx/templates/simple/preferences.html:42
msgid "Default language"
-msgstr ""
+msgstr "Defaŭlta lingvo"
#: searx/templates/courgette/preferences.html:24
#: searx/templates/legacy/preferences.html:25
@@ -400,7 +401,7 @@ msgstr "Ruĝa"
#: searx/templates/pix-art/preferences.html:49
#: searx/templates/simple/preferences.html:77
msgid "Currently used search engines"
-msgstr " Aktuale uzitajn serĉilojn"
+msgstr " Aktuale uzataj serĉiloj"
#: searx/templates/courgette/preferences.html:100
#: searx/templates/legacy/preferences.html:97
@@ -445,7 +446,7 @@ msgstr "Bloki"
msgid ""
"These settings are stored in your cookies, this allows us not to store this "
"data about you."
-msgstr "Tiuj agordoj estas konservitaj en viaj kuketoj kaj tio eblas, ke ni ne konservu tiujn datumojn pri vi."
+msgstr "Tiuj ĉi agordoj estas konservitaj en viaj kuketoj, kio ebligas al ni ne konservi tiujn datumojn pri vi en nia servilo."
#: searx/templates/courgette/preferences.html:124
#: searx/templates/legacy/preferences.html:121
@@ -478,7 +479,7 @@ msgstr "Reagordi al defaŭlto"
#: searx/templates/pix-art/preferences.html:79
#: searx/templates/simple/preferences.html:187
msgid "back"
-msgstr "antaŭe"
+msgstr "antaŭen"
#: searx/templates/courgette/results.html:12
#: searx/templates/legacy/results.html:13
@@ -492,7 +493,7 @@ msgstr "Serĉi URL"
#: searx/templates/oscar/results.html:141
#: searx/templates/simple/results.html:62
msgid "Download results"
-msgstr "Alŝutaj rezultoj"
+msgstr "Elŝuti rezultojn"
#: searx/templates/courgette/results.html:34
#: searx/templates/legacy/results.html:35
@@ -519,7 +520,7 @@ msgstr " antaŭa paĝo"
#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
#: searx/templates/simple/results.html:145
msgid "next page"
-msgstr " sekvanta paĝo"
+msgstr " sekva paĝo"
#: searx/templates/courgette/search.html:3
#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
@@ -615,11 +616,11 @@ msgstr "Fermi"
#: searx/templates/simple/messages/no_results.html:4
#: searx/templates/simple/results.html:25
msgid "Error!"
-msgstr ""
+msgstr "Eraro!"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "Powered by"
-msgstr "Funkciigita de"
+msgstr "Funkciigita per"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "a privacy-respecting, hackable metasearch engine"
@@ -628,15 +629,15 @@ msgstr "kodumebla metaserĉilo kiu respektas vian privatecon"
#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
#: searx/templates/simple/macros.html:43
msgid "proxied"
-msgstr ""
+msgstr "prokurata"
#: searx/templates/oscar/macros.html:92
msgid "supported"
-msgstr ""
+msgstr "subtenata"
#: searx/templates/oscar/macros.html:96
msgid "not supported"
-msgstr ""
+msgstr "nesubtenata"
#: searx/templates/oscar/preferences.html:13
#: searx/templates/oscar/preferences.html:22
@@ -658,7 +659,7 @@ msgstr "Aldonaĵoj"
#: searx/templates/oscar/preferences.html:16
#: searx/templates/oscar/preferences.html:245
msgid "Answerers"
-msgstr ""
+msgstr "Respondiloj"
#: searx/templates/oscar/preferences.html:17
#: searx/templates/oscar/preferences.html:272
@@ -673,7 +674,7 @@ msgstr "Kiun lingvon vi pli ŝatas por serĉi?"
#: searx/templates/oscar/preferences.html:48
#: searx/templates/simple/preferences.html:128
msgid "Change the language of the layout"
-msgstr "Ŝanĝi la fasonadan lingvon"
+msgstr "Ŝanĝi lingvon de la fasono"
#: searx/templates/oscar/preferences.html:58
#: searx/templates/simple/preferences.html:60
@@ -690,7 +691,7 @@ msgid ""
"Change how forms are submited, <a "
"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
" rel=\"external\">learn more about request methods</a>"
-msgstr " Ŝanĝi kiel formoj estas senditaj, < href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\"> Lerni pli pri peto-metodoj</> "
+msgstr "Ŝanĝi kiel formoj estas sendataj, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">sciu pli pri peto-metodoj</a> "
#: searx/templates/oscar/preferences.html:87
#: searx/templates/simple/preferences.html:71
@@ -700,7 +701,7 @@ msgstr "Filtri enhavon"
#: searx/templates/oscar/preferences.html:97
#: searx/templates/simple/preferences.html:139
msgid "Change searx layout"
-msgstr "Ŝanĝi searx-fasonadon"
+msgstr "Ŝanĝi fasonon de Searx"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
@@ -714,13 +715,13 @@ msgstr "Stilo"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Malfermalira COI-solvilo"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "Direkti al malfermaliraj versioj de eldonaĵoj, se eblas (aldonaĵo necesas)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -731,13 +732,13 @@ msgstr "Fulmoklavo"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "Elekti lingvon"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
#: searx/templates/simple/preferences.html:91
msgid "Time range"
-msgstr ""
+msgstr "Tempa intervalo"
#: searx/templates/oscar/preferences.html:167
#: searx/templates/oscar/preferences.html:171
@@ -753,29 +754,29 @@ msgstr "Maksimuma tempo"
#: searx/templates/oscar/preferences.html:248
msgid "This is the list of searx's instant answering modules."
-msgstr ""
+msgstr "Tio ĉi estas listo de tuje respondantaj moduloj de Searx."
#: searx/templates/oscar/preferences.html:252
msgid "Name"
-msgstr ""
+msgstr "Nomo"
#: searx/templates/oscar/preferences.html:253
msgid "Keywords"
-msgstr ""
+msgstr "Ŝlosilvortoj"
#: searx/templates/oscar/preferences.html:254
msgid "Description"
-msgstr ""
+msgstr "Priskribo"
#: searx/templates/oscar/preferences.html:255
msgid "Examples"
-msgstr ""
+msgstr "Ekzemploj"
#: searx/templates/oscar/preferences.html:275
msgid ""
"This is the list of cookies and their values searx is storing on your "
"computer."
-msgstr "Ĉi tiu estas la listo de kuketoj kaj siaj valoroj, kiujn searx konservas en via komputilo."
+msgstr "Ĉi tio estas listo de kuketoj kaj iliaj valoroj, kiujn searx konservas en via komputilo."
#: searx/templates/oscar/preferences.html:276
msgid "With that list, you can assess searx transparency."
@@ -791,13 +792,13 @@ msgstr "Valoro"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "Serĉo-URL kun aktuale konservitaj agordoj"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "Rimarko: Precizigo de propraj agordoj en la serĉo-URL povas malaltigi privatecon per nevola diskonigo de la datumoj al alklikantaj retejoj."
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -806,12 +807,12 @@ msgstr "Serĉrezultoj"
#: searx/templates/oscar/results.html:21
#: searx/templates/simple/results.html:84
msgid "Try searching for:"
-msgstr ""
+msgstr "Provu serĉi:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "Motoroj ne povas trovi rezultojn"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -830,7 +831,7 @@ msgstr "statistikoj"
#: searx/templates/oscar/time-range.html:3
#: searx/templates/simple/time-range.html:3
msgid "Anytime"
-msgstr "Ie"
+msgstr "Iam ajn"
#: searx/templates/oscar/time-range.html:6
#: searx/templates/simple/time-range.html:6
@@ -850,7 +851,7 @@ msgstr "Pasinta monato"
#: searx/templates/oscar/time-range.html:15
#: searx/templates/simple/time-range.html:15
msgid "Last year"
-msgstr ""
+msgstr "Pasinta jaro"
#: searx/templates/oscar/messages/first_time.html:6
#: searx/templates/oscar/messages/no_data_available.html:3
@@ -859,7 +860,7 @@ msgstr "Atentu!"
#: searx/templates/oscar/messages/first_time.html:7
msgid "It look like you are using searx first time."
-msgstr "Ŝajnas, ke ĉi tiu estas via unua fojo uzante searx"
+msgstr "Ŝajnas, ke ĉi tio estas via unua fojo, kiam vi uzas searx."
#: searx/templates/oscar/messages/no_cookies.html:3
msgid "Information!"
@@ -867,7 +868,7 @@ msgstr "Informoj!"
#: searx/templates/oscar/messages/no_cookies.html:4
msgid "currently, there are no cookies defined."
-msgstr "ĉi-momente, ne estas kuketoj difinitaj."
+msgstr "nun ne estas ajnaj kuketoj difinitaj."
#: searx/templates/oscar/messages/no_data_available.html:4
msgid "There is currently no data available. "
@@ -876,17 +877,17 @@ msgstr "Nun ne estas datumoj disponeblaj."
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "Motoroj ne povas trovi rezultojn."
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "Bonvolu provi ĝin poste aŭ trovi aliajn searx-instancon."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
msgid "Sorry!"
-msgstr "Mizera!"
+msgstr "Pardonu!"
#: searx/templates/oscar/messages/no_results.html:15
#: searx/templates/simple/messages/no_results.html:15
@@ -952,7 +953,7 @@ msgstr "kaŝi detalojn"
#: searx/templates/oscar/result_templates/torrent.html:7
#: searx/templates/simple/result_templates/torrent.html:11
msgid "Filesize"
-msgstr "Dosiergrando"
+msgstr "Dosiergrandeco"
#: searx/templates/oscar/result_templates/torrent.html:9
#: searx/templates/simple/result_templates/torrent.html:12
@@ -1000,16 +1001,16 @@ msgstr "Ŝarĝi pli..."
#: searx/templates/simple/base.html:31
msgid "No item found"
-msgstr ""
+msgstr "Nenio trovita"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
-msgstr ""
+msgstr "Subtenas elektitan lingvon"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
-msgstr ""
+msgstr "Fasado"
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "Privateco"
diff --git a/searx/translations/es/LC_MESSAGES/messages.mo b/searx/translations/es/LC_MESSAGES/messages.mo
index c8c08316e9..358cce8cbd 100644
Binary files a/searx/translations/es/LC_MESSAGES/messages.mo and b/searx/translations/es/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/eu/LC_MESSAGES/messages.mo b/searx/translations/eu/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..db58fdc84a
Binary files /dev/null and b/searx/translations/eu/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/eu/LC_MESSAGES/messages.po b/searx/translations/eu/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..b6fa194e10
--- /dev/null
+++ b/searx/translations/eu/LC_MESSAGES/messages.po
@@ -0,0 +1,1015 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# beriain <[email protected]>, 2018
+# beriain <[email protected]>, 2018
+# Txopi <[email protected]>, 2016
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-11-06 16:39+0000\n"
+"Last-Translator: beriain <[email protected]>\n"
+"Language-Team: Basque (http://www.transifex.com/asciimoo/searx/language/eu/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: eu\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "denbora agortzea"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "salbuespena eskaeran"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "ustekabeko gelditzea"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "fitxategiak"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "orokorra"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "musika"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "multimedia soziala"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "irudiak"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "bideoak"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "it"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "berriak"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "mapa"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "zientzia"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Ezarpen ez baliodunak, mesedez editatu zure hobespenak"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Ezarpen ez baliodunak"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "bilaketa akatsa"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "duela {minutes} minutu"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "duela {hours} ordu eta {minutes} minutu"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Ausazko balio sortzailea"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Ausazko balio ezberdinak sortu"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Funtzio estatistikoak"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Parametroen {functions} zenbatu"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Bilatzailearen denbora (seg)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Orri kargak (seg)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Emaitza kopurua"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Balorazioak"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Balorazioak emaitza bakoitzeko"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Erroreak"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (ZAHARKITUA)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Sarrera hau hurrengoarekin ordezkatu da"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "Ez dago abstrakturik eskuragarri argitalpen honetarako."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "HTTP loturak HTTPS bihurtu ahal denean"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Korritze amaigabea"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Hurrengo orria automatikoki kargatu uneko orriaren behekaldera mugitzerakoan"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Berridatzi Open Access DOI"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Ordainketa hormak sahiestu argitalpenen sartze-askeko bertsioetara berbidaliz ahal denean"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Emaitzen estekak nabigatzailearen fitxa berrietan ireki"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Emaitzak leiho berdinean irekitzen dira lehenetsi bezala. Plugin honek lehenetsitako jokabidea aldatzen du estekak fitxa/leiho berrietan irekitzeko. (JavaScript behar du)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Bilatu kategoria hautatzerakoan"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Bilaketa egin kategoria hautatu bezain laster. Ezgaitu ezazu hainbat kategoria hautatu ahal izateko. (JavaScript behar du)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Zure IPa erakutsi bilatutakoa \"ip\" bada eta zure user agenta bilatutakoa \"user agent\" bada."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "URL aztarnariak kendu"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Aztarnarien argumentuak kendu itzulitako URLtik"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Vim antzeko laster-teklak"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Emaitzetan zehar Vim bezalako tekla azkarrekin nabigatu (JavaScript behar du). Sakatu \"h\" tekla orri nagusian edo emaitzen orrian laguntza ikusteko."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Orria ez da aurkitu"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "%(search_page)s(e)ra joan."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "bilaketa orria"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "hobespenak"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "honi buruz"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Hobespenak"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Lehenetsitako kategoriak"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Bilaketaren hizkuntza"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Lehenetsitako hizkuntza"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Interfazearen hizkuntza"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Osatze automatikoa"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Irudietarako proxya"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Gaituta"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Desgaituta"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Metodoa"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "Bilaketa segurua"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Zorrotza"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Moderatua"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Bat ere ez"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Itxurak"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Kolorea"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Urdina (lehenetsia)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Bioleta"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Berdea"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Zian"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Laranja"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Gorria"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Erabiliak izaten ari diren bilatzaileak"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Bilatzailearen izena"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Kategoria"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Baimendu"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Blokeatu"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Ezarpen hauek zure cookietan gurdetzen dira, honek zuri buruzko informaziorik ez gordetzea baimentzen digu."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Cookie hauek zure onurarako besterik ez dira, ez ditugu zure jarraipenik egiteko erabiltzen."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "gorde"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Berrezarri lehenetsiak"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "atzera"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "Bilaketa URLa"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Emaitzak deskargatu"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Erantzunak"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Iradokizunak"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "aurreko orrialdea"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "hurrengo orrialdea"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Bilatu..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Bilatzaileen estatistikak"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "jatorrizko testuingurua"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Ereilea"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Izaina"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "magnet lotura"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "torrent fitxategia"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Lupan sakatu bilaketa egiteko"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Emaitzak fitxa berrietan"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Piztuta"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Itzalita"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "cacheatuta"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Ezarpen aurreratuak"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Itxi"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Errorea!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Honek bultzatua"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "pribatutasun-errespetatzaile, metabilaketa motor hackeagarri bat"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "proxyatuta"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "onartua"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "ez onartua"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Orokorra"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Bilatzaileak"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Pluginak"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Erantzun emaileak"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cookieak"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Zein hizkuntzan egin nahi duzu bilaketa?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Interfazearen hizkuntza aldatu"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Gauzak aurkitu idatzi bitartean"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Irudien emaitzak searx bitartez proxyatu"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Aldatu inprimakiak nola bidaltzen diren, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">ikasi gehiago eskaera metodoen inguruan</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Edukia iragazi"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Searxen diseinua aldatu"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Gai honetarako estiloa hautatu"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Estiloa"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Open Access DOI ebatzi"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Argitalpenen sartze-askeko bertsioetara berbidali ahal denean (plugina behar du)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Lasterbidea"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Hautatutako hizkuntza"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Denbora tartea"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr " Batezbesteko denbora"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Gehienezko denbora"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Hau da searxen berehalako erantzunen moduluen zerrenda."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Izena"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Gako-hitzak"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Deskripzioa"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Adibideak"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Hau searxek zure ordenagailuan gordetzen ari den cookien eta haien balioen zerrenda bat da."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Zerrenda horrekin, searxen gardentasuna balioztatu dezakezu."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Cookiearen izena"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Balioa"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "Une honetan gordetako hobespenen bilaketa URLa"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Oharra: bilaketa URLan ezarpen pertsonalizatuak zehazteak pribatutasuna txikiagotu dezake klikatutako erantzun guneetara datuak emanez"
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Bilaketaren emaitzak"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Saiatu hau bilatzen:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Bilatzaileek ezin dute emaitzik lortu"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Estekak"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Bilaketa hasi"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "estatistikak"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Edonoiz"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Azken eguna"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Azken astea"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Azken hilabetea"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Azken urtea"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Kasu!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Searx lehen aldiz erabiltzen ari zarela ematen du."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Informazioa!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "une honetan, ez dago cookierik definitutik."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Une honetan ez dago daturik eskuragarri."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Bilatzaileek ezin dute emaitzik lortu."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Mesedez, saiatu berriz beranduago edo bila ezazu beste searx instantzia bat."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Barkatu!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "ez dugu emaitzik aurkitu. Mesedez beste kontsulta bat egin edo bilatu kategoria gehiagotan."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Ondo egina!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Ezarpenak ongi gorde dira."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Hara!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Zerbait gaizki joan da."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "media erakutsi"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "media ezkutatu"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Irudia eskuratu"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Iturria ikusi"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "mapa erakutsi"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "mapa ezkutatu"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "xehetasunak erakutsi"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "xehetasunak ezkutatu"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Fitxategi neurria"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Byteak"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Fitxategi kopurua"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "erakutsi bideoa"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "ezkutatu bideoa"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Kargatu gehiago..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Ez da elementurik aurkitu"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Hautatutako hizkuntza onartzen du"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Erabiltzailearen interfazea"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Pribatutasuna"
diff --git a/searx/translations/fa_IR/LC_MESSAGES/messages.mo b/searx/translations/fa_IR/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..4ef71416b3
Binary files /dev/null and b/searx/translations/fa_IR/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/fa_IR/LC_MESSAGES/messages.po b/searx/translations/fa_IR/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..0e568e1db8
--- /dev/null
+++ b/searx/translations/fa_IR/LC_MESSAGES/messages.po
@@ -0,0 +1,1015 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Aurora, 2018
+# Jim <[email protected]>, 2017
+# Mostafa Ahangarha <[email protected]>, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-04-16 16:41+0000\n"
+"Last-Translator: Aurora\n"
+"Language-Team: Persian (Iran) (http://www.transifex.com/asciimoo/searx/language/fa_IR/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: fa_IR\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "پایان زمان<br>"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "خطا در درخواست"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "ایست ناگهانی"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "فایل ها<br>"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "فراگیر"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "موسیقی"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "رسانه اجتماعی"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "تصاویر<br>"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "ویدیو ها<br>"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "فناوری اطلاعات"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "اخبار"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "نقشه"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "دانش<br>"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "تنظیمات نادرست است، لطفا اولویتهای جستجو را تغییر دهید"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "تنظیمات اشتباه"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "خطای جستجو"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} دقیقه پیش"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} ساعت و {minutes} دقیقه پیش"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "ایجاد کننده ی مقدار تصادفی"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "ایجاد مقادیر تصادفی متفاوت"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "توابع آماری"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "پردازش {عملکرد های} نشانوند ها<br>"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "زمان موتور(ثانیه)<br>"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "زمان بارگذاری صفحه (ثانیه)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "تعداد نتایج"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "امتیازات<br>"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "امتیازات بر نتیجه<br>"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "خطاها"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (OBSOLETE)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "این ورودی معلق شده است توسط"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "هیچ چکید ای برای این نشریه در دسترس نیست.<br>"
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "تغییر پیوند های HTTP به HTTPS در صورت امکان"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "پایین رفتن بیپایان"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "بارگذاری خودکار صفحه بعد در صورت پیمایش تا پایین صفحه کنونی"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "بازنویسی Open Access DOI<br>"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "امتناع از منابع غیر رایگان با تغییر مسیر به نسخه ی رایگان نشریات اگر در دسترس باشد<br>"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "باز کردن لینک های نتیجه در برگهی جدید مرورگر"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "به طور پیشفرض، نتایج در پنجره ی کنونی باز میشوند. این افزونه، رفتار پیشفرض را برای بازشدن پیوند در پنجره/برگه جدید تغییر میدهد. (نیازمند جاوااسکریپت)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "جستجو به محض انتخاب یک دسته<br>"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "جستجو به محض انتخاب یک دسته. برای انتخاب چند دسته این گزینه را غیرفعال کنید. (نیازمند جاواسکریپت)<br>"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "اگر آی پی شما در صورت جستجو برای 'ip' و نشان دادن عامل کاربر در صورت جستجو برای 'user agent'.<br>"
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "از بین برنده ی آدرس ردیاب ها<br>"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "حذف نشانوند های ردیاب ها از آدرس برگشتی"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "کلیدهای میانبر شبیه Vim<br>"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "جابجایی در نتایج با کلیدهای میانبر مشابه Vim (نیازمند جاوااسکریپت). در صفحه اصلی و یا صفحه نتیجه، دکمه h را برای نمایش راهنما بفشارید."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "صفحه پیدا نشد"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "برو به%(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "صفحه جستجو<br>"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "تنظیمات<br>"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "درباره<br>"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "تنظیمات<br>"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "دستهبندی های پیشفرض"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "زبان جستجو"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "زبان پیشفرض"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "زبان رابط کاربری"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "تکمیل خودکار<br>"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "پراکسی تصویر<br>"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "فعال<br>"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "غیرفعال"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "روش<br>"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "جستجوی امن"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "سخت گیر<br>"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "متوسط<br>"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "هیچ<br>"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "تم ها<br>"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "رنگ"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "آبی (پیشفرض)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "بنفش"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "سبز<br>"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "فیروزهای"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "نارنجی"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "قرمز"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "موتورهای جستجوی در حال استفاده"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "نام موتور"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "دسته"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "اجازه"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "انسداد<br>"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "این تنظیمات در کوکی های شما ذخیره شده اند، این به ما اجازه می دهد این اطلاعات را درباره شما ذخیره نکنیم.<br>"
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "این کوکی ها برای راحتی شماست، ما از این کوکی برای ردیابی شما استفاده نمیکنیم.<br>"
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "ذخیره"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "بازنشانی پیشفرض ها<br>"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "عقب<br>"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "آدرس جستجو<br>"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "نتایج دانلود<br>"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "پاسخ ها<br>"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "پیشنهادات"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "صفحه پیش"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "صفحه بعد"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "جستجو برای …"
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "آمار موتور<br>"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "متن اصلی<br>"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "سیدر<br>"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "لیچر<br>"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "لینک مگنت<br>"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "فایل تورنت<br>"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "برای اجرای جستجو روی ذره بین کلیک کنید<br>"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "نتایج در برگه جدید"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "روشن<br>"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "خاموش<br>"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "ذخیره شده<br>"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "تنظیمات پیشرفته<br>"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "بستن<br>"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "خطا!<br>"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "قدرت گرفته از<br>"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "یک ابر موتور جستجوی حافظ حریم شخصی"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "پراکسی شده<br>"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "پشتیبانی شده<br>"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "پشتیبانی نشده<br>"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "کلی<br>"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "موتور ها<br>"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "افزونه ها"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "پاسخگو ها<br>"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "کوکی ها<br>"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "چه زبانی را برای جستجو ترجیح میدهید؟"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "تغییر زبان رابط کاربری"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "یافتن مطالب حین نوشتن"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "پراکسی کردن نتایج تصویری از طریق searx<br>"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "چگونگی ثبت فرم ها را تغییر دهید، درباره ی متود های درخواست بیشتر بدانید"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "فیلتر کردن محتوا"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "رابط کاربری searx را تغییر دهید<br>"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "سبک این پوسته را انتخاب کنید"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "سبک"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "حل کننده ی Open Access DOI<br>"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "هدایت به نسخه رایگان نشریات اگر در دسترس باشد(نیازمند به افزونه)<br>"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "میانبر<br>"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "زبان انتخابی<br>"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "بازه ی زمانی<br>"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "زمان میانگین"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "حداکثر زمان"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "این، فهرست ماژولهای پاسخ بلادرنگ searx است."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "نام"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "کلیدواژه ها<br>"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "شرح<br>"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "مثال ها<br>"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "این، لیست کوکیها و مقادیری است که searx روی دستگاه شما ذخیره میکند."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "با آن لیست، میتوانید شفافیت searx را ارزیابی کنید."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "نام کوکی<br>"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "مقدار<br>"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "آدرس جستجو بر اساس تنظیمات ذخیره شده<br>"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "هشدار: تعیین تنظیمات شخصی در آدرس جستجو میتواند حریم شخصی شما را به خطر بیاندازد با درز کردن اطلاعات به سایت های نتایج انتخاب شده.<br>"
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "نتایج جستجو<br>"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "تلاش کنید برای جستجوی:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "موتور ها قادر به دریافت نتایج نیستند<br>"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "لینک ها<br>"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "شروع جستجو<br>"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "آمار<br>"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "هر زمانی<br>"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "روز گذشته"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "هفته گذشته"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "ماه گذشته"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "سال گذشته"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "بالاخره!<br>"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "به نظر میرسد اولین باری است که از searx استفاده میکنید."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "اطلاعات!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "در حال حاضر کوکیای تعریف نشده است."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "در حال حاضر هیچ دادهای در دسترس نیست."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "موتورها قادر به دریافت نتایج نیستند."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "لطفا بعدا دوباره تلاش کنید و یا به دنبال نمونهای دیگری از searx بگردید."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "ببخشید!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "چیزی پیدا نشد. لطفا جستار دیگری را امتحان و یا در دسته های بیشتری جستجو کنید."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "آفرین!<br>"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "تنظیمات با موفقیت ذخیره شد!<br>"
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "ای وای! خراب شد!<br>"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "مشکلی رخ داد."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "نمایش رسانه<br>"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "پنهان سازی رسانه<br>"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "دریافت تصویر"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "نمایش منبع"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "نمایش نقشه"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "پنهانسازی نقشه"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "نمایش جزئیات"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "پنهانسازی جزئیات"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "اندازه فایل<br>"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "بایت"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "کیلوبایت"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "مگابایت"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "گیگابایت"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "ترابایت"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "تعداد فایل ها"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "نمایش ویدئو"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "پنهانسازی ویدئو"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "بیشتر…<br>"
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "چیزی پیدا نشد"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "زبان انتخاب شده را پشتیبانی میکند"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "رابط کاربری"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "حریم خصوصی"
diff --git a/searx/translations/fi/LC_MESSAGES/messages.mo b/searx/translations/fi/LC_MESSAGES/messages.mo
index c6aa15235d..b3905ca2f4 100644
Binary files a/searx/translations/fi/LC_MESSAGES/messages.mo and b/searx/translations/fi/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/fil/LC_MESSAGES/messages.mo b/searx/translations/fil/LC_MESSAGES/messages.mo
index df14318fbf..f1b4e75c77 100644
Binary files a/searx/translations/fil/LC_MESSAGES/messages.mo and b/searx/translations/fil/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/fr/LC_MESSAGES/messages.mo b/searx/translations/fr/LC_MESSAGES/messages.mo
index f9a72717a6..7fd3ee8917 100644
Binary files a/searx/translations/fr/LC_MESSAGES/messages.mo and b/searx/translations/fr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/fr/LC_MESSAGES/messages.po b/searx/translations/fr/LC_MESSAGES/messages.po
index 60c8038059..4c076ddbf7 100644
--- a/searx/translations/fr/LC_MESSAGES/messages.po
+++ b/searx/translations/fr/LC_MESSAGES/messages.po
@@ -3,7 +3,7 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# Alexandre Flament <[email protected]>, 2017
+# Alexandre Flament <[email protected]>, 2017-2018
# Benjamin Sonntag <[email protected]>, 2014
# Cqoicebordel <[email protected]>, 2014
# Cqoicebordel <[email protected]>, 2014-2017
@@ -16,8 +16,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-03 12:00+0000\n"
-"Last-Translator: Cqoicebordel <[email protected]>\n"
+"PO-Revision-Date: 2018-04-03 08:18+0000\n"
+"Last-Translator: Alexandre Flament <[email protected]>\n"
"Language-Team: French (http://www.transifex.com/asciimoo/searx/language/fr/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -84,7 +84,7 @@ msgstr "Paramètres non valides, veuillez éditer vos préférences"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr "Réglages non valides"
+msgstr "Paramètres non valides"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -100,7 +100,7 @@ msgstr "il y a {hours} heure(s), {minutes} minute(s)"
#: searx/answerers/random/answerer.py:53
msgid "Random value generator"
-msgstr "Générateur aléatoire"
+msgstr "Générateur de valeur aléatoire"
#: searx/answerers/random/answerer.py:54
msgid "Generate different random values"
@@ -138,7 +138,7 @@ msgstr "Score par résultat"
#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
msgid "Errors"
-msgstr "Erreurs"
+msgstr "Erreur"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
@@ -663,7 +663,7 @@ msgstr "Plugins"
#: searx/templates/oscar/preferences.html:16
#: searx/templates/oscar/preferences.html:245
msgid "Answerers"
-msgstr "Répondants"
+msgstr "Réponses instantanées"
#: searx/templates/oscar/preferences.html:17
#: searx/templates/oscar/preferences.html:272
diff --git a/searx/translations/gl/LC_MESSAGES/messages.mo b/searx/translations/gl/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..30d7b0dd60
Binary files /dev/null and b/searx/translations/gl/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/gl/LC_MESSAGES/messages.po b/searx/translations/gl/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..3e1e2230fd
--- /dev/null
+++ b/searx/translations/gl/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Xosé M. Lamas <[email protected]>, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-04-20 11:00+0000\n"
+"Last-Translator: Xosé M. Lamas <[email protected]>\n"
+"Language-Team: Galician (http://www.transifex.com/asciimoo/searx/language/gl/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: gl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "caducidade"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "excepción na petición"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "fallo non agardado"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "ficheiros"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "xeral"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "música"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "Medios sociais"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "imaxes"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "vídeos"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "TIC"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "novas"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "mapa"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "ciencia"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Axustes non válidos, por favor edite a configuración"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Axustes non válidos"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "fallo na busca"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "hai {minutes} minuto(s)"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "hai {hours} hora(s), {minutes} minuto(s)"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Xerador de valor aleatorio"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Xerar diferentes valores aleatorios"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Funcións de estatística"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Calcule {functions} dos argumentos"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Tempo de busca (sec)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Cargou en (seg)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Número de resultados"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Puntuacións"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Puntuacións por resultado"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Fallos"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (OBSOLETO)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Esta entrada foi proporcionada por"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "Non hai dispoñible un extracto para esta publicación."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "Reescribir ligazóns HTTP a HTTPS si fose posible"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Desplazamento infinito"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Cargar automáticamente a seguinte páxina ao desplazarse ao fondo da páxina actual"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Reescritura Open Access DOI"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Evitar muros de pago redirecciionando a versións públicas das publicacións cando estén dispoñibles"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Abrir ligazóns de resultados en novas lapelas do navegador"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Por omisión, os resultados ábrense na mesma lapela. Este engadido sobreescribe o comportamento por omisión para abrir as ligazóns en novas lapelas/ventás. (Require JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Busca en categoría seleccionada"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Busca ao momento si hai unha categoría seleccionada. Desactivar para seleccionar múltiples categorías. (Require JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Mostra o seu IP si a consulta é \"ip\" e o seu Use Agent si a consulta contén \"user agent\"."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Eliminador de rastrexadores na URL"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Elimina os argumentos de rastrexo da URL devolta"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Atallos como os de Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Navegar nos resultados da busca con atallos como os de Vim (require JavaScript). Pulse \"h\" na pantalla principal ou de resultados para obter axuda."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Páxina non atopada"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Ir a %(search_page)s"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "páxina de busca"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "axustes"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "Sobre"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Axustes"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Categorías por omisión"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Idioma de busca"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Idioma por omisión"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Idioma da interface"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Autocompletar"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Proxy de imaxes"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Activado"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Desactivado"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Método"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "Busca segura"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Stricta"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Moderada"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Ningunha"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Decorado"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Cor"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Azul (por omisión)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Violeta"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Verde"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Cian"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Laranxa"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Vermello"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Motores de busca utilizados actualmente"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Nome do motor"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Categoría"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Permitir"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Bloquear"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Estos axustes gárdanse en testemuños, esto permítenos non ter que gardar ningún datos sobre vostede."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Estos testemuños son para a súa conveniencia, non utilizamos estos testemuños para rastrexala."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "gardar"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Restablecer"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "atrás"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "Busca URL"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Descargar resultados"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Respostas"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Suxestións"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "páxina anterior"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "páxina seguinte"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Buscar por..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Estatísticas do buscador"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "contexto orixinal"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Sementadora"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Cliente"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "ligazón magnet"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "ficheiro torrent"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Pulse na lupa para realizar a busca"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Resultados en novas lapelas"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "On"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Off"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "en memoria"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Axustes avanzados"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Pechar"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Fallo!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Proporcionado por"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "un metabuscador configurable e respetuoso coa intimidade"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "a través de proxy"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "soportado"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "non soportado"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Xeral"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Motores"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Engadidos"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Respostas"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Testemuños"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Qué idioma prefire para buscar?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Cambiar o idioma da disposición"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Buscar nas cousas metras escribe"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Utilizar o proxy de searx para as imaxes dos resultados"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Cambiar cómo se envían os formularios, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">coñeza máis sobre os métodos de consulta</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Filtro de contido"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Cambiar a disposición de searx"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Escolla o estilo para este decorado"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Estilo"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Resolutor Open Access DOI"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Redireccionar a versións abertas das publicacións cando estén dispoñibles (require o engadido)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Atallo"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Idioma seleccionado"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Rango temporal"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Tempo medio"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Tempo máx."
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Este é o listado dos módulos de respostas instantáneas de searx"
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Nome"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Palabras chave"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Descrición"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Exemplos"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Este é o listados dos testemuños e os seus valores que searx almacena na súa computadora."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Con esta lista vostede pode comprobar a transparencia de searx."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Nome do testemuño"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Valor"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "URL de busca dos axustes gardados actualmente."
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Nota: indicando axustes personalizados na URL de busca pode reducir a súa intimidade ao filtrar datos aos sitios web dos resultados."
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Resultados da busca"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Intente buscar:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Os buscadores non obtiveron resultados"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Ligazóns"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Iniciar busca"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "estatísticas"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Calquer momento"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Último día"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Última semana"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Último mes"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Último ano"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Heads up!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Semella que é a primeira vez que utiliza searx."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Información!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "actualmente non hai testemuños establecidos."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Non hai datos dispoñibles."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Os buscadores non poden obter resultados."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Por favor, inténteo de novo máis tarde ou busque outra instancia de searx."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Sentímolo!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "non atopamos ningún resultado. Por favor, utilice outra consulta ou busque en máis categorías."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Ben feito!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Gardáronse correctamente os Axustes."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Vaia!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Algo fallou."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "mostrar medios"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "agochar medios"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Obter imaxe"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Ver fonte"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "mostrar mapa"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "agochar mapa"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "mostrar detalles"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "agochar detalles"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Tamaño do ficheiro"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Bytes"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Número de ficheiros"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "mostrar vídeo"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "agochar vídeo"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Cargar máis..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Non se atoparon elementos"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Soporta o idioma seleccionado"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Interface de usuaria"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Intimidade"
diff --git a/searx/translations/he/LC_MESSAGES/messages.mo b/searx/translations/he/LC_MESSAGES/messages.mo
index 10bb25c50c..c034025007 100644
Binary files a/searx/translations/he/LC_MESSAGES/messages.mo and b/searx/translations/he/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/hr/LC_MESSAGES/messages.mo b/searx/translations/hr/LC_MESSAGES/messages.mo
index ee986d5c9d..9e232a1513 100644
Binary files a/searx/translations/hr/LC_MESSAGES/messages.mo and b/searx/translations/hr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/hu/LC_MESSAGES/messages.mo b/searx/translations/hu/LC_MESSAGES/messages.mo
index 4e06f2fd37..392dc99dff 100644
Binary files a/searx/translations/hu/LC_MESSAGES/messages.mo and b/searx/translations/hu/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/it/LC_MESSAGES/messages.mo b/searx/translations/it/LC_MESSAGES/messages.mo
index 49dabadcf1..53eca9e674 100644
Binary files a/searx/translations/it/LC_MESSAGES/messages.mo and b/searx/translations/it/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/it/LC_MESSAGES/messages.po b/searx/translations/it/LC_MESSAGES/messages.po
index b2db9b3f7f..c2d5c85887 100644
--- a/searx/translations/it/LC_MESSAGES/messages.po
+++ b/searx/translations/it/LC_MESSAGES/messages.po
@@ -5,18 +5,20 @@
# Translators:
# Adam Tauber <[email protected]>, 2018
# caoswave, 2016
-# caoswave, 2016-2017
+# caoswave, 2016-2018
# dp <[email protected]>, 2014
# dp <[email protected]>, 2014,2017
+# Federico <[email protected]>, 2018
# Luca C <[email protected]>, 2017
# Luc <[email protected]>, 2015
+# Random_R, 2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-02-01 22:21+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-10-16 15:53+0000\n"
+"Last-Translator: caoswave\n"
"Language-Team: Italian (http://www.transifex.com/asciimoo/searx/language/it/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -31,7 +33,7 @@ msgstr "tempo scaduto"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "eccezione della richiesta"
#: searx/search.py:151
msgid "unexpected crash"
@@ -149,7 +151,7 @@ msgstr "Questa voce è stata sostituita da"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "Nessun sommario disponibile per questa pubblicazione"
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
@@ -199,7 +201,7 @@ msgstr "Esegui la ricerca immediatamente se una categoria è selezionata. Disabi
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr "Mostra il tuo IP se hai cercato \\\"ip\\\" ed il tuo user agent se hai cercato \\\"user agent\\\"."
+msgstr "Mostra il tuo IP se hai cercato \"ip\" ed il tuo user agent se hai cercato \"user agent\"."
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
@@ -217,7 +219,7 @@ msgstr "Scorciatoie in stile Vim"
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr "Usa comandi in stile Vim per navigare tra i risultati (JavaScript necessario). Premi il tasto \\\"h\\\" per visualizzare la finestra d'aiuto."
+msgstr "Usa comandi in stile Vim per navigare tra i risultati (JavaScript necessario). Premi il tasto \"h\" per visualizzare la finestra d'aiuto."
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
@@ -718,13 +720,13 @@ msgstr "Stile"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Resolver Open Access DOI"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "Indirizza a versioni open-access delle pubblicazioni quando disponibili (plugin richiesto)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -795,13 +797,13 @@ msgstr "Valore"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "Cerca URL delle preferenze attualmente salvate"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "Nota: specificando impostazioni personalizzate nella ricerca URL può ridurre la privacy facendo traperlare dati ai siti cliccati"
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -815,7 +817,7 @@ msgstr "Prova a cercare:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "I motori di ricerca non riescono a recuperare risultati"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -880,7 +882,7 @@ msgstr "Non ci sono dati attualmente disponibili."
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "I motori di ricerca non riescono a recuperare risultati"
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
diff --git a/searx/translations/ja/LC_MESSAGES/messages.mo b/searx/translations/ja/LC_MESSAGES/messages.mo
index 39ea698cbc..eb1267287a 100644
Binary files a/searx/translations/ja/LC_MESSAGES/messages.mo and b/searx/translations/ja/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ja/LC_MESSAGES/messages.po b/searx/translations/ja/LC_MESSAGES/messages.po
index 11a297ca28..24d2c359ef 100644
--- a/searx/translations/ja/LC_MESSAGES/messages.po
+++ b/searx/translations/ja/LC_MESSAGES/messages.po
@@ -3,11 +3,12 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# Akio Nishimura <[email protected]>, 2016-2017
+# Akio Nishimura <[email protected]>, 2016-2018
# Thomas Pointhuber, 2014-2015
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014,2016
# Lucas Phillips <[email protected]>, 2015
# Max <[email protected]>, 2015
+# Nobuhiro Iwamatsu <[email protected]>, 2018
# pointhi, 2014
# Thomas Pointhuber, 2015-2016
msgid ""
@@ -15,8 +16,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-08-13 03:03+0000\n"
+"Last-Translator: Nobuhiro Iwamatsu <[email protected]>\n"
"Language-Team: Japanese (http://www.transifex.com/asciimoo/searx/language/ja/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -27,15 +28,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "タイムアウト"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "例外要求"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "予期しないクラッシュ"
#: searx/webapp.py:136
msgid "files"
@@ -83,7 +84,7 @@ msgstr "不正な設定です。設定を編集してください。"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "不正な設定"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -149,11 +150,11 @@ msgstr "このエントリーの優先"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "この出版物には要約がありません。"
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
-msgstr "可能ならばHTTPリンクをHTTPSリンクに書き換える"
+msgstr "可能ならば HTTP リンクを HTTPS リンクに書き換える"
#: searx/plugins/infinite_scroll.py:3
msgid "Infinite scroll"
@@ -165,7 +166,7 @@ msgstr "現在のページの下端でスクロールすると自動的に次の
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "オープンアクセス DOI リライト"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
@@ -183,7 +184,7 @@ msgstr "検索結果のリンクを新しいタブで開く"
msgid ""
"Results are opened in the same window by default. This plugin overwrites the"
" default behaviour to open links on new tabs/windows. (JavaScript required)"
-msgstr "デフォルトでは結果は同じウィンドウで開きます。このプラグインはデフォルトの動作を書き換えて新しいタブ/ウィンドウで開くようにします。(JavaScriptが必要です)"
+msgstr "デフォルトでは結果は同じウィンドウで開きます。このプラグインはデフォルトの動作を書き換えて新しいタブ/ウィンドウで開くようにします。(JavaScript が必要です)"
#: searx/plugins/search_on_category_select.py:18
msgid "Search on category select"
@@ -193,37 +194,37 @@ msgstr "カテゴリ選択したら検索を実行"
msgid ""
"Perform search immediately if a category selected. Disable to select "
"multiple categories. (JavaScript required)"
-msgstr "カテゴリが選択されたときに検索を実行します。複数のカテゴリを選択する場合は無効にします。(JavaScriptが必要です)"
+msgstr "カテゴリが選択されたときに検索を実行します。複数のカテゴリを選択する場合は無効にします。(JavaScript が必要です)"
#: searx/plugins/self_info.py:20
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr "クエリが \"ip\" の場合にあなたのIPを、クエリに\"user agent\"が含まれる場合にあなたのユーザーエージェントを表示します。"
+msgstr "クエリが \"ip\" の場合にあなたのIPを、クエリに \"user agent\" が含まれる場合にあなたのユーザーエージェントを表示します。"
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
-msgstr "トラッカーURLリムーバー"
+msgstr "トラッカー URL リムーバー"
#: searx/plugins/tracker_url_remover.py:27
msgid "Remove trackers arguments from the returned URL"
-msgstr "返されたURLからトラッカー引数を消去します。"
+msgstr "返された URL からトラッカー引数を消去する"
#: searx/plugins/vim_hotkeys.py:3
msgid "Vim-like hotkeys"
-msgstr "Vim風のホットキー"
+msgstr "Vim 風のホットキー"
#: searx/plugins/vim_hotkeys.py:4
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr "検索結果をVim風のホットキーで操作します(JavaScriptが必要)。メインページまたは検索結果ページで\"h\"キーを押してヘルプを表示します。"
+msgstr "検索結果をVim 風のホットキーで操作します(JavaScript が必要)。メインページまたは検索結果ページで \"h\" キーを押してヘルプを表示します。"
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
#: searx/templates/simple/404.html:4
msgid "Page not found"
-msgstr "ページが見付かりません"
+msgstr "ページが見つかりません"
#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
@@ -315,14 +316,14 @@ msgstr "画像プロキシ"
#: searx/templates/oscar/preferences.html:72
#: searx/templates/simple/preferences.html:169
msgid "Enabled"
-msgstr "有効にする"
+msgstr "有効"
#: searx/templates/courgette/preferences.html:49
#: searx/templates/legacy/preferences.html:50
#: searx/templates/oscar/preferences.html:73
#: searx/templates/simple/preferences.html:170
msgid "Disabled"
-msgstr "使用不可能にする"
+msgstr "無効"
#: searx/templates/courgette/preferences.html:54
#: searx/templates/legacy/preferences.html:55
@@ -489,7 +490,7 @@ msgstr "戻る"
#: searx/templates/oscar/results.html:136
#: searx/templates/simple/results.html:58
msgid "Search URL"
-msgstr "URLを検索する"
+msgstr "URL を検索する"
#: searx/templates/courgette/results.html:16
#: searx/templates/legacy/results.html:17
@@ -623,7 +624,7 @@ msgstr "エラー!"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "Powered by"
-msgstr "提供:"
+msgstr "Powered by"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "a privacy-respecting, hackable metasearch engine"
@@ -636,11 +637,11 @@ msgstr "プロキシされている"
#: searx/templates/oscar/macros.html:92
msgid "supported"
-msgstr ""
+msgstr "サポート"
#: searx/templates/oscar/macros.html:96
msgid "not supported"
-msgstr ""
+msgstr "未サポート"
#: searx/templates/oscar/preferences.html:13
#: searx/templates/oscar/preferences.html:22
@@ -672,12 +673,12 @@ msgstr "クッキー"
#: searx/templates/oscar/preferences.html:42
#: searx/templates/simple/preferences.html:48
msgid "What language do you prefer for search?"
-msgstr "検索に使う言語はどれが良いですか?"
+msgstr "検索に使う言語はどれが良いですか?"
#: searx/templates/oscar/preferences.html:48
#: searx/templates/simple/preferences.html:128
msgid "Change the language of the layout"
-msgstr "表示する言語を変更できます"
+msgstr "表示する言語を変更"
#: searx/templates/oscar/preferences.html:58
#: searx/templates/simple/preferences.html:60
@@ -687,7 +688,7 @@ msgstr "入力補助に使う検索エンジン"
#: searx/templates/oscar/preferences.html:69
#: searx/templates/simple/preferences.html:173
msgid "Proxying image results through searx"
-msgstr "画像検索結果をsearxでプロキシする"
+msgstr "画像検索結果を searx でプロキシする"
#: searx/templates/oscar/preferences.html:78
msgid ""
@@ -704,7 +705,7 @@ msgstr "コンテンツをフィルタリングする"
#: searx/templates/oscar/preferences.html:97
#: searx/templates/simple/preferences.html:139
msgid "Change searx layout"
-msgstr "searxのレイアウトの変更"
+msgstr "Searx のレイアウトの変更"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
@@ -718,13 +719,13 @@ msgstr "スタイル"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "オープンアクセス DOI リゾルバー"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "利用可能な場合(プラグインが必要)、オープンアクセス版の出版物にリダイレクトする"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -735,7 +736,7 @@ msgstr "ショートカット"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "選択された言語"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
@@ -757,7 +758,7 @@ msgstr "最大時間"
#: searx/templates/oscar/preferences.html:248
msgid "This is the list of searx's instant answering modules."
-msgstr "これはsearxの即席回答モジュールのリストです。"
+msgstr "これは searx の即席回答モジュールのリストです。"
#: searx/templates/oscar/preferences.html:252
msgid "Name"
@@ -783,7 +784,7 @@ msgstr "これはクッキーのリストで、これらの値はあなたのコ
#: searx/templates/oscar/preferences.html:276
msgid "With that list, you can assess searx transparency."
-msgstr "このリストによって、あなたはsearxの透明性を評価できます。"
+msgstr "このリストによって、あなたは searx の透明性を評価できます。"
#: searx/templates/oscar/preferences.html:281
msgid "Cookie name"
@@ -795,13 +796,13 @@ msgstr "値"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "現在保存されている設定の検索 URL"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "注意: 検索 URL にカスタム設定を指定すると、クリックした結果サイトにデータが漏洩し、プライバシーが低下する恐れがあります。"
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -810,12 +811,12 @@ msgstr "検索結果"
#: searx/templates/oscar/results.html:21
#: searx/templates/simple/results.html:84
msgid "Try searching for:"
-msgstr ""
+msgstr "検索:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "エンジンは結果を取得できません"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -859,15 +860,15 @@ msgstr "去年"
#: searx/templates/oscar/messages/first_time.html:6
#: searx/templates/oscar/messages/no_data_available.html:3
msgid "Heads up!"
-msgstr "お知らせ"
+msgstr "気をつけて!"
#: searx/templates/oscar/messages/first_time.html:7
msgid "It look like you are using searx first time."
-msgstr "searxを使うのは初めてようですね。"
+msgstr "Searxを使うのは初めてようですね。"
#: searx/templates/oscar/messages/no_cookies.html:3
msgid "Information!"
-msgstr "お知らせ"
+msgstr "お知らせ!"
#: searx/templates/oscar/messages/no_cookies.html:4
msgid "currently, there are no cookies defined."
@@ -880,12 +881,12 @@ msgstr "現在データがありません。"
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "エンジンは結果を取得できません。"
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "後でやり直すか、別の searx インスタンスを探して下さい。"
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
@@ -901,7 +902,7 @@ msgstr "検索結果はありませんでした。別カテゴリで、他のク
#: searx/templates/oscar/messages/save_settings_successfull.html:7
msgid "Well done!"
-msgstr "あっぱれ。"
+msgstr "あっぱれ!"
#: searx/templates/oscar/messages/save_settings_successfull.html:8
msgid "Settings saved successfully."
@@ -909,7 +910,7 @@ msgstr "設定の保存に成功しました。"
#: searx/templates/oscar/messages/unknow_error.html:7
msgid "Oh snap!"
-msgstr "ちぇっ"
+msgstr "ちぇっ!"
#: searx/templates/oscar/messages/unknow_error.html:8
msgid "Something went wrong."
@@ -956,7 +957,7 @@ msgstr "詳細を隠す"
#: searx/templates/oscar/result_templates/torrent.html:7
#: searx/templates/simple/result_templates/torrent.html:11
msgid "Filesize"
-msgstr "ファイル・サイズ"
+msgstr "ファイルサイズ"
#: searx/templates/oscar/result_templates/torrent.html:9
#: searx/templates/simple/result_templates/torrent.html:12
@@ -1004,16 +1005,16 @@ msgstr "もっと見る…"
#: searx/templates/simple/base.html:31
msgid "No item found"
-msgstr ""
+msgstr "アイテムが見つかりません"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
-msgstr "選択している言語のサポート"
+msgstr "選択された言語のサポート"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
-msgstr ""
+msgstr "ユーザーインターフェース"
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "プライバシー"
diff --git a/searx/translations/nl/LC_MESSAGES/messages.mo b/searx/translations/nl/LC_MESSAGES/messages.mo
index 15fcacbc4a..057853be52 100644
Binary files a/searx/translations/nl/LC_MESSAGES/messages.mo and b/searx/translations/nl/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/nl/LC_MESSAGES/messages.po b/searx/translations/nl/LC_MESSAGES/messages.po
index 1fc8700f95..b4e0614025 100644
--- a/searx/translations/nl/LC_MESSAGES/messages.po
+++ b/searx/translations/nl/LC_MESSAGES/messages.po
@@ -3,7 +3,7 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# André Koot <[email protected]>, 2014-2017
+# André Koot <[email protected]>, 2014-2018
# Nathan Follens, 2015-2018
# Rejo Zenger <[email protected]>, 2016-2017
msgid ""
@@ -11,8 +11,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-02-15 22:43+0000\n"
-"Last-Translator: Nathan Follens\n"
+"PO-Revision-Date: 2018-09-22 06:46+0000\n"
+"Last-Translator: André Koot <[email protected]>\n"
"Language-Team: Dutch (http://www.transifex.com/asciimoo/searx/language/nl/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -835,7 +835,7 @@ msgstr "Altijd"
#: searx/templates/oscar/time-range.html:6
#: searx/templates/simple/time-range.html:6
msgid "Last day"
-msgstr "Voorbije dag"
+msgstr "Gisteren"
#: searx/templates/oscar/time-range.html:9
#: searx/templates/simple/time-range.html:9
@@ -881,7 +881,7 @@ msgstr "Zoekmachines konden geen resultaten ophalen."
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr "Probeer het later opnieuw, of gebruik een andere instantie van searx."
+msgstr "Probeer het later opnieuw, of gebruik een andere searx server."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
diff --git a/searx/translations/nl_BE/LC_MESSAGES/messages.mo b/searx/translations/nl_BE/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..e233f5c897
Binary files /dev/null and b/searx/translations/nl_BE/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/nl_BE/LC_MESSAGES/messages.po b/searx/translations/nl_BE/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..c4ef0228ac
--- /dev/null
+++ b/searx/translations/nl_BE/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Nathan Follens, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-06-24 07:59+0000\n"
+"Last-Translator: Nathan Follens\n"
+"Language-Team: Dutch (Belgium) (http://www.transifex.com/asciimoo/searx/language/nl_BE/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: nl_BE\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "time-out"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "aanvraaguitzondering"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "onverwachte crash"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "bestanden"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "algemeen"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "muziek"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "sociale media"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "afbeeldingen"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "video’s"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "IT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "nieuws"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "kaart"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "wetenschap"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Ongeldige instellingen, werkt uw voorkeuren bij"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Ongeldige instellingen"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "zoekfout"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} min geleden"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} uur, {minutes} min geleden"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Willekeurigewaardegenerator"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Genereert verschillende willekeurige waarden"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Statistische functies"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Berekent {functions} van de argumenten"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Snelheid zoekmachien (sec)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Laden van pagina’s (sec)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Aantal zoekresultaten"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Scores"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Scores per zoekresultaat"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Fouten"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (VEROUDERD)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Dit object is vervangen door"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "Voor deze publicatie is geen abstract beschikbaar."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "Herschrijft HTTP-koppelingen naar HTTPS, indien mogelijk"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Oneindig scrollen"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Volgende pagina automatisch laden bij bereiken van den onderkant van de huidige pagina"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Open Access DOI herschrijven"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Omzeilt betaalmuren met een doorverwijzing naar vrij toegankelijke versies van publicaties indien beschikbaar"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Koppelingen openen in nieuwe tabbladen"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Resultaten worden standaard in hetzelfde venster geopend. Deze plug-in overschrijft het standaardgedrag zodat koppelingen in nieuwe tabbladen/vensters worden geopend. (JavaScript vereist)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Zoeken bij selecteren van categorie"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Zoekopdracht onmiddellijk uitvoeren wanneer dat een categorie wordt geselecteerd. Zet dit uit voor meerdere categorieën te selecteren. (JavaScript vereist)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Geeft uw IP-adres weer als de zoekopdracht ‘ip’ is, en uwe gebruikersagent als de zoekopdracht ‘user agent’ bevat."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Tracker-URL-verwijderaar"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Verwijdert trackerargumenten van de gekregen URL"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Sneltoetsen gelijk in Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Bladert door zoekresultaten met sneltoetsen gelijk die in Vim (JavaScript vereist). Drukt op ‘h’ op de hoofdpagina of de pagina met resultaten voor hulp."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Pagina niet gevonden"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Ga naar %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "zoekpagina"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "voorkeuren"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "over"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Voorkeuren"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Standaardcategorieën"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Zoektaal"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Standaardtaal"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Interfacetaal"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Auto-aanvullen"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Afbeeldingsproxy"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Ingeschakeld"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Uitgeschakeld"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Methode"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "SafeSearch"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Strikt"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Gemiddeld"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Geen"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Thema’s"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Kleur"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Blauw (standaard)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Paars"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Groen"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Appelblauwzeegroen"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Oranje"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Rood"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Momenteel gebruikte zoekmachienen"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Naam zoekmachien"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Categorie"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Toestaan"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Blokkeren"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Deze instellingen worden bewaard in uw cookies. Hierdoor hoeven wij niks over u te bewaren."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Deze cookies zijn alleen voor uw eigen gemak, we gebruiken deze cookies niet voor u te volgen."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "opslaan"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Standaardinstellingen herstellen"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "terug"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "Zoek-URL"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Zoekresultaten downloaden"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Antwoorden"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Suggesties"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "vorige pagina"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "volgende pagina"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Zoeken naar..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Zoekmachienstatistieken"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "oorspronkelijke context"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Seeders"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Leechers"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "magneetkoppeling"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "torrentbestand"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Klikt op het vergrootglas voor te zoeken"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Resultaten op nieuwe tabbladen"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Aan"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Uit"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "gecachet"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Geavanceerde instellingen"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Sluiten"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Fout!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Aangedreven door"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "een privacyrespecterend, aanpasbaar metazoekmachien"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "geproxyt"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "ondersteund"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "niet ondersteund"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Algemeen"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Zoekmachienen"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Plug-ins"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Beantwoorders"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cookies"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Welke taal wilt ge gebruiken voor het zoeken?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Wijzigt de taal van den opmaak"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Zoekt tijdens het typen"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Afbeeldingsresultaten via searx laden"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Bepaalt hoe dat de formulieren worden ingestuurd, <a href=\"http://nl.wikipedia.org/wiki/Hypertext_Transfer_Protocol#HTTP-requests\" rel=\"external\">leest meer over opvraagmethodes</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Filteren op inhoud"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Opmaak van searx aanpassen"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Kiest ne stijl voor dit thema"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Stijl"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Open Access DOI herschrijven"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Doorverwijzen naar vrij toegankelijke versies van publicaties, indien beschikbaar (plug-in vereist)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Snelkoppeling"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Geselecteerde taal"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Tijdspanne"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Gem. duur"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Max. duur"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Dit is het overzicht van de instantantwoordmodules van searx."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Naam"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Kernwoorden"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Beschrijving"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Voorbeelden"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Dit is de lijst van cookies en hun waarden die searx op uwe computer opslaat."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Met deze lijst kunt ge de openheid van searx beoordelen."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Cookienaam"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Waarde"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "Zoek-URL van de momenteel opgeslagen voorkeuren"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Let op: aangepaste instellingen opgeven in de zoek-URL kan nadelig zijn voor uw privacy, omdat het gegevens lekt aan de aangeklikte resultaatwebsites."
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Zoekresultaten"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Probeert te zoeken naar:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Zoekmachienen konden geen resultaten ophalen"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Koppelingen"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Start zoeken"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "stats"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Altijd"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Voorbijen dag"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Voorbije week"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Voorbije maand"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Voorbije jaar"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Opgelet!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Het lijkt erop dat ge searx voor den eerste keer gebruikt."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Informatie!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "der zijn momenteel geen cookies gedefinieerd."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Der zijn momenteel geen gegevens beschikbaar."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Zoekmachinen konden geen resultaten ophalen."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Probeert het later opnieuw, of gebruikt een andere instantie van searx."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Sorry!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "We konden geen resultaten vinden. Probeert nen andere zoekopdracht, of zoekt in meer categorieën."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Goed gedaan!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Instellingen opgeslagen."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Oeps!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Der is iets misgegaan."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "media tonen"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "media verbergen"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Afbeelding tonen"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Bron bekijken"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "kaart tonen"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "kaart verbergen"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "details tonen"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "details verbergen"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Bestandsgrootte"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Bytes"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Aantal bestanden"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "video tonen"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "video verbergen"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Meer laden…"
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Geen resultaat gevonden"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Ondersteunt geselecteerde taal"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Gebruikersinterface"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Privacy"
diff --git a/searx/translations/pl/LC_MESSAGES/messages.mo b/searx/translations/pl/LC_MESSAGES/messages.mo
index d5c5f55efc..e4a4510131 100644
Binary files a/searx/translations/pl/LC_MESSAGES/messages.mo and b/searx/translations/pl/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/pt/LC_MESSAGES/messages.mo b/searx/translations/pt/LC_MESSAGES/messages.mo
index bbf2fba0fa..d88c449f1e 100644
Binary files a/searx/translations/pt/LC_MESSAGES/messages.mo and b/searx/translations/pt/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/pt/LC_MESSAGES/messages.po b/searx/translations/pt/LC_MESSAGES/messages.po
index 81ac6f5b9e..d8446731a5 100644
--- a/searx/translations/pt/LC_MESSAGES/messages.po
+++ b/searx/translations/pt/LC_MESSAGES/messages.po
@@ -4,13 +4,14 @@
#
# Translators:
# Dickprince, 2017
+# Chacal Exodius, 2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-12-26 22:43+0000\n"
+"Last-Translator: Chacal Exodius\n"
"Language-Team: Portuguese (http://www.transifex.com/asciimoo/searx/language/pt/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -21,15 +22,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "tempo esgotado"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "solicitar exceção"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "acidente inesperado"
#: searx/webapp.py:136
msgid "files"
@@ -77,7 +78,7 @@ msgstr "Definições inválidas, por favor edite as suas preferências"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Configurações inválidas"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -143,7 +144,7 @@ msgstr "Esta entrada foi substituída por"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "Nenhum resumo está disponível para esta publicação."
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
@@ -159,7 +160,7 @@ msgstr "Carregar automaticamente a próxima página assim que se desloque para o
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "Reescrita DOI de acesso aberto"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
@@ -461,7 +462,7 @@ msgstr "Estes cookies servem somente para sua conveniência, não os utilizamos
#: searx/templates/pix-art/preferences.html:78
#: searx/templates/simple/preferences.html:185
msgid "save"
-msgstr "guardar"
+msgstr "Guardar"
#: searx/templates/courgette/preferences.html:128
#: searx/templates/legacy/preferences.html:125
@@ -476,7 +477,7 @@ msgstr "Repor predefinições"
#: searx/templates/pix-art/preferences.html:79
#: searx/templates/simple/preferences.html:187
msgid "back"
-msgstr "atrás"
+msgstr "Atrás"
#: searx/templates/courgette/results.html:12
#: searx/templates/legacy/results.html:13
@@ -630,11 +631,11 @@ msgstr "via proxy"
#: searx/templates/oscar/macros.html:92
msgid "supported"
-msgstr ""
+msgstr "suportado"
#: searx/templates/oscar/macros.html:96
msgid "not supported"
-msgstr ""
+msgstr "não suportado"
#: searx/templates/oscar/preferences.html:13
#: searx/templates/oscar/preferences.html:22
@@ -712,13 +713,13 @@ msgstr "Estilo"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Resolvedor DOI de Acesso Aberto"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "Redirecionar para versões de acesso aberto de publicações quando disponíveis (requer plug-in)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -729,7 +730,7 @@ msgstr "Atalho"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "Idioma selecionado"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
@@ -789,13 +790,13 @@ msgstr "Valor"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "URL de pesquisa das preferências salvas atualmente"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "Nota: a especificação de configurações personalizadas no URL de pesquisa pode reduzir a privacidade ao vazar dados para os sites de resultados clicados."
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -804,12 +805,12 @@ msgstr "Resultados de pesquisa"
#: searx/templates/oscar/results.html:21
#: searx/templates/simple/results.html:84
msgid "Try searching for:"
-msgstr ""
+msgstr "Tente pesquisar por:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "Mecanismos não podem recuperar resultados"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -874,12 +875,12 @@ msgstr "Não existem dados disponíveis."
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "Mecanismos não podem recuperar resultados."
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "Por favor, tente novamente mais tarde ou encontre outra ocorrência de searx."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
@@ -998,7 +999,7 @@ msgstr "Carregar mais..."
#: searx/templates/simple/base.html:31
msgid "No item found"
-msgstr ""
+msgstr "Nenhum item encontrado"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
@@ -1006,8 +1007,8 @@ msgstr "Suporta idioma selecionado"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
-msgstr ""
+msgstr "Interface de usuário"
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "Privacidade"
diff --git a/searx/translations/pt_BR/LC_MESSAGES/messages.mo b/searx/translations/pt_BR/LC_MESSAGES/messages.mo
index a169ac908f..03eb74f4ef 100644
Binary files a/searx/translations/pt_BR/LC_MESSAGES/messages.mo and b/searx/translations/pt_BR/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/pt_BR/LC_MESSAGES/messages.po b/searx/translations/pt_BR/LC_MESSAGES/messages.po
index b5a786c70c..6a0a6b8374 100644
--- a/searx/translations/pt_BR/LC_MESSAGES/messages.po
+++ b/searx/translations/pt_BR/LC_MESSAGES/messages.po
@@ -4,6 +4,7 @@
#
# Translators:
# Adam Tauber <[email protected]>, 2017
+# Chacal Exodius, 2018
# Gabriel Nunes <[email protected]>, 2017
# Guimarães Mello <[email protected]>, 2017
# Neton Brício <[email protected]>, 2015
@@ -14,8 +15,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-03-03 12:33+0000\n"
-"Last-Translator: shizuka\n"
+"PO-Revision-Date: 2018-08-06 05:21+0000\n"
+"Last-Translator: Chacal Exodius\n"
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/asciimoo/searx/language/pt_BR/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -164,7 +165,7 @@ msgstr "Automaticamente carregar a próxima página quando ir até o fim da pág
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "Reescrita DOI de acesso aberto"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
@@ -717,7 +718,7 @@ msgstr "Estilo"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Resolvedor DOI de Acesso Aberto"
#: searx/templates/oscar/preferences.html:123
msgid ""
diff --git a/searx/translations/ro/LC_MESSAGES/messages.mo b/searx/translations/ro/LC_MESSAGES/messages.mo
index 5c6714e939..f39068576a 100644
Binary files a/searx/translations/ro/LC_MESSAGES/messages.mo and b/searx/translations/ro/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ro/LC_MESSAGES/messages.po b/searx/translations/ro/LC_MESSAGES/messages.po
index 21a2e7d1d5..7cfd1be37f 100644
--- a/searx/translations/ro/LC_MESSAGES/messages.po
+++ b/searx/translations/ro/LC_MESSAGES/messages.po
@@ -4,13 +4,14 @@
#
# Translators:
# adrian.fita <[email protected]>, 2015
+# Daniel Șerbănescu <[email protected]>, 2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-11-15 21:18+0000\n"
+"Last-Translator: Daniel Șerbănescu <[email protected]>\n"
"Language-Team: Romanian (http://www.transifex.com/asciimoo/searx/language/ro/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -21,15 +22,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "timp alocat expirat"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "excepție la cerere"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "terminare prematură neașteptată"
#: searx/webapp.py:136
msgid "files"
@@ -37,7 +38,7 @@ msgstr "fișiere"
#: searx/webapp.py:137
msgid "general"
-msgstr "general"
+msgstr "generale"
#: searx/webapp.py:138
msgid "music"
@@ -57,7 +58,7 @@ msgstr "videouri"
#: searx/webapp.py:142
msgid "it"
-msgstr "el(ea)"
+msgstr "informatică"
#: searx/webapp.py:143
msgid "news"
@@ -65,23 +66,23 @@ msgstr "știri"
#: searx/webapp.py:144
msgid "map"
-msgstr "hartă"
+msgstr "hărți"
#: searx/webapp.py:145
msgid "science"
-msgstr ""
+msgstr "știință"
#: searx/webapp.py:399 searx/webapp.py:658
msgid "Invalid settings, please edit your preferences"
-msgstr ""
+msgstr "Configurări nevalide, editați preferințele"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Configurări nevalide"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
-msgstr ""
+msgstr "eroare de căutare"
#: searx/webapp.py:530
msgid "{minutes} minute(s) ago"
@@ -93,27 +94,27 @@ msgstr "{hours} oră(e), {minutes} minut(e) în urmă"
#: searx/answerers/random/answerer.py:53
msgid "Random value generator"
-msgstr ""
+msgstr "Generator de valori aleatorii"
#: searx/answerers/random/answerer.py:54
msgid "Generate different random values"
-msgstr ""
+msgstr "Generează valori aleatoare diferite"
#: searx/answerers/statistics/answerer.py:53
msgid "Statistics functions"
-msgstr ""
+msgstr "Funcții statistice"
#: searx/answerers/statistics/answerer.py:54
msgid "Compute {functions} of the arguments"
-msgstr ""
+msgstr "Calculează {functions} din argumente"
#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
msgid "Engine time (sec)"
-msgstr ""
+msgstr "Timpul motorului (sec)"
#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
msgid "Page loads (sec)"
-msgstr "Încărcarea paginilor (sec)"
+msgstr "Încărcarea paginii (sec)"
#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
#: searx/templates/oscar/results.html:95
@@ -135,15 +136,15 @@ msgstr "Erori"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
-msgstr ""
+msgstr "{title} (ÎNVECHIT)"
#: searx/engines/pdbe.py:91
msgid "This entry has been superseded by"
-msgstr ""
+msgstr "Această intrare a fost perimată de"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "Niciun abstract disponibil pentru această publicație."
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
@@ -151,86 +152,86 @@ msgstr "Rescrie legăturile HTTP cu HTTPS dacă e posibil"
#: searx/plugins/infinite_scroll.py:3
msgid "Infinite scroll"
-msgstr ""
+msgstr "Derulare infinită"
#: searx/plugins/infinite_scroll.py:4
msgid "Automatically load next page when scrolling to bottom of current page"
-msgstr ""
+msgstr "Încarcă automat pagina următoare când se derulează la baza paginii curente"
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "Rescriere către acces deschis DOI"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
"Avoid paywalls by redirecting to open-access versions of publications when "
"available"
-msgstr ""
+msgstr "Evită „zidurile de plată” redirecționând către versiuni cu acces deschis ale publicațiilor când sunt disponibile"
#: searx/plugins/open_results_on_new_tab.py:18
#: searx/templates/oscar/preferences.html:114
#: searx/templates/simple/preferences.html:149
msgid "Open result links on new browser tabs"
-msgstr ""
+msgstr "Deschide legăturile rezultate în taburi noi"
#: searx/plugins/open_results_on_new_tab.py:19
msgid ""
"Results are opened in the same window by default. This plugin overwrites the"
" default behaviour to open links on new tabs/windows. (JavaScript required)"
-msgstr ""
+msgstr "Rezultatele sunt deschise în aceeași fereastră în mod implicit. Acest modul suprascrie acțiunea implicită de a deschide legături în ferestre/taburi noi. (Necesită JavaScript)"
#: searx/plugins/search_on_category_select.py:18
msgid "Search on category select"
-msgstr "Căutare în categoria selectată"
+msgstr "Caută la selectarea categoriei"
#: searx/plugins/search_on_category_select.py:19
msgid ""
"Perform search immediately if a category selected. Disable to select "
"multiple categories. (JavaScript required)"
-msgstr ""
+msgstr "Execută căutarea imediat dacă o categorie este selectată. Dezactivează pentru a selecta categorii multiple. (Necesită JavaScript)"
#: searx/plugins/self_info.py:20
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr ""
+msgstr "Afișează IP-ul dacă interogarea este „ip” și agentul de utilizator dacă interogarea conține „user agent”."
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
-msgstr ""
+msgstr "Eliminator de URL pentru urmăritor"
#: searx/plugins/tracker_url_remover.py:27
msgid "Remove trackers arguments from the returned URL"
-msgstr ""
+msgstr "Elimină argumentele urmăritorului din URL-ul returnat"
#: searx/plugins/vim_hotkeys.py:3
msgid "Vim-like hotkeys"
-msgstr ""
+msgstr "Scurtături de tastatură în stilul Vim"
#: searx/plugins/vim_hotkeys.py:4
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr ""
+msgstr "Navighează rezultatele căutării cu scurtături de tastatură în stilul Vim (necesită JavaScript). Apăsați tasta „h” în pagina principală sau în pagina cu rezultate pentru a obține ajutor."
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
#: searx/templates/simple/404.html:4
msgid "Page not found"
-msgstr ""
+msgstr "Pagină negăsită"
#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
#: searx/templates/simple/404.html:6
#, python-format
msgid "Go to %(search_page)s."
-msgstr ""
+msgstr "Navighează la %(search_page)s"
#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
#: searx/templates/simple/404.html:6
msgid "search page"
-msgstr ""
+msgstr "pagină de căutare"
#: searx/templates/courgette/index.html:9
#: searx/templates/courgette/index.html:13
@@ -280,7 +281,7 @@ msgstr "Limba de căutare"
#: searx/templates/simple/languages.html:2
#: searx/templates/simple/preferences.html:42
msgid "Default language"
-msgstr ""
+msgstr "Limba implicită"
#: searx/templates/courgette/preferences.html:24
#: searx/templates/legacy/preferences.html:25
@@ -443,7 +444,7 @@ msgstr "Blochează"
msgid ""
"These settings are stored in your cookies, this allows us not to store this "
"data about you."
-msgstr "Aceste setări sunt stocate în cookie-urile d-voastră, aceasta ne permite să nu stocăm aceste date despre d-voastră."
+msgstr "Aceste configurări sunt stocate în cookie-uri, ceea ce ne permite să nu stocăm aceste date despre dumeavoastră."
#: searx/templates/courgette/preferences.html:124
#: searx/templates/legacy/preferences.html:121
@@ -453,7 +454,7 @@ msgstr "Aceste setări sunt stocate în cookie-urile d-voastră, aceasta ne perm
msgid ""
"These cookies serve your sole convenience, we don't use these cookies to "
"track you."
-msgstr "Aceste cookie-uri servesc doar pentru confortul d-voastră, noi nu stocăm aceste cookie-uri pentru a vă urmări."
+msgstr "Aceste cookie-uri servesc doar pentru conveniența dumneavoastră, noi nu stocăm aceste cookie-uri pentru a vă urmări."
#: searx/templates/courgette/preferences.html:127
#: searx/templates/legacy/preferences.html:124
@@ -468,7 +469,7 @@ msgstr "salvează"
#: searx/templates/oscar/preferences.html:307
#: searx/templates/simple/preferences.html:186
msgid "Reset defaults"
-msgstr "Resetează valorile implicite"
+msgstr "Restabilește la valorile implicite"
#: searx/templates/courgette/preferences.html:129
#: searx/templates/legacy/preferences.html:126
@@ -543,14 +544,14 @@ msgstr "contextul original"
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Seeder"
-msgstr "Seeder"
+msgstr "Partener"
#: searx/templates/courgette/result_templates/torrent.html:7
#: searx/templates/legacy/result_templates/torrent.html:11
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Leecher"
-msgstr "Leecher"
+msgstr "Sursă incompletă"
#: searx/templates/courgette/result_templates/torrent.html:9
#: searx/templates/legacy/result_templates/torrent.html:9
@@ -575,19 +576,19 @@ msgstr "Apăsați pe lupă pentru a executa căutarea"
#: searx/templates/oscar/preferences.html:113
#: searx/templates/simple/preferences.html:142
msgid "Results on new tabs"
-msgstr ""
+msgstr "Rezultate în taburi noi"
#: searx/templates/legacy/preferences.html:87
#: searx/templates/oscar/preferences.html:117
#: searx/templates/simple/preferences.html:145
msgid "On"
-msgstr ""
+msgstr "Pornit"
#: searx/templates/legacy/preferences.html:88
#: searx/templates/oscar/preferences.html:118
#: searx/templates/simple/preferences.html:146
msgid "Off"
-msgstr ""
+msgstr "Oprit"
#: searx/templates/legacy/result_templates/code.html:3
#: searx/templates/legacy/result_templates/default.html:3
@@ -599,7 +600,7 @@ msgstr "stocat temporar"
#: searx/templates/oscar/advanced.html:4
msgid "Advanced settings"
-msgstr ""
+msgstr "Configurări avansate"
#: searx/templates/oscar/base.html:62
#: searx/templates/oscar/messages/first_time.html:4
@@ -613,7 +614,7 @@ msgstr "Închide"
#: searx/templates/simple/messages/no_results.html:4
#: searx/templates/simple/results.html:25
msgid "Error!"
-msgstr ""
+msgstr "Eroare!"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "Powered by"
@@ -626,21 +627,21 @@ msgstr "un meta-motor de căutare care respectă confidențialitatea"
#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
#: searx/templates/simple/macros.html:43
msgid "proxied"
-msgstr ""
+msgstr "delegat"
#: searx/templates/oscar/macros.html:92
msgid "supported"
-msgstr ""
+msgstr "suporat"
#: searx/templates/oscar/macros.html:96
msgid "not supported"
-msgstr ""
+msgstr "nesuportat"
#: searx/templates/oscar/preferences.html:13
#: searx/templates/oscar/preferences.html:22
#: searx/templates/simple/preferences.html:32
msgid "General"
-msgstr "General"
+msgstr "Generale"
#: searx/templates/oscar/preferences.html:14
#: searx/templates/oscar/preferences.html:146
@@ -656,12 +657,12 @@ msgstr "Module"
#: searx/templates/oscar/preferences.html:16
#: searx/templates/oscar/preferences.html:245
msgid "Answerers"
-msgstr ""
+msgstr "Răspunzători"
#: searx/templates/oscar/preferences.html:17
#: searx/templates/oscar/preferences.html:272
msgid "Cookies"
-msgstr ""
+msgstr "Cookie-uri"
#: searx/templates/oscar/preferences.html:42
#: searx/templates/simple/preferences.html:48
@@ -703,99 +704,99 @@ msgstr "Schimbă aspectul lui searx"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
msgid "Choose style for this theme"
-msgstr ""
+msgstr "Alegeți stilul pentru această temă"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
msgid "Style"
-msgstr ""
+msgstr "Stil"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Rezolvator de acces deschis DOI"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "Redirecționează către versiuni cu acces deschis ale publicațiilor când sunt disponibile (modul necesar)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
#: searx/templates/simple/preferences.html:88
msgid "Shortcut"
-msgstr ""
+msgstr "Scurtătură"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "Limba selectată"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
#: searx/templates/simple/preferences.html:91
msgid "Time range"
-msgstr ""
+msgstr "Interval de timp"
#: searx/templates/oscar/preferences.html:167
#: searx/templates/oscar/preferences.html:171
#: searx/templates/simple/preferences.html:92
msgid "Avg. time"
-msgstr ""
+msgstr "Timp mediu"
#: searx/templates/oscar/preferences.html:168
#: searx/templates/oscar/preferences.html:170
#: searx/templates/simple/preferences.html:93
msgid "Max time"
-msgstr ""
+msgstr "Timp maxim"
#: searx/templates/oscar/preferences.html:248
msgid "This is the list of searx's instant answering modules."
-msgstr ""
+msgstr "Aceasta este lista de module de răspundere instantă a lui searx."
#: searx/templates/oscar/preferences.html:252
msgid "Name"
-msgstr ""
+msgstr "Nume"
#: searx/templates/oscar/preferences.html:253
msgid "Keywords"
-msgstr ""
+msgstr "Cuvinte cheie"
#: searx/templates/oscar/preferences.html:254
msgid "Description"
-msgstr ""
+msgstr "Descriere"
#: searx/templates/oscar/preferences.html:255
msgid "Examples"
-msgstr ""
+msgstr "Exemple"
#: searx/templates/oscar/preferences.html:275
msgid ""
"This is the list of cookies and their values searx is storing on your "
"computer."
-msgstr ""
+msgstr "Aceasta este lista de cookie-uri și valorile lor pe care searx le stochează pe calculatorul dumneavoastră."
#: searx/templates/oscar/preferences.html:276
msgid "With that list, you can assess searx transparency."
-msgstr ""
+msgstr "Cu acea listă puteți evalua nivelul de transparență al lui searx."
#: searx/templates/oscar/preferences.html:281
msgid "Cookie name"
-msgstr ""
+msgstr "Nume cookie"
#: searx/templates/oscar/preferences.html:282
msgid "Value"
-msgstr ""
+msgstr "Valuare"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "URL-ul de căutare al preferințelor salvate curent"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "Notă: specificând configurări personalizate în URL-ul de căutare poate reduce nivelul de confidențialitate prin scurgerea datelor către siturile accesate la căutare."
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -804,12 +805,12 @@ msgstr "Rezultatele căutării"
#: searx/templates/oscar/results.html:21
#: searx/templates/simple/results.html:84
msgid "Try searching for:"
-msgstr ""
+msgstr "Încercați să căutați după:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "Motoarele nu pot obține rezultate"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -828,27 +829,27 @@ msgstr "statistici"
#: searx/templates/oscar/time-range.html:3
#: searx/templates/simple/time-range.html:3
msgid "Anytime"
-msgstr ""
+msgstr "Oricând"
#: searx/templates/oscar/time-range.html:6
#: searx/templates/simple/time-range.html:6
msgid "Last day"
-msgstr ""
+msgstr "Ultima zi"
#: searx/templates/oscar/time-range.html:9
#: searx/templates/simple/time-range.html:9
msgid "Last week"
-msgstr ""
+msgstr "Ultima săptămână"
#: searx/templates/oscar/time-range.html:12
#: searx/templates/simple/time-range.html:12
msgid "Last month"
-msgstr ""
+msgstr "Ultima lună"
#: searx/templates/oscar/time-range.html:15
#: searx/templates/simple/time-range.html:15
msgid "Last year"
-msgstr ""
+msgstr "Ultimul an"
#: searx/templates/oscar/messages/first_time.html:6
#: searx/templates/oscar/messages/no_data_available.html:3
@@ -861,11 +862,11 @@ msgstr "Se pare că folosiți searx pentru prima dată."
#: searx/templates/oscar/messages/no_cookies.html:3
msgid "Information!"
-msgstr ""
+msgstr "Informație!"
#: searx/templates/oscar/messages/no_cookies.html:4
msgid "currently, there are no cookies defined."
-msgstr ""
+msgstr "momentan, nu există cookie-uri definite"
#: searx/templates/oscar/messages/no_data_available.html:4
msgid "There is currently no data available. "
@@ -874,24 +875,24 @@ msgstr "Deocamdată nu există date disponibile."
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "Motoarele nu pot obține rezultate"
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "Încercați din nou mai târziu sau folosiți o altă instanță searx-"
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
msgid "Sorry!"
-msgstr "Îmi pare rău!"
+msgstr "Ne pare rău!"
#: searx/templates/oscar/messages/no_results.html:15
#: searx/templates/simple/messages/no_results.html:15
msgid ""
"we didn't find any results. Please use another query or search in more "
"categories."
-msgstr "n-am găsit nici un rezultat. Vă rog folosiți o altă interogare sau căutați în mai multe categorii."
+msgstr "n-am găsit nici un rezultat. Folosiți o altă interogare sau căutați în mai multe categorii."
#: searx/templates/oscar/messages/save_settings_successfull.html:7
msgid "Well done!"
@@ -899,7 +900,7 @@ msgstr "Bravo!"
#: searx/templates/oscar/messages/save_settings_successfull.html:8
msgid "Settings saved successfully."
-msgstr "Setările au fost salvate cu succes."
+msgstr "Configurările au fost salvate cu succes."
#: searx/templates/oscar/messages/unknow_error.html:7
msgid "Oh snap!"
@@ -998,16 +999,16 @@ msgstr "Încarcă mai multe..."
#: searx/templates/simple/base.html:31
msgid "No item found"
-msgstr ""
+msgstr "Niciun element găsit"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
-msgstr ""
+msgstr "Suportă limba selectată"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
-msgstr ""
+msgstr "Interfața pentru utilizator"
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "Confidențialitate"
diff --git a/searx/translations/ru/LC_MESSAGES/messages.mo b/searx/translations/ru/LC_MESSAGES/messages.mo
index 9d5ea61b08..c6bcdd1b6d 100644
Binary files a/searx/translations/ru/LC_MESSAGES/messages.mo and b/searx/translations/ru/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/ru/LC_MESSAGES/messages.po b/searx/translations/ru/LC_MESSAGES/messages.po
index f6bda5f1b6..befe7f963e 100644
--- a/searx/translations/ru/LC_MESSAGES/messages.po
+++ b/searx/translations/ru/LC_MESSAGES/messages.po
@@ -3,7 +3,7 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# Andrey, 2017
+# Andrey, 2017-2019
# dimqua <[email protected]>, 2015
# dimqua <[email protected]>, 2015
# dimqua <[email protected]>, 2017
@@ -14,8 +14,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-02-26 16:39+0000\n"
-"Last-Translator: John DOe <[email protected]>\n"
+"PO-Revision-Date: 2019-01-05 12:11+0000\n"
+"Last-Translator: Andrey\n"
"Language-Team: Russian (http://www.transifex.com/asciimoo/searx/language/ru/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -34,7 +34,7 @@ msgstr "ошибка выполнения запроса"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr "неожиданное падение"
+msgstr "неожиданный сбой"
#: searx/webapp.py:136
msgid "files"
@@ -148,7 +148,7 @@ msgstr "Эта запись была заменена на"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr "Для данной публикации недоступно"
+msgstr "Нет аннотации для этой публикации."
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
@@ -198,15 +198,15 @@ msgstr "Выполнять поиск немедленно, если выбра
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr "Отображает ваш IP-адрес при запросе \"ip\" и отпечаток браузера при запросе \"user agent\"."
+msgstr "Отображает ваш IP-адрес при запросе \"ip\" и пользовательский агент при запросе \"user agent\"."
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
-msgstr "Удаление трекера URL-адресов"
+msgstr "Удаление трекинга URL-адресов"
#: searx/plugins/tracker_url_remover.py:27
msgid "Remove trackers arguments from the returned URL"
-msgstr "Удаляет аргументы отслеживания из возвращенного URL-адреса"
+msgstr "Удаляет аргументы отслеживания из URL-адреса"
#: searx/plugins/vim_hotkeys.py:3
msgid "Vim-like hotkeys"
@@ -300,7 +300,7 @@ msgstr "Язык интерфейса"
#: searx/templates/oscar/preferences.html:57
#: searx/templates/simple/preferences.html:51
msgid "Autocomplete"
-msgstr "Подгрузка результатов"
+msgstr "Автозавершение"
#: searx/templates/courgette/preferences.html:45
#: searx/templates/legacy/preferences.html:46
@@ -622,7 +622,7 @@ msgstr "Ошибка!"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "Powered by"
-msgstr "Используется"
+msgstr "Основано на"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "a privacy-respecting, hackable metasearch engine"
@@ -631,7 +631,7 @@ msgstr "уважающая вашу приватность, открытая м
#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
#: searx/templates/simple/macros.html:43
msgid "proxied"
-msgstr "proxy"
+msgstr "proxied"
#: searx/templates/oscar/macros.html:92
msgid "supported"
@@ -671,7 +671,7 @@ msgstr "Cookie"
#: searx/templates/oscar/preferences.html:42
#: searx/templates/simple/preferences.html:48
msgid "What language do you prefer for search?"
-msgstr "На каком языке вы предпочитаете искать?"
+msgstr "Какой язык предпочтителен для поиска?"
#: searx/templates/oscar/preferences.html:48
#: searx/templates/simple/preferences.html:128
@@ -686,7 +686,7 @@ msgstr "Поисковые предложения по мере ввода"
#: searx/templates/oscar/preferences.html:69
#: searx/templates/simple/preferences.html:173
msgid "Proxying image results through searx"
-msgstr "Загружать найденные изображения через searx"
+msgstr "Проксировать найденные изображения с помощью searx"
#: searx/templates/oscar/preferences.html:78
msgid ""
@@ -698,7 +698,7 @@ msgstr "Изменяет способ отправки запросов. <a href
#: searx/templates/oscar/preferences.html:87
#: searx/templates/simple/preferences.html:71
msgid "Filter content"
-msgstr "Фильтрация содержимого для взрослых в результатах поиска."
+msgstr "Фильтрация контента"
#: searx/templates/oscar/preferences.html:97
#: searx/templates/simple/preferences.html:139
@@ -800,7 +800,7 @@ msgstr "URL поиска для текущих сохраненных парам
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr "Обратите внимание, что задание пользовательских параметров в URL поиска может привести к их утечке к посещаемым сайтам из поисковой выдачи."
+msgstr "Учтите, что укаание пользовательских настроек в URL поиска может привести к их утечке к посещаемым сайтам из поисковой выдачи."
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -884,7 +884,7 @@ msgstr "Движки не могут получить результаты."
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr "Пожалуйста, попробуйте позже или воспользуйтесь другим searx инстансом."
+msgstr "Пожалуйста, попробуйте позже или воспользуйтесь другим сервером searx."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
diff --git a/searx/translations/sk/LC_MESSAGES/messages.mo b/searx/translations/sk/LC_MESSAGES/messages.mo
index af7ac9dbba..0d8f6fa707 100644
Binary files a/searx/translations/sk/LC_MESSAGES/messages.mo and b/searx/translations/sk/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/sl/LC_MESSAGES/messages.mo b/searx/translations/sl/LC_MESSAGES/messages.mo
index f20b5275b3..b2cf9e1b13 100644
Binary files a/searx/translations/sl/LC_MESSAGES/messages.mo and b/searx/translations/sl/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/sl/LC_MESSAGES/messages.po b/searx/translations/sl/LC_MESSAGES/messages.po
index f3e6fef070..3ce7f97d63 100644
--- a/searx/translations/sl/LC_MESSAGES/messages.po
+++ b/searx/translations/sl/LC_MESSAGES/messages.po
@@ -3,14 +3,14 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
-# asladic <[email protected]>, 2017
+# asladic <[email protected]>, 2017-2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-05-01 08:59+0000\n"
+"Last-Translator: asladic <[email protected]>\n"
"Language-Team: Slovenian (http://www.transifex.com/asciimoo/searx/language/sl/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -21,15 +21,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "presežena časovna omejitev"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "napaka poizvedbe"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "nepričakovana napaka"
#: searx/webapp.py:136
msgid "files"
@@ -77,7 +77,7 @@ msgstr "Neveljavne nastavitve. Prosimo, preverite vašo konfiguracijo"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Neveljavne nastavitve"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
@@ -630,11 +630,11 @@ msgstr "preko posredniškega strežnika"
#: searx/templates/oscar/macros.html:92
msgid "supported"
-msgstr ""
+msgstr "podprto"
#: searx/templates/oscar/macros.html:96
msgid "not supported"
-msgstr ""
+msgstr "ni podprto"
#: searx/templates/oscar/preferences.html:13
#: searx/templates/oscar/preferences.html:22
@@ -718,7 +718,7 @@ msgstr ""
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "Preusmeri na prosto dostopne različice publikacij, ko so na voljo (zahtevan vtičnik)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -729,7 +729,7 @@ msgstr "Bližnjica"
#: searx/templates/oscar/preferences.html:164
#: searx/templates/oscar/preferences.html:174
msgid "Selected language"
-msgstr ""
+msgstr "Izbrani jezik"
#: searx/templates/oscar/preferences.html:166
#: searx/templates/oscar/preferences.html:172
@@ -789,13 +789,13 @@ msgstr "Vrednost"
#: searx/templates/oscar/preferences.html:301
msgid "Search URL of the currently saved preferences"
-msgstr ""
+msgstr "Iskalni URL trenutno shranjenih nastavitev"
#: searx/templates/oscar/preferences.html:301
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr ""
+msgstr "Opomba: navajanje lastnih nastavitev v iskalnem URL lahko vodi do zmanjšane zasebnosti preko podajanja podatkov izbranim rezultatom."
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -804,12 +804,12 @@ msgstr "Zadetki iskanja"
#: searx/templates/oscar/results.html:21
#: searx/templates/simple/results.html:84
msgid "Try searching for:"
-msgstr ""
+msgstr "Poskusite iskati:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr ""
+msgstr "Iskalniki ne morejo pridobiti rezultatov"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -874,12 +874,12 @@ msgstr "Trenutno ni podatkov na voljo."
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr ""
+msgstr "Iskalniki ne morejo pridobiti rezultatov."
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr ""
+msgstr "Prosimo, poskusite kasneje tu ali na drugi instanci searx."
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
@@ -998,7 +998,7 @@ msgstr "Naloži več..."
#: searx/templates/simple/base.html:31
msgid "No item found"
-msgstr ""
+msgstr "Ni zadetkov"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
@@ -1006,8 +1006,8 @@ msgstr "Podpira izbrani jezik"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
-msgstr ""
+msgstr "Uporabniški vmesnik"
#: searx/templates/simple/preferences.html:154
msgid "Privacy"
-msgstr ""
+msgstr "Zasebnost"
diff --git a/searx/translations/sr/LC_MESSAGES/messages.mo b/searx/translations/sr/LC_MESSAGES/messages.mo
index b38a68d1d3..65efaaa1a9 100644
Binary files a/searx/translations/sr/LC_MESSAGES/messages.mo and b/searx/translations/sr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/sv/LC_MESSAGES/messages.mo b/searx/translations/sv/LC_MESSAGES/messages.mo
index b3687f94a4..73d36de8cd 100644
Binary files a/searx/translations/sv/LC_MESSAGES/messages.mo and b/searx/translations/sv/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/sv/LC_MESSAGES/messages.po b/searx/translations/sv/LC_MESSAGES/messages.po
index 3c98e06fc6..e8b01c8324 100644
--- a/searx/translations/sv/LC_MESSAGES/messages.po
+++ b/searx/translations/sv/LC_MESSAGES/messages.po
@@ -4,13 +4,14 @@
#
# Translators:
# Jonatan Nyberg, 2016-2017
+# Jonatan Nyberg, 2018
# Jonatan Nyberg, 2017-2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2018-01-13 11:39+0000\n"
+"PO-Revision-Date: 2018-07-24 18:39+0000\n"
"Last-Translator: Jonatan Nyberg\n"
"Language-Team: Swedish (http://www.transifex.com/asciimoo/searx/language/sv/)\n"
"MIME-Version: 1.0\n"
@@ -160,7 +161,7 @@ msgstr "Automatiskt ladda nästa sida när du bläddrar till botten av aktuell s
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "Open Access DOI-omskrivning"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
@@ -182,7 +183,7 @@ msgstr "Resultat öppnas i samma fönster som standard. Denna insticksmodul skri
#: searx/plugins/search_on_category_select.py:18
msgid "Search on category select"
-msgstr "Sök på kategori välj"
+msgstr "Sök vid val av kategori"
#: searx/plugins/search_on_category_select.py:19
msgid ""
@@ -198,7 +199,7 @@ msgstr "Visar din IP om förfrågan är \"ip\" och din användaragent om förfr
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
-msgstr "Trackerwebbadress borttagare"
+msgstr "Bevakningswebbadress borttagare"
#: searx/plugins/tracker_url_remover.py:27
msgid "Remove trackers arguments from the returned URL"
@@ -263,7 +264,7 @@ msgstr "Inställningar"
#: searx/templates/oscar/preferences.html:35
#: searx/templates/simple/preferences.html:34
msgid "Default categories"
-msgstr "Standard kategorier"
+msgstr "Standardkategorier"
#: searx/templates/courgette/preferences.html:13
#: searx/templates/legacy/preferences.html:14
@@ -713,7 +714,7 @@ msgstr "Stil"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "Open Access DOI-lösare"
#: searx/templates/oscar/preferences.html:123
msgid ""
diff --git a/searx/translations/te/LC_MESSAGES/messages.mo b/searx/translations/te/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..57488bf0bd
Binary files /dev/null and b/searx/translations/te/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/te/LC_MESSAGES/messages.po b/searx/translations/te/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..8da1506ccf
--- /dev/null
+++ b/searx/translations/te/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# Joseph Nuthalapati <[email protected]>, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-03-08 16:40+0000\n"
+"Last-Translator: Joseph Nuthalapati <[email protected]>\n"
+"Language-Team: Telugu (http://www.transifex.com/asciimoo/searx/language/te/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: te\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "కాలపరిమితి దాటిపోయింది"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr ""
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr ""
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "ఫైళ్ళు"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "సాధారణ"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "సంగీతం"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "సోషల్ మీడియా"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "చిత్రాలు"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "వీడియోలు"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "ఐటి"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "వార్తలు"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "పటము"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "విజ్ఞానశాస్త్రం"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr ""
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "చెల్లని అమరికలు"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "శోధనలో దోషము"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} నిమిషము(ల) క్రిందట"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr ""
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr ""
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr ""
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "సాంఖ్యకశాస్త్ర ప్రమేయాలు"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr ""
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr ""
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr ""
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "ఫలితముల సంఖ్య"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr ""
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr ""
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "దోషములు"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr ""
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr ""
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr ""
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr ""
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr ""
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr ""
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr ""
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr ""
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr ""
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr ""
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr ""
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr ""
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr ""
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr ""
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr ""
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "పుట దొరకలేదు"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "%(search_page)sకు వెళ్ళు"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "శోధన పుట"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "అభిరుచులు"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "గురించి"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "అభిరుచులు"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "నిష్క్రియ వర్గాలు"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "శోధన భాష"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "నిష్క్రియ భాష"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "వినిమయసీమ భాష"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "విధానం"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "సురక్షితశోధన"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "కఠినమైన"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "మితమైన"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "ఏమీ లేదు"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "రంగు"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "నీలం (నిష్క్రియం)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "ఊదారంగు"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "ఆకుపచ్చ"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr " ముదురు నీలం"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "నారింజ"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "ఎరుపు"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "ప్రస్తుతం ఉపయోగించబడుతున్న శోధన యంత్రాలు"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "యంత్రం పేరు"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "వర్గము"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "అనుమతించు"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "అడ్డగించు"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr ""
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "దాచు"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "నిష్క్రియాలకు అమర్చు"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "వెనక్కి"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "శోధన URL"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "ఫలితాలను దింపుకోండి"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "జవాబులు"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "సూచనలు"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "పూర్వపు పుట"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "తర్వాతి పుట"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "శోధించు..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr ""
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr ""
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr ""
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr ""
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr ""
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr ""
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr ""
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr ""
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "మూసివేయు"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "దోషం!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr ""
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "ఆదరించబడిన"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "ఆదరణ లేని"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "సాధారణ"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "యంత్రాలు"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "ప్లగిన్లు"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "జవాబులు"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "కుకీలు"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "వినిమయసీమ యొక్క భాషను మార్చు"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "టైపు చేస్తూ శోధించు"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "విషయాలను వడకట్టు"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "శైలి"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "సత్వరమార్గం"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "ఎంచుకున్న భాష"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "కాల శ్రేణి"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "సగటు సమయం"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "గరిష్ఠ సమయం"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "పేరు"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "వర్ణన"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "ఉదాహరణలు"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "కుకీ పేరు"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "విలువ"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr ""
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr ""
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "దీనికొరకు శోధించండి:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "యంత్రాలు ఫలితాలను రాబట్టలేకపోతున్నాయి"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "లంకెలు"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "శోధన ప్రారంభించు"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "స్థితి వివరణ లెక్కలు"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "ఎప్పుడైనా"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "క్రిందటి రోజు"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "క్రిందటి వారం"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "క్రిందటి నెల"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "క్రిందటి సంవత్సరం"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "జాగ్రత్త!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "సమాచారం!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "యంత్రాలు ఫలితాలను రాబట్టలేకపోయాయి."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "క్షమించండి!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr ""
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "భళా!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "ఆమరికలు విజయవంతంగా పొందుపరచబడ్డాయి."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "అయ్యో!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "ఏదో తప్పు జరిగింది."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr ""
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "ఫైళ్ళ సంఖ్య"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "వీడియో చూపించు"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "వీడియోను దాచిపెట్టు"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr ""
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "ఏమీ దొరకలేదు"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "ఆంతరంగికత"
diff --git a/searx/translations/tr/LC_MESSAGES/messages.mo b/searx/translations/tr/LC_MESSAGES/messages.mo
index 4125a24adb..ec47020aa0 100644
Binary files a/searx/translations/tr/LC_MESSAGES/messages.mo and b/searx/translations/tr/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/tr/LC_MESSAGES/messages.po b/searx/translations/tr/LC_MESSAGES/messages.po
index 65974de7a8..a15c85609f 100644
--- a/searx/translations/tr/LC_MESSAGES/messages.po
+++ b/searx/translations/tr/LC_MESSAGES/messages.po
@@ -3,6 +3,7 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
+# Arda Kılıçdağı <[email protected]>, 2018
# Caner Başaran <[email protected]>, 2014-2016
# FIRST AUTHOR <EMAIL@ADDRESS>, 2014
msgid ""
@@ -10,8 +11,8 @@ msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-12-06 17:22+0000\n"
+"Last-Translator: Arda Kılıçdağı <[email protected]>\n"
"Language-Team: Turkish (http://www.transifex.com/asciimoo/searx/language/tr/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -22,15 +23,15 @@ msgstr ""
#: searx/search.py:137 searx/search.py:182
msgid "timeout"
-msgstr ""
+msgstr "zaman aşımı"
#: searx/search.py:144
msgid "request exception"
-msgstr ""
+msgstr "istekte bir hata oluştu"
#: searx/search.py:151
msgid "unexpected crash"
-msgstr ""
+msgstr "beklenmmeyen hata"
#: searx/webapp.py:136
msgid "files"
@@ -74,15 +75,15 @@ msgstr "bilim"
#: searx/webapp.py:399 searx/webapp.py:658
msgid "Invalid settings, please edit your preferences"
-msgstr ""
+msgstr "Hatalı ayar girildi, lütfen ayarlarınızı kontrol edin"
#: searx/webapp.py:415
msgid "Invalid settings"
-msgstr ""
+msgstr "Hatalı ayar"
#: searx/webapp.py:449 searx/webapp.py:493
msgid "search error"
-msgstr ""
+msgstr "arama hatası"
#: searx/webapp.py:530
msgid "{minutes} minute(s) ago"
@@ -94,27 +95,27 @@ msgstr "{hours} saat(), {minutes} dakika() önce"
#: searx/answerers/random/answerer.py:53
msgid "Random value generator"
-msgstr ""
+msgstr "Rastgele değer üretici"
#: searx/answerers/random/answerer.py:54
msgid "Generate different random values"
-msgstr ""
+msgstr "Farklı rastgele metinler üret"
#: searx/answerers/statistics/answerer.py:53
msgid "Statistics functions"
-msgstr ""
+msgstr "İstatistik fonksiyonları"
#: searx/answerers/statistics/answerer.py:54
msgid "Compute {functions} of the arguments"
-msgstr ""
+msgstr "Argümanların {functions} değerlerini hesapla"
#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
msgid "Engine time (sec)"
-msgstr ""
+msgstr "Motor cevap süresi (sn)"
#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
msgid "Page loads (sec)"
-msgstr "Yüklenen sayfa (sn)"
+msgstr "Sayfa yüklenmesi (sn)"
#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
#: searx/templates/oscar/results.html:95
@@ -124,11 +125,11 @@ msgstr "Sonuç sayısı"
#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
msgid "Scores"
-msgstr ""
+msgstr "Skor"
#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
msgid "Scores per result"
-msgstr ""
+msgstr "Sonuç başına skor"
#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
msgid "Errors"
@@ -136,7 +137,7 @@ msgstr "Hatalar"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
-msgstr ""
+msgstr "{title} (GEÇERSİZ)"
#: searx/engines/pdbe.py:91
msgid "This entry has been superseded by"
diff --git a/searx/translations/uk/LC_MESSAGES/messages.mo b/searx/translations/uk/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..6610dfaa7f
Binary files /dev/null and b/searx/translations/uk/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/uk/LC_MESSAGES/messages.po b/searx/translations/uk/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..7c6ac5aacf
--- /dev/null
+++ b/searx/translations/uk/LC_MESSAGES/messages.po
@@ -0,0 +1,1015 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# pvhn4 <[email protected]>, 2017
+# pvhn4 <[email protected]>, 2017
+# zubr139, 2016-2017
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2017-11-01 20:31+0000\n"
+"Last-Translator: Adam Tauber <[email protected]>\n"
+"Language-Team: Ukrainian (http://www.transifex.com/asciimoo/searx/language/uk/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: uk\n"
+"Plural-Forms: nplurals=4; plural=(n % 1 == 0 && n % 10 == 1 && n % 100 != 11 ? 0 : n % 1 == 0 && n % 10 >= 2 && n % 10 <= 4 && (n % 100 < 12 || n % 100 > 14) ? 1 : n % 1 == 0 && (n % 10 ==0 || (n % 10 >=5 && n % 10 <=9) || (n % 100 >=11 && n % 100 <=14 )) ? 2: 3);\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr ""
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr ""
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr ""
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "файли"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "загальні"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "музика"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "соцмережі"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "зображення"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "відео"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "IT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "новини"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "карти"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "наука"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Невірні налаштування, будь ласка, зробіть зміни в налаштуваннях"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr ""
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "помилка пошуку"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} хвилин тому"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} годин, {minutes} хвилин тому"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Генератор випадкових значень"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Створити різні випадкові значення"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Функції статистики"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Розрахувати {functions} аргументів"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Час пошуку (сек)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Час завантадення (сек)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Число результатів"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Влучань"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Влучань за результат"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Помилок"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (OBSOLETE)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Цей запис був змінений"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr ""
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "За можливістю замінити в посиланнях HTTP на HTTPS"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Нескінченна прокрутка"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Автоматично завантажувати наступну сторінку при прокрутці поточної до кінця"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr ""
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Уникайте платіжних каналів шляхом переадресації на версії публікацій з відкритим доступом, коли це можливо"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Відкривати посилання результатів в нових вкладках"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Типово результати відкриваються в тому ж вікні. Цей плагін змінює поведінку, щоб посилання відкривались типово в нових вкладках/вікнах. (Необхідний JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Пошук по обраній категорії"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Виконувати пошук зразу при обранні категорії. Вимкнути вибір декількох категорій. (Необхідний JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Відображає IP-адресу при запиті \"ip\" та ваш user-agent при запиті \"user agent\"."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Видалення URL-адреси трекера"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Вилучіть аргументи трекера з поверненої URL-адреси"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Гарячі клавіші Vim"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Переміщення результатів пошуку за допомогою віртуальних клавіш (потрібно JavaScript). Натисніть клавішу \"h\" на головній сторінці або на сторінці результатів, щоб отримати допомогу."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Сторінка не знайдена"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Перейти до %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "сторінки пошуку"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "опції"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "про сайт"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Опції"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Типові категорії"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Мова пошуку"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Стандартна мова"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Мова інтерфейсу"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Автозаповнення"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Проксі для зображень"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Ввімкнено"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Вимкнено"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Метод"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "БезпечнийПошук"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Жорский"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Помірний"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Вимкнений"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Теми"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Колір"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Синій (типово)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Фіолетовий"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Зелений"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Блакитний"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Помаранчевий"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Червоний"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Пошукові системи, які використовуються"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Назва пошукової системи"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Категорія"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Дозволити"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Заблокувати"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Налаштування зберігаються в ваших cookie-файлах, що дає нам змогу не зберігати ці відомості про вас."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Ці cookie-файли необхідні винятково для вашої зручності, ми не використовуємо ці cookie-файли, щоб відслідковувати вас."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "зберегти"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Відновити стандартні налаштування"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "назад"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "Посилання на пошук"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Завантажити результати"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Відповіді"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Пропозиції"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "попередня сторінка"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "наступна сторінка"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Шукати..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Статистика пошукової системи"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "в контексті"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Сідер"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Лічер"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "магнет-посилання"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "торрент-файл"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Натисніть лупу, щоб виконати пошук"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Результати в нових вкладках"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Ввімк."
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Вимк."
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "архівовано"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Додаткові налаштування"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Закрити"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Помилка!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Використовується"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "вільна система метапошуку, яка поважає вашу приватність"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "проксовано"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr ""
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Загальні"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Пошукові системи"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Плагіни"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Відповідачі"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cookie-файли"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Якій мові ви віддаєте перевагу для пошуку?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Змінити мову сайту"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Шукати підчас набору"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Проксувати знайдені зображення за допомогою searx"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Змінити спосіб відправки запитів, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">детальніше про методи запитів</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Фільтр контенту"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Змінити вигляд сайту"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Обрати стиль для цієї теми"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Стиль"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Гарячі клавіші"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Часовий діапазон"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Середній час"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Максимальний час"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Список модулів миттєвих відповідей searx."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Назва"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Ключові слова"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Опис"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Приклади"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Це список cookie-файлів та їх значень, які searx зберігає на вашому комп'ютері."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "По цьому списку ви можете оцінити відкритість searx."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Ім'я cookie"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Значення"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr ""
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr ""
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Результати пошуку"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr ""
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr ""
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Посилання"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Розпочати пошук"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "статистика"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "За весь час"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "За останній день"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "За останній тиждень"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "За останній місяць"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "За останній рік"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Отакої!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Схоже, що ви використовуєте searx вперше."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Інформація!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "в даний час cookie-файли не встановлені."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "В даний час немає доступних даних."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr ""
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Вибачте!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "ми не знайшли жодних результатів. Будь ласка, використайте інший запит або виконайте пошук в декількох категоріях."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Чудово!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Налаштування успішно збережені."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "От халепа!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Щось пішло не так."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "показати медіа"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "приховати медіа"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Завантажити зображення"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Переглянути джерело"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "показати карту"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "приховати карту"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "показати деталі"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "приховати деталі"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Розмір файла"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Байтів"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "КіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "МіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "ГіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "ТіБ"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Кількість Файлів"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "показати відео"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "приховати відео"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Завантажити більше..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Підтримка обраної мови"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr ""
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr ""
diff --git a/searx/translations/vi/LC_MESSAGES/messages.mo b/searx/translations/vi/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..07dc309ea2
Binary files /dev/null and b/searx/translations/vi/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/vi/LC_MESSAGES/messages.po b/searx/translations/vi/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..d8a1a0c943
--- /dev/null
+++ b/searx/translations/vi/LC_MESSAGES/messages.po
@@ -0,0 +1,1013 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# dd721411 <[email protected]>, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-02-28 17:27+0000\n"
+"Last-Translator: dd721411 <[email protected]>\n"
+"Language-Team: Vietnamese (http://www.transifex.com/asciimoo/searx/language/vi/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: vi\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "hết thời hạn"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "ngoại lệ yêu cầu"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "lỗi bất ngờ"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "các tập tin"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "tổng quát"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "âm nhạc"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "mạng xã hội"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "hình ảnh"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "phim"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "CNTT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "tin tức"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "bản đồ"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "khoa học"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "Cài đặt không hợp lệ, xin xem lại tuỳ chỉnh"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "Cài đặt không hợp lệ"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "lỗi tìm kiếm"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} phút() trước"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} giờ(), {minutes} phút() trước"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "Trình tạo giá trị ngẫu nhiên"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "Tạo các giá trị ngẫu nhiên khác nhau"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "Các hàm thống kê"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "Tính toán {functions} của các đối số"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "Thời gian trình tìm kiếm (giây)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "Tải trang (giây)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "Số lượng kết quả"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "Điểm số"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "Điểm số cho từng kết quả"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "Các lỗi"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (LỖI THỜI)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "Mục này đã được thay thế bởi"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "Không có bản tóm tắt nào cho ấn phẩm này."
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "Viết lại các liên kết HTTP thành HTTPS khi có thể"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "Cuộn liên tục"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "Tự động tải trang kế tiếp khi cuộn đến cuối trang hiện tại"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "Viết lại DOI Truy Cập Miễn Phí"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "Tránh việc trả phí bằng cách chuyển hướng đến các phiên bản truy cập miễn phí của ấn phẩm khi có thể"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "Mở kết quả trên những thẻ trình duyệt mới"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "Theo mặc định thì các kết quả được mở trên cùng một cửa sổ. Phần mở rộng này sẽ ghi đè lên hành vi mặc định đó để mở các liên kết trên các thẻ/cửa sổ mới. (yêu cầu JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "Tìm kiếm khi chọn danh mục đơn"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "Thực thi tìm kiếm ngay khi chọn một danh mục. Tắt đi để chọn nhiều danh mục. (yêu cầu JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "Hiện IP của bạn khi gõ \"ip\" và hiện user agent khi gõ \"user agent\"."
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "Trình loại bỏ URL theo dõi"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "Loại bỏ các đối số theo dõi từ URL trả về"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "Các phím tắt Vim-like"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "Điều hướng các kết quả tìm kiếm với các phím tắt Vim-like (yêu cầu JavaScript). Nhấn phím \"h\" trên trang chính hoặc trang kết quả để xem trợ giúp."
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "Không tìm thấy trang"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "Đi đến %(search_page)s."
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "tìm kiếm trang"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "tuỳ chỉnh"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "thông tin về"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "Tuỳ chỉnh"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "Các danh mục mặc định"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "Ngôn ngữ tìm kiếm"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "Ngôn ngữ mặc định"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "Ngôn ngữ giao diện"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "Gợi ý tự động"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "Proxy hình ảnh"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "Đã "
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "Đã tắt"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "Phương pháp"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "Tìm Kiếm An Toàn"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "Nghiêm ngặt"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "Vừa phải"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "Không"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "Nền"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "Màu sắc"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "Xanh lam (mặc định)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "Tím"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "Xanh lục"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "Lục lam"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "Cam"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "Đỏ"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "Các trình tìm kiếm đang được dùng"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "Tên trình tìm kiếm"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "Danh mục"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "Cho phép"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "Chặn"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "Những cài đặt này được lưu trữ trong các cookie, điều này cho phép chúng tôi không phải lưu các dữ liệu về bạn."
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "Những cookie này chỉ phục vụ cho chính bạn, chúng tôi không sử dụng chúng để theo dõi bạn."
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "lưu"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "Đưa về mặc định"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "trở về"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "URL Tìm kiếm"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "Tải về các kết quả"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "Các đáp án"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "Các gợi ý"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "trang liền trước"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "trang tiếp theo"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "Tìm kiếm về..."
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "Các thông số về trình tìm kiếm"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "ngữ cảnh gốc"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "Seeder"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "Leecher"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "liên kết magnet"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "tập tin torrent"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "Nhấp vào hình kính lúp để tiến hành tìm kiếm"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "Hiện kết quả trên các thẻ mới"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "Bật"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "Tắt"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "đã lưu cache"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "Cài đặt nâng cao"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "Đóng"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "Lỗi!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "Được cung cấp bởi"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "một trình tìm kiếm đa nguồn, dễ tuỳ biến và tôn trọng quyền riêng tư"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "đã proxy"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "có hỗ trợ"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "không hỗ trợ"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "Tổng quát"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "Các trình tìm kiếm"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "Các phần mở rộng"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "Trình trả lời nhanh"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Các cookie"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "Bạn muốn tìm kiếm bằng ngôn ngữ nào?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "Thay đổi ngôn ngữ giao diện"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "Tìm kiếm ngay khi gõ"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "Proxy kết quả hình ảnh qua searx"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "Thay đổi cách thức các cụm từ tìm kiếm được gửi đi, <a href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">tìm hiểu thêm về các phương thức tìm kiếm</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "Lọc các nội dung"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "Thay đổi giao diện searx"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "Chọn phong cách cho nền này"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "Phong cách"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "Trình xử lý DOI Truy Cập Miễn Phí"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "Chuyển hướng đến các phiên bản truy cập miễn phí của ấn phẩm khi có thể (yêu cầu phần mở rộng)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "Lối tắt"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "Ngôn ngữ được chọn"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "Khoảng thời gian"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "Thời gian trung bình"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "Thời gian tối đa"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "Đây là danh sách các module trả lời nhanh của searx."
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "Tên"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "Các từ khoá"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "Mô tả"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "Các ví dụ"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "Đây là danh sách các cookie và giá trị của chúng mà searx đang lưu trữ trên máy tính của bạn."
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "Với danh sách này, bạn có thể đánh giá tính minh bạch của searx."
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Tên cookie"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "Giá trị"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "URL tìm kiếm của tuỳ chỉnh được lưu hiện tại"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "Ghi chú: việc định rõ cài đặt cá nhân trong URL tìm kiếm có thể làm suy giảm mức độ riêng tư vì nó chuyển dữ liệu đến các trang kết quả được nhấp chọn."
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "Kết quả tìm kiếm"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "Thử tìm kiếm:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "Các trình tìm kiếm không nhận được kết quả"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "Các liên kết"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "Bắt đầu tìm kiếm"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "các thông số"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "Bất kỳ lúc nào"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "Hôm trước"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "Tuần trước"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "Tháng trước"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "Năm ngoái"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "Cẩn thận!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "Có vẻ như bạn mới sử dụng searx lần đầu."
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "Thông tin!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "hiện tại không có cookie nào."
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "Hiện không có dữ liệu nào."
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "Các trình tìm kiếm không nhận được kết quả."
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "Xin thử lại lần nữa hoặc tìm một server searx khác."
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "Xin lỗi!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "chúng tôi không tìm thấy kết quả nào. Xin gõ cụm từ khác hoặc tìm kiếm trong nhiều danh mục hơn."
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "Tốt lắm!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "Lưu cài đặt thành công."
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "Quái quỷ thật!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "Đã có sự cố."
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "hiện nội dung"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "ẩn nội dung"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "Xem hình ảnh"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "Xem nguồn"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "hiện bản đồ"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "ẩn bản đồ"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "hiện chi tiết"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "ẩn chi tiết"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "Kích thước tập tin"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "Byte"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "Số lượng tập tin"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "hiện phim"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "ẩn phim"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "Tải thêm..."
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "Không tìm thấy gì"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "Có hỗ trợ ngôn ngữ được chọn"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "Giao diện người dùng"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "Quyền riêng tư"
diff --git a/searx/translations/zh_CN/LC_MESSAGES/messages.mo b/searx/translations/zh_CN/LC_MESSAGES/messages.mo
index b094298fdb..c2006aca13 100644
Binary files a/searx/translations/zh_CN/LC_MESSAGES/messages.mo and b/searx/translations/zh_CN/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/zh_CN/LC_MESSAGES/messages.po b/searx/translations/zh_CN/LC_MESSAGES/messages.po
index f7a7068eb6..78acb2d40b 100644
--- a/searx/translations/zh_CN/LC_MESSAGES/messages.po
+++ b/searx/translations/zh_CN/LC_MESSAGES/messages.po
@@ -3,15 +3,18 @@
# This file is distributed under the same license as the PROJECT project.
#
# Translators:
+# Crystal RainSlide, 2018
+# Mingye Wang <[email protected]>, 2018
+# Sion Kazama, 2018
# wenke, 2015
-# wenke, 2015-2017
+# wenke, 2015-2018
msgid ""
msgstr ""
"Project-Id-Version: searx\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2017-11-01 21:31+0100\n"
-"PO-Revision-Date: 2017-11-01 20:31+0000\n"
-"Last-Translator: Adam Tauber <[email protected]>\n"
+"PO-Revision-Date: 2018-12-10 08:32+0000\n"
+"Last-Translator: Crystal RainSlide\n"
"Language-Team: Chinese (China) (http://www.transifex.com/asciimoo/searx/language/zh_CN/)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
@@ -58,7 +61,7 @@ msgstr "视频"
#: searx/webapp.py:142
msgid "it"
-msgstr "it"
+msgstr "IT"
#: searx/webapp.py:143
msgid "news"
@@ -74,7 +77,7 @@ msgstr "学术"
#: searx/webapp.py:399 searx/webapp.py:658
msgid "Invalid settings, please edit your preferences"
-msgstr "无效的设置,请编辑你的首选项"
+msgstr "设置无效,请编辑您的首选项"
#: searx/webapp.py:415
msgid "Invalid settings"
@@ -86,11 +89,11 @@ msgstr "搜索错误"
#: searx/webapp.py:530
msgid "{minutes} minute(s) ago"
-msgstr "{minutes}分钟之前"
+msgstr "{minutes} 分钟前"
#: searx/webapp.py:532
msgid "{hours} hour(s), {minutes} minute(s) ago"
-msgstr "{hours}小时{minutes}分钟之前"
+msgstr "{hours} 小时 {minutes} 分钟前"
#: searx/answerers/random/answerer.py:53
msgid "Random value generator"
@@ -106,7 +109,7 @@ msgstr "统计功能"
#: searx/answerers/statistics/answerer.py:54
msgid "Compute {functions} of the arguments"
-msgstr "计算 {functions}参数"
+msgstr "计算 {functions} 参数"
#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
msgid "Engine time (sec)"
@@ -114,7 +117,7 @@ msgstr "搜索引擎时间(秒)"
#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
msgid "Page loads (sec)"
-msgstr "页面加载 (秒)"
+msgstr "页面加载(秒)"
#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
#: searx/templates/oscar/results.html:95
@@ -128,7 +131,7 @@ msgstr "得分"
#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
msgid "Scores per result"
-msgstr "每个结果等分"
+msgstr "每个结果的分数"
#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
msgid "Errors"
@@ -136,19 +139,19 @@ msgstr "错误"
#: searx/engines/pdbe.py:87
msgid "{title} (OBSOLETE)"
-msgstr "{title} (OBSOLETE)"
+msgstr "{title} (过时)"
#: searx/engines/pdbe.py:91
msgid "This entry has been superseded by"
-msgstr "此条目已经被取代"
+msgstr "此条目已被取代"
#: searx/engines/pubmed.py:74
msgid "No abstract is available for this publication."
-msgstr ""
+msgstr "本出版物没有摘要。"
#: searx/plugins/https_rewrite.py:32
msgid "Rewrite HTTP links to HTTPS if possible"
-msgstr "尽可能重定向HTTP链接到HTTPS"
+msgstr "将支持 HTTPS 的 HTTP 链接改为 HTTPS 链接"
#: searx/plugins/infinite_scroll.py:3
msgid "Infinite scroll"
@@ -160,13 +163,13 @@ msgstr "滚动到当前页面底部时自动加载下一页"
#: searx/plugins/oa_doi_rewrite.py:9
msgid "Open Access DOI rewrite"
-msgstr ""
+msgstr "开放访问 DOI 重定向"
#: searx/plugins/oa_doi_rewrite.py:10
msgid ""
"Avoid paywalls by redirecting to open-access versions of publications when "
"available"
-msgstr "重定向到可浏览的文章来避免付费墙(如果可用)"
+msgstr "尽量重定向到开放访问的文章以避免付费墙(如果可用)"
#: searx/plugins/open_results_on_new_tab.py:18
#: searx/templates/oscar/preferences.html:114
@@ -178,7 +181,7 @@ msgstr "在新标签页打开搜索链接"
msgid ""
"Results are opened in the same window by default. This plugin overwrites the"
" default behaviour to open links on new tabs/windows. (JavaScript required)"
-msgstr "搜索结果默认在原窗口打开。这个插件使其在新标签页/窗口打开。(需要启用JavaScript )"
+msgstr "搜索结果默认在原窗口打开。此插件能使其在新标签页或新窗口打开。(需启用 JavaScript)"
#: searx/plugins/search_on_category_select.py:18
msgid "Search on category select"
@@ -188,13 +191,13 @@ msgstr "搜索类别选择"
msgid ""
"Perform search immediately if a category selected. Disable to select "
"multiple categories. (JavaScript required)"
-msgstr "选中一个类别立即搜索。禁用可以选择多个类别搜索。(需要启用JavaScript )"
+msgstr "选中一个类别后立即执行搜索。禁用后,可以选择多个类别一起搜索。(需启用 JavaScript)"
#: searx/plugins/self_info.py:20
msgid ""
"Displays your IP if the query is \"ip\" and your user agent if the query "
"contains \"user agent\"."
-msgstr "搜索“ip”显示你的ip以及搜索内容含有“user agent”显示你的user agent。"
+msgstr "当您搜索「ip」时,这将会显示您的 IP 地址;同理,在搜索「user agent」时,将会显示您的 User Agent。"
#: searx/plugins/tracker_url_remover.py:26
msgid "Tracker URL remover"
@@ -206,13 +209,13 @@ msgstr "从返回的链接中移除跟踪参数"
#: searx/plugins/vim_hotkeys.py:3
msgid "Vim-like hotkeys"
-msgstr "类vim快捷键"
+msgstr "Vim 式快捷键"
#: searx/plugins/vim_hotkeys.py:4
msgid ""
"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
"\"h\" key on main or result page to get help."
-msgstr "使用类vim快捷键浏览搜索结果(JavaScript启用)。按“h”键获取帮助。"
+msgstr "使用 Vim 式快捷键浏览搜索结果(需要 JavaScript)。在主页或结果页按“h”键获取帮助。"
#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
@@ -225,7 +228,7 @@ msgstr "未找到网页"
#: searx/templates/simple/404.html:6
#, python-format
msgid "Go to %(search_page)s."
-msgstr "返回%(search_page)s。"
+msgstr "前往 %(search_page)s。"
#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
@@ -342,7 +345,7 @@ msgstr "安全搜索"
#: searx/templates/oscar/preferences.html:90
#: searx/templates/simple/preferences.html:66
msgid "Strict"
-msgstr "严格模式"
+msgstr "严格"
#: searx/templates/courgette/preferences.html:67
#: searx/templates/legacy/preferences.html:68
@@ -356,7 +359,7 @@ msgstr "中等"
#: searx/templates/oscar/preferences.html:92
#: searx/templates/simple/preferences.html:68
msgid "None"
-msgstr "不过滤"
+msgstr "无"
#: searx/templates/courgette/preferences.html:73
#: searx/templates/legacy/preferences.html:74
@@ -444,7 +447,7 @@ msgstr "阻止"
msgid ""
"These settings are stored in your cookies, this allows us not to store this "
"data about you."
-msgstr "这些设置保存在你的cookie,我们将不能保存你的数据。"
+msgstr "这些设置被存储在您的 cookie 中,这种保存设置的方式允许我们不存储您的设置数据。"
#: searx/templates/courgette/preferences.html:124
#: searx/templates/legacy/preferences.html:121
@@ -454,7 +457,7 @@ msgstr "这些设置保存在你的cookie,我们将不能保存你的数据。
msgid ""
"These cookies serve your sole convenience, we don't use these cookies to "
"track you."
-msgstr "这些cookie是为了让你更加方便,我们不会使用cookie追踪你。"
+msgstr "这些 Cookie 信息可辅助您便捷地使用服务,我们不会利用这些信息来追踪您。"
#: searx/templates/courgette/preferences.html:127
#: searx/templates/legacy/preferences.html:124
@@ -544,14 +547,14 @@ msgstr "原始上下文"
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Seeder"
-msgstr "Seeder"
+msgstr "做种"
#: searx/templates/courgette/result_templates/torrent.html:7
#: searx/templates/legacy/result_templates/torrent.html:11
#: searx/templates/oscar/result_templates/torrent.html:6
#: searx/templates/simple/result_templates/torrent.html:9
msgid "Leecher"
-msgstr "Leecher"
+msgstr "接收"
#: searx/templates/courgette/result_templates/torrent.html:9
#: searx/templates/legacy/result_templates/torrent.html:9
@@ -570,7 +573,7 @@ msgstr "种子文件"
#: searx/templates/legacy/categories.html:8
#: searx/templates/simple/categories.html:6
msgid "Click on the magnifier to perform search"
-msgstr "点击放大镜开始搜索"
+msgstr "点击放大镜按钮开始搜索"
#: searx/templates/legacy/preferences.html:84
#: searx/templates/oscar/preferences.html:113
@@ -622,7 +625,7 @@ msgstr "技术支持来自"
#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
msgid "a privacy-respecting, hackable metasearch engine"
-msgstr "一个尊重隐私,可再开发的元搜索引擎"
+msgstr "一个尊重隐私、可再开发的元搜索引擎"
#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
#: searx/templates/simple/macros.html:43
@@ -662,12 +665,12 @@ msgstr "智能答复"
#: searx/templates/oscar/preferences.html:17
#: searx/templates/oscar/preferences.html:272
msgid "Cookies"
-msgstr "Cookie"
+msgstr "Cookies"
#: searx/templates/oscar/preferences.html:42
#: searx/templates/simple/preferences.html:48
msgid "What language do you prefer for search?"
-msgstr "你搜索时更喜欢什么语言?"
+msgstr "您更喜欢使用什么语言进行搜索?"
#: searx/templates/oscar/preferences.html:48
#: searx/templates/simple/preferences.html:128
@@ -682,14 +685,14 @@ msgstr "自动补全搜索字词"
#: searx/templates/oscar/preferences.html:69
#: searx/templates/simple/preferences.html:173
msgid "Proxying image results through searx"
-msgstr "通过searx代理图片"
+msgstr "通过 searx 代理图片结果"
#: searx/templates/oscar/preferences.html:78
msgid ""
"Change how forms are submited, <a "
"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
" rel=\"external\">learn more about request methods</a>"
-msgstr "更改请求方法,<a href=\"http://zh.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\" rel=\"external\">深入了解请求方法</a>"
+msgstr "更改提交表单时使用的请求方法,<a href=\"https://zh.wikipedia.org/wiki/%E8%B6%85%E6%96%87%E6%9C%AC%E4%BC%A0%E8%BE%93%E5%8D%8F%E8%AE%AE#%E8%AF%B7%E6%B1%82%E6%96%B9%E6%B3%95\" rel=\"external\">深入了解请求方法</a>"
#: searx/templates/oscar/preferences.html:87
#: searx/templates/simple/preferences.html:71
@@ -699,12 +702,12 @@ msgstr "过滤内容"
#: searx/templates/oscar/preferences.html:97
#: searx/templates/simple/preferences.html:139
msgid "Change searx layout"
-msgstr "改变searx布局"
+msgstr "改变 searx 布局"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
msgid "Choose style for this theme"
-msgstr "选择这个主题的样式"
+msgstr "选择此主题的样式"
#: searx/templates/oscar/preferences.html:106
#: searx/templates/oscar/preferences.html:111
@@ -713,13 +716,13 @@ msgstr "样式"
#: searx/templates/oscar/preferences.html:122
msgid "Open Access DOI resolver"
-msgstr ""
+msgstr "开放访问 DOI 解析器"
#: searx/templates/oscar/preferences.html:123
msgid ""
"Redirect to open-access versions of publications when available (plugin "
"required)"
-msgstr ""
+msgstr "尽可能重定向到出版物的开放访问版本(需要插件)"
#: searx/templates/oscar/preferences.html:163
#: searx/templates/oscar/preferences.html:175
@@ -752,7 +755,7 @@ msgstr "最大时间"
#: searx/templates/oscar/preferences.html:248
msgid "This is the list of searx's instant answering modules."
-msgstr "这是searx的即时回答模块。"
+msgstr "这是 searx 的即时回答模块列表。"
#: searx/templates/oscar/preferences.html:252
msgid "Name"
@@ -768,21 +771,21 @@ msgstr "描述"
#: searx/templates/oscar/preferences.html:255
msgid "Examples"
-msgstr "例子"
+msgstr "示例"
#: searx/templates/oscar/preferences.html:275
msgid ""
"This is the list of cookies and their values searx is storing on your "
"computer."
-msgstr "这里展示了searx保存在你的电脑上的cookie。"
+msgstr "此列表展示了 searx 在您设备上存储的 cookie 信息。"
#: searx/templates/oscar/preferences.html:276
msgid "With that list, you can assess searx transparency."
-msgstr "有了这个列表,你可以评估searx透明度。"
+msgstr "您可以基于此表格来评估 searx 的透明度。"
#: searx/templates/oscar/preferences.html:281
msgid "Cookie name"
-msgstr "cookie名称"
+msgstr "Cookie 名称"
#: searx/templates/oscar/preferences.html:282
msgid "Value"
@@ -796,7 +799,7 @@ msgstr "当前保存选项的搜索链接"
msgid ""
"Note: specifying custom settings in the search URL can reduce privacy by "
"leaking data to the clicked result sites."
-msgstr "注意:在搜索链接中保存自定义设置会把数据泄露给点击的网站泄漏隐私。"
+msgstr "注意:在搜索链接中保存自定义设置会把数据泄露给点击的结果网站,从而泄漏隐私。"
#: searx/templates/oscar/results.html:17
msgid "Search results"
@@ -810,7 +813,7 @@ msgstr "尝试搜索:"
#: searx/templates/oscar/results.html:100
#: searx/templates/simple/results.html:25
msgid "Engines cannot retrieve results"
-msgstr "引擎无法检索结果"
+msgstr "引擎无法获取结果"
#: searx/templates/oscar/results.html:131
msgid "Links"
@@ -858,29 +861,29 @@ msgstr "小心!"
#: searx/templates/oscar/messages/first_time.html:7
msgid "It look like you are using searx first time."
-msgstr "看起来你是第一次使用searx。"
+msgstr "看来这是您第一次使用 searx。"
#: searx/templates/oscar/messages/no_cookies.html:3
msgid "Information!"
-msgstr "信息!"
+msgstr "注意!"
#: searx/templates/oscar/messages/no_cookies.html:4
msgid "currently, there are no cookies defined."
-msgstr "目前还未保存cookie。"
+msgstr "目前,还没有任何信息被存储在 Cookie 中。"
#: searx/templates/oscar/messages/no_data_available.html:4
msgid "There is currently no data available. "
-msgstr "目前没有数据可用。"
+msgstr "目前没有可用的数据。"
#: searx/templates/oscar/messages/no_results.html:4
#: searx/templates/simple/messages/no_results.html:4
msgid "Engines cannot retrieve results."
-msgstr "引擎无法检索结果。"
+msgstr "引擎无法获取结果。"
#: searx/templates/oscar/messages/no_results.html:10
#: searx/templates/simple/messages/no_results.html:10
msgid "Please, try again later or find another searx instance."
-msgstr "请稍后再试或使用其他的searx实例。"
+msgstr "请稍后再试,或寻找其它的 searx 实例替代。"
#: searx/templates/oscar/messages/no_results.html:14
#: searx/templates/simple/messages/no_results.html:14
@@ -892,7 +895,7 @@ msgstr "抱歉!"
msgid ""
"we didn't find any results. Please use another query or search in more "
"categories."
-msgstr "我们没有找到任何结果。请换用其他词语或在更多类别中搜索。"
+msgstr "我们没有找到任何结果。请使用其它关键词或在更多类别中搜索。"
#: searx/templates/oscar/messages/save_settings_successfull.html:7
msgid "Well done!"
@@ -961,22 +964,22 @@ msgstr "B"
#: searx/templates/oscar/result_templates/torrent.html:10
#: searx/templates/simple/result_templates/torrent.html:13
msgid "kiB"
-msgstr "KB"
+msgstr "kiB"
#: searx/templates/oscar/result_templates/torrent.html:11
#: searx/templates/simple/result_templates/torrent.html:14
msgid "MiB"
-msgstr "MB"
+msgstr "MiB"
#: searx/templates/oscar/result_templates/torrent.html:12
#: searx/templates/simple/result_templates/torrent.html:15
msgid "GiB"
-msgstr "GB"
+msgstr "GiB"
#: searx/templates/oscar/result_templates/torrent.html:13
#: searx/templates/simple/result_templates/torrent.html:16
msgid "TiB"
-msgstr "TB"
+msgstr "TiB"
#: searx/templates/oscar/result_templates/torrent.html:15
#: searx/templates/simple/result_templates/torrent.html:20
@@ -995,7 +998,7 @@ msgstr "隐藏视频"
#: searx/templates/pix-art/results.html:28
msgid "Load more..."
-msgstr "载入更多..."
+msgstr "载入更多……"
#: searx/templates/simple/base.html:31
msgid "No item found"
@@ -1003,7 +1006,7 @@ msgstr "未找到条目"
#: searx/templates/simple/preferences.html:89
msgid "Supports selected language"
-msgstr "是否支持选定的语言"
+msgstr "支持选定的语言"
#: searx/templates/simple/preferences.html:118
msgid "User interface"
diff --git a/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo b/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo
index b7e099977f..0b2a3c921c 100644
Binary files a/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo and b/searx/translations/zh_Hant_TW/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/zh_TW/LC_MESSAGES/messages.mo b/searx/translations/zh_TW/LC_MESSAGES/messages.mo
new file mode 100644
index 0000000000..b6d43e2a89
Binary files /dev/null and b/searx/translations/zh_TW/LC_MESSAGES/messages.mo differ
diff --git a/searx/translations/zh_TW/LC_MESSAGES/messages.po b/searx/translations/zh_TW/LC_MESSAGES/messages.po
new file mode 100644
index 0000000000..0a4796f97e
--- /dev/null
+++ b/searx/translations/zh_TW/LC_MESSAGES/messages.po
@@ -0,0 +1,1015 @@
+# Translations template for PROJECT.
+# Copyright (C) 2017 ORGANIZATION
+# This file is distributed under the same license as the PROJECT project.
+#
+# Translators:
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016
+# Jeff Huang <[email protected]>, 2017
+# Mingye Wang <[email protected]>, 2018
+msgid ""
+msgstr ""
+"Project-Id-Version: searx\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2017-11-01 21:31+0100\n"
+"PO-Revision-Date: 2018-09-16 00:29+0000\n"
+"Last-Translator: Mingye Wang <[email protected]>\n"
+"Language-Team: Chinese (Taiwan) (http://www.transifex.com/asciimoo/searx/language/zh_TW/)\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.3.4\n"
+"Language: zh_TW\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: searx/search.py:137 searx/search.py:182
+msgid "timeout"
+msgstr "逾時"
+
+#: searx/search.py:144
+msgid "request exception"
+msgstr "請求例外"
+
+#: searx/search.py:151
+msgid "unexpected crash"
+msgstr "未預期的當機"
+
+#: searx/webapp.py:136
+msgid "files"
+msgstr "檔案"
+
+#: searx/webapp.py:137
+msgid "general"
+msgstr "一般"
+
+#: searx/webapp.py:138
+msgid "music"
+msgstr "音樂"
+
+#: searx/webapp.py:139
+msgid "social media"
+msgstr "社群媒體"
+
+#: searx/webapp.py:140
+msgid "images"
+msgstr "圖片"
+
+#: searx/webapp.py:141
+msgid "videos"
+msgstr "影片"
+
+#: searx/webapp.py:142
+msgid "it"
+msgstr "IT"
+
+#: searx/webapp.py:143
+msgid "news"
+msgstr "新聞"
+
+#: searx/webapp.py:144
+msgid "map"
+msgstr "地圖"
+
+#: searx/webapp.py:145
+msgid "science"
+msgstr "科學"
+
+#: searx/webapp.py:399 searx/webapp.py:658
+msgid "Invalid settings, please edit your preferences"
+msgstr "無效的設定,請編輯您的偏好設定"
+
+#: searx/webapp.py:415
+msgid "Invalid settings"
+msgstr "無效的設定"
+
+#: searx/webapp.py:449 searx/webapp.py:493
+msgid "search error"
+msgstr "搜尋錯誤"
+
+#: searx/webapp.py:530
+msgid "{minutes} minute(s) ago"
+msgstr "{minutes} 分鐘前"
+
+#: searx/webapp.py:532
+msgid "{hours} hour(s), {minutes} minute(s) ago"
+msgstr "{hours} 小時 {minutes} 分鐘前"
+
+#: searx/answerers/random/answerer.py:53
+msgid "Random value generator"
+msgstr "隨機數值產生器"
+
+#: searx/answerers/random/answerer.py:54
+msgid "Generate different random values"
+msgstr "生成不同的隨機數值"
+
+#: searx/answerers/statistics/answerer.py:53
+msgid "Statistics functions"
+msgstr "統計功能"
+
+#: searx/answerers/statistics/answerer.py:54
+msgid "Compute {functions} of the arguments"
+msgstr "計算 {functions} 參數"
+
+#: searx/engines/__init__.py:194 searx/engines/flycheck___init__.py:201
+msgid "Engine time (sec)"
+msgstr "引擎時間(秒)"
+
+#: searx/engines/__init__.py:198 searx/engines/flycheck___init__.py:205
+msgid "Page loads (sec)"
+msgstr "頁面載入(秒)"
+
+#: searx/engines/__init__.py:202 searx/engines/flycheck___init__.py:209
+#: searx/templates/oscar/results.html:95
+#: searx/templates/simple/results.html:20
+msgid "Number of results"
+msgstr "結果筆數"
+
+#: searx/engines/__init__.py:206 searx/engines/flycheck___init__.py:213
+msgid "Scores"
+msgstr "分數"
+
+#: searx/engines/__init__.py:210 searx/engines/flycheck___init__.py:217
+msgid "Scores per result"
+msgstr "每個結果的分數"
+
+#: searx/engines/__init__.py:214 searx/engines/flycheck___init__.py:221
+msgid "Errors"
+msgstr "錯誤"
+
+#: searx/engines/pdbe.py:87
+msgid "{title} (OBSOLETE)"
+msgstr "{title} (已過時)"
+
+#: searx/engines/pdbe.py:91
+msgid "This entry has been superseded by"
+msgstr "此條目已被取代"
+
+#: searx/engines/pubmed.py:74
+msgid "No abstract is available for this publication."
+msgstr "此出版品無可用摘要。"
+
+#: searx/plugins/https_rewrite.py:32
+msgid "Rewrite HTTP links to HTTPS if possible"
+msgstr "可以的話將 HTTP 連結重寫為 HTTPS"
+
+#: searx/plugins/infinite_scroll.py:3
+msgid "Infinite scroll"
+msgstr "無限捲動"
+
+#: searx/plugins/infinite_scroll.py:4
+msgid "Automatically load next page when scrolling to bottom of current page"
+msgstr "當捲動至目前頁面的底部時自動載入下一頁"
+
+#: searx/plugins/oa_doi_rewrite.py:9
+msgid "Open Access DOI rewrite"
+msgstr "開放存取 DOI 重寫"
+
+#: searx/plugins/oa_doi_rewrite.py:10
+msgid ""
+"Avoid paywalls by redirecting to open-access versions of publications when "
+"available"
+msgstr "盡可能重新導向至出版品的開放存取版本,來避免付費牆"
+
+#: searx/plugins/open_results_on_new_tab.py:18
+#: searx/templates/oscar/preferences.html:114
+#: searx/templates/simple/preferences.html:149
+msgid "Open result links on new browser tabs"
+msgstr "在新瀏覽器分頁中開啟結果連結"
+
+#: searx/plugins/open_results_on_new_tab.py:19
+msgid ""
+"Results are opened in the same window by default. This plugin overwrites the"
+" default behaviour to open links on new tabs/windows. (JavaScript required)"
+msgstr "結果預設會在同一個視窗開啟。這個外掛程式會覆寫預設行為,會在新分頁/視窗中開啟連結。(需要 JavaScript)"
+
+#: searx/plugins/search_on_category_select.py:18
+msgid "Search on category select"
+msgstr "類別選取搜尋"
+
+#: searx/plugins/search_on_category_select.py:19
+msgid ""
+"Perform search immediately if a category selected. Disable to select "
+"multiple categories. (JavaScript required)"
+msgstr "若分類被選取時立刻執行搜尋。停用以選取多個分類。(需要 JavaScript)"
+
+#: searx/plugins/self_info.py:20
+msgid ""
+"Displays your IP if the query is \"ip\" and your user agent if the query "
+"contains \"user agent\"."
+msgstr "若搜尋字串爲「ip」則顯示您的 IP,而若是「user agent」則顯示您的使用者代理字串。"
+
+#: searx/plugins/tracker_url_remover.py:26
+msgid "Tracker URL remover"
+msgstr "追蹤器 URL 移除器"
+
+#: searx/plugins/tracker_url_remover.py:27
+msgid "Remove trackers arguments from the returned URL"
+msgstr "從傳回的 URL 中移除追蹤器參數"
+
+#: searx/plugins/vim_hotkeys.py:3
+msgid "Vim-like hotkeys"
+msgstr "類 Vim 快捷鍵"
+
+#: searx/plugins/vim_hotkeys.py:4
+msgid ""
+"Navigate search results with Vim-like hotkeys (JavaScript required). Press "
+"\"h\" key on main or result page to get help."
+msgstr "以類 Vim 的快捷鍵導覽搜尋結果(需要 JavaScript)。在主頁面或結果頁面按「h」鍵以取得說明。"
+
+#: searx/templates/courgette/404.html:4 searx/templates/legacy/404.html:4
+#: searx/templates/oscar/404.html:4 searx/templates/pix-art/404.html:4
+#: searx/templates/simple/404.html:4
+msgid "Page not found"
+msgstr "找不到頁面"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+#, python-format
+msgid "Go to %(search_page)s."
+msgstr "到 %(search_page)s。"
+
+#: searx/templates/courgette/404.html:6 searx/templates/legacy/404.html:6
+#: searx/templates/oscar/404.html:6 searx/templates/pix-art/404.html:6
+#: searx/templates/simple/404.html:6
+msgid "search page"
+msgstr "搜尋頁面"
+
+#: searx/templates/courgette/index.html:9
+#: searx/templates/courgette/index.html:13
+#: searx/templates/courgette/results.html:5
+#: searx/templates/legacy/index.html:8 searx/templates/legacy/index.html:12
+#: searx/templates/oscar/navbar.html:7
+#: searx/templates/oscar/preferences.html:3
+#: searx/templates/pix-art/index.html:8
+msgid "preferences"
+msgstr "偏好設定"
+
+#: searx/templates/courgette/index.html:11
+#: searx/templates/legacy/index.html:10 searx/templates/oscar/about.html:2
+#: searx/templates/oscar/navbar.html:6 searx/templates/pix-art/index.html:7
+msgid "about"
+msgstr "關於"
+
+#: searx/templates/courgette/preferences.html:5
+#: searx/templates/legacy/preferences.html:5
+#: searx/templates/oscar/preferences.html:8
+#: searx/templates/pix-art/preferences.html:5
+#: searx/templates/simple/preferences.html:26
+msgid "Preferences"
+msgstr "偏好設定"
+
+#: searx/templates/courgette/preferences.html:9
+#: searx/templates/legacy/preferences.html:9
+#: searx/templates/oscar/preferences.html:33
+#: searx/templates/oscar/preferences.html:35
+#: searx/templates/simple/preferences.html:34
+msgid "Default categories"
+msgstr "預設分類"
+
+#: searx/templates/courgette/preferences.html:13
+#: searx/templates/legacy/preferences.html:14
+#: searx/templates/oscar/preferences.html:41
+#: searx/templates/pix-art/preferences.html:9
+#: searx/templates/simple/preferences.html:39
+#: searx/templates/simple/preferences.html:163
+msgid "Search language"
+msgstr "搜尋語言"
+
+#: searx/templates/courgette/preferences.html:16
+#: searx/templates/legacy/preferences.html:17
+#: searx/templates/oscar/languages.html:6
+#: searx/templates/pix-art/preferences.html:12
+#: searx/templates/simple/languages.html:2
+#: searx/templates/simple/preferences.html:42
+msgid "Default language"
+msgstr "預設語言"
+
+#: searx/templates/courgette/preferences.html:24
+#: searx/templates/legacy/preferences.html:25
+#: searx/templates/oscar/preferences.html:47
+#: searx/templates/pix-art/preferences.html:20
+#: searx/templates/simple/preferences.html:120
+msgid "Interface language"
+msgstr "介面語言"
+
+#: searx/templates/courgette/preferences.html:34
+#: searx/templates/legacy/preferences.html:35
+#: searx/templates/oscar/preferences.html:57
+#: searx/templates/simple/preferences.html:51
+msgid "Autocomplete"
+msgstr "自動完成"
+
+#: searx/templates/courgette/preferences.html:45
+#: searx/templates/legacy/preferences.html:46
+#: searx/templates/oscar/preferences.html:68
+#: searx/templates/simple/preferences.html:166
+msgid "Image proxy"
+msgstr "圖片代理伺服器"
+
+#: searx/templates/courgette/preferences.html:48
+#: searx/templates/legacy/preferences.html:49
+#: searx/templates/oscar/preferences.html:72
+#: searx/templates/simple/preferences.html:169
+msgid "Enabled"
+msgstr "已啟用"
+
+#: searx/templates/courgette/preferences.html:49
+#: searx/templates/legacy/preferences.html:50
+#: searx/templates/oscar/preferences.html:73
+#: searx/templates/simple/preferences.html:170
+msgid "Disabled"
+msgstr "已停用"
+
+#: searx/templates/courgette/preferences.html:54
+#: searx/templates/legacy/preferences.html:55
+#: searx/templates/oscar/preferences.html:77
+#: searx/templates/pix-art/preferences.html:30
+#: searx/templates/simple/preferences.html:156
+msgid "Method"
+msgstr "方法"
+
+#: searx/templates/courgette/preferences.html:63
+#: searx/templates/legacy/preferences.html:64
+#: searx/templates/oscar/preferences.html:86
+#: searx/templates/oscar/preferences.html:165
+#: searx/templates/oscar/preferences.html:173
+#: searx/templates/simple/preferences.html:63
+#: searx/templates/simple/preferences.html:90
+msgid "SafeSearch"
+msgstr "安全搜尋"
+
+#: searx/templates/courgette/preferences.html:66
+#: searx/templates/legacy/preferences.html:67
+#: searx/templates/oscar/preferences.html:90
+#: searx/templates/simple/preferences.html:66
+msgid "Strict"
+msgstr "嚴格"
+
+#: searx/templates/courgette/preferences.html:67
+#: searx/templates/legacy/preferences.html:68
+#: searx/templates/oscar/preferences.html:91
+#: searx/templates/simple/preferences.html:67
+msgid "Moderate"
+msgstr "適中"
+
+#: searx/templates/courgette/preferences.html:68
+#: searx/templates/legacy/preferences.html:69
+#: searx/templates/oscar/preferences.html:92
+#: searx/templates/simple/preferences.html:68
+msgid "None"
+msgstr "無"
+
+#: searx/templates/courgette/preferences.html:73
+#: searx/templates/legacy/preferences.html:74
+#: searx/templates/oscar/preferences.html:96
+#: searx/templates/pix-art/preferences.html:39
+#: searx/templates/simple/preferences.html:131
+msgid "Themes"
+msgstr "佈景主題"
+
+#: searx/templates/courgette/preferences.html:83
+msgid "Color"
+msgstr "顏色"
+
+#: searx/templates/courgette/preferences.html:86
+msgid "Blue (default)"
+msgstr "藍色(預設值)"
+
+#: searx/templates/courgette/preferences.html:87
+msgid "Violet"
+msgstr "紫色"
+
+#: searx/templates/courgette/preferences.html:88
+msgid "Green"
+msgstr "綠色"
+
+#: searx/templates/courgette/preferences.html:89
+msgid "Cyan"
+msgstr "青色"
+
+#: searx/templates/courgette/preferences.html:90
+msgid "Orange"
+msgstr "橘色"
+
+#: searx/templates/courgette/preferences.html:91
+msgid "Red"
+msgstr "紅色"
+
+#: searx/templates/courgette/preferences.html:96
+#: searx/templates/legacy/preferences.html:93
+#: searx/templates/pix-art/preferences.html:49
+#: searx/templates/simple/preferences.html:77
+msgid "Currently used search engines"
+msgstr "目前使用的搜尋引擎"
+
+#: searx/templates/courgette/preferences.html:100
+#: searx/templates/legacy/preferences.html:97
+#: searx/templates/oscar/preferences.html:162
+#: searx/templates/oscar/preferences.html:176
+#: searx/templates/pix-art/preferences.html:53
+#: searx/templates/simple/preferences.html:87
+msgid "Engine name"
+msgstr "引擎名稱"
+
+#: searx/templates/courgette/preferences.html:101
+#: searx/templates/legacy/preferences.html:98
+msgid "Category"
+msgstr "分類"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:113
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:110
+#: searx/templates/oscar/preferences.html:161
+#: searx/templates/oscar/preferences.html:177
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:64
+#: searx/templates/simple/preferences.html:86
+msgid "Allow"
+msgstr "允許"
+
+#: searx/templates/courgette/preferences.html:102
+#: searx/templates/courgette/preferences.html:114
+#: searx/templates/legacy/preferences.html:99
+#: searx/templates/legacy/preferences.html:111
+#: searx/templates/pix-art/preferences.html:54
+#: searx/templates/pix-art/preferences.html:65
+msgid "Block"
+msgstr "封鎖"
+
+#: searx/templates/courgette/preferences.html:122
+#: searx/templates/legacy/preferences.html:119
+#: searx/templates/oscar/preferences.html:297
+#: searx/templates/pix-art/preferences.html:73
+#: searx/templates/simple/preferences.html:180
+msgid ""
+"These settings are stored in your cookies, this allows us not to store this "
+"data about you."
+msgstr "這些設定只會儲存在您的 cookies 中,這樣我們無需也不會存儲關於您的資訊。"
+
+#: searx/templates/courgette/preferences.html:124
+#: searx/templates/legacy/preferences.html:121
+#: searx/templates/oscar/preferences.html:299
+#: searx/templates/pix-art/preferences.html:75
+#: searx/templates/simple/preferences.html:182
+msgid ""
+"These cookies serve your sole convenience, we don't use these cookies to "
+"track you."
+msgstr "這些 cookies 僅做為提供您方便之用,我們不會使用這些 cookies 來追蹤您。"
+
+#: searx/templates/courgette/preferences.html:127
+#: searx/templates/legacy/preferences.html:124
+#: searx/templates/oscar/preferences.html:305
+#: searx/templates/pix-art/preferences.html:78
+#: searx/templates/simple/preferences.html:185
+msgid "save"
+msgstr "儲存"
+
+#: searx/templates/courgette/preferences.html:128
+#: searx/templates/legacy/preferences.html:125
+#: searx/templates/oscar/preferences.html:307
+#: searx/templates/simple/preferences.html:186
+msgid "Reset defaults"
+msgstr "重設為預設值"
+
+#: searx/templates/courgette/preferences.html:129
+#: searx/templates/legacy/preferences.html:126
+#: searx/templates/oscar/preferences.html:306
+#: searx/templates/pix-art/preferences.html:79
+#: searx/templates/simple/preferences.html:187
+msgid "back"
+msgstr "返回"
+
+#: searx/templates/courgette/results.html:12
+#: searx/templates/legacy/results.html:13
+#: searx/templates/oscar/results.html:136
+#: searx/templates/simple/results.html:58
+msgid "Search URL"
+msgstr "搜尋網址"
+
+#: searx/templates/courgette/results.html:16
+#: searx/templates/legacy/results.html:17
+#: searx/templates/oscar/results.html:141
+#: searx/templates/simple/results.html:62
+msgid "Download results"
+msgstr "下載結果"
+
+#: searx/templates/courgette/results.html:34
+#: searx/templates/legacy/results.html:35
+#: searx/templates/simple/results.html:10
+msgid "Answers"
+msgstr "答案"
+
+#: searx/templates/courgette/results.html:42
+#: searx/templates/legacy/results.html:43
+#: searx/templates/oscar/results.html:116
+#: searx/templates/simple/results.html:42
+msgid "Suggestions"
+msgstr "建議"
+
+#: searx/templates/courgette/results.html:70
+#: searx/templates/legacy/results.html:81
+#: searx/templates/oscar/results.html:68 searx/templates/oscar/results.html:78
+#: searx/templates/simple/results.html:130
+msgid "previous page"
+msgstr "上一頁"
+
+#: searx/templates/courgette/results.html:81
+#: searx/templates/legacy/results.html:92
+#: searx/templates/oscar/results.html:62 searx/templates/oscar/results.html:84
+#: searx/templates/simple/results.html:145
+msgid "next page"
+msgstr "下一頁"
+
+#: searx/templates/courgette/search.html:3
+#: searx/templates/legacy/search.html:3 searx/templates/oscar/search.html:6
+#: searx/templates/oscar/search_full.html:9
+#: searx/templates/pix-art/search.html:3 searx/templates/simple/search.html:4
+msgid "Search for..."
+msgstr "搜尋……"
+
+#: searx/templates/courgette/stats.html:4 searx/templates/legacy/stats.html:4
+#: searx/templates/oscar/stats.html:5 searx/templates/pix-art/stats.html:4
+#: searx/templates/simple/stats.html:7
+msgid "Engine stats"
+msgstr "引擎統計"
+
+#: searx/templates/courgette/result_templates/images.html:4
+#: searx/templates/legacy/result_templates/images.html:4
+#: searx/templates/pix-art/result_templates/images.html:4
+msgid "original context"
+msgstr "原始內容"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Seeder"
+msgstr "播種者"
+
+#: searx/templates/courgette/result_templates/torrent.html:7
+#: searx/templates/legacy/result_templates/torrent.html:11
+#: searx/templates/oscar/result_templates/torrent.html:6
+#: searx/templates/simple/result_templates/torrent.html:9
+msgid "Leecher"
+msgstr "接收者"
+
+#: searx/templates/courgette/result_templates/torrent.html:9
+#: searx/templates/legacy/result_templates/torrent.html:9
+#: searx/templates/oscar/macros.html:23
+#: searx/templates/simple/result_templates/torrent.html:6
+msgid "magnet link"
+msgstr "磁力連結"
+
+#: searx/templates/courgette/result_templates/torrent.html:10
+#: searx/templates/legacy/result_templates/torrent.html:10
+#: searx/templates/oscar/macros.html:24
+#: searx/templates/simple/result_templates/torrent.html:7
+msgid "torrent file"
+msgstr "torrent 檔案"
+
+#: searx/templates/legacy/categories.html:8
+#: searx/templates/simple/categories.html:6
+msgid "Click on the magnifier to perform search"
+msgstr "點選放大鏡以執行搜尋"
+
+#: searx/templates/legacy/preferences.html:84
+#: searx/templates/oscar/preferences.html:113
+#: searx/templates/simple/preferences.html:142
+msgid "Results on new tabs"
+msgstr "在新分頁開啟結果"
+
+#: searx/templates/legacy/preferences.html:87
+#: searx/templates/oscar/preferences.html:117
+#: searx/templates/simple/preferences.html:145
+msgid "On"
+msgstr "開啟"
+
+#: searx/templates/legacy/preferences.html:88
+#: searx/templates/oscar/preferences.html:118
+#: searx/templates/simple/preferences.html:146
+msgid "Off"
+msgstr "關閉"
+
+#: searx/templates/legacy/result_templates/code.html:3
+#: searx/templates/legacy/result_templates/default.html:3
+#: searx/templates/legacy/result_templates/map.html:9
+#: searx/templates/oscar/macros.html:34 searx/templates/oscar/macros.html:48
+#: searx/templates/simple/macros.html:43
+msgid "cached"
+msgstr "已快取"
+
+#: searx/templates/oscar/advanced.html:4
+msgid "Advanced settings"
+msgstr "進階設定"
+
+#: searx/templates/oscar/base.html:62
+#: searx/templates/oscar/messages/first_time.html:4
+#: searx/templates/oscar/messages/save_settings_successfull.html:5
+#: searx/templates/oscar/messages/unknow_error.html:5
+msgid "Close"
+msgstr "關閉"
+
+#: searx/templates/oscar/base.html:64
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+#: searx/templates/simple/results.html:25
+msgid "Error!"
+msgstr "錯誤!"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "Powered by"
+msgstr "技術支援"
+
+#: searx/templates/oscar/base.html:90 searx/templates/simple/base.html:55
+msgid "a privacy-respecting, hackable metasearch engine"
+msgstr "一個尊重隱私,可再開發的集合式搜尋引擎"
+
+#: searx/templates/oscar/macros.html:36 searx/templates/oscar/macros.html:50
+#: searx/templates/simple/macros.html:43
+msgid "proxied"
+msgstr "已代理"
+
+#: searx/templates/oscar/macros.html:92
+msgid "supported"
+msgstr "支援"
+
+#: searx/templates/oscar/macros.html:96
+msgid "not supported"
+msgstr "不支援"
+
+#: searx/templates/oscar/preferences.html:13
+#: searx/templates/oscar/preferences.html:22
+#: searx/templates/simple/preferences.html:32
+msgid "General"
+msgstr "一般"
+
+#: searx/templates/oscar/preferences.html:14
+#: searx/templates/oscar/preferences.html:146
+#: searx/templates/simple/preferences.html:76
+msgid "Engines"
+msgstr "引擎"
+
+#: searx/templates/oscar/preferences.html:15
+#: searx/templates/oscar/preferences.html:219
+msgid "Plugins"
+msgstr "外掛程式"
+
+#: searx/templates/oscar/preferences.html:16
+#: searx/templates/oscar/preferences.html:245
+msgid "Answerers"
+msgstr "答案"
+
+#: searx/templates/oscar/preferences.html:17
+#: searx/templates/oscar/preferences.html:272
+msgid "Cookies"
+msgstr "Cookies"
+
+#: searx/templates/oscar/preferences.html:42
+#: searx/templates/simple/preferences.html:48
+msgid "What language do you prefer for search?"
+msgstr "您偏好用哪種語言搜尋?"
+
+#: searx/templates/oscar/preferences.html:48
+#: searx/templates/simple/preferences.html:128
+msgid "Change the language of the layout"
+msgstr "變更佈局的語言"
+
+#: searx/templates/oscar/preferences.html:58
+#: searx/templates/simple/preferences.html:60
+msgid "Find stuff as you type"
+msgstr "隨打即找"
+
+#: searx/templates/oscar/preferences.html:69
+#: searx/templates/simple/preferences.html:173
+msgid "Proxying image results through searx"
+msgstr "透過 searx 代理圖片結果"
+
+#: searx/templates/oscar/preferences.html:78
+msgid ""
+"Change how forms are submited, <a "
+"href=\"http://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods\""
+" rel=\"external\">learn more about request methods</a>"
+msgstr "變更遞交形式,<a href=\"https://zh.wikipedia.org/wiki/%E8%B6%85%E6%96%87%E6%9C%AC%E4%BC%A0%E8%BE%93%E5%8D%8F%E8%AE%AE#%E8%AF%B7%E6%B1%82%E6%96%B9%E6%B3%95\" rel=\"external\">看看更多關於請求方法的介紹</a>"
+
+#: searx/templates/oscar/preferences.html:87
+#: searx/templates/simple/preferences.html:71
+msgid "Filter content"
+msgstr "過濾內容"
+
+#: searx/templates/oscar/preferences.html:97
+#: searx/templates/simple/preferences.html:139
+msgid "Change searx layout"
+msgstr "變更 searx 佈局"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Choose style for this theme"
+msgstr "選擇這個主題的樣式"
+
+#: searx/templates/oscar/preferences.html:106
+#: searx/templates/oscar/preferences.html:111
+msgid "Style"
+msgstr "樣式"
+
+#: searx/templates/oscar/preferences.html:122
+msgid "Open Access DOI resolver"
+msgstr "開放存取 DOI 解析器"
+
+#: searx/templates/oscar/preferences.html:123
+msgid ""
+"Redirect to open-access versions of publications when available (plugin "
+"required)"
+msgstr "盡可能重新導向至出版品的開放存取版本(需要外掛程式)"
+
+#: searx/templates/oscar/preferences.html:163
+#: searx/templates/oscar/preferences.html:175
+#: searx/templates/simple/preferences.html:88
+msgid "Shortcut"
+msgstr "快捷鍵"
+
+#: searx/templates/oscar/preferences.html:164
+#: searx/templates/oscar/preferences.html:174
+msgid "Selected language"
+msgstr "已選取的語言"
+
+#: searx/templates/oscar/preferences.html:166
+#: searx/templates/oscar/preferences.html:172
+#: searx/templates/simple/preferences.html:91
+msgid "Time range"
+msgstr "時間範圍"
+
+#: searx/templates/oscar/preferences.html:167
+#: searx/templates/oscar/preferences.html:171
+#: searx/templates/simple/preferences.html:92
+msgid "Avg. time"
+msgstr "平均時間"
+
+#: searx/templates/oscar/preferences.html:168
+#: searx/templates/oscar/preferences.html:170
+#: searx/templates/simple/preferences.html:93
+msgid "Max time"
+msgstr "最大時間"
+
+#: searx/templates/oscar/preferences.html:248
+msgid "This is the list of searx's instant answering modules."
+msgstr "這是 searx 的即時回覆模組清單。"
+
+#: searx/templates/oscar/preferences.html:252
+msgid "Name"
+msgstr "名稱"
+
+#: searx/templates/oscar/preferences.html:253
+msgid "Keywords"
+msgstr "關鍵字"
+
+#: searx/templates/oscar/preferences.html:254
+msgid "Description"
+msgstr "描述"
+
+#: searx/templates/oscar/preferences.html:255
+msgid "Examples"
+msgstr "範例"
+
+#: searx/templates/oscar/preferences.html:275
+msgid ""
+"This is the list of cookies and their values searx is storing on your "
+"computer."
+msgstr "這是 searx 儲存在您電腦上的 cookies 與它們的值的清單。"
+
+#: searx/templates/oscar/preferences.html:276
+msgid "With that list, you can assess searx transparency."
+msgstr "有了這份清單,您就可以評估 searx 的透明度。"
+
+#: searx/templates/oscar/preferences.html:281
+msgid "Cookie name"
+msgstr "Cookie 名稱"
+
+#: searx/templates/oscar/preferences.html:282
+msgid "Value"
+msgstr "值"
+
+#: searx/templates/oscar/preferences.html:301
+msgid "Search URL of the currently saved preferences"
+msgstr "目前偏好設定的搜尋 URL"
+
+#: searx/templates/oscar/preferences.html:301
+msgid ""
+"Note: specifying custom settings in the search URL can reduce privacy by "
+"leaking data to the clicked result sites."
+msgstr "注意:在搜尋 URL 中指定自訂的設定可能會降低隱私,因為會洩漏資料給點選的結果頁面。"
+
+#: searx/templates/oscar/results.html:17
+msgid "Search results"
+msgstr "搜尋結果"
+
+#: searx/templates/oscar/results.html:21
+#: searx/templates/simple/results.html:84
+msgid "Try searching for:"
+msgstr "嘗試搜尋:"
+
+#: searx/templates/oscar/results.html:100
+#: searx/templates/simple/results.html:25
+msgid "Engines cannot retrieve results"
+msgstr "引擎無法擷取結果"
+
+#: searx/templates/oscar/results.html:131
+msgid "Links"
+msgstr "連結"
+
+#: searx/templates/oscar/search.html:8
+#: searx/templates/oscar/search_full.html:11
+#: searx/templates/simple/search.html:5
+msgid "Start search"
+msgstr "開始搜尋"
+
+#: searx/templates/oscar/stats.html:2
+msgid "stats"
+msgstr "統計"
+
+#: searx/templates/oscar/time-range.html:3
+#: searx/templates/simple/time-range.html:3
+msgid "Anytime"
+msgstr "任何時間"
+
+#: searx/templates/oscar/time-range.html:6
+#: searx/templates/simple/time-range.html:6
+msgid "Last day"
+msgstr "一天內"
+
+#: searx/templates/oscar/time-range.html:9
+#: searx/templates/simple/time-range.html:9
+msgid "Last week"
+msgstr "一週內"
+
+#: searx/templates/oscar/time-range.html:12
+#: searx/templates/simple/time-range.html:12
+msgid "Last month"
+msgstr "一個月內"
+
+#: searx/templates/oscar/time-range.html:15
+#: searx/templates/simple/time-range.html:15
+msgid "Last year"
+msgstr "一年內"
+
+#: searx/templates/oscar/messages/first_time.html:6
+#: searx/templates/oscar/messages/no_data_available.html:3
+msgid "Heads up!"
+msgstr "注意!"
+
+#: searx/templates/oscar/messages/first_time.html:7
+msgid "It look like you are using searx first time."
+msgstr "看起來您是第一次使用 searx。"
+
+#: searx/templates/oscar/messages/no_cookies.html:3
+msgid "Information!"
+msgstr "資訊!"
+
+#: searx/templates/oscar/messages/no_cookies.html:4
+msgid "currently, there are no cookies defined."
+msgstr "目前未有已定義的 cookies。"
+
+#: searx/templates/oscar/messages/no_data_available.html:4
+msgid "There is currently no data available. "
+msgstr "目前無可用資料。"
+
+#: searx/templates/oscar/messages/no_results.html:4
+#: searx/templates/simple/messages/no_results.html:4
+msgid "Engines cannot retrieve results."
+msgstr "引擎無法擷取結果。"
+
+#: searx/templates/oscar/messages/no_results.html:10
+#: searx/templates/simple/messages/no_results.html:10
+msgid "Please, try again later or find another searx instance."
+msgstr "請再試一次或是使用其他 searx 實體搜尋。"
+
+#: searx/templates/oscar/messages/no_results.html:14
+#: searx/templates/simple/messages/no_results.html:14
+msgid "Sorry!"
+msgstr "抱歉!"
+
+#: searx/templates/oscar/messages/no_results.html:15
+#: searx/templates/simple/messages/no_results.html:15
+msgid ""
+"we didn't find any results. Please use another query or search in more "
+"categories."
+msgstr "我們找不到任何結果。請使用其他搜尋方式或在更多分類中搜尋。"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:7
+msgid "Well done!"
+msgstr "很好!"
+
+#: searx/templates/oscar/messages/save_settings_successfull.html:8
+msgid "Settings saved successfully."
+msgstr "設定成功儲存。"
+
+#: searx/templates/oscar/messages/unknow_error.html:7
+msgid "Oh snap!"
+msgstr "糟糕!"
+
+#: searx/templates/oscar/messages/unknow_error.html:8
+msgid "Something went wrong."
+msgstr "發生了一點問題。"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "show media"
+msgstr "顯示媒體"
+
+#: searx/templates/oscar/result_templates/default.html:7
+#: searx/templates/simple/result_templates/default.html:6
+msgid "hide media"
+msgstr "隱藏媒體"
+
+#: searx/templates/oscar/result_templates/images.html:30
+msgid "Get image"
+msgstr "取得圖片"
+
+#: searx/templates/oscar/result_templates/images.html:33
+msgid "View source"
+msgstr "檢視來源"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "show map"
+msgstr "顯示地圖"
+
+#: searx/templates/oscar/result_templates/map.html:7
+#: searx/templates/simple/result_templates/map.html:7
+msgid "hide map"
+msgstr "隱藏地圖"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "show details"
+msgstr "顯示詳情"
+
+#: searx/templates/oscar/result_templates/map.html:11
+#: searx/templates/simple/result_templates/map.html:11
+msgid "hide details"
+msgstr "隱藏詳情"
+
+#: searx/templates/oscar/result_templates/torrent.html:7
+#: searx/templates/simple/result_templates/torrent.html:11
+msgid "Filesize"
+msgstr "檔案大小"
+
+#: searx/templates/oscar/result_templates/torrent.html:9
+#: searx/templates/simple/result_templates/torrent.html:12
+msgid "Bytes"
+msgstr "位元組"
+
+#: searx/templates/oscar/result_templates/torrent.html:10
+#: searx/templates/simple/result_templates/torrent.html:13
+msgid "kiB"
+msgstr "kiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:11
+#: searx/templates/simple/result_templates/torrent.html:14
+msgid "MiB"
+msgstr "MiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:12
+#: searx/templates/simple/result_templates/torrent.html:15
+msgid "GiB"
+msgstr "GiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:13
+#: searx/templates/simple/result_templates/torrent.html:16
+msgid "TiB"
+msgstr "TiB"
+
+#: searx/templates/oscar/result_templates/torrent.html:15
+#: searx/templates/simple/result_templates/torrent.html:20
+msgid "Number of Files"
+msgstr "檔案數量"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "show video"
+msgstr "顯示影片"
+
+#: searx/templates/oscar/result_templates/videos.html:7
+#: searx/templates/simple/result_templates/videos.html:6
+msgid "hide video"
+msgstr "隱藏影片"
+
+#: searx/templates/pix-art/results.html:28
+msgid "Load more..."
+msgstr "載入更多……"
+
+#: searx/templates/simple/base.html:31
+msgid "No item found"
+msgstr "找不到項目"
+
+#: searx/templates/simple/preferences.html:89
+msgid "Supports selected language"
+msgstr "支援選定的語言"
+
+#: searx/templates/simple/preferences.html:118
+msgid "User interface"
+msgstr "使用者介面"
+
+#: searx/templates/simple/preferences.html:154
+msgid "Privacy"
+msgstr "隱私"
diff --git a/searx/version.py b/searx/version.py
index 11dfb1eeff..4e149722e5 100644
--- a/searx/version.py
+++ b/searx/version.py
@@ -18,7 +18,7 @@
# version of searx
VERSION_MAJOR = 0
-VERSION_MINOR = 14
+VERSION_MINOR = 15
VERSION_BUILD = 0
VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
|
UTNkar__moore-554 | Fix cookies for Utnarm
Utnarm recently switched to utnarm.se instead of utnarm.utn.se. As a result, you can no longer sign in to utnarm.se. By default, moore uses utn.se as the cookie domain, and since utnarm.se is a different top-level domain, the cookies can't be used.
We need to dynamically add utnarm.se as a cookie domain. This Python package might be useful: https://github.com/ViktorStiskala/django-shared-session
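One simple option is to drop the hard-coded `SESSION_COOKIE_DOMAIN` so Django scopes the session cookie to whichever host served the request. For a more dynamic approach, a rough sketch could look like the following — note the middleware class and the `COOKIE_DOMAINS` tuple are hypothetical illustrations, not part of moore:

```python
# Hypothetical sketch, not part of moore: pick the session cookie
# domain per request so that both utn.se and utnarm.se (and their
# subdomains) receive a cookie they can actually use.
COOKIE_DOMAINS = ('utn.se', 'utnarm.se')

class DynamicSessionCookieDomainMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        host = request.get_host().split(':')[0]
        for domain in COOKIE_DOMAINS:
            if host == domain or host.endswith('.' + domain):
                # 'sessionid' is Django's default session cookie name.
                morsel = response.cookies.get('sessionid')
                if morsel is not None:
                    morsel['domain'] = '.' + domain
                break
        return response
```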
| [
{
"content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport raven\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://utn.se'\n\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Sentry Configuration - will be sent error messages\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat'\n '.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_DOMAIN = '.utn.se'\n\nSESSION_COOKIE_SECURE = True\n\nMELOS_URL = os.environ.get('MELOS_URL')\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\n\n# Google API\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\n\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\"RECAPTCHA_PUBLIC_KEY\", \"\")\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\"RECAPTCHA_PRIVATE_KEY\", \"\")\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n",
"path": "src/moore/settings/production.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport raven\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB_NAME', 'moore'),\n 'USER': os.environ.get('DJANGO_DB_USER', 'moore'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://utn.se'\n\nALLOWED_HOSTS = ['.utn.se', '.utnarm.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Sentry Configuration - will be sent error messages\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(os.path.dirname(BASE_DIR)),\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat'\n '.handlers.SentryHandler',\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_SECURE = True\n\nMELOS_URL = os.environ.get('MELOS_URL')\nMELOS_ORG_ID = os.environ.get('MELOS_ORG_ID')\nMELOS_ADMIN = os.environ.get('MELOS_ADMIN')\n\n# Google API\nGOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')\n\nRECAPTCHA_PUBLIC_KEY = os.environ.get(\"RECAPTCHA_PUBLIC_KEY\", \"\")\nRECAPTCHA_PRIVATE_KEY = os.environ.get(\"RECAPTCHA_PRIVATE_KEY\", \"\")\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n",
"path": "src/moore/settings/production.py"
}
] | diff --git a/src/moore/settings/production.py b/src/moore/settings/production.py
index 9fca8dec..4dd269e0 100644
--- a/src/moore/settings/production.py
+++ b/src/moore/settings/production.py
@@ -106,8 +106,6 @@
CSRF_COOKIE_SECURE = True
-SESSION_COOKIE_DOMAIN = '.utn.se'
-
SESSION_COOKIE_SECURE = True
MELOS_URL = os.environ.get('MELOS_URL')
|
chainer__chainer-1568 | Inconsistency between cupy.dstack and numpy.dstack
```
In [10]: import cupy, numpy
In [11]: a = cupy.arange(24).reshape(2, 3, 4)
In [12]: numpy.dstack((a.get(),))
Out[12]:
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
In [13]: cupy.dstack((a,))
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-13-aa573685da21> in <module>()
----> 1 cupy.dstack((a,))
/home/delta/dev/chainer/cupy/manipulation/join.py in dstack(tup)
101
102 """
--> 103 return concatenate(cupy.atleast_3d(*tup), 2)
104
105
/home/delta/dev/chainer/cupy/manipulation/join.py in concatenate(tup, axis)
59 ndim = a.ndim
60 shape = list(a.shape)
---> 61 axis = _get_positive_axis(a.ndim, axis)
62 continue
63
/home/delta/dev/chainer/cupy/manipulation/join.py in _get_positive_axis(ndim, axis)
167 a += ndim
168 if a < 0 or a >= ndim:
--> 169 raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))
170 return a
IndexError: axis 2 out of bounds [0, 2)
```
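The root cause seems to be that `cupy.atleast_3d(*tup)` returns a bare ndarray rather than a list when `tup` has a single element, so `concatenate` then iterates over that array's first axis and sees 2-D slices — hence the `axis 2 out of bounds [0, 2)` error. A small repro sketch:

```python
import cupy

a = cupy.arange(24).reshape(2, 3, 4)

# atleast_3d(*tup) with a 1-element tuple returns the array itself,
# so iterating over it yields 2-D slices along the first axis:
print([m.ndim for m in cupy.atleast_3d(a)])     # [2, 2]

# Wrapping each element individually keeps a list of 3-D arrays,
# mirroring numpy.dstack's behaviour:
print([cupy.atleast_3d(m).ndim for m in (a,)])  # [3]
```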
| [
{
"content": "import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate(cupy.atleast_3d(*tup), 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n",
"path": "cupy/manipulation/join.py"
}
] | [
{
"content": "import numpy\nimport six\n\nimport cupy\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int): The axis to join arrays along.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n ndim = None\n shape = None\n for a in tup:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be concatenated')\n if a.ndim == 0:\n raise TypeError('zero-dimensional arrays cannot be concatenated')\n if ndim is None:\n ndim = a.ndim\n shape = list(a.shape)\n axis = _get_positive_axis(a.ndim, axis)\n continue\n\n if a.ndim != ndim:\n raise ValueError(\n 'All arrays to concatenate must have the same ndim')\n if any(i != axis and shape[i] != a.shape[i]\n for i in six.moves.range(ndim)):\n raise ValueError(\n 'All arrays must have same shape except the axis to '\n 'concatenate')\n shape[axis] += a.shape[axis]\n\n if ndim is None:\n raise ValueError('Cannot concatenate from empty tuple')\n\n dtype = numpy.find_common_type([a.dtype for a in tup], [])\n ret = cupy.empty(shape, dtype=dtype)\n\n skip = (slice(None),) * axis\n i = 0\n for a in tup:\n aw = a.shape[axis]\n ret[skip + (slice(i, i + aw),)] = a\n i += aw\n\n return ret\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis)\n\n\ndef _get_positive_axis(ndim, axis):\n a = axis\n if a < 0:\n a += ndim\n if a < 0 or a >= ndim:\n raise IndexError('axis {} out of bounds [0, {})'.format(axis, ndim))\n return a\n",
"path": "cupy/manipulation/join.py"
}
] | diff --git a/cupy/manipulation/join.py b/cupy/manipulation/join.py
index 14165a8ff2f0..6e077d78221b 100644
--- a/cupy/manipulation/join.py
+++ b/cupy/manipulation/join.py
@@ -100,7 +100,7 @@ def dstack(tup):
.. seealso:: :func:`numpy.dstack`
"""
- return concatenate(cupy.atleast_3d(*tup), 2)
+ return concatenate([cupy.atleast_3d(m) for m in tup], 2)
def hstack(tup):
diff --git a/tests/cupy_tests/manipulation_tests/test_join.py b/tests/cupy_tests/manipulation_tests/test_join.py
index 949ea1406001..e37ab83b5405 100644
--- a/tests/cupy_tests/manipulation_tests/test_join.py
+++ b/tests/cupy_tests/manipulation_tests/test_join.py
@@ -73,6 +73,21 @@ def test_dstack(self, xp):
c = testing.shaped_arange((1, 3), xp)
return xp.dstack((a, b, c))
+ @testing.numpy_cupy_array_equal()
+ def test_dstack_single_element(self, xp):
+ a = testing.shaped_arange((1, 2, 3), xp)
+ return xp.dstack((a,))
+
+ @testing.numpy_cupy_array_equal()
+ def test_dstack_single_element_2(self, xp):
+ a = testing.shaped_arange((1, 2), xp)
+ return xp.dstack((a,))
+
+ @testing.numpy_cupy_array_equal()
+ def test_dstack_single_element_3(self, xp):
+ a = testing.shaped_arange((1,), xp)
+ return xp.dstack((a,))
+
@testing.numpy_cupy_array_equal()
def test_hstack_vectors(self, xp):
a = xp.arange(3)
|
talonhub__community-740 | auto_insert is documented to call auto_format
I understand if you want to override `auto_insert` too because you're adjusting the cursor position afterwards, but the general intention is for `auto_format` to do the text formatting.
I think the dictation `auto_insert()` implementation should probably first call `text = auto_format(text)` (which is the identity function by default), as per the docstring:
```
auto_insert(text: str)
Insert text at the current cursor position, automatically formatting it using actions.auto_format(text)
```
https://github.com/knausj85/knausj_talon/blob/master/code/dictation.py#L401
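A minimal sketch of the suggested change, so that dictation mode still honours `auto_format` before inserting:

```python
@dictation_ctx.action_class("main")
class main_action:
    def auto_insert(text):
        # Honour the documented contract: run auto_format first
        # (the identity function by default), then insert via the
        # dictation formatter.
        actions.user.dictation_insert(actions.auto_format(text))
```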
| [
{
"content": "# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\nfrom talon import Module, Context, ui, actions, clip, app, grammar\nfrom typing import Optional, Tuple, Literal, Callable\nimport re\n\nmod = Module()\n\nsetting_context_sensitive_dictation = mod.setting(\n \"context_sensitive_dictation\",\n type=bool,\n default=False,\n desc=\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\",\n)\n\nmod.list(\"prose_modifiers\", desc=\"Modifiers that can be used within prose\")\nmod.list(\"prose_snippets\", desc=\"Snippets that can be used within prose\")\nctx = Context()\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\nctx.lists[\"user.prose_modifiers\"] = {\n \"cap\": \"cap\",\n \"no cap\": \"no_cap\",\n \"no caps\": \"no_cap\", # \"no caps\" variant for Dragon\n \"no space\": \"no_space\",\n}\nctx.lists[\"user.prose_snippets\"] = {\n \"spacebar\": \" \",\n \"new line\": \"\\n\",\n \"new paragraph\": \"\\n\\n\",\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\n \"open quote\": \"“\",\n \"close quote\": \"”\",\n \"smiley\": \":-)\",\n \"winky\": \";-)\",\n \"frowny\": \":-(\",\n}\n\[email protected](rule=\"{user.prose_modifiers}\")\ndef prose_modifier(m) -> Callable:\n return getattr(DictationFormat, m.prose_modifiers)\n\[email protected](rule=\"({user.vocabulary} | <word>)\")\ndef word(m) -> str:\n \"\"\"A single word, including user-defined vocabulary.\"\"\"\n try:\n return m.vocabulary\n except AttributeError:\n return \" \".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\n\[email protected](rule=\"({user.vocabulary} | <phrase>)+\")\ndef text(m) -> str:\n \"\"\"A sequence of words, including user-defined vocabulary.\"\"\"\n return format_phrase(m)\n\[email protected](rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | <phrase> | <user.prose_modifier>)+\")\ndef prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized.\"\"\"\n # Straighten curly quotes that were introduced to obtain proper spacing.\n return apply_formatting(m).replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n\[email protected](rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | <phrase>)+\")\ndef raw_prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\"\"\"\n return apply_formatting(m)\n\n\f\n# ---------- FORMATTING ---------- #\ndef format_phrase(m):\n words = capture_to_words(m)\n result = \"\"\n for i, word in enumerate(words):\n if i > 0 and needs_space_between(words[i-1], word):\n result += \" \"\n result += word\n return result\n\ndef capture_to_words(m):\n words = []\n for item in m:\n words.extend(\n actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n return words\n\ndef apply_formatting(m):\n formatter = DictationFormat()\n formatter.state = None\n result = \"\"\n for item in m:\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\n if isinstance(item, Callable):\n item(formatter)\n else:\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n for word in words:\n result += 
formatter.format(word)\n return result\n\n# There must be a simpler way to do this, but I don't see it right now.\nno_space_after = re.compile(r\"\"\"\n (?:\n [\\s\\-_/#@([{‘“] # characters that never need space after them\n | (?<!\\w)[$£€¥₩₽₹] # currency symbols not preceded by a word character\n # quotes preceded by beginning of string, space, opening braces, dash, or other quotes\n | (?: ^ | [\\s([{\\-'\"] ) ['\"]\n )$\"\"\", re.VERBOSE)\nno_space_before = re.compile(r\"\"\"\n ^(?:\n [\\s\\-_.,!?/%)\\]}’”] # characters that never need space before them\n | [$£€¥₩₽₹](?!\\w) # currency symbols not followed by a word character\n | [;:](?!-\\)|-\\() # colon or semicolon except for smiley faces\n # quotes followed by end of string, space, closing braces, dash, other quotes, or some punctuation.\n | ['\"] (?: $ | [\\s)\\]}\\-'\".,!?;:/] )\n # apostrophe s\n | 's(?!\\w)\n )\"\"\", re.VERBOSE)\n\ndef omit_space_before(text: str) -> bool:\n return not text or no_space_before.search(text)\ndef omit_space_after(text: str) -> bool:\n return not text or no_space_after.search(text)\ndef needs_space_between(before: str, after: str) -> bool:\n return not (omit_space_after(before) or omit_space_before(after))\n\n# # TESTS, uncomment to enable\n# assert needs_space_between(\"a\", \"break\")\n# assert needs_space_between(\"break\", \"a\")\n# assert needs_space_between(\".\", \"a\")\n# assert needs_space_between(\"said\", \"'hello\")\n# assert needs_space_between(\"hello'\", \"said\")\n# assert needs_space_between(\"hello.\", \"'John\")\n# assert needs_space_between(\"John.'\", \"They\")\n# assert needs_space_between(\"paid\", \"$50\")\n# assert needs_space_between(\"50$\", \"payment\")\n# assert not needs_space_between(\"\", \"\")\n# assert not needs_space_between(\"a\", \"\")\n# assert not needs_space_between(\"a\", \" \")\n# assert not needs_space_between(\"\", \"a\")\n# assert not needs_space_between(\" \", \"a\")\n# assert not needs_space_between(\"a\", \",\")\n# assert not needs_space_between(\"'\", \"a\")\n# assert not needs_space_between(\"a\", \"'\")\n# assert not needs_space_between(\"and-\", \"or\")\n# assert not needs_space_between(\"mary\", \"-kate\")\n# assert not needs_space_between(\"$\", \"50\")\n# assert not needs_space_between(\"US\", \"$\")\n# assert not needs_space_between(\"(\", \")\")\n# assert not needs_space_between(\"(\", \"e.g.\")\n# assert not needs_space_between(\"example\", \")\")\n# assert not needs_space_between(\"example\", '\".')\n# assert not needs_space_between(\"example\", '.\"')\n# assert not needs_space_between(\"hello'\", \".\")\n# assert not needs_space_between(\"hello.\", \"'\")\n\ndef auto_capitalize(text, state = None):\n \"\"\"\n Auto-capitalizes text. 
`state` argument means:\n\n - None: Don't capitalize initial word.\n - \"sentence start\": Capitalize initial word.\n - \"after newline\": Don't capitalize initial word, but we're after a newline.\n Used for double-newline detection.\n\n Returns (capitalized text, updated state).\n \"\"\"\n output = \"\"\n # Imagine a metaphorical \"capitalization charge\" travelling through the\n # string left-to-right.\n charge = state == \"sentence start\"\n newline = state == \"after newline\"\n for c in text:\n # Sentence endings & double newlines create a charge.\n if c in \".!?\" or (newline and c == \"\\n\"):\n charge = True\n # Alphanumeric characters and commas/colons absorb charge & try to\n # capitalize (for numbers & punctuation this does nothing, which is what\n # we want).\n elif charge and (c.isalnum() or c in \",:\"):\n charge = False\n c = c.capitalize()\n # Otherwise the charge just passes through.\n output += c\n newline = c == \"\\n\"\n return output, (\"sentence start\" if charge else\n \"after newline\" if newline else None)\n\n\f\n# ---------- DICTATION AUTO FORMATTING ---------- #\nclass DictationFormat:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.reset_context()\n self.force_no_space = False\n self.force_capitalization = None # Can also be \"cap\" or \"no cap\".\n\n def reset_context(self):\n self.before = \"\"\n self.state = \"sentence start\"\n\n def update_context(self, before):\n if before is None: return\n self.reset_context()\n self.pass_through(before)\n\n def pass_through(self, text):\n _, self.state = auto_capitalize(text, self.state)\n self.before = text or self.before\n\n def format(self, text, auto_cap=True):\n if not self.force_no_space and needs_space_between(self.before, text):\n text = \" \" + text\n self.force_no_space = False\n if auto_cap:\n text, self.state = auto_capitalize(text, self.state)\n if self.force_capitalization == \"cap\":\n text = format_first_letter(text, lambda s: s.capitalize())\n self.force_capitalization = None\n if self.force_capitalization == \"no cap\":\n text = format_first_letter(text, lambda s: s.lower())\n self.force_capitalization = None\n self.before = text or self.before\n return text\n\n # These are used as callbacks by prose modifiers / dictation_mode commands.\n def cap(self): self.force_capitalization = \"cap\"\n def no_cap(self): self.force_capitalization = \"no cap\"\n def no_space(self):\n # This is typically used after repositioning the cursor, so it is helpful to\n # reset capitalization as well.\n #\n # FIXME: this sets state to \"sentence start\", capitalizing the next\n # word. 
probably undesirable, since most places are not the start of\n # sentences?\n self.reset_context()\n self.force_no_space = True\n\ndef format_first_letter(text, formatter):\n i = -1\n for i, c in enumerate(text):\n if c.isalpha():\n break\n if i >= 0 and i < len(text):\n text = text[:i] + formatter(text[i]) + text[i+1:]\n return text\n\ndictation_formatter = DictationFormat()\nui.register(\"app_deactivate\", lambda app: dictation_formatter.reset())\nui.register(\"win_focus\", lambda win: dictation_formatter.reset())\n\ndef reformat_last_utterance(formatter):\n text = actions.user.get_last_phrase()\n actions.user.clear_last_phrase()\n text = formatter(text)\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\[email protected]_class\nclass Actions:\n def dictation_format_reset():\n \"\"\"Resets the dictation formatter\"\"\"\n return dictation_formatter.reset()\n\n def dictation_format_cap():\n \"\"\"Sets the dictation formatter to capitalize\"\"\"\n dictation_formatter.cap()\n\n def dictation_format_no_cap():\n \"\"\"Sets the dictation formatter to not capitalize\"\"\"\n dictation_formatter.no_cap()\n\n def dictation_format_no_space():\n \"\"\"Sets the dictation formatter to not prepend a space\"\"\"\n dictation_formatter.no_space()\n\n def dictation_reformat_cap():\n \"\"\"Capitalizes the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\n\n def dictation_reformat_no_cap():\n \"\"\"Lowercases the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\n\n def dictation_reformat_no_space():\n \"\"\"Removes space before the last utterance\"\"\"\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\" \") else s)\n\n def dictation_insert_raw(text: str):\n \"\"\"Inserts text as-is, without invoking the dictation formatter.\"\"\"\n actions.user.dictation_insert(text, auto_cap=False)\n\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\n \"\"\"Inserts dictated text, formatted appropriately.\"\"\"\n add_space_after = False\n if setting_context_sensitive_dictation.get():\n # Peek left if we might need leading space or auto-capitalization.\n if (not omit_space_before(text)\n or text != auto_capitalize(text, \"sentence start\")[0]):\n dictation_formatter.update_context(\n actions.user.dictation_peek_left(clobber=True))\n # Peek right if we might need trailing space. NB. We peek right\n # BEFORE insertion to avoid breaking the undo-chain between the\n # inserted text and the trailing space.\n if not omit_space_after(text):\n char = actions.user.dictation_peek_right()\n add_space_after = char is not None and needs_space_between(text, char)\n text = dictation_formatter.format(text, auto_cap)\n # Straighten curly quotes that were introduced to obtain proper\n # spacing. The formatter context still has the original curly quotes\n # so that future dictation is properly formatted.\n text = text.replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n actions.user.add_phrase_to_history(text)\n # we insert the text all at once in case we have an implementation of\n # insert that is more efficient for long strings, eg. paste-to-insert\n actions.insert(text + (\" \" if add_space_after else \"\"))\n if add_space_after: actions.edit.left()\n\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\n \"\"\"\n Tries to get some text before the cursor, ideally a word or two, for the\n purpose of auto-spacing & -capitalization. 
Results are not guaranteed;\n dictation_peek_left() may return None to indicate no information. (Note\n that returning the empty string \"\" indicates there is nothing before\n cursor, ie. we are at the beginning of the document.)\n\n If there is currently a selection, dictation_peek_left() must leave it\n unchanged unless `clobber` is true, in which case it may clobber it.\n \"\"\"\n # Get rid of the selection if it exists.\n if clobber: actions.user.clobber_selection_if_exists()\n # Otherwise, if there's a selection, fail.\n elif \"\" != actions.edit.selected_text(): return None\n\n # In principle the previous word should suffice, but some applications\n # have a funny concept of what the previous word is (for example, they\n # may only take the \"`\" at the end of \"`foo`\"). To be double sure we\n # take two words left. I also tried taking a line up + a word left, but\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\n # Slack webapp (sometimes escapes the text box).\n actions.edit.extend_word_left()\n actions.edit.extend_word_left()\n text = actions.edit.selected_text()\n # if we're at the beginning of the document/text box, we may not have\n # selected any text, in which case we shouldn't move the cursor.\n if text:\n # Unfortunately, in web Slack, if our selection ends at newline,\n # this will go right over the newline. Argh.\n actions.edit.right()\n return text\n\n def clobber_selection_if_exists():\n \"\"\"Deletes the currently selected text if it exists; otherwise does nothing.\"\"\"\n actions.key(\"space backspace\")\n # This space-backspace trick is fast and reliable but has the\n # side-effect of cluttering the undo history. Other options:\n #\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\n # edit.cut() is supported AND will be a no-op if there's no\n # selection. Unfortunately, sometimes one or both of these is false,\n # eg. the notion webapp makes ctrl-x cut the current block by default\n # if nothing is selected.\n #\n # 2. Test whether a selection exists by asking whether\n # edit.selected_text() is empty; if it does, use edit.delete(). This\n # usually uses the clipboard, which can be quite slow. Also, not sure\n # how this would interact with switching edit.selected_text() to use\n # the selection clipboard on linux, which can be nonempty even if no\n # text is selected in the current application.\n #\n # Perhaps this ought to be configurable by a setting.\n\n def dictation_peek_right() -> Optional[str]:\n \"\"\"\n Tries to get a few characters after the cursor for auto-spacing.\n Results are not guaranteed; dictation_peek_right() may return None to\n indicate no information. (Note that returning the empty string \"\"\n indicates there is nothing after cursor, ie. we are at the end of the\n document.)\n \"\"\"\n # We grab two characters because I think that's what no_space_before\n # needs in the worst case. An example where the second character matters\n # is inserting before (1) \"' hello\" vs (2) \"'hello\". In case (1) we\n # don't want to add space, in case (2) we do.\n actions.edit.extend_right()\n actions.edit.extend_right()\n after = actions.edit.selected_text()\n if after: actions.edit.left()\n return after\n\n# Use the dictation formatter in dictation mode.\ndictation_ctx = Context()\ndictation_ctx.matches = r\"\"\"\nmode: dictation\n\"\"\"\n\n@dictation_ctx.action_class(\"main\")\nclass main_action:\n def auto_insert(text): actions.user.dictation_insert(text)\n",
"path": "code/dictation.py"
}
] | [
{
"content": "# Descended from https://github.com/dwiel/talon_community/blob/master/misc/dictation.py\nfrom talon import Module, Context, ui, actions, clip, app, grammar\nfrom typing import Optional, Tuple, Literal, Callable\nimport re\n\nmod = Module()\n\nsetting_context_sensitive_dictation = mod.setting(\n \"context_sensitive_dictation\",\n type=bool,\n default=False,\n desc=\"Look at surrounding text to improve auto-capitalization/spacing in dictation mode. By default, this works by selecting that text & copying it to the clipboard, so it may be slow or fail in some applications.\",\n)\n\nmod.list(\"prose_modifiers\", desc=\"Modifiers that can be used within prose\")\nmod.list(\"prose_snippets\", desc=\"Snippets that can be used within prose\")\nctx = Context()\n# Maps spoken forms to DictationFormat method names (see DictationFormat below).\nctx.lists[\"user.prose_modifiers\"] = {\n \"cap\": \"cap\",\n \"no cap\": \"no_cap\",\n \"no caps\": \"no_cap\", # \"no caps\" variant for Dragon\n \"no space\": \"no_space\",\n}\nctx.lists[\"user.prose_snippets\"] = {\n \"spacebar\": \" \",\n \"new line\": \"\\n\",\n \"new paragraph\": \"\\n\\n\",\n # Curly quotes are used to obtain proper spacing for left and right quotes, but will later be straightened.\n \"open quote\": \"“\",\n \"close quote\": \"”\",\n \"smiley\": \":-)\",\n \"winky\": \";-)\",\n \"frowny\": \":-(\",\n}\n\[email protected](rule=\"{user.prose_modifiers}\")\ndef prose_modifier(m) -> Callable:\n return getattr(DictationFormat, m.prose_modifiers)\n\[email protected](rule=\"({user.vocabulary} | <word>)\")\ndef word(m) -> str:\n \"\"\"A single word, including user-defined vocabulary.\"\"\"\n try:\n return m.vocabulary\n except AttributeError:\n return \" \".join(actions.user.replace_phrases(actions.dictate.parse_words(m.word)))\n\[email protected](rule=\"({user.vocabulary} | <phrase>)+\")\ndef text(m) -> str:\n \"\"\"A sequence of words, including user-defined vocabulary.\"\"\"\n return format_phrase(m)\n\[email protected](rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | <phrase> | <user.prose_modifier>)+\")\ndef prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized.\"\"\"\n # Straighten curly quotes that were introduced to obtain proper spacing.\n return apply_formatting(m).replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n\[email protected](rule=\"({user.vocabulary} | {user.punctuation} | {user.prose_snippets} | <phrase>)+\")\ndef raw_prose(m) -> str:\n \"\"\"Mixed words and punctuation, auto-spaced & capitalized, without quote straightening and commands (for use in dictation mode).\"\"\"\n return apply_formatting(m)\n\n\f\n# ---------- FORMATTING ---------- #\ndef format_phrase(m):\n words = capture_to_words(m)\n result = \"\"\n for i, word in enumerate(words):\n if i > 0 and needs_space_between(words[i-1], word):\n result += \" \"\n result += word\n return result\n\ndef capture_to_words(m):\n words = []\n for item in m:\n words.extend(\n actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n return words\n\ndef apply_formatting(m):\n formatter = DictationFormat()\n formatter.state = None\n result = \"\"\n for item in m:\n # prose modifiers (cap/no cap/no space) produce formatter callbacks.\n if isinstance(item, Callable):\n item(formatter)\n else:\n words = (actions.user.replace_phrases(actions.dictate.parse_words(item))\n if isinstance(item, grammar.vm.Phrase)\n else [item])\n for word in words:\n result += 
formatter.format(word)\n return result\n\n# There must be a simpler way to do this, but I don't see it right now.\nno_space_after = re.compile(r\"\"\"\n (?:\n [\\s\\-_/#@([{‘“] # characters that never need space after them\n | (?<!\\w)[$£€¥₩₽₹] # currency symbols not preceded by a word character\n # quotes preceded by beginning of string, space, opening braces, dash, or other quotes\n | (?: ^ | [\\s([{\\-'\"] ) ['\"]\n )$\"\"\", re.VERBOSE)\nno_space_before = re.compile(r\"\"\"\n ^(?:\n [\\s\\-_.,!?/%)\\]}’”] # characters that never need space before them\n | [$£€¥₩₽₹](?!\\w) # currency symbols not followed by a word character\n | [;:](?!-\\)|-\\() # colon or semicolon except for smiley faces\n # quotes followed by end of string, space, closing braces, dash, other quotes, or some punctuation.\n | ['\"] (?: $ | [\\s)\\]}\\-'\".,!?;:/] )\n # apostrophe s\n | 's(?!\\w)\n )\"\"\", re.VERBOSE)\n\ndef omit_space_before(text: str) -> bool:\n return not text or no_space_before.search(text)\ndef omit_space_after(text: str) -> bool:\n return not text or no_space_after.search(text)\ndef needs_space_between(before: str, after: str) -> bool:\n return not (omit_space_after(before) or omit_space_before(after))\n\n# # TESTS, uncomment to enable\n# assert needs_space_between(\"a\", \"break\")\n# assert needs_space_between(\"break\", \"a\")\n# assert needs_space_between(\".\", \"a\")\n# assert needs_space_between(\"said\", \"'hello\")\n# assert needs_space_between(\"hello'\", \"said\")\n# assert needs_space_between(\"hello.\", \"'John\")\n# assert needs_space_between(\"John.'\", \"They\")\n# assert needs_space_between(\"paid\", \"$50\")\n# assert needs_space_between(\"50$\", \"payment\")\n# assert not needs_space_between(\"\", \"\")\n# assert not needs_space_between(\"a\", \"\")\n# assert not needs_space_between(\"a\", \" \")\n# assert not needs_space_between(\"\", \"a\")\n# assert not needs_space_between(\" \", \"a\")\n# assert not needs_space_between(\"a\", \",\")\n# assert not needs_space_between(\"'\", \"a\")\n# assert not needs_space_between(\"a\", \"'\")\n# assert not needs_space_between(\"and-\", \"or\")\n# assert not needs_space_between(\"mary\", \"-kate\")\n# assert not needs_space_between(\"$\", \"50\")\n# assert not needs_space_between(\"US\", \"$\")\n# assert not needs_space_between(\"(\", \")\")\n# assert not needs_space_between(\"(\", \"e.g.\")\n# assert not needs_space_between(\"example\", \")\")\n# assert not needs_space_between(\"example\", '\".')\n# assert not needs_space_between(\"example\", '.\"')\n# assert not needs_space_between(\"hello'\", \".\")\n# assert not needs_space_between(\"hello.\", \"'\")\n\ndef auto_capitalize(text, state = None):\n \"\"\"\n Auto-capitalizes text. 
`state` argument means:\n\n - None: Don't capitalize initial word.\n - \"sentence start\": Capitalize initial word.\n - \"after newline\": Don't capitalize initial word, but we're after a newline.\n Used for double-newline detection.\n\n Returns (capitalized text, updated state).\n \"\"\"\n output = \"\"\n # Imagine a metaphorical \"capitalization charge\" travelling through the\n # string left-to-right.\n charge = state == \"sentence start\"\n newline = state == \"after newline\"\n for c in text:\n # Sentence endings & double newlines create a charge.\n if c in \".!?\" or (newline and c == \"\\n\"):\n charge = True\n # Alphanumeric characters and commas/colons absorb charge & try to\n # capitalize (for numbers & punctuation this does nothing, which is what\n # we want).\n elif charge and (c.isalnum() or c in \",:\"):\n charge = False\n c = c.capitalize()\n # Otherwise the charge just passes through.\n output += c\n newline = c == \"\\n\"\n return output, (\"sentence start\" if charge else\n \"after newline\" if newline else None)\n\n\f\n# ---------- DICTATION AUTO FORMATTING ---------- #\nclass DictationFormat:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.reset_context()\n self.force_no_space = False\n self.force_capitalization = None # Can also be \"cap\" or \"no cap\".\n\n def reset_context(self):\n self.before = \"\"\n self.state = \"sentence start\"\n\n def update_context(self, before):\n if before is None: return\n self.reset_context()\n self.pass_through(before)\n\n def pass_through(self, text):\n _, self.state = auto_capitalize(text, self.state)\n self.before = text or self.before\n\n def format(self, text, auto_cap=True):\n if not self.force_no_space and needs_space_between(self.before, text):\n text = \" \" + text\n self.force_no_space = False\n if auto_cap:\n text, self.state = auto_capitalize(text, self.state)\n if self.force_capitalization == \"cap\":\n text = format_first_letter(text, lambda s: s.capitalize())\n self.force_capitalization = None\n if self.force_capitalization == \"no cap\":\n text = format_first_letter(text, lambda s: s.lower())\n self.force_capitalization = None\n self.before = text or self.before\n return text\n\n # These are used as callbacks by prose modifiers / dictation_mode commands.\n def cap(self): self.force_capitalization = \"cap\"\n def no_cap(self): self.force_capitalization = \"no cap\"\n def no_space(self):\n # This is typically used after repositioning the cursor, so it is helpful to\n # reset capitalization as well.\n #\n # FIXME: this sets state to \"sentence start\", capitalizing the next\n # word. 
probably undesirable, since most places are not the start of\n # sentences?\n self.reset_context()\n self.force_no_space = True\n\ndef format_first_letter(text, formatter):\n i = -1\n for i, c in enumerate(text):\n if c.isalpha():\n break\n if i >= 0 and i < len(text):\n text = text[:i] + formatter(text[i]) + text[i+1:]\n return text\n\ndictation_formatter = DictationFormat()\nui.register(\"app_deactivate\", lambda app: dictation_formatter.reset())\nui.register(\"win_focus\", lambda win: dictation_formatter.reset())\n\ndef reformat_last_utterance(formatter):\n text = actions.user.get_last_phrase()\n actions.user.clear_last_phrase()\n text = formatter(text)\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\[email protected]_class\nclass Actions:\n def dictation_format_reset():\n \"\"\"Resets the dictation formatter\"\"\"\n return dictation_formatter.reset()\n\n def dictation_format_cap():\n \"\"\"Sets the dictation formatter to capitalize\"\"\"\n dictation_formatter.cap()\n\n def dictation_format_no_cap():\n \"\"\"Sets the dictation formatter to not capitalize\"\"\"\n dictation_formatter.no_cap()\n\n def dictation_format_no_space():\n \"\"\"Sets the dictation formatter to not prepend a space\"\"\"\n dictation_formatter.no_space()\n\n def dictation_reformat_cap():\n \"\"\"Capitalizes the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.capitalize()))\n\n def dictation_reformat_no_cap():\n \"\"\"Lowercases the last utterance\"\"\"\n reformat_last_utterance(lambda s: format_first_letter(s, lambda c: c.lower()))\n\n def dictation_reformat_no_space():\n \"\"\"Removes space before the last utterance\"\"\"\n reformat_last_utterance(lambda s: s[1:] if s.startswith(\" \") else s)\n\n def dictation_insert_raw(text: str):\n \"\"\"Inserts text as-is, without invoking the dictation formatter.\"\"\"\n actions.user.dictation_insert(text, auto_cap=False)\n\n def dictation_insert(text: str, auto_cap: bool=True) -> str:\n \"\"\"Inserts dictated text, formatted appropriately.\"\"\"\n add_space_after = False\n if setting_context_sensitive_dictation.get():\n # Peek left if we might need leading space or auto-capitalization.\n if (not omit_space_before(text)\n or text != auto_capitalize(text, \"sentence start\")[0]):\n dictation_formatter.update_context(\n actions.user.dictation_peek_left(clobber=True))\n # Peek right if we might need trailing space. NB. We peek right\n # BEFORE insertion to avoid breaking the undo-chain between the\n # inserted text and the trailing space.\n if not omit_space_after(text):\n char = actions.user.dictation_peek_right()\n add_space_after = char is not None and needs_space_between(text, char)\n text = dictation_formatter.format(text, auto_cap)\n # Straighten curly quotes that were introduced to obtain proper\n # spacing. The formatter context still has the original curly quotes\n # so that future dictation is properly formatted.\n text = text.replace(\"“\", \"\\\"\").replace(\"”\", \"\\\"\")\n actions.user.add_phrase_to_history(text)\n # we insert the text all at once in case we have an implementation of\n # insert that is more efficient for long strings, eg. paste-to-insert\n actions.insert(text + (\" \" if add_space_after else \"\"))\n if add_space_after: actions.edit.left()\n\n def dictation_peek_left(clobber: bool = False) -> Optional[str]:\n \"\"\"\n Tries to get some text before the cursor, ideally a word or two, for the\n purpose of auto-spacing & -capitalization. 
Results are not guaranteed;\n dictation_peek_left() may return None to indicate no information. (Note\n that returning the empty string \"\" indicates there is nothing before\n cursor, ie. we are at the beginning of the document.)\n\n If there is currently a selection, dictation_peek_left() must leave it\n unchanged unless `clobber` is true, in which case it may clobber it.\n \"\"\"\n # Get rid of the selection if it exists.\n if clobber: actions.user.clobber_selection_if_exists()\n # Otherwise, if there's a selection, fail.\n elif \"\" != actions.edit.selected_text(): return None\n\n # In principle the previous word should suffice, but some applications\n # have a funny concept of what the previous word is (for example, they\n # may only take the \"`\" at the end of \"`foo`\"). To be double sure we\n # take two words left. I also tried taking a line up + a word left, but\n # edit.extend_up() = key(shift-up) doesn't work consistently in the\n # Slack webapp (sometimes escapes the text box).\n actions.edit.extend_word_left()\n actions.edit.extend_word_left()\n text = actions.edit.selected_text()\n # if we're at the beginning of the document/text box, we may not have\n # selected any text, in which case we shouldn't move the cursor.\n if text:\n # Unfortunately, in web Slack, if our selection ends at newline,\n # this will go right over the newline. Argh.\n actions.edit.right()\n return text\n\n def clobber_selection_if_exists():\n \"\"\"Deletes the currently selected text if it exists; otherwise does nothing.\"\"\"\n actions.key(\"space backspace\")\n # This space-backspace trick is fast and reliable but has the\n # side-effect of cluttering the undo history. Other options:\n #\n # 1. Call edit.cut() inside a clip.revert() block. This assumes\n # edit.cut() is supported AND will be a no-op if there's no\n # selection. Unfortunately, sometimes one or both of these is false,\n # eg. the notion webapp makes ctrl-x cut the current block by default\n # if nothing is selected.\n #\n # 2. Test whether a selection exists by asking whether\n # edit.selected_text() is empty; if it does, use edit.delete(). This\n # usually uses the clipboard, which can be quite slow. Also, not sure\n # how this would interact with switching edit.selected_text() to use\n # the selection clipboard on linux, which can be nonempty even if no\n # text is selected in the current application.\n #\n # Perhaps this ought to be configurable by a setting.\n\n def dictation_peek_right() -> Optional[str]:\n \"\"\"\n Tries to get a few characters after the cursor for auto-spacing.\n Results are not guaranteed; dictation_peek_right() may return None to\n indicate no information. (Note that returning the empty string \"\"\n indicates there is nothing after cursor, ie. we are at the end of the\n document.)\n \"\"\"\n # We grab two characters because I think that's what no_space_before\n # needs in the worst case. An example where the second character matters\n # is inserting before (1) \"' hello\" vs (2) \"'hello\". In case (1) we\n # don't want to add space, in case (2) we do.\n actions.edit.extend_right()\n actions.edit.extend_right()\n after = actions.edit.selected_text()\n if after: actions.edit.left()\n return after\n\n# Use the dictation formatter in dictation mode.\ndictation_ctx = Context()\ndictation_ctx.matches = r\"\"\"\nmode: dictation\n\"\"\"\n\n@dictation_ctx.action_class(\"main\")\nclass main_action:\n def auto_insert(text):\n actions.user.dictation_insert(actions.auto_format(text))\n",
"path": "code/dictation.py"
}
] | diff --git a/code/dictation.py b/code/dictation.py
index c0229af1ac..d25fb9c506 100644
--- a/code/dictation.py
+++ b/code/dictation.py
@@ -398,4 +398,5 @@ def dictation_peek_right() -> Optional[str]:
@dictation_ctx.action_class("main")
class main_action:
- def auto_insert(text): actions.user.dictation_insert(text)
+ def auto_insert(text):
+ actions.user.dictation_insert(actions.auto_format(text))
|
mlflow__mlflow-4368 | Make mlflow compatible with protobuf 3.6.1
## What changes are proposed in this pull request?
Make mlflow compatible with protobuf 3.6.1:
For protobuf ==3.6.1, add `EnumTypeWrapper.__getattr__` so that enum values can be accessed as attributes.
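For context, a hedged sketch of the enum access pattern at stake (the import path below is assumed for illustration and is not copied from mlflow source). Per this issue, generated `EnumTypeWrapper` objects on protobuf 3.6.1 lack `__getattr__`, so looking enum values up as attributes fails; the eventual fix sidesteps this by requiring `protobuf>=3.7.0` instead:
```python
# Illustrative only: attribute-style enum value lookup needs
# EnumTypeWrapper.__getattr__, which (per this issue) protobuf 3.6.1 lacks.
from mlflow.protos.databricks_pb2 import ErrorCode  # import path assumed

print(ErrorCode.INTERNAL_ERROR)  # AttributeError on protobuf 3.6.1
```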
## How is this patch tested?
N/A
## Release Notes
### Is this a user-facing change?
- [x] No. You can skip the rest of this section.
- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.
(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)
### What component(s), interfaces, languages, and integrations does this PR affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
### How should the PR be classified in the release notes? Choose one:
- [ ] `rn/breaking-change` - The PR will be mentioned in the "Breaking Changes" section
- [x] `rn/none` - No description will be included. The PR will be mentioned only by the PR number in the "Small Bugfixes and Documentation Updates" section
- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes
- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes
- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes
| [
{
"content": "import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.6.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 1a98478b5e3d4..186d2f67fbf30 100644
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@ def package_files(directory):
"entrypoints",
"gitpython>=2.1.0",
"pyyaml",
- "protobuf>=3.6.0",
+ "protobuf>=3.7.0",
"pytz",
"requests>=2.17.3",
"packaging",
|
google__turbinia-616 | Add retries to tox
Tox fails when checking links within our docs if a link is temporarily down or unresponsive. Adding retries to the Sphinx config should take care of that.
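For reference, the Sphinx linkcheck builder exposes this as a plain config value; a minimal sketch of the change in `docs/conf.py` (the timeout line is an extra, assumed hardening option, not part of this fix):
```python
# docs/conf.py -- harden the tox/Sphinx linkcheck against flaky links
linkcheck_retries = 3   # retry a link this many times before declaring it broken
linkcheck_timeout = 30  # assumed addition: seconds before a single request times out
```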
| [
{
"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n# Adding retries to linkchecks before declaring a link broken\nlinkcheck_retries = 3\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 68dbdd555..a0c2084bb 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,8 @@
]
}
+# Adding retries to linkchecks before declaring a link broken
+linkcheck_retries = 3
# Output file base name for HTML help builder.
htmlhelp_basename = 'turbiniadoc'
|
getmoto__moto-1992 | Replace pyaml dependency with PyYAML
There is a dependency on pyaml in setup.py:
https://github.com/spulec/moto/blob/master/setup.py#L18
I think that this is intended to be PyYAML (which pyaml depends on), and I do not see any usages of pyaml itself in this codebase.
pyaml uses the WTFPL (https://github.com/mk-fg/pretty-yaml/blob/master/COPYING), which is not approved by the OSI (https://opensource.org/minutes20090304).
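Assuming moto's code only ever does `import yaml` (the module PyYAML provides; `pyaml` is merely a pretty-printing wrapper on top of it), swapping the declared dependency changes nothing at runtime. A minimal sanity check:
```python
# PyYAML alone satisfies the `yaml` module that moto's code imports.
import yaml

print(yaml.safe_load("Resources: {}"))  # {'Resources': {}}
```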
| [
{
"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16\",\n \"botocore>=1.12.13\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"PyYAML\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk!=0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.7',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index a1b8c5daecbe..0598d7a10aa7 100755
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"xmltodict",
"six>1.9",
"werkzeug",
- "pyaml",
+ "PyYAML",
"pytz",
"python-dateutil<3.0.0,>=2.1",
"python-jose<3.0.0",
|
Nitrate__Nitrate-527 | Remove dependency mock
Use `unittest.mock` instead.
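On Python 3.3+ the standard library ships the same API, so the migration is mechanical. A sketch of the import change (the patch target below is illustrative, not taken from a specific Nitrate test):
```python
# Before (third-party package, extra test dependency):
#   from mock import patch, Mock
# After (standard library, no extra dependency):
from unittest.mock import patch, Mock

@patch('smtplib.SMTP')  # illustrative target; any dotted path works as before
def test_smtp_not_used(smtp_mock):
    smtp_mock.assert_not_called()
```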
| [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 80906912..f0b2be6b 100644
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,6 @@ def get_long_description():
'coverage',
'factory_boy',
'flake8',
- 'mock',
'pytest',
'pytest-cov',
'pytest-django',
diff --git a/src/tests/core/test_commands.py b/src/tests/core/test_commands.py
index a375629c..b2a83775 100644
--- a/src/tests/core/test_commands.py
+++ b/src/tests/core/test_commands.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from django.contrib.auth.models import Group
-from mock import patch
+from unittest.mock import patch
from django import test
from django.core.management import call_command
diff --git a/src/tests/core/test_core.py b/src/tests/core/test_core.py
index 96388ed9..194a1ac9 100644
--- a/src/tests/core/test_core.py
+++ b/src/tests/core/test_core.py
@@ -2,8 +2,7 @@
import sys
import unittest
-from mock import patch
-from mock import Mock
+from unittest.mock import patch, Mock
from django import test
from django.conf import settings
diff --git a/src/tests/core/test_files.py b/src/tests/core/test_files.py
index 62e0aa46..0a449f4b 100644
--- a/src/tests/core/test_files.py
+++ b/src/tests/core/test_files.py
@@ -11,7 +11,7 @@
from django.test import RequestFactory
from django.urls import reverse
from django.conf import settings
-from mock import patch
+from unittest.mock import patch
from tcms.core.files import able_to_delete_attachment
from tcms.management.models import TestAttachment
diff --git a/src/tests/issuetracker/test_services.py b/src/tests/issuetracker/test_services.py
index 408000b4..f8f5716e 100644
--- a/src/tests/issuetracker/test_services.py
+++ b/src/tests/issuetracker/test_services.py
@@ -3,7 +3,7 @@
import unittest
from django import test
-from mock import Mock
+from unittest.mock import Mock
from tcms.issuetracker import services
from tcms.issuetracker.services import IssueTrackerService
diff --git a/src/tests/issuetracker/test_task.py b/src/tests/issuetracker/test_task.py
index e8f8b3f5..8e4fb306 100644
--- a/src/tests/issuetracker/test_task.py
+++ b/src/tests/issuetracker/test_task.py
@@ -2,7 +2,7 @@
import unittest
-from mock import patch, Mock
+from unittest.mock import patch, Mock
from tcms.issuetracker.task import bugzilla_external_track
diff --git a/src/tests/test_auth.py b/src/tests/test_auth.py
index 1806ae05..5810c48b 100644
--- a/src/tests/test_auth.py
+++ b/src/tests/test_auth.py
@@ -3,7 +3,7 @@
import datetime
from hashlib import sha1
-from mock import patch
+from unittest.mock import patch
from django.conf import settings
from django.contrib.auth.models import User
diff --git a/src/tests/testcases/test_models.py b/src/tests/testcases/test_models.py
index b1f25852..0814583b 100644
--- a/src/tests/testcases/test_models.py
+++ b/src/tests/testcases/test_models.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from datetime import timedelta
-from mock import patch
+from unittest.mock import patch
from django.contrib.auth.models import User
from django.core import mail
diff --git a/src/tests/testcases/test_views.py b/src/tests/testcases/test_views.py
index 7b58d714..fd8caedf 100644
--- a/src/tests/testcases/test_views.py
+++ b/src/tests/testcases/test_views.py
@@ -7,8 +7,7 @@
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from operator import attrgetter, itemgetter
-
-import mock
+from unittest.mock import patch
from django import test
from django.contrib.auth.models import User
@@ -62,13 +61,13 @@ def test_get_plan_id_from_post_request(self):
pk = plan_from_request_or_none(request, pk_enough=True)
self.assertEqual(1, pk)
- @mock.patch('tcms.testcases.views.get_object_or_404')
+ @patch('tcms.testcases.views.get_object_or_404')
def test_get_plan_object_from_get_request(self, get_object_or_404):
request = self.factory.get('/uri', data={'from_plan': 1})
plan = plan_from_request_or_none(request)
self.assertEqual(get_object_or_404.return_value, plan)
- @mock.patch('tcms.testcases.views.get_object_or_404')
+ @patch('tcms.testcases.views.get_object_or_404')
def test_get_plan_object_from_post_request(self, get_object_or_404):
request = self.factory.post('/uri', data={'from_plan': 1})
plan = plan_from_request_or_none(request)
@@ -84,14 +83,14 @@ def test_missing_plan_id_in_post_request(self):
plan = plan_from_request_or_none(request)
self.assertIsNone(plan)
- @mock.patch('tcms.testcases.views.get_object_or_404')
+ @patch('tcms.testcases.views.get_object_or_404')
def test_nonexisting_plan_id_from_get_request(self, get_object_or_404):
get_object_or_404.side_effect = Http404
request = self.factory.get('/uri', data={'from_plan': 1})
self.assertRaises(Http404, plan_from_request_or_none, request)
- @mock.patch('tcms.testcases.views.get_object_or_404')
+ @patch('tcms.testcases.views.get_object_or_404')
def test_nonexisting_plan_id_from_post_request(self, get_object_or_404):
get_object_or_404.side_effect = Http404
@@ -352,7 +351,7 @@ def test_fail_to_remove_if_component_not_exist(self):
data = json.loads(resp.content)
self.assertIn('Cannot remove component', data['response'])
- @mock.patch('tcms.testcases.models.TestCase.remove_component')
+ @patch('tcms.testcases.models.TestCase.remove_component')
def test_case_remove_component_fails(self, remove_component):
remove_component.side_effect = Exception
@@ -682,8 +681,8 @@ def test_remove_tags_from_cases(self):
TestCaseTag.objects.filter(
case=self.case_3.pk, tag=self.tag_python.pk).exists())
- @mock.patch('tcms.testcases.models.TestCase.remove_tag',
- side_effect=ValueError('value error'))
+ @patch('tcms.testcases.models.TestCase.remove_tag',
+ side_effect=ValueError('value error'))
def test_ensure_response_if_error_happens_when_remove_tag(self, remove_tag):
# This test does not care about what tags are removed from which cases
response = self.client.post(
@@ -1535,7 +1534,7 @@ def test_create_new_plan_case_rel_sort_key_for_link(self):
orig_plan=self.orphan_plan,
copy_case=False)
- @mock.patch('tcms.testplans.models.TestPlan.get_case_sortkey')
+ @patch('tcms.testplans.models.TestPlan.get_case_sortkey')
def test_clone_to_same_plan(self, get_case_sortkey):
# Make it easier to assert the new sort key.
get_case_sortkey.return_value = 100
@@ -1783,7 +1782,7 @@ def test_invalid_arguments(self):
data = json.loads(resp.content)
self.assertIn('Cannot add component', data['response'])
- @mock.patch('tcms.testcases.models.TestCase.add_component')
+ @patch('tcms.testcases.models.TestCase.add_component')
def test_failed_to_add_component(self, add_component):
add_component.side_effect = ValueError
@@ -1933,7 +1932,7 @@ def test_invalid_input_for_adding_an_issue(self):
['Invalid issue tracker that does not exist.'],
error_messages)
- @mock.patch('tcms.testcases.models.TestCase.add_issue')
+ @patch('tcms.testcases.models.TestCase.add_issue')
def test_fail_if_case_add_issue_fails(self, add_issue):
add_issue.side_effect = Exception('Something wrong')
@@ -2437,14 +2436,14 @@ def setUpTestData(cls):
cls.case_run_1.notes = 'Some notes'
cls.case_run_1.save()
- with mock.patch('django.utils.timezone.now') as mock_now:
+ with patch('django.utils.timezone.now') as mock_now:
cls.submit_date = datetime(2020, 1, 22, 19, 47, 30)
mock_now.return_value = cls.submit_date
add_comment(
cls.tester, 'testruns.testcaserun', [cls.case_run_1.pk],
'first comment')
- with mock.patch('django.utils.timezone.now') as mock_now:
+ with patch('django.utils.timezone.now') as mock_now:
cls.submit_date_later = cls.submit_date + timedelta(minutes=10)
mock_now.return_value = cls.submit_date_later
add_comment(
diff --git a/src/tests/testplans/test_forms.py b/src/tests/testplans/test_forms.py
index e16cb49d..996df0cd 100644
--- a/src/tests/testplans/test_forms.py
+++ b/src/tests/testplans/test_forms.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import unittest
-from mock import Mock
+from unittest.mock import Mock
from tcms.testplans.forms import UploadedHTMLFile
diff --git a/src/tests/testplans/test_models.py b/src/tests/testplans/test_models.py
index 7f9e604f..2daec6a7 100644
--- a/src/tests/testplans/test_models.py
+++ b/src/tests/testplans/test_models.py
@@ -2,7 +2,7 @@
from django.conf import settings
from django import test
-from mock import patch
+from unittest.mock import patch
from tcms.testplans.helpers import email
from tcms.testplans.models import _disconnect_signals, TestPlan
diff --git a/src/tests/testruns/test_views.py b/src/tests/testruns/test_views.py
index 48426be7..1a9d7329 100644
--- a/src/tests/testruns/test_views.py
+++ b/src/tests/testruns/test_views.py
@@ -7,7 +7,7 @@
from http import HTTPStatus
from operator import attrgetter
-from mock import patch
+from unittest.mock import patch
from xml.etree import ElementTree
from django.db.models import Max
diff --git a/src/tests/xmlrpc/test_testcase.py b/src/tests/xmlrpc/test_testcase.py
index 7dfe0bfe..e4451228 100644
--- a/src/tests/xmlrpc/test_testcase.py
+++ b/src/tests/xmlrpc/test_testcase.py
@@ -7,7 +7,7 @@
from django_comments.models import Comment
from django.db.models import Max, Min
from django import test
-from mock import patch, PropertyMock
+from unittest.mock import patch, PropertyMock
from tcms.core.utils import checksum
from tcms.issuetracker.models import Issue
diff --git a/src/tests/xmlrpc/test_user.py b/src/tests/xmlrpc/test_user.py
index 09d16574..9a7b3332 100644
--- a/src/tests/xmlrpc/test_user.py
+++ b/src/tests/xmlrpc/test_user.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-from mock import patch
+from unittest.mock import patch
from django.contrib.auth.models import User
from django.test import TestCase
|
flask-admin__flask-admin-1068 | Tests failing due to new Pillow 3.0.0
Tests are currently failing when a tiff file (https://github.com/flask-admin/flask-admin/blob/master/flask_admin/tests/data/copyleft.jpg) is uploaded:
```
======================================================================
FAIL: flask_admin.tests.test_form_upload.test_image_upload_field
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/home/travis/build/flask-admin/flask-admin/flask_admin/tests/test_form_upload.py", line 225, in test_image_upload_field
ok_(my_form.validate())
AssertionError: None
>> raise AssertionError(None)
```
The issue has to do with the new 3.0.0 version of Pillow. I started an issue for it here: https://github.com/python-pillow/Pillow/issues/1466
A temporary solution might be to lock Pillow to 2.9.0. I can submit a pull request if that's what we decide to do: https://github.com/flask-admin/flask-admin/compare/master...pawl:fix_pillow_build
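For anyone reproducing this locally, a rough sketch of the failing step, assuming the field's validation boils down to opening the upload with Pillow (simplified; not the exact flask-admin code path):
```python
# Expected to open cleanly on Pillow 2.9.0 but fail on 3.0.0, which is
# what makes the ImageUploadField validation in the test return False.
from PIL import Image

with open('flask_admin/tests/data/copyleft.jpg', 'rb') as fp:
    img = Image.open(fp)
    img.verify()
```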
| [
{
"content": "# Fix for older setuptools\nimport re\nimport os\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask>=0.7',\n 'wtforms'\n ],\n tests_require=[\n 'nose>=1.0',\n 'pillow',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n test_suite='nose.collector'\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Fix for older setuptools\nimport re\nimport os\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask>=0.7',\n 'wtforms'\n ],\n tests_require=[\n 'nose>=1.0',\n 'pillow==2.9.0',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n test_suite='nose.collector'\n)\n",
"path": "setup.py"
}
] | diff --git a/examples/forms/requirements.txt b/examples/forms/requirements.txt
index 1d4552c1d..3c503e3c4 100644
--- a/examples/forms/requirements.txt
+++ b/examples/forms/requirements.txt
@@ -1,4 +1,4 @@
Flask
Flask-Admin
Flask-SQLAlchemy
-pillow
\ No newline at end of file
+pillow==2.9.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 7d96bdb70..a2435b77b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -5,7 +5,7 @@ peewee
wtf-peewee
pymongo==2.8
flask-mongoengine
-pillow
+pillow==2.9.0
Babel<=1.3
flask-babelex
shapely==1.5.9
diff --git a/setup.py b/setup.py
index d46d6345e..b527c8dde 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,7 @@ def grep(attrname):
],
tests_require=[
'nose>=1.0',
- 'pillow',
+ 'pillow==2.9.0',
'mongoengine',
'pymongo',
'wtf-peewee',
|
google-research__t5x-475 | Seg Fault after saving checkpoints
Hi,
I am sometimes getting a seg fault after the model has saved a checkpoint. It does not happen at every checkpoint, and which checkpoints it crashes after seems random. I am not sure whether it is related to issue #340
For example, I am running `prompt_tuning/scripts/sst2-demo-xxl.sh`, and the output is below.
```
I0317 18:14:56.525280 140415323761728 utils.py:138] Saved Numpy Arrays for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/numpy_checkpoints/checkpoint_1104000
I0317 18:14:56.604028 140415323761728 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.614308 140622481194048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.624289 140590966570048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.653718 140272509271104 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
Fatal Python error: Segmentation fault
Thread 0x00007fdb1dc01700 (most recent call first):
File "/home/dptam/.local/lib/python3.8/site-packages/jax/interpreters/pxla.py", line 664 in _sda_value
File "/home/dptam/.local/lib/python3.8/site-packages/jax/_src/device_array.py", line 266 in __array__
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py", line 447 in <lambda>
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoint_importer.py", line 84 in get
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57 in run
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 80 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007f56809df700 (most recent call first):
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 78 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007f56c7aad700 (most recent call first):
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 78 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007fdde29efc40 (most recent call first):
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py", line 693 in _write_array
https://symbolize.stripped_domain/r/?trace=7fdde2e4203b,7fdde2e420bf,e,5ef27540f,e,26f7c5aff,f,b15f59df&map=
E0317 18:14:57.770066 341059 process_state.cc:1062] RAW: Signal 11 raised at PC: 0x7fdde2e4203b while already in FailureSignalHandler!
E0317 18:14:57.770096 341059 process_state.cc:1065] RAW: tid: 341059 raised new signal
@ 0xf 1440 (unknown)
@ 0x25ed159b0 (unknown) (unknown)
@ 0x10 76231216 (unknown)
@ 0x261cdc840 (unknown) (unknown)
@ 0x2dfdd4780 (unknown) (unknown)
@ 0x5f1f8a120 (unknown) (unknown)
https://symbolize.stripped_domain/r/?trace=7fdde301ffd3,7fddd98d57f9,7fdde2e420bf,7,e,25ed159af,f,261cdc83f,2dfdd477f,5f1f8a11f&map=7a511a57244151c993b16b37978e7ed7:7fddcaefd000-7fddd9c3fd50
E0317 18:14:57.818885 341068 coredump_hook.cc:365] RAW: Remote crash data gathering hook invoked.
E0317 18:14:57.818900 341068 coredump_hook.cc:411] RAW: Skipping coredump since rlimit was 0 at process start.
E0317 18:14:57.818919 341068 client.cc:221] RAW: Coroner client retries enabled (b/136286901), will retry for up to 30 sec.
E0317 18:14:57.818922 341068 coredump_hook.cc:473] RAW: Sending fingerprint to remote end.
E0317 18:14:57.818928 341068 coredump_socket.cc:124] RAW: Stat failed errno=2 on socket /var/google/services/logmanagerd/remote_coredump.socket
E0317 18:14:57.818933 341068 coredump_hook.cc:477] RAW: Cannot send fingerprint to Coroner: [NOT_FOUND] Missing crash reporting socket. Is the listener running?
E0317 18:14:57.818938 341068 coredump_hook.cc:550] RAW: Discarding core.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 337643 Segmentation fault (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 3 failed with return code 139. Continuing.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 334750 Aborted (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 1 failed with return code 134. Continuing.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 335504 Aborted (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 0 failed with return code 134. Continuing.
```
Thanks
| [
{
"content": "# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore >= 0.1.20',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index d9dbe74ea..02f838b6a 100644
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
'seqio-nightly',
't5',
'tensorflow',
- 'tensorstore',
+ 'tensorstore >= 0.1.20',
],
extras_require={
'gcp': [
|
pallets__werkzeug-2320 | ProxyMiddleware encodes characters allowed in path
We have a dollar sign in a URL, and the proxy middleware converts it to `%24`. Unfortunately, for the backend server a URL with `$` and a URL with `%24` are two different URLs. This could be fixed by extending the safe characters in the middleware, as is done in the [url_fix](https://github.com/pallets/werkzeug/blob/9efe8c00dcb2b6fc086961ba304729db01912652/src/werkzeug/urls.py#L691) function, but it seems to me that the right way is to make the middleware use `REQUEST_URI`/`RAW_URI` when possible. That would also fix the middleware for cases like the one described in https://github.com/pallets/werkzeug/issues/766. A minimal reproduction of the quoting behavior is sketched below.
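For illustration, a minimal sketch of the quoting behavior using werkzeug's public `url_quote` helper (whose default safe set is `"/:"`, as shown in `urls.py` below); the example path is made up:

```python
from werkzeug.urls import url_quote

# With the default safe set ("/:"), "$" is not a safe character,
# so it is percent-encoded and the backend sees a different URL:
print(url_quote("/plans/$premium"))             # -> /plans/%24premium

# Extending the safe set (the url_fix-style workaround) keeps it intact:
print(url_quote("/plans/$premium", safe="/$"))  # -> /plans/$premium
```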
I can send a PR, but since it will take some effort to make the `remove_prefix` option work correctly and to keep the public `proxy_to` method backward compatible, I'd like to discuss this solution first. If it is acceptable, I will proceed. A rough sketch of the idea follows.
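For discussion only, a rough sketch of the `REQUEST_URI`/`RAW_URI` idea, assuming the environ keys some servers expose (uWSGI sets `REQUEST_URI`, gunicorn sets `RAW_URI`); the helper name and fallback logic here are hypothetical, not the current middleware code:

```python
from werkzeug.urls import url_quote


def _get_raw_target(environ):
    """Return the request target to forward to the backend (sketch)."""
    # Prefer the unparsed request target when the WSGI server exposes it,
    # so the client's percent-encoding is forwarded byte-for-byte.
    raw = environ.get("REQUEST_URI") or environ.get("RAW_URI")
    if raw is not None:
        return raw
    # Fallback: re-quote PATH_INFO/QUERY_STRING, keeping RFC 3986
    # sub-delims such as "$" unescaped.
    path = url_quote(environ.get("PATH_INFO", ""), safe="/:@!$&'()*+,;=%")
    query = environ.get("QUERY_STRING", "")
    return f"{path}?{query}" if query else path
```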
| [
{
"content": "\"\"\"Functions for working with URLs.\n\nContains implementations of functions from :mod:`urllib.parse` that\nhandle bytes and strings.\n\"\"\"\nimport codecs\nimport os\nimport re\nimport typing as t\n\nfrom ._internal import _check_str_tuple\nfrom ._internal import _decode_idna\nfrom ._internal import _encode_idna\nfrom ._internal import _make_encode_wrapper\nfrom ._internal import _to_str\n\nif t.TYPE_CHECKING:\n from . import datastructures as ds\n\n# A regular expression for what a valid schema looks like\n_scheme_re = re.compile(r\"^[a-zA-Z0-9+-.]+$\")\n\n# Characters that are safe in any part of an URL.\n_always_safe = frozenset(\n bytearray(\n b\"abcdefghijklmnopqrstuvwxyz\"\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n b\"0123456789\"\n b\"-._~\"\n )\n)\n\n_hexdigits = \"0123456789ABCDEFabcdef\"\n_hextobyte = {\n f\"{a}{b}\".encode(\"ascii\"): int(f\"{a}{b}\", 16)\n for a in _hexdigits\n for b in _hexdigits\n}\n_bytetohex = [f\"%{char:02X}\".encode(\"ascii\") for char in range(256)]\n\n\nclass _URLTuple(t.NamedTuple):\n scheme: str\n netloc: str\n path: str\n query: str\n fragment: str\n\n\nclass BaseURL(_URLTuple):\n \"\"\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\"\"\"\n\n __slots__ = ()\n _at: str\n _colon: str\n _lbracket: str\n _rbracket: str\n\n def __str__(self) -> str:\n return self.to_url()\n\n def replace(self, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Return an URL with the same values, except for those parameters\n given new values by whichever keyword arguments are specified.\"\"\"\n return self._replace(**kwargs)\n\n @property\n def host(self) -> t.Optional[str]:\n \"\"\"The host part of the URL if available, otherwise `None`. The\n host is either the hostname or the IP address mentioned in the\n URL. It will not contain the port.\n \"\"\"\n return self._split_host()[0]\n\n @property\n def ascii_host(self) -> t.Optional[str]:\n \"\"\"Works exactly like :attr:`host` but will return a result that\n is restricted to ASCII. If it finds a netloc that is not ASCII\n it will attempt to idna decode it. This is useful for socket\n operations when the URL might include internationalized characters.\n \"\"\"\n rv = self.host\n if rv is not None and isinstance(rv, str):\n try:\n rv = _encode_idna(rv) # type: ignore\n except UnicodeError:\n rv = rv.encode(\"ascii\", \"ignore\") # type: ignore\n return _to_str(rv, \"ascii\", \"ignore\")\n\n @property\n def port(self) -> t.Optional[int]:\n \"\"\"The port in the URL as an integer if it was present, `None`\n otherwise. 
This does not fill in default ports.\n \"\"\"\n try:\n rv = int(_to_str(self._split_host()[1]))\n if 0 <= rv <= 65535:\n return rv\n except (ValueError, TypeError):\n pass\n return None\n\n @property\n def auth(self) -> t.Optional[str]:\n \"\"\"The authentication part in the URL if available, `None`\n otherwise.\n \"\"\"\n return self._split_netloc()[0]\n\n @property\n def username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[0]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n Unlike :attr:`username` this one is not being decoded.\n \"\"\"\n return self._split_auth()[0]\n\n @property\n def password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[1]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n Unlike :attr:`password` this one is not being decoded.\n \"\"\"\n return self._split_auth()[1]\n\n def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \"ds.MultiDict[str, str]\":\n \"\"\"Decodes the query part of the URL. Ths is a shortcut for\n calling :func:`url_decode` on the query argument. The arguments and\n keyword arguments are forwarded to :func:`url_decode` unchanged.\n \"\"\"\n return url_decode(self.query, *args, **kwargs)\n\n def join(self, *args: t.Any, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Joins this URL with another one. This is just a convenience\n function for calling into :meth:`url_join` and then parsing the\n return value again.\n \"\"\"\n return url_parse(url_join(self, *args, **kwargs))\n\n def to_url(self) -> str:\n \"\"\"Returns a URL string or bytes depending on the type of the\n information stored. This is just a convenience function\n for calling :meth:`url_unparse` for this URL.\n \"\"\"\n return url_unparse(self)\n\n def encode_netloc(self) -> str:\n \"\"\"Encodes the netloc part to an ASCII safe URL as bytes.\"\"\"\n rv = self.ascii_host or \"\"\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n url_quote(self.raw_username or \"\", \"utf-8\", \"strict\", \"/:%\"),\n url_quote(self.raw_password or \"\", \"utf-8\", \"strict\", \"/:%\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def decode_netloc(self) -> str:\n \"\"\"Decodes the netloc part into a string.\"\"\"\n rv = _decode_idna(self.host or \"\")\n\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n _url_unquote_legacy(self.raw_username or \"\", \"/:%@\"),\n _url_unquote_legacy(self.raw_password or \"\", \"/:%@\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def to_uri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`BytesURL` tuple that holds a URI. 
This will\n encode all the information in the URL properly to ASCII using the\n rules a web browser would follow.\n\n It's usually more interesting to directly call :meth:`iri_to_uri` which\n will return a string.\n \"\"\"\n return url_parse(iri_to_uri(self))\n\n def to_iri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`URL` tuple that holds a IRI. This will try\n to decode as much information as possible in the URL without\n losing information similar to how a web browser does it for the\n URL bar.\n\n It's usually more interesting to directly call :meth:`uri_to_iri` which\n will return a string.\n \"\"\"\n return url_parse(uri_to_iri(self))\n\n def get_file_location(\n self, pathformat: t.Optional[str] = None\n ) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n \"\"\"Returns a tuple with the location of the file in the form\n ``(server, location)``. If the netloc is empty in the URL or\n points to localhost, it's represented as ``None``.\n\n The `pathformat` by default is autodetection but needs to be set\n when working with URLs of a specific system. The supported values\n are ``'windows'`` when working with Windows or DOS paths and\n ``'posix'`` when working with posix paths.\n\n If the URL does not point to a local file, the server and location\n are both represented as ``None``.\n\n :param pathformat: The expected format of the path component.\n Currently ``'windows'`` and ``'posix'`` are\n supported. Defaults to ``None`` which is\n autodetect.\n \"\"\"\n if self.scheme != \"file\":\n return None, None\n\n path = url_unquote(self.path)\n host = self.netloc or None\n\n if pathformat is None:\n if os.name == \"nt\":\n pathformat = \"windows\"\n else:\n pathformat = \"posix\"\n\n if pathformat == \"windows\":\n if path[:1] == \"/\" and path[1:2].isalpha() and path[2:3] in \"|:\":\n path = f\"{path[1:2]}:{path[3:]}\"\n windows_share = path[:3] in (\"\\\\\" * 3, \"/\" * 3)\n import ntpath\n\n path = ntpath.normpath(path)\n # Windows shared drives are represented as ``\\\\host\\\\directory``.\n # That results in a URL like ``file://///host/directory``, and a\n # path like ``///host/directory``. 
We need to special-case this\n # because the path contains the hostname.\n if windows_share and host is None:\n parts = path.lstrip(\"\\\\\").split(\"\\\\\", 1)\n if len(parts) == 2:\n host, path = parts\n else:\n host = parts[0]\n path = \"\"\n elif pathformat == \"posix\":\n import posixpath\n\n path = posixpath.normpath(path)\n else:\n raise TypeError(f\"Invalid path format {pathformat!r}\")\n\n if host in (\"127.0.0.1\", \"::1\", \"localhost\"):\n host = None\n\n return host, path\n\n def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:\n if self._at in self.netloc:\n auth, _, netloc = self.netloc.partition(self._at)\n return auth, netloc\n return None, self.netloc\n\n def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n auth = self._split_netloc()[0]\n if not auth:\n return None, None\n if self._colon not in auth:\n return auth, None\n\n username, _, password = auth.partition(self._colon)\n return username, password\n\n def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n rv = self._split_netloc()[1]\n if not rv:\n return None, None\n\n if not rv.startswith(self._lbracket):\n if self._colon in rv:\n host, _, port = rv.partition(self._colon)\n return host, port\n return rv, None\n\n idx = rv.find(self._rbracket)\n if idx < 0:\n return rv, None\n\n host = rv[1:idx]\n rest = rv[idx + 1 :]\n if rest.startswith(self._colon):\n return host, rest[1:]\n return host, None\n\n\nclass URL(BaseURL):\n \"\"\"Represents a parsed URL. This behaves like a regular tuple but\n also has some extra attributes that give further insight into the\n URL.\n \"\"\"\n\n __slots__ = ()\n _at = \"@\"\n _colon = \":\"\n _lbracket = \"[\"\n _rbracket = \"]\"\n\n def encode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"BytesURL\":\n \"\"\"Encodes the URL to a tuple made out of bytes. The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return BytesURL(\n self.scheme.encode(\"ascii\"), # type: ignore\n self.encode_netloc(),\n self.path.encode(charset, errors), # type: ignore\n self.query.encode(charset, errors), # type: ignore\n self.fragment.encode(charset, errors), # type: ignore\n )\n\n\nclass BytesURL(BaseURL):\n \"\"\"Represents a parsed URL in bytes.\"\"\"\n\n __slots__ = ()\n _at = b\"@\" # type: ignore\n _colon = b\":\" # type: ignore\n _lbracket = b\"[\" # type: ignore\n _rbracket = b\"]\" # type: ignore\n\n def __str__(self) -> str:\n return self.to_url().decode(\"utf-8\", \"replace\") # type: ignore\n\n def encode_netloc(self) -> bytes: # type: ignore\n \"\"\"Returns the netloc unchanged as bytes.\"\"\"\n return self.netloc # type: ignore\n\n def decode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"URL\":\n \"\"\"Decodes the URL to a tuple made out of strings. 
The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return URL(\n self.scheme.decode(\"ascii\"), # type: ignore\n self.decode_netloc(),\n self.path.decode(charset, errors), # type: ignore\n self.query.decode(charset, errors), # type: ignore\n self.fragment.decode(charset, errors), # type: ignore\n )\n\n\n_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}\n\n\ndef _unquote_to_bytes(\n string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = \"\"\n) -> bytes:\n if isinstance(string, str):\n string = string.encode(\"utf-8\")\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(\"utf-8\")\n\n unsafe = frozenset(bytearray(unsafe))\n groups = iter(string.split(b\"%\"))\n result = bytearray(next(groups, b\"\"))\n\n try:\n hex_to_byte = _unquote_maps[unsafe]\n except KeyError:\n hex_to_byte = _unquote_maps[unsafe] = {\n h: b for h, b in _hextobyte.items() if b not in unsafe\n }\n\n for group in groups:\n code = group[:2]\n\n if code in hex_to_byte:\n result.append(hex_to_byte[code])\n result.extend(group[2:])\n else:\n result.append(37) # %\n result.extend(group)\n\n return bytes(result)\n\n\ndef _url_encode_impl(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str,\n sort: bool,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],\n) -> t.Iterator[str]:\n from .datastructures import iter_multi_items\n\n iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)\n\n if sort:\n iterable = sorted(iterable, key=key)\n\n for key_str, value_str in iterable:\n if value_str is None:\n continue\n\n if not isinstance(key_str, bytes):\n key_bytes = str(key_str).encode(charset)\n else:\n key_bytes = key_str\n\n if not isinstance(value_str, bytes):\n value_bytes = str(value_str).encode(charset)\n else:\n value_bytes = value_str\n\n yield f\"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}\"\n\n\ndef _url_unquote_legacy(value: str, unsafe: str = \"\") -> str:\n try:\n return url_unquote(value, charset=\"utf-8\", errors=\"strict\", unsafe=unsafe)\n except UnicodeError:\n return url_unquote(value, charset=\"latin1\", unsafe=unsafe)\n\n\ndef url_parse(\n url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True\n) -> BaseURL:\n \"\"\"Parses a URL from a string into a :class:`URL` tuple. If the URL\n is lacking a scheme it can be provided as second argument. Otherwise,\n it is ignored. 
Optionally fragments can be stripped from the URL\n by setting `allow_fragments` to `False`.\n\n The inverse of this function is :func:`url_unparse`.\n\n :param url: the URL to parse.\n :param scheme: the default schema to use if the URL is schemaless.\n :param allow_fragments: if set to `False` a fragment will be removed\n from the URL.\n \"\"\"\n s = _make_encode_wrapper(url)\n is_text_based = isinstance(url, str)\n\n if scheme is None:\n scheme = s(\"\")\n netloc = query = fragment = s(\"\")\n i = url.find(s(\":\"))\n if i > 0 and _scheme_re.match(_to_str(url[:i], errors=\"replace\")):\n # make sure \"iri\" is not actually a port number (in which case\n # \"scheme\" is really part of the path)\n rest = url[i + 1 :]\n if not rest or any(c not in s(\"0123456789\") for c in rest):\n # not a port number\n scheme, url = url[:i].lower(), rest\n\n if url[:2] == s(\"//\"):\n delim = len(url)\n for c in s(\"/?#\"):\n wdelim = url.find(c, 2)\n if wdelim >= 0:\n delim = min(delim, wdelim)\n netloc, url = url[2:delim], url[delim:]\n if (s(\"[\") in netloc and s(\"]\") not in netloc) or (\n s(\"]\") in netloc and s(\"[\") not in netloc\n ):\n raise ValueError(\"Invalid IPv6 URL\")\n\n if allow_fragments and s(\"#\") in url:\n url, fragment = url.split(s(\"#\"), 1)\n if s(\"?\") in url:\n url, query = url.split(s(\"?\"), 1)\n\n result_type = URL if is_text_based else BytesURL\n return result_type(scheme, netloc, url, query, fragment)\n\n\ndef _make_fast_url_quote(\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> t.Callable[[bytes], str]:\n \"\"\"Precompile the translation table for a URL encoding function.\n\n Unlike :func:`url_quote`, the generated function only takes the\n string to quote.\n\n :param charset: The charset to encode the result with.\n :param errors: How to handle encoding errors.\n :param safe: An optional sequence of safe characters to never encode.\n :param unsafe: An optional sequence of unsafe characters to always encode.\n \"\"\"\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n table = [chr(c) if c in safe else f\"%{c:02X}\" for c in range(256)]\n\n def quote(string: bytes) -> str:\n return \"\".join([table[c] for c in string])\n\n return quote\n\n\n_fast_url_quote = _make_fast_url_quote()\n_fast_quote_plus = _make_fast_url_quote(safe=\" \", unsafe=\"+\")\n\n\ndef _fast_url_quote_plus(string: bytes) -> str:\n return _fast_quote_plus(string).replace(\" \", \"+\")\n\n\ndef url_quote(\n string: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> str:\n \"\"\"URL encode a single string with a given encoding.\n\n :param s: the string to quote.\n :param charset: the charset to be used.\n :param safe: an optional sequence of safe characters.\n :param unsafe: an optional sequence of unsafe characters.\n\n .. 
versionadded:: 0.9.2\n The `unsafe` parameter was added.\n \"\"\"\n if not isinstance(string, (str, bytes, bytearray)):\n string = str(string)\n if isinstance(string, str):\n string = string.encode(charset, errors)\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n rv = bytearray()\n for char in bytearray(string):\n if char in safe:\n rv.append(char)\n else:\n rv.extend(_bytetohex[char])\n return bytes(rv).decode(charset)\n\n\ndef url_quote_plus(\n string: str, charset: str = \"utf-8\", errors: str = \"strict\", safe: str = \"\"\n) -> str:\n \"\"\"URL encode a single string with the given encoding and convert\n whitespace to \"+\".\n\n :param s: The string to quote.\n :param charset: The charset to be used.\n :param safe: An optional sequence of safe characters.\n \"\"\"\n return url_quote(string, charset, errors, safe + \" \", \"+\").replace(\" \", \"+\")\n\n\ndef url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:\n \"\"\"The reverse operation to :meth:`url_parse`. This accepts arbitrary\n as well as :class:`URL` tuples and returns a URL as a string.\n\n :param components: the parsed URL as tuple which should be converted\n into a URL string.\n \"\"\"\n _check_str_tuple(components)\n scheme, netloc, path, query, fragment = components\n s = _make_encode_wrapper(scheme)\n url = s(\"\")\n\n # We generally treat file:///x and file:/x the same which is also\n # what browsers seem to do. This also allows us to ignore a schema\n # register for netloc utilization or having to differentiate between\n # empty and missing netloc.\n if netloc or (scheme and path.startswith(s(\"/\"))):\n if path and path[:1] != s(\"/\"):\n path = s(\"/\") + path\n url = s(\"//\") + (netloc or s(\"\")) + path\n elif path:\n url += path\n if scheme:\n url = scheme + s(\":\") + url\n if query:\n url = url + s(\"?\") + query\n if fragment:\n url = url + s(\"#\") + fragment\n return url\n\n\ndef url_unquote(\n s: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"replace\",\n unsafe: str = \"\",\n) -> str:\n \"\"\"URL decode a single string with a given encoding. If the charset\n is set to `None` no decoding is performed and raw bytes are\n returned.\n\n :param s: the string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: the error handling for the charset decoding.\n \"\"\"\n rv = _unquote_to_bytes(s, unsafe)\n if charset is None:\n return rv\n return rv.decode(charset, errors)\n\n\ndef url_unquote_plus(\n s: t.Union[str, bytes], charset: str = \"utf-8\", errors: str = \"replace\"\n) -> str:\n \"\"\"URL decode a single string with the given `charset` and decode \"+\" to\n whitespace.\n\n Per default encoding errors are ignored. If you want a different behavior\n you can set `errors` to ``'replace'`` or ``'strict'``.\n\n :param s: The string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: The error handling for the `charset` decoding.\n \"\"\"\n if isinstance(s, str):\n s = s.replace(\"+\", \" \")\n else:\n s = s.replace(b\"+\", b\" \")\n return url_unquote(s, charset, errors)\n\n\ndef url_fix(s: str, charset: str = \"utf-8\") -> str:\n r\"\"\"Sometimes you get an URL by a user that just isn't a real URL because\n it contains unsafe characters like ' ' and so on. 
This function can fix\n some of the problems in a similar way browsers handle data entered by the\n user:\n\n >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\\xe4rung)')\n 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'\n\n :param s: the string with the URL to fix.\n :param charset: The target charset for the URL if the url was given\n as a string.\n \"\"\"\n # First step is to switch to text processing and to convert\n # backslashes (which are invalid in URLs anyways) to slashes. This is\n # consistent with what Chrome does.\n s = _to_str(s, charset, \"replace\").replace(\"\\\\\", \"/\")\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if s.startswith(\"file://\") and s[7:8].isalpha() and s[8:10] in (\":/\", \"|/\"):\n s = f\"file:///{s[7:]}\"\n\n url = url_parse(s)\n path = url_quote(url.path, charset, safe=\"/%+$!*'(),\")\n qs = url_quote_plus(url.query, charset, safe=\":&%=+$!*'(),\")\n anchor = url_quote_plus(url.fragment, charset, safe=\":&%=+$!*'(),\")\n return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))\n\n\n# not-unreserved characters remain quoted when unquoting to IRI\n_to_iri_unsafe = \"\".join([chr(c) for c in range(128) if c not in _always_safe])\n\n\ndef _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:\n \"\"\"Used in :func:`uri_to_iri` after unquoting to re-quote any\n invalid bytes.\n \"\"\"\n # the docs state that UnicodeError does have these attributes,\n # but mypy isn't picking them up\n out = _fast_url_quote(e.object[e.start : e.end]) # type: ignore\n return out, e.end # type: ignore\n\n\ncodecs.register_error(\"werkzeug.url_quote\", _codec_error_url_quote)\n\n\ndef uri_to_iri(\n uri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"werkzeug.url_quote\",\n) -> str:\n \"\"\"Convert a URI to an IRI. All valid UTF-8 characters are unquoted,\n leaving all reserved and invalid characters quoted. If the URL has\n a domain, it is decoded from Punycode.\n\n >>> uri_to_iri(\"http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF\")\n 'http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF'\n\n :param uri: The URI to convert.\n :param charset: The encoding to encode unquoted bytes with.\n :param errors: Error handler to use during ``bytes.encode``. By\n default, invalid bytes are left quoted.\n\n .. versionchanged:: 0.15\n All reserved and invalid characters remain quoted. Previously,\n only some reserved characters were preserved, and invalid bytes\n were replaced instead of left quoted.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(uri, tuple):\n uri = url_unparse(uri)\n\n uri = url_parse(_to_str(uri, charset))\n path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)\n query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)\n fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)\n return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))\n\n\n# reserved characters remain unquoted when quoting to URI\n_to_uri_safe = \":/?#[]@!$&'()*+,;=%\"\n\n\ndef iri_to_uri(\n iri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe_conversion: bool = False,\n) -> str:\n \"\"\"Convert an IRI to a URI. All non-ASCII and unsafe characters are\n quoted. 
If the URL has a domain, it is encoded to Punycode.\n\n >>> iri_to_uri('http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF')\n 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'\n\n :param iri: The IRI to convert.\n :param charset: The encoding of the IRI.\n :param errors: Error handler to use during ``bytes.encode``.\n :param safe_conversion: Return the URL unchanged if it only contains\n ASCII characters and no whitespace. See the explanation below.\n\n There is a general problem with IRI conversion with some protocols\n that are in violation of the URI specification. Consider the\n following two IRIs::\n\n magnet:?xt=uri:whatever\n itms-services://?action=download-manifest\n\n After parsing, we don't know if the scheme requires the ``//``,\n which is dropped if empty, but conveys different meanings in the\n final URL if it's present or not. In this case, you can use\n ``safe_conversion``, which will return the URL unchanged if it only\n contains ASCII characters and no whitespace. This can result in a\n URI with unquoted characters if it was not already quoted correctly,\n but preserves the URL's semantics. Werkzeug uses this for the\n ``Location`` header for redirects.\n\n .. versionchanged:: 0.15\n All reserved characters remain unquoted. Previously, only some\n reserved characters were left unquoted.\n\n .. versionchanged:: 0.9.6\n The ``safe_conversion`` parameter was added.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(iri, tuple):\n iri = url_unparse(iri)\n\n if safe_conversion:\n # If we're not sure if it's safe to convert the URL, and it only\n # contains ASCII characters, return it unconverted.\n try:\n native_iri = _to_str(iri)\n ascii_iri = native_iri.encode(\"ascii\")\n\n # Only return if it doesn't have whitespace. (Why?)\n if len(ascii_iri.split()) == 1:\n return native_iri\n except UnicodeError:\n pass\n\n iri = url_parse(_to_str(iri, charset, errors))\n path = url_quote(iri.path, charset, errors, _to_uri_safe)\n query = url_quote(iri.query, charset, errors, _to_uri_safe)\n fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)\n return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))\n\n\ndef url_decode(\n s: t.AnyStr,\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: str = \"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Parse a query string and return it as a :class:`MultiDict`.\n\n :param s: The query string to parse.\n :param charset: Decode bytes to string with this charset. If not\n given, bytes are returned as-is.\n :param include_empty: Include keys with empty values in the dict.\n :param errors: Error handling behavior when decoding bytes.\n :param separator: Separator character between pairs.\n :param cls: Container to hold result instead of :class:`MultiDict`.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. versionchanged:: 0.5\n In previous versions \";\" and \"&\" could be used for url decoding.\n Now only \"&\" is supported. If you want to use \";\", a different\n ``separator`` can be provided.\n\n .. 
versionchanged:: 0.5\n The ``cls`` parameter was added.\n \"\"\"\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n if isinstance(s, str) and not isinstance(separator, str):\n separator = separator.decode(charset or \"ascii\")\n elif isinstance(s, bytes) and not isinstance(separator, bytes):\n separator = separator.encode(charset or \"ascii\") # type: ignore\n return cls(\n _url_decode_impl(\n s.split(separator), charset, include_empty, errors # type: ignore\n )\n )\n\n\ndef url_decode_stream(\n stream: t.IO[bytes],\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: bytes = b\"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n limit: t.Optional[int] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Works like :func:`url_decode` but decodes a stream. The behavior\n of stream and limit follows functions like\n :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is\n directly fed to the `cls` so you can consume the data while it's\n parsed.\n\n :param stream: a stream with the encoded querystring\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param include_empty: Set to `False` if you don't want empty values to\n appear in the dict.\n :param errors: the decoding error behavior.\n :param separator: the pair separator to be used, defaults to ``&``\n :param cls: an optional dict class to use. If this is not specified\n or `None` the default :class:`MultiDict` is used.\n :param limit: the content length of the URL data. Not necessary if\n a limited stream is provided.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` and ``return_iterator`` parameters are\n deprecated and will be removed in Werkzeug 2.1.\n\n .. versionadded:: 0.8\n \"\"\"\n from .wsgi import make_chunk_iter\n\n pair_iter = make_chunk_iter(stream, separator, limit)\n decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)\n\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n\n return cls(decoder)\n\n\ndef _url_decode_impl(\n pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str\n) -> t.Iterator[t.Tuple[str, str]]:\n for pair in pair_iter:\n if not pair:\n continue\n s = _make_encode_wrapper(pair)\n equal = s(\"=\")\n if equal in pair:\n key, value = pair.split(equal, 1)\n else:\n if not include_empty:\n continue\n key = pair\n value = s(\"\")\n yield (\n url_unquote_plus(key, charset, errors),\n url_unquote_plus(value, charset, errors),\n )\n\n\ndef url_encode(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str = \"utf-8\",\n sort: bool = False,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n separator: str = \"&\",\n) -> str:\n \"\"\"URL encode a dict/`MultiDict`. If a value is `None` it will not appear\n in the result string. Per default only values are encoded into the target\n charset strings.\n\n :param obj: the object to encode into a query string.\n :param charset: the charset of the query string.\n :param sort: set to `True` if you want parameters to be sorted by `key`.\n :param separator: the separator to be used for the pairs.\n :param key: an optional function to be used for sorting. For more details\n check out the :func:`sorted` documentation.\n\n .. versionchanged:: 2.0\n The ``encode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. 
versionchanged:: 0.5\n Added the ``sort``, ``key``, and ``separator`` parameters.\n \"\"\"\n separator = _to_str(separator, \"ascii\")\n return separator.join(_url_encode_impl(obj, charset, sort, key))\n\n\ndef url_encode_stream(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n stream: t.Optional[t.IO[str]] = None,\n charset: str = \"utf-8\",\n sort: bool = False,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n separator: str = \"&\",\n) -> None:\n \"\"\"Like :meth:`url_encode` but writes the results to a stream\n object. If the stream is `None` a generator over all encoded\n pairs is returned.\n\n :param obj: the object to encode into a query string.\n :param stream: a stream to write the encoded object into or `None` if\n an iterator over the encoded pairs should be returned. In\n that case the separator argument is ignored.\n :param charset: the charset of the query string.\n :param sort: set to `True` if you want parameters to be sorted by `key`.\n :param separator: the separator to be used for the pairs.\n :param key: an optional function to be used for sorting. For more details\n check out the :func:`sorted` documentation.\n\n .. versionchanged:: 2.0\n The ``encode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. versionadded:: 0.8\n \"\"\"\n separator = _to_str(separator, \"ascii\")\n gen = _url_encode_impl(obj, charset, sort, key)\n if stream is None:\n return gen # type: ignore\n for idx, chunk in enumerate(gen):\n if idx:\n stream.write(separator)\n stream.write(chunk)\n return None\n\n\ndef url_join(\n base: t.Union[str, t.Tuple[str, str, str, str, str]],\n url: t.Union[str, t.Tuple[str, str, str, str, str]],\n allow_fragments: bool = True,\n) -> str:\n \"\"\"Join a base URL and a possibly relative URL to form an absolute\n interpretation of the latter.\n\n :param base: the base URL for the join operation.\n :param url: the URL to join.\n :param allow_fragments: indicates whether fragments should be allowed.\n \"\"\"\n if isinstance(base, tuple):\n base = url_unparse(base)\n if isinstance(url, tuple):\n url = url_unparse(url)\n\n _check_str_tuple((base, url))\n s = _make_encode_wrapper(base)\n\n if not base:\n return url\n if not url:\n return base\n\n bscheme, bnetloc, bpath, bquery, bfragment = url_parse(\n base, allow_fragments=allow_fragments\n )\n scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)\n if scheme != bscheme:\n return url\n if netloc:\n return url_unparse((scheme, netloc, path, query, fragment))\n netloc = bnetloc\n\n if path[:1] == s(\"/\"):\n segments = path.split(s(\"/\"))\n elif not path:\n segments = bpath.split(s(\"/\"))\n if not query:\n query = bquery\n else:\n segments = bpath.split(s(\"/\"))[:-1] + path.split(s(\"/\"))\n\n # If the rightmost part is \"./\" we want to keep the slash but\n # remove the dot.\n if segments[-1] == s(\".\"):\n segments[-1] = s(\"\")\n\n # Resolve \"..\" and \".\"\n segments = [segment for segment in segments if segment != s(\".\")]\n while True:\n i = 1\n n = len(segments) - 1\n while i < n:\n if segments[i] == s(\"..\") and segments[i - 1] not in (s(\"\"), s(\"..\")):\n del segments[i - 1 : i + 1]\n break\n i += 1\n else:\n break\n\n # Remove trailing \"..\" if the URL is absolute\n unwanted_marker = [s(\"\"), s(\"..\")]\n while segments[:2] == unwanted_marker:\n del segments[1]\n\n path = s(\"/\").join(segments)\n return url_unparse((scheme, netloc, path, query, fragment))\n",
"path": "src/werkzeug/urls.py"
}
] | [
{
"content": "\"\"\"Functions for working with URLs.\n\nContains implementations of functions from :mod:`urllib.parse` that\nhandle bytes and strings.\n\"\"\"\nimport codecs\nimport os\nimport re\nimport typing as t\n\nfrom ._internal import _check_str_tuple\nfrom ._internal import _decode_idna\nfrom ._internal import _encode_idna\nfrom ._internal import _make_encode_wrapper\nfrom ._internal import _to_str\n\nif t.TYPE_CHECKING:\n from . import datastructures as ds\n\n# A regular expression for what a valid schema looks like\n_scheme_re = re.compile(r\"^[a-zA-Z0-9+-.]+$\")\n\n# Characters that are safe in any part of an URL.\n_always_safe = frozenset(\n bytearray(\n b\"abcdefghijklmnopqrstuvwxyz\"\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n b\"0123456789\"\n b\"-._~\"\n b\"$!'()*+,;\" # RFC3986 sub-delims set, not including query string delimiters &=\n )\n)\n\n_hexdigits = \"0123456789ABCDEFabcdef\"\n_hextobyte = {\n f\"{a}{b}\".encode(\"ascii\"): int(f\"{a}{b}\", 16)\n for a in _hexdigits\n for b in _hexdigits\n}\n_bytetohex = [f\"%{char:02X}\".encode(\"ascii\") for char in range(256)]\n\n\nclass _URLTuple(t.NamedTuple):\n scheme: str\n netloc: str\n path: str\n query: str\n fragment: str\n\n\nclass BaseURL(_URLTuple):\n \"\"\"Superclass of :py:class:`URL` and :py:class:`BytesURL`.\"\"\"\n\n __slots__ = ()\n _at: str\n _colon: str\n _lbracket: str\n _rbracket: str\n\n def __str__(self) -> str:\n return self.to_url()\n\n def replace(self, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Return an URL with the same values, except for those parameters\n given new values by whichever keyword arguments are specified.\"\"\"\n return self._replace(**kwargs)\n\n @property\n def host(self) -> t.Optional[str]:\n \"\"\"The host part of the URL if available, otherwise `None`. The\n host is either the hostname or the IP address mentioned in the\n URL. It will not contain the port.\n \"\"\"\n return self._split_host()[0]\n\n @property\n def ascii_host(self) -> t.Optional[str]:\n \"\"\"Works exactly like :attr:`host` but will return a result that\n is restricted to ASCII. If it finds a netloc that is not ASCII\n it will attempt to idna decode it. This is useful for socket\n operations when the URL might include internationalized characters.\n \"\"\"\n rv = self.host\n if rv is not None and isinstance(rv, str):\n try:\n rv = _encode_idna(rv) # type: ignore\n except UnicodeError:\n rv = rv.encode(\"ascii\", \"ignore\") # type: ignore\n return _to_str(rv, \"ascii\", \"ignore\")\n\n @property\n def port(self) -> t.Optional[int]:\n \"\"\"The port in the URL as an integer if it was present, `None`\n otherwise. 
This does not fill in default ports.\n \"\"\"\n try:\n rv = int(_to_str(self._split_host()[1]))\n if 0 <= rv <= 65535:\n return rv\n except (ValueError, TypeError):\n pass\n return None\n\n @property\n def auth(self) -> t.Optional[str]:\n \"\"\"The authentication part in the URL if available, `None`\n otherwise.\n \"\"\"\n return self._split_netloc()[0]\n\n @property\n def username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[0]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_username(self) -> t.Optional[str]:\n \"\"\"The username if it was part of the URL, `None` otherwise.\n Unlike :attr:`username` this one is not being decoded.\n \"\"\"\n return self._split_auth()[0]\n\n @property\n def password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n This undergoes URL decoding and will always be a string.\n \"\"\"\n rv = self._split_auth()[1]\n if rv is not None:\n return _url_unquote_legacy(rv)\n return None\n\n @property\n def raw_password(self) -> t.Optional[str]:\n \"\"\"The password if it was part of the URL, `None` otherwise.\n Unlike :attr:`password` this one is not being decoded.\n \"\"\"\n return self._split_auth()[1]\n\n def decode_query(self, *args: t.Any, **kwargs: t.Any) -> \"ds.MultiDict[str, str]\":\n \"\"\"Decodes the query part of the URL. Ths is a shortcut for\n calling :func:`url_decode` on the query argument. The arguments and\n keyword arguments are forwarded to :func:`url_decode` unchanged.\n \"\"\"\n return url_decode(self.query, *args, **kwargs)\n\n def join(self, *args: t.Any, **kwargs: t.Any) -> \"BaseURL\":\n \"\"\"Joins this URL with another one. This is just a convenience\n function for calling into :meth:`url_join` and then parsing the\n return value again.\n \"\"\"\n return url_parse(url_join(self, *args, **kwargs))\n\n def to_url(self) -> str:\n \"\"\"Returns a URL string or bytes depending on the type of the\n information stored. This is just a convenience function\n for calling :meth:`url_unparse` for this URL.\n \"\"\"\n return url_unparse(self)\n\n def encode_netloc(self) -> str:\n \"\"\"Encodes the netloc part to an ASCII safe URL as bytes.\"\"\"\n rv = self.ascii_host or \"\"\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n url_quote(self.raw_username or \"\", \"utf-8\", \"strict\", \"/:%\"),\n url_quote(self.raw_password or \"\", \"utf-8\", \"strict\", \"/:%\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def decode_netloc(self) -> str:\n \"\"\"Decodes the netloc part into a string.\"\"\"\n rv = _decode_idna(self.host or \"\")\n\n if \":\" in rv:\n rv = f\"[{rv}]\"\n port = self.port\n if port is not None:\n rv = f\"{rv}:{port}\"\n auth = \":\".join(\n filter(\n None,\n [\n _url_unquote_legacy(self.raw_username or \"\", \"/:%@\"),\n _url_unquote_legacy(self.raw_password or \"\", \"/:%@\"),\n ],\n )\n )\n if auth:\n rv = f\"{auth}@{rv}\"\n return rv\n\n def to_uri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`BytesURL` tuple that holds a URI. 
This will\n encode all the information in the URL properly to ASCII using the\n rules a web browser would follow.\n\n It's usually more interesting to directly call :meth:`iri_to_uri` which\n will return a string.\n \"\"\"\n return url_parse(iri_to_uri(self))\n\n def to_iri_tuple(self) -> \"BaseURL\":\n \"\"\"Returns a :class:`URL` tuple that holds a IRI. This will try\n to decode as much information as possible in the URL without\n losing information similar to how a web browser does it for the\n URL bar.\n\n It's usually more interesting to directly call :meth:`uri_to_iri` which\n will return a string.\n \"\"\"\n return url_parse(uri_to_iri(self))\n\n def get_file_location(\n self, pathformat: t.Optional[str] = None\n ) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n \"\"\"Returns a tuple with the location of the file in the form\n ``(server, location)``. If the netloc is empty in the URL or\n points to localhost, it's represented as ``None``.\n\n The `pathformat` by default is autodetection but needs to be set\n when working with URLs of a specific system. The supported values\n are ``'windows'`` when working with Windows or DOS paths and\n ``'posix'`` when working with posix paths.\n\n If the URL does not point to a local file, the server and location\n are both represented as ``None``.\n\n :param pathformat: The expected format of the path component.\n Currently ``'windows'`` and ``'posix'`` are\n supported. Defaults to ``None`` which is\n autodetect.\n \"\"\"\n if self.scheme != \"file\":\n return None, None\n\n path = url_unquote(self.path)\n host = self.netloc or None\n\n if pathformat is None:\n if os.name == \"nt\":\n pathformat = \"windows\"\n else:\n pathformat = \"posix\"\n\n if pathformat == \"windows\":\n if path[:1] == \"/\" and path[1:2].isalpha() and path[2:3] in \"|:\":\n path = f\"{path[1:2]}:{path[3:]}\"\n windows_share = path[:3] in (\"\\\\\" * 3, \"/\" * 3)\n import ntpath\n\n path = ntpath.normpath(path)\n # Windows shared drives are represented as ``\\\\host\\\\directory``.\n # That results in a URL like ``file://///host/directory``, and a\n # path like ``///host/directory``. 
We need to special-case this\n # because the path contains the hostname.\n if windows_share and host is None:\n parts = path.lstrip(\"\\\\\").split(\"\\\\\", 1)\n if len(parts) == 2:\n host, path = parts\n else:\n host = parts[0]\n path = \"\"\n elif pathformat == \"posix\":\n import posixpath\n\n path = posixpath.normpath(path)\n else:\n raise TypeError(f\"Invalid path format {pathformat!r}\")\n\n if host in (\"127.0.0.1\", \"::1\", \"localhost\"):\n host = None\n\n return host, path\n\n def _split_netloc(self) -> t.Tuple[t.Optional[str], str]:\n if self._at in self.netloc:\n auth, _, netloc = self.netloc.partition(self._at)\n return auth, netloc\n return None, self.netloc\n\n def _split_auth(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n auth = self._split_netloc()[0]\n if not auth:\n return None, None\n if self._colon not in auth:\n return auth, None\n\n username, _, password = auth.partition(self._colon)\n return username, password\n\n def _split_host(self) -> t.Tuple[t.Optional[str], t.Optional[str]]:\n rv = self._split_netloc()[1]\n if not rv:\n return None, None\n\n if not rv.startswith(self._lbracket):\n if self._colon in rv:\n host, _, port = rv.partition(self._colon)\n return host, port\n return rv, None\n\n idx = rv.find(self._rbracket)\n if idx < 0:\n return rv, None\n\n host = rv[1:idx]\n rest = rv[idx + 1 :]\n if rest.startswith(self._colon):\n return host, rest[1:]\n return host, None\n\n\nclass URL(BaseURL):\n \"\"\"Represents a parsed URL. This behaves like a regular tuple but\n also has some extra attributes that give further insight into the\n URL.\n \"\"\"\n\n __slots__ = ()\n _at = \"@\"\n _colon = \":\"\n _lbracket = \"[\"\n _rbracket = \"]\"\n\n def encode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"BytesURL\":\n \"\"\"Encodes the URL to a tuple made out of bytes. The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return BytesURL(\n self.scheme.encode(\"ascii\"), # type: ignore\n self.encode_netloc(),\n self.path.encode(charset, errors), # type: ignore\n self.query.encode(charset, errors), # type: ignore\n self.fragment.encode(charset, errors), # type: ignore\n )\n\n\nclass BytesURL(BaseURL):\n \"\"\"Represents a parsed URL in bytes.\"\"\"\n\n __slots__ = ()\n _at = b\"@\" # type: ignore\n _colon = b\":\" # type: ignore\n _lbracket = b\"[\" # type: ignore\n _rbracket = b\"]\" # type: ignore\n\n def __str__(self) -> str:\n return self.to_url().decode(\"utf-8\", \"replace\") # type: ignore\n\n def encode_netloc(self) -> bytes: # type: ignore\n \"\"\"Returns the netloc unchanged as bytes.\"\"\"\n return self.netloc # type: ignore\n\n def decode(self, charset: str = \"utf-8\", errors: str = \"replace\") -> \"URL\":\n \"\"\"Decodes the URL to a tuple made out of strings. 
The charset is\n only being used for the path, query and fragment.\n \"\"\"\n return URL(\n self.scheme.decode(\"ascii\"), # type: ignore\n self.decode_netloc(),\n self.path.decode(charset, errors), # type: ignore\n self.query.decode(charset, errors), # type: ignore\n self.fragment.decode(charset, errors), # type: ignore\n )\n\n\n_unquote_maps: t.Dict[t.FrozenSet[int], t.Dict[bytes, int]] = {frozenset(): _hextobyte}\n\n\ndef _unquote_to_bytes(\n string: t.Union[str, bytes], unsafe: t.Union[str, bytes] = \"\"\n) -> bytes:\n if isinstance(string, str):\n string = string.encode(\"utf-8\")\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(\"utf-8\")\n\n unsafe = frozenset(bytearray(unsafe))\n groups = iter(string.split(b\"%\"))\n result = bytearray(next(groups, b\"\"))\n\n try:\n hex_to_byte = _unquote_maps[unsafe]\n except KeyError:\n hex_to_byte = _unquote_maps[unsafe] = {\n h: b for h, b in _hextobyte.items() if b not in unsafe\n }\n\n for group in groups:\n code = group[:2]\n\n if code in hex_to_byte:\n result.append(hex_to_byte[code])\n result.extend(group[2:])\n else:\n result.append(37) # %\n result.extend(group)\n\n return bytes(result)\n\n\ndef _url_encode_impl(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str,\n sort: bool,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]],\n) -> t.Iterator[str]:\n from .datastructures import iter_multi_items\n\n iterable: t.Iterable[t.Tuple[str, str]] = iter_multi_items(obj)\n\n if sort:\n iterable = sorted(iterable, key=key)\n\n for key_str, value_str in iterable:\n if value_str is None:\n continue\n\n if not isinstance(key_str, bytes):\n key_bytes = str(key_str).encode(charset)\n else:\n key_bytes = key_str\n\n if not isinstance(value_str, bytes):\n value_bytes = str(value_str).encode(charset)\n else:\n value_bytes = value_str\n\n yield f\"{_fast_url_quote_plus(key_bytes)}={_fast_url_quote_plus(value_bytes)}\"\n\n\ndef _url_unquote_legacy(value: str, unsafe: str = \"\") -> str:\n try:\n return url_unquote(value, charset=\"utf-8\", errors=\"strict\", unsafe=unsafe)\n except UnicodeError:\n return url_unquote(value, charset=\"latin1\", unsafe=unsafe)\n\n\ndef url_parse(\n url: str, scheme: t.Optional[str] = None, allow_fragments: bool = True\n) -> BaseURL:\n \"\"\"Parses a URL from a string into a :class:`URL` tuple. If the URL\n is lacking a scheme it can be provided as second argument. Otherwise,\n it is ignored. 
Optionally fragments can be stripped from the URL\n by setting `allow_fragments` to `False`.\n\n The inverse of this function is :func:`url_unparse`.\n\n :param url: the URL to parse.\n :param scheme: the default schema to use if the URL is schemaless.\n :param allow_fragments: if set to `False` a fragment will be removed\n from the URL.\n \"\"\"\n s = _make_encode_wrapper(url)\n is_text_based = isinstance(url, str)\n\n if scheme is None:\n scheme = s(\"\")\n netloc = query = fragment = s(\"\")\n i = url.find(s(\":\"))\n if i > 0 and _scheme_re.match(_to_str(url[:i], errors=\"replace\")):\n # make sure \"iri\" is not actually a port number (in which case\n # \"scheme\" is really part of the path)\n rest = url[i + 1 :]\n if not rest or any(c not in s(\"0123456789\") for c in rest):\n # not a port number\n scheme, url = url[:i].lower(), rest\n\n if url[:2] == s(\"//\"):\n delim = len(url)\n for c in s(\"/?#\"):\n wdelim = url.find(c, 2)\n if wdelim >= 0:\n delim = min(delim, wdelim)\n netloc, url = url[2:delim], url[delim:]\n if (s(\"[\") in netloc and s(\"]\") not in netloc) or (\n s(\"]\") in netloc and s(\"[\") not in netloc\n ):\n raise ValueError(\"Invalid IPv6 URL\")\n\n if allow_fragments and s(\"#\") in url:\n url, fragment = url.split(s(\"#\"), 1)\n if s(\"?\") in url:\n url, query = url.split(s(\"?\"), 1)\n\n result_type = URL if is_text_based else BytesURL\n return result_type(scheme, netloc, url, query, fragment)\n\n\ndef _make_fast_url_quote(\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> t.Callable[[bytes], str]:\n \"\"\"Precompile the translation table for a URL encoding function.\n\n Unlike :func:`url_quote`, the generated function only takes the\n string to quote.\n\n :param charset: The charset to encode the result with.\n :param errors: How to handle encoding errors.\n :param safe: An optional sequence of safe characters to never encode.\n :param unsafe: An optional sequence of unsafe characters to always encode.\n \"\"\"\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n table = [chr(c) if c in safe else f\"%{c:02X}\" for c in range(256)]\n\n def quote(string: bytes) -> str:\n return \"\".join([table[c] for c in string])\n\n return quote\n\n\n_fast_url_quote = _make_fast_url_quote()\n_fast_quote_plus = _make_fast_url_quote(safe=\" \", unsafe=\"+\")\n\n\ndef _fast_url_quote_plus(string: bytes) -> str:\n return _fast_quote_plus(string).replace(\" \", \"+\")\n\n\ndef url_quote(\n string: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe: t.Union[str, bytes] = \"/:\",\n unsafe: t.Union[str, bytes] = \"\",\n) -> str:\n \"\"\"URL encode a single string with a given encoding.\n\n :param s: the string to quote.\n :param charset: the charset to be used.\n :param safe: an optional sequence of safe characters.\n :param unsafe: an optional sequence of unsafe characters.\n\n .. 
versionadded:: 0.9.2\n The `unsafe` parameter was added.\n \"\"\"\n if not isinstance(string, (str, bytes, bytearray)):\n string = str(string)\n if isinstance(string, str):\n string = string.encode(charset, errors)\n if isinstance(safe, str):\n safe = safe.encode(charset, errors)\n if isinstance(unsafe, str):\n unsafe = unsafe.encode(charset, errors)\n safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))\n rv = bytearray()\n for char in bytearray(string):\n if char in safe:\n rv.append(char)\n else:\n rv.extend(_bytetohex[char])\n return bytes(rv).decode(charset)\n\n\ndef url_quote_plus(\n string: str, charset: str = \"utf-8\", errors: str = \"strict\", safe: str = \"\"\n) -> str:\n \"\"\"URL encode a single string with the given encoding and convert\n whitespace to \"+\".\n\n :param s: The string to quote.\n :param charset: The charset to be used.\n :param safe: An optional sequence of safe characters.\n \"\"\"\n return url_quote(string, charset, errors, safe + \" \", \"+\").replace(\" \", \"+\")\n\n\ndef url_unparse(components: t.Tuple[str, str, str, str, str]) -> str:\n \"\"\"The reverse operation to :meth:`url_parse`. This accepts arbitrary\n as well as :class:`URL` tuples and returns a URL as a string.\n\n :param components: the parsed URL as tuple which should be converted\n into a URL string.\n \"\"\"\n _check_str_tuple(components)\n scheme, netloc, path, query, fragment = components\n s = _make_encode_wrapper(scheme)\n url = s(\"\")\n\n # We generally treat file:///x and file:/x the same which is also\n # what browsers seem to do. This also allows us to ignore a schema\n # register for netloc utilization or having to differentiate between\n # empty and missing netloc.\n if netloc or (scheme and path.startswith(s(\"/\"))):\n if path and path[:1] != s(\"/\"):\n path = s(\"/\") + path\n url = s(\"//\") + (netloc or s(\"\")) + path\n elif path:\n url += path\n if scheme:\n url = scheme + s(\":\") + url\n if query:\n url = url + s(\"?\") + query\n if fragment:\n url = url + s(\"#\") + fragment\n return url\n\n\ndef url_unquote(\n s: t.Union[str, bytes],\n charset: str = \"utf-8\",\n errors: str = \"replace\",\n unsafe: str = \"\",\n) -> str:\n \"\"\"URL decode a single string with a given encoding. If the charset\n is set to `None` no decoding is performed and raw bytes are\n returned.\n\n :param s: the string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: the error handling for the charset decoding.\n \"\"\"\n rv = _unquote_to_bytes(s, unsafe)\n if charset is None:\n return rv\n return rv.decode(charset, errors)\n\n\ndef url_unquote_plus(\n s: t.Union[str, bytes], charset: str = \"utf-8\", errors: str = \"replace\"\n) -> str:\n \"\"\"URL decode a single string with the given `charset` and decode \"+\" to\n whitespace.\n\n Per default encoding errors are ignored. If you want a different behavior\n you can set `errors` to ``'replace'`` or ``'strict'``.\n\n :param s: The string to unquote.\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param errors: The error handling for the `charset` decoding.\n \"\"\"\n if isinstance(s, str):\n s = s.replace(\"+\", \" \")\n else:\n s = s.replace(b\"+\", b\" \")\n return url_unquote(s, charset, errors)\n\n\ndef url_fix(s: str, charset: str = \"utf-8\") -> str:\n r\"\"\"Sometimes you get an URL by a user that just isn't a real URL because\n it contains unsafe characters like ' ' and so on. 
This function can fix\n some of the problems in a similar way browsers handle data entered by the\n user:\n\n >>> url_fix('http://de.wikipedia.org/wiki/Elf (Begriffskl\\xe4rung)')\n 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'\n\n :param s: the string with the URL to fix.\n :param charset: The target charset for the URL if the url was given\n as a string.\n \"\"\"\n # First step is to switch to text processing and to convert\n # backslashes (which are invalid in URLs anyways) to slashes. This is\n # consistent with what Chrome does.\n s = _to_str(s, charset, \"replace\").replace(\"\\\\\", \"/\")\n\n # For the specific case that we look like a malformed windows URL\n # we want to fix this up manually:\n if s.startswith(\"file://\") and s[7:8].isalpha() and s[8:10] in (\":/\", \"|/\"):\n s = f\"file:///{s[7:]}\"\n\n url = url_parse(s)\n path = url_quote(url.path, charset, safe=\"/%+$!*'(),\")\n qs = url_quote_plus(url.query, charset, safe=\":&%=+$!*'(),\")\n anchor = url_quote_plus(url.fragment, charset, safe=\":&%=+$!*'(),\")\n return url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))\n\n\n# not-unreserved characters remain quoted when unquoting to IRI\n_to_iri_unsafe = \"\".join([chr(c) for c in range(128) if c not in _always_safe])\n\n\ndef _codec_error_url_quote(e: UnicodeError) -> t.Tuple[str, int]:\n \"\"\"Used in :func:`uri_to_iri` after unquoting to re-quote any\n invalid bytes.\n \"\"\"\n # the docs state that UnicodeError does have these attributes,\n # but mypy isn't picking them up\n out = _fast_url_quote(e.object[e.start : e.end]) # type: ignore\n return out, e.end # type: ignore\n\n\ncodecs.register_error(\"werkzeug.url_quote\", _codec_error_url_quote)\n\n\ndef uri_to_iri(\n uri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"werkzeug.url_quote\",\n) -> str:\n \"\"\"Convert a URI to an IRI. All valid UTF-8 characters are unquoted,\n leaving all reserved and invalid characters quoted. If the URL has\n a domain, it is decoded from Punycode.\n\n >>> uri_to_iri(\"http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF\")\n 'http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF'\n\n :param uri: The URI to convert.\n :param charset: The encoding to encode unquoted bytes with.\n :param errors: Error handler to use during ``bytes.encode``. By\n default, invalid bytes are left quoted.\n\n .. versionchanged:: 0.15\n All reserved and invalid characters remain quoted. Previously,\n only some reserved characters were preserved, and invalid bytes\n were replaced instead of left quoted.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(uri, tuple):\n uri = url_unparse(uri)\n\n uri = url_parse(_to_str(uri, charset))\n path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)\n query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)\n fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)\n return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))\n\n\n# reserved characters remain unquoted when quoting to URI\n_to_uri_safe = \":/?#[]@!$&'()*+,;=%\"\n\n\ndef iri_to_uri(\n iri: t.Union[str, t.Tuple[str, str, str, str, str]],\n charset: str = \"utf-8\",\n errors: str = \"strict\",\n safe_conversion: bool = False,\n) -> str:\n \"\"\"Convert an IRI to a URI. All non-ASCII and unsafe characters are\n quoted. 
If the URL has a domain, it is encoded to Punycode.\n\n >>> iri_to_uri('http://\\\\u2603.net/p\\\\xe5th?q=\\\\xe8ry%DF')\n 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'\n\n :param iri: The IRI to convert.\n :param charset: The encoding of the IRI.\n :param errors: Error handler to use during ``bytes.encode``.\n :param safe_conversion: Return the URL unchanged if it only contains\n ASCII characters and no whitespace. See the explanation below.\n\n There is a general problem with IRI conversion with some protocols\n that are in violation of the URI specification. Consider the\n following two IRIs::\n\n magnet:?xt=uri:whatever\n itms-services://?action=download-manifest\n\n After parsing, we don't know if the scheme requires the ``//``,\n which is dropped if empty, but conveys different meanings in the\n final URL if it's present or not. In this case, you can use\n ``safe_conversion``, which will return the URL unchanged if it only\n contains ASCII characters and no whitespace. This can result in a\n URI with unquoted characters if it was not already quoted correctly,\n but preserves the URL's semantics. Werkzeug uses this for the\n ``Location`` header for redirects.\n\n .. versionchanged:: 0.15\n All reserved characters remain unquoted. Previously, only some\n reserved characters were left unquoted.\n\n .. versionchanged:: 0.9.6\n The ``safe_conversion`` parameter was added.\n\n .. versionadded:: 0.6\n \"\"\"\n if isinstance(iri, tuple):\n iri = url_unparse(iri)\n\n if safe_conversion:\n # If we're not sure if it's safe to convert the URL, and it only\n # contains ASCII characters, return it unconverted.\n try:\n native_iri = _to_str(iri)\n ascii_iri = native_iri.encode(\"ascii\")\n\n # Only return if it doesn't have whitespace. (Why?)\n if len(ascii_iri.split()) == 1:\n return native_iri\n except UnicodeError:\n pass\n\n iri = url_parse(_to_str(iri, charset, errors))\n path = url_quote(iri.path, charset, errors, _to_uri_safe)\n query = url_quote(iri.query, charset, errors, _to_uri_safe)\n fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)\n return url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))\n\n\ndef url_decode(\n s: t.AnyStr,\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: str = \"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Parse a query string and return it as a :class:`MultiDict`.\n\n :param s: The query string to parse.\n :param charset: Decode bytes to string with this charset. If not\n given, bytes are returned as-is.\n :param include_empty: Include keys with empty values in the dict.\n :param errors: Error handling behavior when decoding bytes.\n :param separator: Separator character between pairs.\n :param cls: Container to hold result instead of :class:`MultiDict`.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. versionchanged:: 0.5\n In previous versions \";\" and \"&\" could be used for url decoding.\n Now only \"&\" is supported. If you want to use \";\", a different\n ``separator`` can be provided.\n\n .. 
versionchanged:: 0.5\n The ``cls`` parameter was added.\n \"\"\"\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n if isinstance(s, str) and not isinstance(separator, str):\n separator = separator.decode(charset or \"ascii\")\n elif isinstance(s, bytes) and not isinstance(separator, bytes):\n separator = separator.encode(charset or \"ascii\") # type: ignore\n return cls(\n _url_decode_impl(\n s.split(separator), charset, include_empty, errors # type: ignore\n )\n )\n\n\ndef url_decode_stream(\n stream: t.IO[bytes],\n charset: str = \"utf-8\",\n include_empty: bool = True,\n errors: str = \"replace\",\n separator: bytes = b\"&\",\n cls: t.Optional[t.Type[\"ds.MultiDict\"]] = None,\n limit: t.Optional[int] = None,\n) -> \"ds.MultiDict[str, str]\":\n \"\"\"Works like :func:`url_decode` but decodes a stream. The behavior\n of stream and limit follows functions like\n :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is\n directly fed to the `cls` so you can consume the data while it's\n parsed.\n\n :param stream: a stream with the encoded querystring\n :param charset: the charset of the query string. If set to `None`\n no decoding will take place.\n :param include_empty: Set to `False` if you don't want empty values to\n appear in the dict.\n :param errors: the decoding error behavior.\n :param separator: the pair separator to be used, defaults to ``&``\n :param cls: an optional dict class to use. If this is not specified\n or `None` the default :class:`MultiDict` is used.\n :param limit: the content length of the URL data. Not necessary if\n a limited stream is provided.\n\n .. versionchanged:: 2.0\n The ``decode_keys`` and ``return_iterator`` parameters are\n deprecated and will be removed in Werkzeug 2.1.\n\n .. versionadded:: 0.8\n \"\"\"\n from .wsgi import make_chunk_iter\n\n pair_iter = make_chunk_iter(stream, separator, limit)\n decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)\n\n if cls is None:\n from .datastructures import MultiDict # noqa: F811\n\n cls = MultiDict\n\n return cls(decoder)\n\n\ndef _url_decode_impl(\n pair_iter: t.Iterable[t.AnyStr], charset: str, include_empty: bool, errors: str\n) -> t.Iterator[t.Tuple[str, str]]:\n for pair in pair_iter:\n if not pair:\n continue\n s = _make_encode_wrapper(pair)\n equal = s(\"=\")\n if equal in pair:\n key, value = pair.split(equal, 1)\n else:\n if not include_empty:\n continue\n key = pair\n value = s(\"\")\n yield (\n url_unquote_plus(key, charset, errors),\n url_unquote_plus(value, charset, errors),\n )\n\n\ndef url_encode(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n charset: str = \"utf-8\",\n sort: bool = False,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n separator: str = \"&\",\n) -> str:\n \"\"\"URL encode a dict/`MultiDict`. If a value is `None` it will not appear\n in the result string. Per default only values are encoded into the target\n charset strings.\n\n :param obj: the object to encode into a query string.\n :param charset: the charset of the query string.\n :param sort: set to `True` if you want parameters to be sorted by `key`.\n :param separator: the separator to be used for the pairs.\n :param key: an optional function to be used for sorting. For more details\n check out the :func:`sorted` documentation.\n\n .. versionchanged:: 2.0\n The ``encode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. 
versionchanged:: 0.5\n Added the ``sort``, ``key``, and ``separator`` parameters.\n \"\"\"\n separator = _to_str(separator, \"ascii\")\n return separator.join(_url_encode_impl(obj, charset, sort, key))\n\n\ndef url_encode_stream(\n obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],\n stream: t.Optional[t.IO[str]] = None,\n charset: str = \"utf-8\",\n sort: bool = False,\n key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,\n separator: str = \"&\",\n) -> None:\n \"\"\"Like :meth:`url_encode` but writes the results to a stream\n object. If the stream is `None` a generator over all encoded\n pairs is returned.\n\n :param obj: the object to encode into a query string.\n :param stream: a stream to write the encoded object into or `None` if\n an iterator over the encoded pairs should be returned. In\n that case the separator argument is ignored.\n :param charset: the charset of the query string.\n :param sort: set to `True` if you want parameters to be sorted by `key`.\n :param separator: the separator to be used for the pairs.\n :param key: an optional function to be used for sorting. For more details\n check out the :func:`sorted` documentation.\n\n .. versionchanged:: 2.0\n The ``encode_keys`` parameter is deprecated and will be removed\n in Werkzeug 2.1.\n\n .. versionadded:: 0.8\n \"\"\"\n separator = _to_str(separator, \"ascii\")\n gen = _url_encode_impl(obj, charset, sort, key)\n if stream is None:\n return gen # type: ignore\n for idx, chunk in enumerate(gen):\n if idx:\n stream.write(separator)\n stream.write(chunk)\n return None\n\n\ndef url_join(\n base: t.Union[str, t.Tuple[str, str, str, str, str]],\n url: t.Union[str, t.Tuple[str, str, str, str, str]],\n allow_fragments: bool = True,\n) -> str:\n \"\"\"Join a base URL and a possibly relative URL to form an absolute\n interpretation of the latter.\n\n :param base: the base URL for the join operation.\n :param url: the URL to join.\n :param allow_fragments: indicates whether fragments should be allowed.\n \"\"\"\n if isinstance(base, tuple):\n base = url_unparse(base)\n if isinstance(url, tuple):\n url = url_unparse(url)\n\n _check_str_tuple((base, url))\n s = _make_encode_wrapper(base)\n\n if not base:\n return url\n if not url:\n return base\n\n bscheme, bnetloc, bpath, bquery, bfragment = url_parse(\n base, allow_fragments=allow_fragments\n )\n scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)\n if scheme != bscheme:\n return url\n if netloc:\n return url_unparse((scheme, netloc, path, query, fragment))\n netloc = bnetloc\n\n if path[:1] == s(\"/\"):\n segments = path.split(s(\"/\"))\n elif not path:\n segments = bpath.split(s(\"/\"))\n if not query:\n query = bquery\n else:\n segments = bpath.split(s(\"/\"))[:-1] + path.split(s(\"/\"))\n\n # If the rightmost part is \"./\" we want to keep the slash but\n # remove the dot.\n if segments[-1] == s(\".\"):\n segments[-1] = s(\"\")\n\n # Resolve \"..\" and \".\"\n segments = [segment for segment in segments if segment != s(\".\")]\n while True:\n i = 1\n n = len(segments) - 1\n while i < n:\n if segments[i] == s(\"..\") and segments[i - 1] not in (s(\"\"), s(\"..\")):\n del segments[i - 1 : i + 1]\n break\n i += 1\n else:\n break\n\n # Remove trailing \"..\" if the URL is absolute\n unwanted_marker = [s(\"\"), s(\"..\")]\n while segments[:2] == unwanted_marker:\n del segments[1]\n\n path = s(\"/\").join(segments)\n return url_unparse((scheme, netloc, path, query, fragment))\n",
"path": "src/werkzeug/urls.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 88bee32ea..661c3d38a 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -63,6 +63,8 @@ Unreleased
a list, to custom URL converters. :issue:`2249`
- ``run_simple`` shows instructions for dealing with "address already
in use" errors, including extra instructions for macOS. :pr:`2321`
+- Extend list of characters considered always safe in URLs based on
+ :rfc:`3986`. :issue:`2319`
Version 2.0.3
diff --git a/src/werkzeug/urls.py b/src/werkzeug/urls.py
index 1cb9418d2..67c08b0bc 100644
--- a/src/werkzeug/urls.py
+++ b/src/werkzeug/urls.py
@@ -27,6 +27,7 @@
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789"
b"-._~"
+ b"$!'()*+,;" # RFC3986 sub-delims set, not including query string delimiters &=
)
)
diff --git a/tests/middleware/test_http_proxy.py b/tests/middleware/test_http_proxy.py
index b39cd3517..2b3bc55bf 100644
--- a/tests/middleware/test_http_proxy.py
+++ b/tests/middleware/test_http_proxy.py
@@ -45,3 +45,7 @@ def test_http_proxy(standard_app):
assert "HTTP_X_SPECIAL" not in r.json
assert r.json["HTTP_HOST"] == "127.0.0.1"
assert r.json["PATH_INFO"] == "/autohost/aha"
+
+ # test if characters allowed in URL are not encoded by proxy
+ r = client.get("/autohost/$")
+ assert r.json["REQUEST_URI"] == "/autohost/$"
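For illustration, a quick check of what the extended safe set changes in practice. `url_quote` is defined in the `urls.py` shown above; the expected outputs assume the patched `_always_safe` from this diff:

```python
# Hedged sketch: behaviour of url_quote once the RFC 3986 sub-delims are always safe.
from werkzeug.urls import url_quote

print(url_quote("/autohost/$"))     # "/autohost/$" -- "$" no longer percent-encoded
print(url_quote("a=b&c", safe=""))  # "a%3Db%26c"   -- query delimiters "&" and "=" stay quoted
```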
|
wright-group__WrightTools-640 | CITATION text file not distributed
| [
{
"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index d79eb6dfa..fa23eb776 100755
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,7 @@ def read(fname):
"datasets/*/*",
"datasets/*/*/*",
"datasets/*/*/*/*",
+ "CITATION",
"VERSION",
"WT5_VERSION",
]
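A hedged way to verify the fix from an installed copy (this assumes the `CITATION` file sits next to `VERSION` inside the `WrightTools` package directory, as the glob list above implies):

```python
# Minimal sketch: confirm CITATION ships once it is listed in package_data.
import os

import WrightTools

citation_path = os.path.join(os.path.dirname(WrightTools.__file__), "CITATION")
print(os.path.isfile(citation_path))  # True after this change, False before
```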
|
typeddjango__django-stubs-1782 | Make mypy a soft dependency
Given that there are several popular alternatives to mypy (e.g. pyright and pytype), mypy should be an optional dependency, installable with e.g. `django-stubs[mypy]`.
I haven't tested it myself yet, but if `django-stubs` doesn't work with these "alternative" typecheckers, then I'd suggest it be explicitly mentioned that this is a mypy-only stubs package.
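A minimal sanity check once mypy is dropped from `install_requires` (this assumes the import names `mypy` and `django_stubs_ext` for the distributions named in the setup.py below):

```python
# Hedged sketch: after `pip install django-stubs`, mypy may be absent, while
# `pip install "django-stubs[compatible-mypy]"` still pins and installs it.
import importlib.util

print("mypy available:", importlib.util.find_spec("mypy") is not None)
print("django_stubs_ext available:", importlib.util.find_spec("django_stubs_ext") is not None)
```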
| [
{
"content": "#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"mypy>=1.0.0\",\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.5\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport os\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\n\ndef find_stub_files(name: str) -> List[str]:\n result = []\n for root, _dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result\n\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\ndependencies = [\n \"django\",\n \"django-stubs-ext>=4.2.5\",\n \"tomli; python_version < '3.11'\",\n # Types:\n \"typing-extensions\",\n \"types-pytz\",\n \"types-PyYAML\",\n]\n\n# Keep compatible-mypy major.minor version pinned to what we use in CI (requirements.txt)\nextras_require = {\n \"compatible-mypy\": [\"mypy~=1.6.0\"],\n}\n\nsetup(\n name=\"django-stubs\",\n version=\"4.2.5\",\n description=\"Mypy stubs for Django\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n license_files=[\"LICENSE.md\"],\n url=\"https://github.com/typeddjango/django-stubs\",\n author=\"Maksim Kurnikov\",\n author_email=\"[email protected]\",\n maintainer=\"Marti Raudsepp\",\n maintainer_email=\"[email protected]\",\n py_modules=[],\n python_requires=\">=3.8\",\n install_requires=dependencies,\n extras_require=extras_require,\n packages=[\"django-stubs\", *find_packages(exclude=[\"scripts\"])],\n package_data={\n \"django-stubs\": find_stub_files(\"django-stubs\"),\n \"mypy_django_plugin\": [\"py.typed\"],\n },\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Typing :: Typed\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n ],\n project_urls={\n \"Release notes\": \"https://github.com/typeddjango/django-stubs/releases\",\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index b5067b3fe..5973d2e63 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,6 @@ def find_stub_files(name: str) -> List[str]:
readme = f.read()
dependencies = [
- "mypy>=1.0.0",
"django",
"django-stubs-ext>=4.2.5",
"tomli; python_version < '3.11'",
|
optuna__optuna-4965 | Use `__future__.annotations` everywhere in the Optuna code base
### Motivation
Optuna drops Python 3.6 from v3.1, so we can use `__future__.annotations`. See [PEP 563](https://peps.python.org/pep-0563/), [PEP 584](https://peps.python.org/pep-0584/), [PEP 585](https://peps.python.org/pep-0585/), and [PEP 604](https://peps.python.org/pep-0604/) for more details. This issue proposes adopting it to simplify the code base.
### Suggestion
Use `__future__.annotations` in each file and simplify the type annotations. The list of classes whose type annotations can be simplified is [here](https://peps.python.org/pep-0585/#implementation). The files where `__future__.annotations` can be used are listed below; a minimal before/after sketch of the rewrite appears after the list. To reduce review costs and to encourage more contributors to work on it, please, as a rule, fix one file per PR.
- [x] optuna/_convert_positional_args.py
- [x] optuna/visualization/_optimization_history.py
- [x] optuna/visualization/_hypervolume_history.py
- [x] optuna/visualization/_edf.py
- [x] optuna/visualization/_pareto_front.py
- [x] optuna/visualization/matplotlib/_optimization_history.py
- [x] optuna/visualization/matplotlib/_hypervolume_history.py
- [x] optuna/visualization/matplotlib/_edf.py
- [x] optuna/visualization/matplotlib/_pareto_front.py
- [x] optuna/visualization/matplotlib/_contour.py
- [x] optuna/visualization/_utils.py
- [x] optuna/logging.py
- [ ] optuna/storages/_base.py
- [ ] optuna/storages/_cached_storage.py
- [ ] optuna/storages/__init__.py
- [ ] optuna/storages/_heartbeat.py
- [ ] optuna/storages/_in_memory.py
- [ ] optuna/storages/_rdb/models.py
- [ ] optuna/storages/_rdb/storage.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.c.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.d.py
- [ ] optuna/storages/_rdb/alembic/versions/v3.0.0.a.py
- [ ] optuna/storages/_journal/file.py
- [ ] optuna/storages/_journal/redis.py
- [ ] optuna/storages/_journal/storage.py
- [ ] optuna/storages/_journal/base.py
- [ ] optuna/study/_dataframe.py
- [ ] optuna/study/_optimize.py
- [ ] optuna/study/_tell.py
- [ ] optuna/study/_multi_objective.py
- [ ] optuna/study/_frozen.py
- [ ] optuna/study/study.py
- [ ] optuna/study/_study_summary.py
- [ ] optuna/search_space/group_decomposed.py
- [ ] optuna/search_space/intersection.py
- [ ] optuna/_typing.py
- [ ] optuna/_deprecated.py
- [ ] optuna/pruners/_hyperband.py
- [ ] optuna/pruners/_patient.py
- [ ] optuna/pruners/_successive_halving.py
- [ ] optuna/pruners/_percentile.py
- [ ] optuna/pruners/_threshold.py
- [ ] optuna/trial/_base.py
- [ ] optuna/trial/_fixed.py
- [ ] optuna/trial/_trial.py
- [ ] optuna/trial/_frozen.py
- [ ] optuna/integration/cma.py
- [ ] optuna/integration/shap.py
- [ ] optuna/integration/lightgbm.py
- [ ] optuna/integration/pytorch_distributed.py
- [ ] optuna/integration/_lightgbm_tuner/optimize.py
- [ ] optuna/integration/_lightgbm_tuner/alias.py
- [ ] optuna/integration/mlflow.py
- [ ] optuna/integration/wandb.py
- [ ] optuna/integration/catboost.py
- [ ] optuna/integration/skopt.py
- [ ] optuna/integration/botorch.py
- [ ] optuna/integration/dask.py
- [x] optuna/integration/sklearn.py
- [ ] optuna/integration/tensorboard.py
- [ ] optuna/terminator/callback.py
- [ ] optuna/terminator/terminator.py
- [ ] optuna/terminator/improvement/_preprocessing.py
- [ ] optuna/terminator/improvement/gp/botorch.py
- [ ] optuna/terminator/improvement/gp/base.py
- [ ] optuna/terminator/improvement/evaluator.py
- [ ] optuna/importance/_base.py
- [ ] optuna/importance/_mean_decrease_impurity.py
- [ ] optuna/importance/__init__.py
- [ ] optuna/importance/_fanova/_fanova.py
- [ ] optuna/importance/_fanova/_evaluator.py
- [ ] optuna/importance/_fanova/_tree.py
- [ ] optuna/_imports.py
- [ ] optuna/testing/tempfile_pool.py
- [ ] optuna/testing/threading.py
- [ ] optuna/testing/distributions.py
- [ ] optuna/testing/samplers.py
- [ ] optuna/testing/storages.py
- [ ] optuna/distributions.py
- [ ] optuna/cli.py
- [ ] optuna/multi_objective/visualization/_pareto_front.py
- [ ] optuna/multi_objective/trial.py
- [ ] optuna/multi_objective/samplers/_base.py
- [ ] optuna/multi_objective/samplers/_nsga2.py
- [ ] optuna/multi_objective/samplers/_adapter.py
- [ ] optuna/multi_objective/samplers/_random.py
- [ ] optuna/multi_objective/samplers/_motpe.py
- [ ] optuna/multi_objective/study.py
- [ ] optuna/_experimental.py
- [ ] optuna/samplers/_base.py
- [ ] optuna/samplers/nsgaii/_crossovers/_undx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_spx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_sbx.py
- [ ] optuna/samplers/nsgaii/_crossovers/_vsbx.py
- [ ] optuna/samplers/nsgaii/_sampler.py
- [ ] optuna/samplers/nsgaii/_crossover.py
- [ ] optuna/samplers/_search_space/intersection.py
- [ ] optuna/samplers/_qmc.py
- [ ] optuna/samplers/_tpe/probability_distributions.py
- [ ] optuna/samplers/_tpe/_truncnorm.py
- [ ] optuna/samplers/_tpe/multi_objective_sampler.py
- [ ] optuna/samplers/_tpe/parzen_estimator.py
- [ ] optuna/samplers/_tpe/sampler.py
- [ ] optuna/samplers/_random.py
- [ ] optuna/samplers/_cmaes.py
- [ ] optuna/samplers/_partial_fixed.py
- [ ] optuna/samplers/_brute_force.py
- [ ] optuna/samplers/_nsgaiii.py
- [ ] optuna/samplers/_grid.py
- [ ] optuna/_hypervolume/wfg.py
- [ ] optuna/_hypervolume/hssp.py
- [ ] optuna/progress_bar.py
- [ ] optuna/_transform.py
- [ ] optuna/_callbacks.py
- [ ] tests/multi_objective_tests/test_study.py
- [ ] tests/multi_objective_tests/samplers_tests/test_motpe.py
- [ ] tests/multi_objective_tests/samplers_tests/test_nsga2.py
- [ ] tests/multi_objective_tests/test_trial.py
- [ ] tests/multi_objective_tests/visualization_tests/test_pareto_front.py
- [ ] tests/trial_tests/test_frozen.py
- [ ] tests/trial_tests/test_trials.py
- [ ] tests/trial_tests/test_trial.py
- [ ] tests/pruners_tests/test_percentile.py
- [ ] tests/pruners_tests/test_median.py
- [ ] tests/pruners_tests/test_patient.py
- [ ] tests/pruners_tests/test_successive_halving.py
- [ ] tests/study_tests/test_optimize.py
- [ ] tests/study_tests/test_study.py
- [ ] tests/hypervolume_tests/test_hssp.py
- [x] tests/integration_tests/test_skopt.py
- [x] tests/integration_tests/test_pytorch_lightning.py
- [ ] tests/integration_tests/test_shap.py
- [ ] tests/integration_tests/test_cma.py
- [ ] tests/integration_tests/test_pytorch_distributed.py
- [ ] tests/integration_tests/lightgbm_tuner_tests/test_optimize.py
- [ ] tests/integration_tests/lightgbm_tuner_tests/test_alias.py
- [ ] tests/integration_tests/test_botorch.py
- [ ] tests/integration_tests/test_mlflow.py
- [ ] tests/integration_tests/test_mxnet.py
- [ ] tests/integration_tests/test_wandb.py
- [ ] tests/importance_tests/fanova_tests/test_tree.py
- [ ] tests/importance_tests/test_mean_decrease_impurity.py
- [ ] tests/importance_tests/test_fanova.py
- [ ] tests/importance_tests/test_init.py
- [ ] tests/test_convert_positional_args.py
- [ ] tests/test_deprecated.py
- [ ] tests/storages_tests/test_journal.py
- [ ] tests/storages_tests/test_heartbeat.py
- [ ] tests/storages_tests/test_storages.py
- [ ] tests/storages_tests/rdb_tests/test_storage.py
- [ ] tests/storages_tests/rdb_tests/create_db.py
- [ ] tests/storages_tests/test_with_server.py
- [ ] tests/samplers_tests/test_grid.py
- [ ] tests/samplers_tests/tpe_tests/test_parzen_estimator.py
- [ ] tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py
- [ ] tests/samplers_tests/tpe_tests/test_sampler.py
- [ ] tests/samplers_tests/test_cmaes.py
- [ ] tests/samplers_tests/test_samplers.py
- [x] tests/samplers_tests/test_nsgaii.py
- [x] tests/samplers_tests/test_nsgaiii.py
- [ ] tests/samplers_tests/test_qmc.py
- [ ] tests/test_distributions.py
- [ ] tests/test_multi_objective.py
- [ ] tests/test_cli.py
- [ ] tests/visualization_tests/test_hypervolume_history.py
- [ ] tests/visualization_tests/test_pareto_front.py
- [ ] tests/terminator_tests/improvement_tests/test_evaluator.py
- [ ] benchmarks/kurobako/problems/wfg/transformation_functions.py
- [ ] benchmarks/bayesmark/report_bayesmark.py
- [ ] benchmarks/bayesmark/optuna_optimizer.py
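
For contributors picking up a file from the list, here is a minimal before/after sketch of the rewrite this issue asks for (the function is illustrative, not taken from Optuna):

```python
from __future__ import annotations

# Before: from typing import Dict, List, Optional
# def f(x: Optional[int]) -> Dict[str, List[int]]: ...

# After: PEP 585 builtins and PEP 604 unions, safe on Python 3.7+ because
# PEP 563 keeps annotations as strings until (if ever) they are evaluated.
def f(x: int | None) -> dict[str, list[int]]:
    return {"xs": [] if x is None else [x]}


print(f(3))  # {'xs': [3]}
```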
### Additional context (optional)
The above list is generated by the following script.
<details>
<summary>script</summary>
```python
import os
import pathlib

PATTERNS = [
    "from typing import Union",
    "from typing import Optional",
    "from typing import Tuple",
    "from typing import List",
    "from typing import Dict",
    "from typing import Set",
    "from typing import FrozenSet",
    "from typing import Type",
    "from typing import Sequence",
]


def get_filenames_to_be_simplified(dir_path):
    ret = []
    for f in os.listdir(dir_path):
        file_path = os.path.join(dir_path, f)
        if not os.path.isfile(file_path):
            ret.extend(get_filenames_to_be_simplified(file_path))
        else:
            try:
                with open(file_path) as fd:
                    contents = fd.read()
                if any(s in contents for s in PATTERNS):
                    ret.append(str(file_path))
            except UnicodeDecodeError:
                # Skip binary or non-UTF-8 files.
                pass
    return ret


def main():
    dirs = ["optuna", "tests", "benchmarks"]
    for dir_name in dirs:
        filenames = get_filenames_to_be_simplified(pathlib.Path(dir_name))
        for filename in filenames:
            print(f"- [ ] {filename}")


if __name__ == "__main__":
    main()
```
</details>
| [
{
"content": "from __future__ import annotations\n\nfrom typing import NamedTuple\nfrom typing import Sequence\n\nimport numpy as np\n\nfrom optuna._experimental import experimental_func\nfrom optuna._hypervolume import WFG\nfrom optuna.logging import get_logger\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _dominates\nfrom optuna.study._study_direction import StudyDirection\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = get_logger(__name__)\n\n\nclass _HypervolumeHistoryInfo(NamedTuple):\n trial_numbers: list[int]\n values: list[float]\n\n\n@experimental_func(\"3.3.0\")\ndef plot_hypervolume_history(\n study: Study,\n reference_point: Sequence[float],\n) -> \"go.Figure\":\n \"\"\"Plot hypervolume history of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot optimization history.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n reference_point=[100., 50.]\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\n The number of objectives must be 2 or more.\n\n reference_point:\n A reference point to use for hypervolume computation.\n The dimension of the reference point must be the same as the number of objectives.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n\n if not study._is_multi_objective():\n raise ValueError(\n \"Study must be multi-objective. 
For single-objective optimization, \"\n \"please use plot_optimization_history instead.\"\n )\n\n if len(reference_point) != len(study.directions):\n raise ValueError(\n \"The dimension of the reference point must be the same as the number of objectives.\"\n )\n\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\n return _get_hypervolume_history_plot(info)\n\n\ndef _get_hypervolume_history_plot(\n info: _HypervolumeHistoryInfo,\n) -> \"go.Figure\":\n layout = go.Layout(\n title=\"Hypervolume History Plot\",\n xaxis={\"title\": \"Trial\"},\n yaxis={\"title\": \"Hypervolume\"},\n )\n\n data = go.Scatter(\n x=info.trial_numbers,\n y=info.values,\n mode=\"lines+markers\",\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_hypervolume_history_info(\n study: Study,\n reference_point: np.ndarray,\n) -> _HypervolumeHistoryInfo:\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\n\n if len(completed_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n # Our hypervolume computation module assumes that all objectives are minimized.\n # Here we transform the objective values and the reference point.\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\n minimization_reference_point = signs * reference_point\n\n # Only feasible trials are considered in hypervolume computation.\n trial_numbers = []\n values = []\n best_trials: list[FrozenTrial] = []\n hypervolume = 0.0\n for trial in completed_trials:\n trial_numbers.append(trial.number)\n\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\n if has_constraints:\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\n if any(map(lambda x: x > 0.0, constraints_values)):\n # The trial is infeasible.\n values.append(hypervolume)\n continue\n\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\n # The trial is not on the Pareto front.\n values.append(hypervolume)\n continue\n\n best_trials = list(\n filter(lambda t: not _dominates(trial, t, study.directions), best_trials)\n ) + [trial]\n\n solution_set = np.asarray(\n list(\n filter(\n lambda v: (v <= minimization_reference_point).all(),\n [signs * trial.values for trial in best_trials],\n )\n )\n )\n if solution_set.size > 0:\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\n values.append(hypervolume)\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any feasible trials.\")\n\n return _HypervolumeHistoryInfo(trial_numbers, values)\n",
"path": "optuna/visualization/_hypervolume_history.py"
}
] | [
{
"content": "from __future__ import annotations\n\nfrom collections.abc import Sequence\nfrom typing import NamedTuple\n\nimport numpy as np\n\nfrom optuna._experimental import experimental_func\nfrom optuna._hypervolume import WFG\nfrom optuna.logging import get_logger\nfrom optuna.samplers._base import _CONSTRAINTS_KEY\nfrom optuna.study import Study\nfrom optuna.study._multi_objective import _dominates\nfrom optuna.study._study_direction import StudyDirection\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n\n_logger = get_logger(__name__)\n\n\nclass _HypervolumeHistoryInfo(NamedTuple):\n trial_numbers: list[int]\n values: list[float]\n\n\n@experimental_func(\"3.3.0\")\ndef plot_hypervolume_history(\n study: Study,\n reference_point: Sequence[float],\n) -> \"go.Figure\":\n \"\"\"Plot hypervolume history of all trials in a study.\n\n Example:\n\n The following code snippet shows how to plot optimization history.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", 0, 5)\n y = trial.suggest_float(\"y\", 0, 3)\n\n v0 = 4 * x ** 2 + 4 * y ** 2\n v1 = (x - 5) ** 2 + (y - 5) ** 2\n return v0, v1\n\n\n study = optuna.create_study(directions=[\"minimize\", \"minimize\"])\n study.optimize(objective, n_trials=50)\n\n reference_point=[100., 50.]\n fig = optuna.visualization.plot_hypervolume_history(study, reference_point)\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their hypervolumes.\n The number of objectives must be 2 or more.\n\n reference_point:\n A reference point to use for hypervolume computation.\n The dimension of the reference point must be the same as the number of objectives.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n\n if not study._is_multi_objective():\n raise ValueError(\n \"Study must be multi-objective. 
For single-objective optimization, \"\n \"please use plot_optimization_history instead.\"\n )\n\n if len(reference_point) != len(study.directions):\n raise ValueError(\n \"The dimension of the reference point must be the same as the number of objectives.\"\n )\n\n info = _get_hypervolume_history_info(study, np.asarray(reference_point, dtype=np.float64))\n return _get_hypervolume_history_plot(info)\n\n\ndef _get_hypervolume_history_plot(\n info: _HypervolumeHistoryInfo,\n) -> \"go.Figure\":\n layout = go.Layout(\n title=\"Hypervolume History Plot\",\n xaxis={\"title\": \"Trial\"},\n yaxis={\"title\": \"Hypervolume\"},\n )\n\n data = go.Scatter(\n x=info.trial_numbers,\n y=info.values,\n mode=\"lines+markers\",\n )\n return go.Figure(data=data, layout=layout)\n\n\ndef _get_hypervolume_history_info(\n study: Study,\n reference_point: np.ndarray,\n) -> _HypervolumeHistoryInfo:\n completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))\n\n if len(completed_trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n\n # Our hypervolume computation module assumes that all objectives are minimized.\n # Here we transform the objective values and the reference point.\n signs = np.asarray([1 if d == StudyDirection.MINIMIZE else -1 for d in study.directions])\n minimization_reference_point = signs * reference_point\n\n # Only feasible trials are considered in hypervolume computation.\n trial_numbers = []\n values = []\n best_trials: list[FrozenTrial] = []\n hypervolume = 0.0\n for trial in completed_trials:\n trial_numbers.append(trial.number)\n\n has_constraints = _CONSTRAINTS_KEY in trial.system_attrs\n if has_constraints:\n constraints_values = trial.system_attrs[_CONSTRAINTS_KEY]\n if any(map(lambda x: x > 0.0, constraints_values)):\n # The trial is infeasible.\n values.append(hypervolume)\n continue\n\n if any(map(lambda t: _dominates(t, trial, study.directions), best_trials)):\n # The trial is not on the Pareto front.\n values.append(hypervolume)\n continue\n\n best_trials = list(\n filter(lambda t: not _dominates(trial, t, study.directions), best_trials)\n ) + [trial]\n\n solution_set = np.asarray(\n list(\n filter(\n lambda v: (v <= minimization_reference_point).all(),\n [signs * trial.values for trial in best_trials],\n )\n )\n )\n if solution_set.size > 0:\n hypervolume = WFG().compute(solution_set, minimization_reference_point)\n values.append(hypervolume)\n\n if len(best_trials) == 0:\n _logger.warning(\"Your study does not have any feasible trials.\")\n\n return _HypervolumeHistoryInfo(trial_numbers, values)\n",
"path": "optuna/visualization/_hypervolume_history.py"
}
] | diff --git a/optuna/visualization/_hypervolume_history.py b/optuna/visualization/_hypervolume_history.py
index c1fff4b877..39e6c23e53 100644
--- a/optuna/visualization/_hypervolume_history.py
+++ b/optuna/visualization/_hypervolume_history.py
@@ -1,7 +1,7 @@
from __future__ import annotations
+from collections.abc import Sequence
from typing import NamedTuple
-from typing import Sequence
import numpy as np
|
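A side note on the diff above: `Sequence` moves from `typing` to `collections.abc` because the `collections.abc` generics are the PEP 585 replacements for their `typing` aliases, and under `from __future__ import annotations` the annotation is never evaluated at runtime anyway. A small illustration (the function name is made up):

```python
from __future__ import annotations

from collections.abc import Sequence


def plot_hv(reference_point: Sequence[float]) -> None:
    # The annotation is a plain string under PEP 563, so it costs nothing at
    # runtime; collections.abc.Sequence is also subscriptable on 3.9+ (PEP 585).
    print(list(reference_point))


plot_hv((100.0, 50.0))
```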
ansible__ansible-modules-core-3859 | Bug in regex checker for azure_rm_virtualnetwork
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
azure_rm_virtualnetwork
##### ANSIBLE VERSION
2.2.0
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
Ubuntu 14.04
##### SUMMARY
azure_rm_virtualnetwork contains code to check the validity of the "name" parameter (https://github.com/ansible/ansible-modules-core/blob/devel/cloud/azure/azure_rm_virtualnetwork.py#L148). That regex does not take into account that the "." character is valid, as long as it is neither at the start nor the end of the name string.
##### STEPS TO REPRODUCE
```
- name: Create virtual network
azure_rm_virtualnetwork:
name: My.Sweet.Network
```
##### EXPECTED RESULTS
The network should get created, since it has a valid name
##### ACTUAL RESULTS
msg": "Parameter error: name must begin with a letter or number, end with a letter, number or underscore and may contain only letters, numbers, periods, underscores or hyphens."}
| [
{
"content": "#!/usr/bin/python\n#\n# Copyright (c) 2016 Matt Davis, <[email protected]>\n# Chris Houseknecht, <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_virtualnetwork\nversion_added: \"2.1\"\nshort_description: Manage Azure virtual networks.\ndescription:\n - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges\n and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.\noptions:\n resource_group:\n description:\n - name of resource group.\n required: true\n address_prefixes_cidr:\n description:\n - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating\n a new virtual network or using purge_address_prefixes.\n aliases:\n - address_prefixes\n default: null\n required: false\n dns_servers:\n description:\n - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated\n as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the\n specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to\n default Azure servers.\n default: null\n required: false\n location:\n description:\n - Valid azure location. Defaults to location of the resource group.\n default: resource_group location\n required: false\n name:\n description:\n - name of the virtual network.\n required: true\n purge_address_prefixes:\n description:\n - Use with state present to remove any existing address_prefixes.\n default: false\n purge_dns_servers:\n description:\n - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually\n exclusive with dns_servers.\n default: false\n required: false\n state:\n description:\n - Assert the state of the virtual network. 
Use 'present' to create or update and\n 'absent' to delete.\n default: present\n choices:\n - absent\n - present\n required: false\n\nextends_documentation_fragment:\n - azure\n - azure_tags\n\nauthor:\n - \"Chris Houseknecht (@chouseknecht)\"\n - \"Matt Davis (@nitzmahone)\"\n\n'''\n\nEXAMPLES = '''\n - name: Create a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n address_prefixes_cidr:\n - \"10.1.0.0/16\"\n - \"172.100.0.0/16\"\n dns_servers:\n - \"127.0.0.1\"\n - \"127.0.0.2\"\n tags:\n testing: testing\n delete: on-exit\n\n - name: Delete a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n state: absent\n'''\nRETURN = '''\nstate:\n description: Current state of the virtual network.\n returned: always\n type: dict\n sample: {\n \"address_prefixes\": [\n \"10.1.0.0/16\",\n \"172.100.0.0/16\"\n ],\n \"dns_servers\": [\n \"127.0.0.1\",\n \"127.0.0.3\"\n ],\n \"etag\": 'W/\"0712e87c-f02f-4bb3-8b9e-2da0390a3886\"',\n \"id\": \"/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network\",\n \"location\": \"eastus\",\n \"name\": \"my_test_network\",\n \"provisioning_state\": \"Succeeded\",\n \"tags\": null,\n \"type\": \"Microsoft.Network/virtualNetworks\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.azure_rm_common import *\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nNAME_PATTERN = re.compile(r\"^[a-zA-Z0-9_]{1,61}[a-z0-9_]$\")\n\n\ndef virtual_network_to_dict(vnet):\n '''\n Convert a virtual network object to a dict.\n :param vnet: VirtualNet object\n :return: dict\n '''\n results = dict(\n id=vnet.id,\n name=vnet.name,\n location=vnet.location,\n type=vnet.type,\n tags=vnet.tags,\n provisioning_state=vnet.provisioning_state,\n etag=vnet.etag\n )\n if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n results['dns_servers'] = []\n for server in vnet.dhcp_options.dns_servers:\n results['dns_servers'].append(server)\n if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:\n results['address_prefixes'] = []\n for space in vnet.address_space.address_prefixes:\n results['address_prefixes'].append(space)\n return results\n\n\nclass AzureRMVirtualNetwork(AzureRMModuleBase):\n\n def __init__(self):\n\n self.module_arg_spec = dict(\n resource_group=dict(type='str', required=True),\n name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n location=dict(type='str'),\n address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),\n dns_servers=dict(type='list',),\n purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),\n purge_dns_servers=dict(type='bool', default=False),\n )\n\n mutually_exclusive = [\n ('dns_servers', 'purge_dns_servers')\n ]\n\n required_if = [\n ('purge_address_prefixes', True, ['address_prefixes_cidr'])\n ]\n\n self.resource_group = None\n self.name = None\n self.state = None\n self.location = None\n self.address_prefixes_cidr = None\n self.purge_address_prefixes = None\n self.dns_servers = None\n self.purge_dns_servers = None\n\n self.results=dict(\n changed=False,\n state=dict()\n )\n\n super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,\n mutually_exclusive=mutually_exclusive,\n 
required_if=required_if,\n supports_check_mode=True)\n\n def exec_module(self, **kwargs):\n\n for key in self.module_arg_spec.keys() + ['tags']:\n setattr(self, key, kwargs[key])\n\n self.results['check_mode'] = self.check_mode\n\n resource_group = self.get_resource_group(self.resource_group)\n if not self.location:\n # Set default location\n self.location = resource_group.location\n\n if not NAME_PATTERN.match(self.name):\n self.fail(\"Parameter error: name must begin with a letter or number, end with a letter, number \"\n \"or underscore and may contain only letters, numbers, periods, underscores or hyphens.\")\n\n if self.state == 'present' and self.purge_address_prefixes:\n for prefix in self.address_prefixes_cidr:\n if not CIDR_PATTERN.match(prefix):\n self.fail(\"Parameter error: invalid address prefix value {0}\".format(prefix))\n\n if self.dns_servers and len(self.dns_servers) > 2:\n self.fail(\"Parameter error: You can provide a maximum of 2 DNS servers.\")\n\n changed = False\n results = dict()\n\n try:\n self.log('Fetching vnet {0}'.format(self.name))\n vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)\n\n results = virtual_network_to_dict(vnet)\n self.log('Vnet exists {0}'.format(self.name))\n self.log(results, pretty_print=True)\n self.check_provisioning_state(vnet, self.state)\n\n if self.state == 'present':\n if self.address_prefixes_cidr:\n existing_address_prefix_set = set(vnet.address_space.address_prefixes)\n requested_address_prefix_set = set(self.address_prefixes_cidr)\n missing_prefixes = requested_address_prefix_set - existing_address_prefix_set\n extra_prefixes = existing_address_prefix_set - requested_address_prefix_set\n if len(missing_prefixes) > 0:\n self.log('CHANGED: there are missing address_prefixes')\n changed = True\n if not self.purge_address_prefixes:\n # add the missing prefixes\n for prefix in missing_prefixes:\n results['address_prefixes'].append(prefix)\n\n if len(extra_prefixes) > 0 and self.purge_address_prefixes:\n self.log('CHANGED: there are address_prefixes to purge')\n changed = True\n # replace existing address prefixes with requested set\n results['address_prefixes'] = self.address_prefixes_cidr\n\n update_tags, results['tags'] = self.update_tags(results['tags'])\n if update_tags:\n changed = True\n\n if self.dns_servers:\n existing_dns_set = set(vnet.dhcp_options.dns_servers)\n requested_dns_set = set(self.dns_servers)\n if existing_dns_set != requested_dns_set:\n self.log('CHANGED: replacing DNS servers')\n changed = True\n results['dns_servers'] = self.dns_servers\n\n if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n self.log('CHANGED: purging existing DNS servers')\n changed = True\n results['dns_servers'] = []\n elif self.state == 'absent':\n self.log(\"CHANGED: vnet exists but requested state is 'absent'\")\n changed = True\n except CloudError:\n self.log('Vnet {0} does not exist'.format(self.name))\n if self.state == 'present':\n self.log(\"CHANGED: vnet {0} does not exist but requested state is 'present'\".format(self.name))\n changed = True\n\n self.results['changed'] = changed\n self.results['state'] = results\n\n if self.check_mode:\n return self.results\n\n if changed:\n if self.state == 'present':\n if not results:\n # create a new virtual network\n self.log(\"Create virtual network {0}\".format(self.name))\n if not self.address_prefixes_cidr:\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\n vnet = 
VirtualNetwork(\n location=self.location,\n address_space=AddressSpace(\n address_prefixes=self.address_prefixes_cidr\n )\n )\n if self.dns_servers:\n vnet.dhcp_options = DhcpOptions(\n dns_servers=self.dns_servers\n )\n if self.tags:\n vnet.tags = self.tags\n self.results['state'] = self.create_or_update_vnet(vnet)\n else:\n # update existing virtual network\n self.log(\"Update virtual network {0}\".format(self.name))\n vnet = VirtualNetwork(\n location=results['location'],\n address_space=AddressSpace(\n address_prefixes=results['address_prefixes']\n ),\n tags=results['tags']\n )\n if results.get('dns_servers'):\n vnet.dhcp_options = DhcpOptions(\n dns_servers=results['dns_servers']\n )\n self.results['state'] = self.create_or_update_vnet(vnet)\n elif self.state == 'absent':\n self.delete_virtual_network()\n self.results['state']['status'] = 'Deleted'\n\n\n return self.results\n\n def create_or_update_vnet(self, vnet):\n try:\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\n new_vnet = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error creating or updating virtual network {0} - {1}\".format(self.name, str(exc)))\n return virtual_network_to_dict(new_vnet)\n\n def delete_virtual_network(self):\n try:\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\n result = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error deleting virtual network {0} - {1}\".format(self.name, str(exc)))\n return result\n\n\ndef main():\n AzureRMVirtualNetwork()\n\nif __name__ == '__main__':\n main()\n\n",
"path": "cloud/azure/azure_rm_virtualnetwork.py"
}
] | [
{
"content": "#!/usr/bin/python\n#\n# Copyright (c) 2016 Matt Davis, <[email protected]>\n# Chris Houseknecht, <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_virtualnetwork\nversion_added: \"2.1\"\nshort_description: Manage Azure virtual networks.\ndescription:\n - Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges\n and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.\noptions:\n resource_group:\n description:\n - name of resource group.\n required: true\n address_prefixes_cidr:\n description:\n - List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating\n a new virtual network or using purge_address_prefixes.\n aliases:\n - address_prefixes\n default: null\n required: false\n dns_servers:\n description:\n - Custom list of DNS servers. Maximum length of two. The first server in the list will be treated\n as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the\n specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to\n default Azure servers.\n default: null\n required: false\n location:\n description:\n - Valid azure location. Defaults to location of the resource group.\n default: resource_group location\n required: false\n name:\n description:\n - name of the virtual network.\n required: true\n purge_address_prefixes:\n description:\n - Use with state present to remove any existing address_prefixes.\n default: false\n purge_dns_servers:\n description:\n - Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually\n exclusive with dns_servers.\n default: false\n required: false\n state:\n description:\n - Assert the state of the virtual network. 
Use 'present' to create or update and\n 'absent' to delete.\n default: present\n choices:\n - absent\n - present\n required: false\n\nextends_documentation_fragment:\n - azure\n - azure_tags\n\nauthor:\n - \"Chris Houseknecht (@chouseknecht)\"\n - \"Matt Davis (@nitzmahone)\"\n\n'''\n\nEXAMPLES = '''\n - name: Create a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n address_prefixes_cidr:\n - \"10.1.0.0/16\"\n - \"172.100.0.0/16\"\n dns_servers:\n - \"127.0.0.1\"\n - \"127.0.0.2\"\n tags:\n testing: testing\n delete: on-exit\n\n - name: Delete a virtual network\n azure_rm_virtualnetwork:\n name: foobar\n resource_group: Testing\n state: absent\n'''\nRETURN = '''\nstate:\n description: Current state of the virtual network.\n returned: always\n type: dict\n sample: {\n \"address_prefixes\": [\n \"10.1.0.0/16\",\n \"172.100.0.0/16\"\n ],\n \"dns_servers\": [\n \"127.0.0.1\",\n \"127.0.0.3\"\n ],\n \"etag\": 'W/\"0712e87c-f02f-4bb3-8b9e-2da0390a3886\"',\n \"id\": \"/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network\",\n \"location\": \"eastus\",\n \"name\": \"my_test_network\",\n \"provisioning_state\": \"Succeeded\",\n \"tags\": null,\n \"type\": \"Microsoft.Network/virtualNetworks\"\n }\n'''\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.azure_rm_common import *\n\ntry:\n from msrestazure.azure_exceptions import CloudError\n from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions\nexcept ImportError:\n # This is handled in azure_rm_common\n pass\n\n\nNAME_PATTERN = re.compile(r\"^[a-zA-Z0-9]+[a-zA-Z0-9\\._-]+[a-zA-Z0-9_]+$\")\n\n\ndef virtual_network_to_dict(vnet):\n '''\n Convert a virtual network object to a dict.\n :param vnet: VirtualNet object\n :return: dict\n '''\n results = dict(\n id=vnet.id,\n name=vnet.name,\n location=vnet.location,\n type=vnet.type,\n tags=vnet.tags,\n provisioning_state=vnet.provisioning_state,\n etag=vnet.etag\n )\n if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n results['dns_servers'] = []\n for server in vnet.dhcp_options.dns_servers:\n results['dns_servers'].append(server)\n if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:\n results['address_prefixes'] = []\n for space in vnet.address_space.address_prefixes:\n results['address_prefixes'].append(space)\n return results\n\n\nclass AzureRMVirtualNetwork(AzureRMModuleBase):\n\n def __init__(self):\n\n self.module_arg_spec = dict(\n resource_group=dict(type='str', required=True),\n name=dict(type='str', required=True),\n state=dict(type='str', default='present', choices=['present', 'absent']),\n location=dict(type='str'),\n address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),\n dns_servers=dict(type='list',),\n purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),\n purge_dns_servers=dict(type='bool', default=False),\n )\n\n mutually_exclusive = [\n ('dns_servers', 'purge_dns_servers')\n ]\n\n required_if = [\n ('purge_address_prefixes', True, ['address_prefixes_cidr'])\n ]\n\n self.resource_group = None\n self.name = None\n self.state = None\n self.location = None\n self.address_prefixes_cidr = None\n self.purge_address_prefixes = None\n self.dns_servers = None\n self.purge_dns_servers = None\n\n self.results=dict(\n changed=False,\n state=dict()\n )\n\n super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,\n mutually_exclusive=mutually_exclusive,\n 
required_if=required_if,\n supports_check_mode=True)\n\n def exec_module(self, **kwargs):\n\n for key in self.module_arg_spec.keys() + ['tags']:\n setattr(self, key, kwargs[key])\n\n self.results['check_mode'] = self.check_mode\n\n resource_group = self.get_resource_group(self.resource_group)\n if not self.location:\n # Set default location\n self.location = resource_group.location\n\n if not NAME_PATTERN.match(self.name):\n self.fail(\"Parameter error: name must begin with a letter or number, end with a letter, number \"\n \"or underscore and may contain only letters, numbers, periods, underscores or hyphens.\")\n\n if self.state == 'present' and self.purge_address_prefixes:\n for prefix in self.address_prefixes_cidr:\n if not CIDR_PATTERN.match(prefix):\n self.fail(\"Parameter error: invalid address prefix value {0}\".format(prefix))\n\n if self.dns_servers and len(self.dns_servers) > 2:\n self.fail(\"Parameter error: You can provide a maximum of 2 DNS servers.\")\n\n changed = False\n results = dict()\n\n try:\n self.log('Fetching vnet {0}'.format(self.name))\n vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)\n\n results = virtual_network_to_dict(vnet)\n self.log('Vnet exists {0}'.format(self.name))\n self.log(results, pretty_print=True)\n self.check_provisioning_state(vnet, self.state)\n\n if self.state == 'present':\n if self.address_prefixes_cidr:\n existing_address_prefix_set = set(vnet.address_space.address_prefixes)\n requested_address_prefix_set = set(self.address_prefixes_cidr)\n missing_prefixes = requested_address_prefix_set - existing_address_prefix_set\n extra_prefixes = existing_address_prefix_set - requested_address_prefix_set\n if len(missing_prefixes) > 0:\n self.log('CHANGED: there are missing address_prefixes')\n changed = True\n if not self.purge_address_prefixes:\n # add the missing prefixes\n for prefix in missing_prefixes:\n results['address_prefixes'].append(prefix)\n\n if len(extra_prefixes) > 0 and self.purge_address_prefixes:\n self.log('CHANGED: there are address_prefixes to purge')\n changed = True\n # replace existing address prefixes with requested set\n results['address_prefixes'] = self.address_prefixes_cidr\n\n update_tags, results['tags'] = self.update_tags(results['tags'])\n if update_tags:\n changed = True\n\n if self.dns_servers:\n existing_dns_set = set(vnet.dhcp_options.dns_servers)\n requested_dns_set = set(self.dns_servers)\n if existing_dns_set != requested_dns_set:\n self.log('CHANGED: replacing DNS servers')\n changed = True\n results['dns_servers'] = self.dns_servers\n\n if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:\n self.log('CHANGED: purging existing DNS servers')\n changed = True\n results['dns_servers'] = []\n elif self.state == 'absent':\n self.log(\"CHANGED: vnet exists but requested state is 'absent'\")\n changed = True\n except CloudError:\n self.log('Vnet {0} does not exist'.format(self.name))\n if self.state == 'present':\n self.log(\"CHANGED: vnet {0} does not exist but requested state is 'present'\".format(self.name))\n changed = True\n\n self.results['changed'] = changed\n self.results['state'] = results\n\n if self.check_mode:\n return self.results\n\n if changed:\n if self.state == 'present':\n if not results:\n # create a new virtual network\n self.log(\"Create virtual network {0}\".format(self.name))\n if not self.address_prefixes_cidr:\n self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')\n vnet = 
VirtualNetwork(\n location=self.location,\n address_space=AddressSpace(\n address_prefixes=self.address_prefixes_cidr\n )\n )\n if self.dns_servers:\n vnet.dhcp_options = DhcpOptions(\n dns_servers=self.dns_servers\n )\n if self.tags:\n vnet.tags = self.tags\n self.results['state'] = self.create_or_update_vnet(vnet)\n else:\n # update existing virtual network\n self.log(\"Update virtual network {0}\".format(self.name))\n vnet = VirtualNetwork(\n location=results['location'],\n address_space=AddressSpace(\n address_prefixes=results['address_prefixes']\n ),\n tags=results['tags']\n )\n if results.get('dns_servers'):\n vnet.dhcp_options = DhcpOptions(\n dns_servers=results['dns_servers']\n )\n self.results['state'] = self.create_or_update_vnet(vnet)\n elif self.state == 'absent':\n self.delete_virtual_network()\n self.results['state']['status'] = 'Deleted'\n\n\n return self.results\n\n def create_or_update_vnet(self, vnet):\n try:\n poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)\n new_vnet = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error creating or updating virtual network {0} - {1}\".format(self.name, str(exc)))\n return virtual_network_to_dict(new_vnet)\n\n def delete_virtual_network(self):\n try:\n poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)\n result = self.get_poller_result(poller)\n except Exception as exc:\n self.fail(\"Error deleting virtual network {0} - {1}\".format(self.name, str(exc)))\n return result\n\n\ndef main():\n AzureRMVirtualNetwork()\n\nif __name__ == '__main__':\n main()\n\n",
"path": "cloud/azure/azure_rm_virtualnetwork.py"
}
] | diff --git a/cloud/azure/azure_rm_virtualnetwork.py b/cloud/azure/azure_rm_virtualnetwork.py
index 9f1e7e61f23..d7bbdd00d85 100644
--- a/cloud/azure/azure_rm_virtualnetwork.py
+++ b/cloud/azure/azure_rm_virtualnetwork.py
@@ -145,7 +145,7 @@
pass
-NAME_PATTERN = re.compile(r"^[a-zA-Z0-9_]{1,61}[a-z0-9_]$")
+NAME_PATTERN = re.compile(r"^[a-zA-Z0-9]+[a-zA-Z0-9\._-]+[a-zA-Z0-9_]+$")
def virtual_network_to_dict(vnet):
|
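For context on the one-line fix above: a quick, self-contained check of the relaxed `NAME_PATTERN` from the diff. The sample names are illustrative assumptions, not taken from the module:

```python
# Sanity check of the updated vnet name pattern from the diff above.
import re

NAME_PATTERN = re.compile(r"^[a-zA-Z0-9]+[a-zA-Z0-9\._-]+[a-zA-Z0-9_]+$")

assert NAME_PATTERN.match("my_test_network")  # underscores allowed throughout
assert NAME_PATTERN.match("vnet-01.prod")     # dots and hyphens allowed in the middle
assert not NAME_PATTERN.match("-bad-start")   # must begin with a letter or number
assert not NAME_PATTERN.match("bad-end-")     # must end with a letter, number or underscore
```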
optuna__optuna-1882 | Remove the documentation for the `with_traceback` method of Optuna's exception classes
Currently, Optuna's exception classes include documentation for the `with_traceback` method, which is inherited from `Exception`. I don't think it is informative for readers, and it can be removed from the reference.

The following exception classes have the `with_traceback` method.
- [ ] `optuna.exceptions.CLIUsageError`
- [ ] `optuna.exceptions.OptunaError`
- [ ] `optuna.exceptions.TrialPruned`
- [ ] `optuna.exceptions.StorageInternalError`
- [ ] `optuna.exceptions.DuplicatedStudyError`
CC @keisuke-umezawa Please let me know if you have any comments.
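For reference, a minimal sketch of the Sphinx autodoc configuration that addresses this; it matches the `docs/source/conf.py` change applied in the diff below:

```python
# docs/source/conf.py -- tell autodoc to skip the inherited
# Exception.with_traceback globally instead of per exception class:
autodoc_default_options = {
    'members': True,
    'inherited-members': True,
    'exclude-members': 'with_traceback',
}
```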
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pkg_resources\n\nfrom sphinx_gallery.sorting import FileNameSortKey\n\n__version__ = pkg_resources.get_distribution('optuna').version\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Optuna'\ncopyright = '2018, Optuna Contributors.'\nauthor = 'Optuna Contributors.'\n\n# The short X.Y version\nversion = __version__\n# The full version, including alpha/beta/rc tags\nrelease = __version__\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'cliff.sphinxext',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'logo_only': True\n}\n\nhtml_favicon = '../image/favicon.ico'\n\nhtml_logo = '../image/optuna-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', 'plotly_figures']\nhtml_css_files = [\"css/custom.css\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optunadoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\n 'One line description of project.', 'Miscellaneous'),\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# -- Extension configuration -------------------------------------------------\nautosummary_generate = True\nautodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n}\n\nsphinx_gallery_conf = {\n 'examples_dirs': [\n '../../tutorial',\n ],\n 'gallery_dirs': [\n 'tutorial',\n ],\n 'within_subsection_order': FileNameSortKey,\n 'filename_pattern': r'/*\\.py',\n 'first_notebook_cell': None,\n}\n",
"path": "docs/source/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport pkg_resources\n\nfrom sphinx_gallery.sorting import FileNameSortKey\n\n__version__ = pkg_resources.get_distribution('optuna').version\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Optuna'\ncopyright = '2018, Optuna Contributors.'\nauthor = 'Optuna Contributors.'\n\n# The short X.Y version\nversion = __version__\n# The full version, including alpha/beta/rc tags\nrelease = __version__\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.githubpages',\n 'cliff.sphinxext',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'logo_only': True\n}\n\nhtml_favicon = '../image/favicon.ico'\n\nhtml_logo = '../image/optuna-logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', 'plotly_figures']\nhtml_css_files = [\"css/custom.css\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Optunadoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Optuna.tex', 'Optuna Documentation', 'Optuna Contributors.', 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'optuna', 'Optuna Documentation', [author], 1)]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Optuna', 'Optuna Documentation', author, 'Optuna',\n 'One line description of project.', 'Miscellaneous'),\n]\n\nintersphinx_mapping = {'python': ('https://docs.python.org/3', None)}\n\n# -- Extension configuration -------------------------------------------------\nautosummary_generate = True\nautodoc_default_options = {\n 'members': True,\n 'inherited-members': True,\n 'exclude-members': 'with_traceback',\n}\n\nsphinx_gallery_conf = {\n 'examples_dirs': [\n '../../tutorial',\n ],\n 'gallery_dirs': [\n 'tutorial',\n ],\n 'within_subsection_order': FileNameSortKey,\n 'filename_pattern': r'/*\\.py',\n 'first_notebook_cell': None,\n}\n",
"path": "docs/source/conf.py"
}
] | diff --git a/docs/source/conf.py b/docs/source/conf.py
index 16d954afae..05540085a2 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -172,6 +172,7 @@
autodoc_default_options = {
'members': True,
'inherited-members': True,
+ 'exclude-members': 'with_traceback',
}
sphinx_gallery_conf = {
|
bokeh__bokeh-1948 | `publishing` example from the `embed` directory fails
With an error message that is not particularly helpful. Using bokeh 0.8.
```
➜ embed python publishing.py
Using saved session configuration for http://localhost:5006/
To override, pass 'load_from_config=False' to Session
INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost
Traceback (most recent call last):
  File "publishing.py", line 11, in <module>
    Session().register('testuser', 'testpassword')
  File "/Users/nicolas/anaconda/lib/python2.7/site-packages/bokeh/session.py", line 208, in register
    raise RuntimeError("Unknown Error")
RuntimeError: Unknown Error
```
| [
{
"content": "# The plot server must be running\n# Go to http://localhost:5006/bokeh to view this plot\n\nimport time\n\nimport numpy as np\n\nfrom bokeh.plotting import *\nfrom bokeh.session import Session\nfrom bokeh import embed\nSession().register('testuser', 'testpassword')\nN = 80\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\noutput_server(\"line_animate\")\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select\"\np = figure(tools=TOOLS)\np.circle(x, y, color=\"#3333ee\", name=\"sin\")\npush()\nrenderer = p.select(dict(name=\"sin\"))\nds = renderer[0].data_source\ncursession().publish()\ntag = embed.autoload_server(p, cursession(), public=True)\nhtml = \"\"\"\n<html>\n<head></head>\n<body>\n%s\n</body>\n</html>\n\"\"\"\nhtml = html % (tag)\nwith open(\"publishing.html\", \"w+\") as f:\n f.write(html)\nwhile True:\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\n ds.data[\"y\"] = y * i\n cursession().store_objects(ds)\n time.sleep(1.0)\n",
"path": "examples/embed/publishing.py"
}
] | [
{
"content": "# The plot server must be running\n# The server must run with --multi-user for this example to work\n# Go to http://localhost:5006/bokeh to view this plot\n\nimport time\n\nimport numpy as np\n\nfrom bokeh.plotting import *\nfrom bokeh.session import Session\nfrom bokeh import embed\nSession().register('testuser', 'testpassword')\nN = 80\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\noutput_server(\"line_animate\")\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select\"\np = figure(tools=TOOLS)\np.circle(x, y, color=\"#3333ee\", name=\"sin\")\npush()\nrenderer = p.select(dict(name=\"sin\"))\nds = renderer[0].data_source\ncursession().publish()\ntag = embed.autoload_server(p, cursession(), public=True)\nhtml = \"\"\"\n<html>\n<head></head>\n<body>\n%s\n</body>\n</html>\n\"\"\"\nhtml = html % (tag)\nwith open(\"publishing.html\", \"w+\") as f:\n f.write(html)\nwhile True:\n for i in np.hstack((np.linspace(1, -1, 100), np.linspace(-1, 1, 100))):\n ds.data[\"y\"] = y * i\n cursession().store_objects(ds)\n time.sleep(1.0)\n",
"path": "examples/embed/publishing.py"
}
] | diff --git a/examples/embed/README.md b/examples/embed/README.md
index f4e77c448c9..9b98f24661e 100644
--- a/examples/embed/README.md
+++ b/examples/embed/README.md
@@ -2,7 +2,11 @@ To try these example you first have to start the bokeh-server, ie.,
bokeh-server --backend=memory
-and then run the examples:
+Some examples (e.g. publishing) need the server to run in multi-user mode:
+
+ bokeh-server -m --backend=memory
+
+Then run the examples:
python widget.py
@@ -10,6 +14,7 @@ or
python animated.py
+
To view them, start a web server in this directory, for instance, the server
built into python:
diff --git a/examples/embed/publishing.py b/examples/embed/publishing.py
index a099eda61ea..d0dcd17888b 100644
--- a/examples/embed/publishing.py
+++ b/examples/embed/publishing.py
@@ -1,4 +1,5 @@
# The plot server must be running
+# The server must run with --multi-user for this example to work
# Go to http://localhost:5006/bokeh to view this plot
import time
|
mozilla__bugbug-3941 | [model:accessibility] Add the model to `http_service` and `data_pipeline`
Depends on merging: #3775
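For the `http_service` half of the task, the change amounts to one registry entry; a trimmed sketch (the full file is in the record below):

```python
# http_service/bugbug_http/models.py -- the HTTP service loads "<name>model"
# for each entry here (via MODEL_CACHE), so registering the model is one line:
MODELS_NAMES = [
    "defectenhancementtask",
    "component",
    # ... other existing models ...
    "testgroupselect",
    "accessibility",  # new: exposes the accessibility model over the HTTP service
]
```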
| [
{
"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n",
"path": "http_service/bugbug_http/models.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nfrom datetime import timedelta\nfrom functools import lru_cache\nfrom typing import Sequence\n\nimport orjson\nimport requests\nimport zstandard\nfrom redis import Redis\n\nfrom bugbug import bugzilla, repository, test_scheduling\nfrom bugbug.github import Github\nfrom bugbug.model import Model\nfrom bugbug.models import testselect\nfrom bugbug.utils import get_hgmo_stack\nfrom bugbug_http.readthrough_cache import ReadthroughTTLCache\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS_NAMES = [\n \"defectenhancementtask\",\n \"component\",\n \"invalidcompatibilityreport\",\n \"needsdiagnosis\",\n \"regression\",\n \"stepstoreproduce\",\n \"spambug\",\n \"testlabelselect\",\n \"testgroupselect\",\n \"accessibility\",\n]\n\nDEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week\nredis = Redis.from_url(os.environ.get(\"REDIS_URL\", \"redis://localhost/0\"))\n\nMODEL_CACHE: ReadthroughTTLCache[str, Model] = ReadthroughTTLCache(\n timedelta(hours=1), lambda m: Model.load(f\"{m}model\")\n)\nMODEL_CACHE.start_ttl_thread()\n\ncctx = zstandard.ZstdCompressor(level=10)\n\n\ndef setkey(key: str, value: bytes, compress: bool = False) -> None:\n LOGGER.debug(f\"Storing data at {key}: {value!r}\")\n if compress:\n value = cctx.compress(value)\n redis.set(key, value)\n redis.expire(key, DEFAULT_EXPIRATION_TTL)\n\n\ndef classify_bug(model_name: str, bug_ids: Sequence[int], bugzilla_token: str) -> str:\n from bugbug_http.app import JobInfo\n\n # This should be called in a process worker so it should be safe to set\n # the token here\n bug_ids_set = set(map(int, bug_ids))\n bugzilla.set_token(bugzilla_token)\n\n bugs = bugzilla.get(bug_ids)\n\n missing_bugs = bug_ids_set.difference(bugs.keys())\n\n for bug_id in missing_bugs:\n job = JobInfo(classify_bug, model_name, bug_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not bugs:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fails. 
What should we do here?\n probs = model.classify(list(bugs.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, bug_id in enumerate(bugs.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_bug, model_name, bug_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, bugs[bug_id][\"last_change_time\"].encode())\n\n return \"OK\"\n\n\ndef classify_issue(\n model_name: str, owner: str, repo: str, issue_nums: Sequence[int]\n) -> str:\n from bugbug_http.app import JobInfo\n\n github = Github(owner=owner, repo=repo)\n\n issue_ids_set = set(map(int, issue_nums))\n\n issues = {\n issue_num: github.fetch_issue_by_number(owner, repo, issue_num, True)\n for issue_num in issue_nums\n }\n\n missing_issues = issue_ids_set.difference(issues.keys())\n\n for issue_id in missing_issues:\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n\n # TODO: Find a better error format\n setkey(job.result_key, orjson.dumps({\"available\": False}))\n\n if not issues:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n\n # TODO: Classify could choke on a single bug which could make the whole\n # job to fail. What should we do here?\n probs = model.classify(list(issues.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, issue_id in enumerate(issues.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_issue, model_name, owner, repo, issue_id)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n # Save the bug last change\n setkey(job.change_time_key, issues[issue_id][\"updated_at\"].encode())\n\n return \"OK\"\n\n\ndef classify_broken_site_report(model_name: str, reports_data: list[dict]) -> str:\n from bugbug_http.app import JobInfo\n\n reports = {\n report[\"uuid\"]: {\"title\": report[\"title\"], \"body\": report[\"body\"]}\n for report in reports_data\n }\n\n if not reports:\n return \"NOK\"\n\n model = MODEL_CACHE.get(model_name)\n\n if not model:\n LOGGER.info(\"Missing model %r, aborting\" % model_name)\n return \"NOK\"\n\n model_extra_data = model.get_extra_data()\n probs = model.classify(list(reports.values()), True)\n indexes = probs.argmax(axis=-1)\n suggestions = model.le.inverse_transform(indexes)\n\n probs_list = probs.tolist()\n indexes_list = indexes.tolist()\n suggestions_list = suggestions.tolist()\n\n for i, report_uuid in enumerate(reports.keys()):\n data = {\n \"prob\": probs_list[i],\n \"index\": indexes_list[i],\n \"class\": suggestions_list[i],\n \"extra_data\": model_extra_data,\n }\n\n job = JobInfo(classify_broken_site_report, model_name, report_uuid)\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\n@lru_cache(maxsize=None)\ndef get_known_tasks() -> tuple[str, ...]:\n with open(\"known_tasks\", \"r\") as f:\n return tuple(line.strip() for line in 
f)\n\n\ndef schedule_tests(branch: str, rev: str) -> str:\n from bugbug_http import REPO_DIR\n from bugbug_http.app import JobInfo\n\n job = JobInfo(schedule_tests, branch, rev)\n LOGGER.info(\"Processing %s...\", job)\n\n # Pull the revision to the local repository\n LOGGER.info(\"Pulling commits from the remote repository...\")\n repository.pull(REPO_DIR, branch, rev)\n\n # Load the full stack of patches leading to that revision\n LOGGER.info(\"Loading commits to analyze using automationrelevance...\")\n try:\n revs = get_hgmo_stack(branch, rev)\n except requests.exceptions.RequestException:\n LOGGER.warning(f\"Push not found for {branch} @ {rev}!\")\n return \"NOK\"\n\n test_selection_threshold = float(\n os.environ.get(\"TEST_SELECTION_CONFIDENCE_THRESHOLD\", 0.5)\n )\n\n # On \"try\", consider commits from other branches too (see https://bugzilla.mozilla.org/show_bug.cgi?id=1790493).\n # On other repos, only consider \"tip\" commits (to exclude commits such as https://hg.mozilla.org/integration/autoland/rev/961f253985a4388008700a6a6fde80f4e17c0b4b).\n if branch == \"try\":\n repo_branch = None\n else:\n repo_branch = \"tip\"\n\n # Analyze patches.\n commits = repository.download_commits(\n REPO_DIR,\n revs=revs,\n branch=repo_branch,\n save=False,\n use_single_process=True,\n include_no_bug=True,\n )\n\n if len(commits) > 0:\n testlabelselect_model = MODEL_CACHE.get(\"testlabelselect\")\n testgroupselect_model = MODEL_CACHE.get(\"testgroupselect\")\n\n tasks = testlabelselect_model.select_tests(commits, test_selection_threshold)\n\n reduced = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.8), 1.0\n )\n\n reduced_higher = testselect.reduce_configs(\n set(t for t, c in tasks.items() if c >= 0.9), 1.0\n )\n\n groups = testgroupselect_model.select_tests(commits, test_selection_threshold)\n\n config_groups = testselect.select_configs(groups.keys(), 0.9)\n else:\n tasks = {}\n reduced = set()\n groups = {}\n config_groups = {}\n\n data = {\n \"tasks\": tasks,\n \"groups\": groups,\n \"config_groups\": config_groups,\n \"reduced_tasks\": {t: c for t, c in tasks.items() if t in reduced},\n \"reduced_tasks_higher\": {t: c for t, c in tasks.items() if t in reduced_higher},\n \"known_tasks\": get_known_tasks(),\n }\n setkey(job.result_key, orjson.dumps(data), compress=True)\n\n return \"OK\"\n\n\ndef get_config_specific_groups(config: str) -> str:\n from bugbug_http.app import JobInfo\n\n job = JobInfo(get_config_specific_groups, config)\n LOGGER.info(\"Processing %s...\", job)\n\n equivalence_sets = testselect._get_equivalence_sets(0.9)\n\n past_failures_data = test_scheduling.PastFailures(\"group\", True)\n\n setkey(\n job.result_key,\n orjson.dumps(\n [\n {\"name\": group}\n for group in past_failures_data.all_runnables\n if any(\n equivalence_set == {config}\n for equivalence_set in equivalence_sets[group]\n )\n ]\n ),\n compress=True,\n )\n\n return \"OK\"\n",
"path": "http_service/bugbug_http/models.py"
}
] | diff --git a/http_service/bugbug_http/models.py b/http_service/bugbug_http/models.py
index 6fb1247b65..3615050925 100644
--- a/http_service/bugbug_http/models.py
+++ b/http_service/bugbug_http/models.py
@@ -34,6 +34,7 @@
"spambug",
"testlabelselect",
"testgroupselect",
+ "accessibility",
]
DEFAULT_EXPIRATION_TTL = 7 * 24 * 3600 # A week
diff --git a/infra/data-pipeline.yml b/infra/data-pipeline.yml
index b1160932bb..c0ad93b0c6 100644
--- a/infra/data-pipeline.yml
+++ b/infra/data-pipeline.yml
@@ -1003,6 +1003,44 @@ tasks:
owner: [email protected]
source: ${repository}/raw/master/data-pipeline.yml
+ - ID: train-accessibility
+ created: { $fromNow: "" }
+ deadline: { $fromNow: "3 days" }
+ expires: { $fromNow: "1 year" }
+ provisionerId: proj-bugbug
+ workerType: compute-smaller
+ dependencies:
+ - bugs-retrieval
+ payload:
+ maxRunTime: 25200
+ image: mozilla/bugbug-base:${version}
+ command:
+ - bugbug-train
+ - accessibility
+
+ artifacts:
+ public/accessibilitymodel.tar.zst:
+ expires: { $fromNow: "1 month" }
+ path: /accessibilitymodel.tar.zst
+ type: file
+ public/metrics.json:
+ expires: { $fromNow: "1 year" }
+ path: /metrics.json
+ type: file
+
+ routes:
+ - [email protected]
+ - notify.irc-channel.#bugbug.on-failed
+ - index.project.bugbug.train_accessibility.${version}
+ - index.project.bugbug.train_accessibility.per_version.${version}.${year}.${month}.${day}.${hour}.${minute}.${second}
+ - index.project.bugbug.train_accessibility.per_date.${year}.${month}.${day}.${hour}.${minute}.${second}.${version}
+ - index.project.bugbug.train_accessibility.latest
+ metadata:
+ name: bugbug train accessibility model
+ description: bugbug train accessibility model
+ owner: [email protected]
+ source: ${repository}/raw/master/data-pipeline.yml
+
- ID: train-test-label-select
created: { $fromNow: "" }
deadline: { $fromNow: "5 days" }
@@ -1215,6 +1253,7 @@ tasks:
- train-test-group-select
- train-test-failure
- train-needsdiagnosis
+ - train-accessibility
payload:
maxRunTime: 3600
image: mozilla/bugbug-base:${version}
@@ -1250,6 +1289,7 @@ tasks:
- train-test-label-select
- train-test-group-select
- train-needsdiagnosis
+ - train-accessibility
payload:
capabilities:
privileged: true
|
getmoto__moto-698 | Unable to create a key with a trailing slash using OrdinaryCallingFormat
When using OrdinaryCallingFormat, it's not possible to create a key ending with a slash (e.g. when mimicking directory creation), since the trailing slash is stripped off when parsing the key name. I can't comment on S3's exact behaviour, but this at least differs from Ceph.
For example, the below fails as is, but works if the connection uses SubdomainCallingFormat instead.
```
import boto
import moto
import unittest


class TestCreatingKeyEndingWithSlash(unittest.TestCase):
    @moto.mock_s3
    def test_ordinary_calling_format(self):
        bucket_name = 'testbucket'
        key_name = 'key_ending_with_slash/'
        conn = boto.connect_s3('access_key', 'secret_key',
                               calling_format=boto.s3.connection.OrdinaryCallingFormat())
        bucket = conn.create_bucket(bucket_name)
        key = boto.s3.key.Key(bucket)
        key.key = key_name
        key.set_contents_from_string('')
        self.assertIn(key_name, [k.name for k in bucket.get_all_keys()])
```
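The culprit is `parse_key_name` in `moto/s3bucket_path/utils.py`; a small self-contained demonstration of the before/after behaviour (the sample path is illustrative):

```python
def parse_key_name(path):        # before: rstrip("/") drops the trailing slash
    return "/".join(path.rstrip("/").split("/")[2:])

def parse_key_name_fixed(path):  # after: leave the path untouched
    return "/".join(path.split("/")[2:])

path = "/testbucket/key_ending_with_slash/"
assert parse_key_name(path) == "key_ending_with_slash"         # slash silently lost
assert parse_key_name_fixed(path) == "key_ending_with_slash/"  # slash preserved
```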
| [
{
"content": "from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.rstrip(\"/\").split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n",
"path": "moto/s3bucket_path/utils.py"
}
] | [
{
"content": "from __future__ import unicode_literals\nfrom six.moves.urllib.parse import urlparse\n\n\ndef bucket_name_from_url(url):\n pth = urlparse(url).path.lstrip(\"/\")\n\n l = pth.lstrip(\"/\").split(\"/\")\n if len(l) == 0 or l[0] == \"\":\n return None\n return l[0]\n\n\ndef parse_key_name(path):\n return \"/\".join(path.split(\"/\")[2:])\n\n\ndef is_delete_keys(request, path, bucket_name):\n return (\n path == u'/' + bucket_name + u'/?delete' or\n path == u'/' + bucket_name + u'?delete' or\n (path == u'/' + bucket_name and\n getattr(request, \"query_string\", \"\") == \"delete\")\n )\n",
"path": "moto/s3bucket_path/utils.py"
}
] | diff --git a/moto/s3bucket_path/utils.py b/moto/s3bucket_path/utils.py
index aa7dc12f0961..e10e64fb6492 100644
--- a/moto/s3bucket_path/utils.py
+++ b/moto/s3bucket_path/utils.py
@@ -12,7 +12,7 @@ def bucket_name_from_url(url):
def parse_key_name(path):
- return "/".join(path.rstrip("/").split("/")[2:])
+ return "/".join(path.split("/")[2:])
def is_delete_keys(request, path, bucket_name):
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 95a755ab13d5..5519f0c5759a 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -1076,3 +1076,20 @@ def test_website_configuration_xml():
bucket = conn.create_bucket('test-bucket')
bucket.set_website_configuration_xml(TEST_XML)
bucket.get_website_configuration_xml().should.equal(TEST_XML)
+
+
+@mock_s3
+def test_key_with_trailing_slash_in_ordinary_calling_format():
+ conn = boto.connect_s3(
+ 'access_key',
+ 'secret_key',
+ calling_format=boto.s3.connection.OrdinaryCallingFormat()
+ )
+ bucket = conn.create_bucket('test_bucket_name')
+
+ key_name = 'key_with_slash/'
+
+ key = Key(bucket, key_name)
+ key.set_contents_from_string('some value')
+
+ [k.name for k in bucket.get_all_keys()].should.contain(key_name)
|
pyca__cryptography-7106 | adding a custom attribute that has an OCTET STRING parameter
It looks like I still can't add an OCTET STRING attribute, even using the `_tag` parameter of the `add_attribute` function that will be added in version 37, because `_ASN1Type` doesn't include OCTET STRING (tag 04).
(PS: why does it need a whitelist of allowed tag types?)
The attribute I wanted to add, from appendix B, 2-b of the CA/Browser Forum Baseline Requirements:
https://cabforum.org/wp-content/uploads/CA-Browser-Forum-BR-1.8.2.pdf
```
cabf OBJECT IDENTIFIER ::= { joint-iso-itu-t(2) international-organizations(23) ca-browser-forum(140) }

caSigningNonce ATTRIBUTE ::= {
    WITH SYNTAX OCTET STRING
    EQUALITY MATCHING RULE octetStringMatch
    SINGLE VALUE TRUE
    ID { cabf-caSigningNonce }
}

cabf-caSigningNonce OBJECT IDENTIFIER ::= { cabf 41 }
```
https://github.com/pyca/cryptography/pull/7038
Tested locally by editing name.py; the Rust part of the code doesn't complain.
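A sketch of the enum change being requested — an assumption of how the fix would look, not the merged patch:

```python
# Assumes the _ASN1Type definition in cryptography/x509/name.py (shown in the
# record below); 0x04 is the X.690 universal tag for OCTET STRING. Plain
# enum.Enum stands in here for the library's utils.Enum.
import enum

class _ASN1Type(enum.Enum):
    BitString = 3
    OctetString = 4   # proposed: would allow e.g. the cabf-caSigningNonce attribute
    UTF8String = 12
    # ... remaining members unchanged ...
```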
| [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport binascii\nimport re\nimport sys\nimport typing\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.bindings._rust import (\n x509 as rust_x509,\n)\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\n\n\nclass _ASN1Type(utils.Enum):\n BitString = 3\n UTF8String = 12\n NumericString = 18\n PrintableString = 19\n T61String = 20\n IA5String = 22\n UTCTime = 23\n GeneralizedTime = 24\n VisibleString = 26\n UniversalString = 28\n BMPString = 30\n\n\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\n}\n\n# Type alias\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\n\n#: Short attribute names from RFC 4514:\n#: https://tools.ietf.org/html/rfc4514#page-7\n_NAMEOID_TO_NAME: _OidNameMap = {\n NameOID.COMMON_NAME: \"CN\",\n NameOID.LOCALITY_NAME: \"L\",\n NameOID.STATE_OR_PROVINCE_NAME: \"ST\",\n NameOID.ORGANIZATION_NAME: \"O\",\n NameOID.ORGANIZATIONAL_UNIT_NAME: \"OU\",\n NameOID.COUNTRY_NAME: \"C\",\n NameOID.STREET_ADDRESS: \"STREET\",\n NameOID.DOMAIN_COMPONENT: \"DC\",\n NameOID.USER_ID: \"UID\",\n}\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\n\n\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\n \"\"\"Escape special characters in RFC4514 Distinguished Name value.\"\"\"\n\n if not val:\n return \"\"\n\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\n # followed by the hexadecimal encoding of the octets.\n if isinstance(val, bytes):\n return \"#\" + binascii.hexlify(val).decode(\"utf8\")\n\n # See https://tools.ietf.org/html/rfc4514#section-2.4\n val = val.replace(\"\\\\\", \"\\\\\\\\\")\n val = val.replace('\"', '\\\\\"')\n val = val.replace(\"+\", \"\\\\+\")\n val = val.replace(\",\", \"\\\\,\")\n val = val.replace(\";\", \"\\\\;\")\n val = val.replace(\"<\", \"\\\\<\")\n val = val.replace(\">\", \"\\\\>\")\n val = val.replace(\"\\0\", \"\\\\00\")\n\n if val[0] in (\"#\", \" \"):\n val = \"\\\\\" + val\n if val[-1] == \" \":\n val = val[:-1] + \"\\\\ \"\n\n return val\n\n\ndef _unescape_dn_value(val: str) -> str:\n if not val:\n return \"\"\n\n # See https://tools.ietf.org/html/rfc4514#section-3\n\n # special = escaped / SPACE / SHARP / EQUALS\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\n def sub(m):\n val = m.group(1)\n # Regular escape\n if len(val) == 1:\n return val\n # Hex-value scape\n return chr(int(val, 16))\n\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\n\n\nclass NameAttribute:\n def __init__(\n self,\n oid: ObjectIdentifier,\n value: typing.Union[str, bytes],\n _type: typing.Optional[_ASN1Type] = None,\n *,\n _validate: bool = True,\n ) -> None:\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n if _type == _ASN1Type.BitString:\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\n raise TypeError(\n \"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\"\n )\n if not isinstance(value, bytes):\n raise 
TypeError(\"value must be bytes for BitString\")\n else:\n if not isinstance(value, str):\n raise TypeError(\"value argument must be a str\")\n\n if (\n oid == NameOID.COUNTRY_NAME\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\n ):\n assert isinstance(value, str)\n c_len = len(value.encode(\"utf8\"))\n if c_len != 2 and _validate is True:\n raise ValueError(\n \"Country name must be a 2 character country code\"\n )\n elif c_len != 2:\n warnings.warn(\n \"Country names should be two characters, but the \"\n \"attribute is {} characters in length.\".format(c_len),\n stacklevel=2,\n )\n\n # The appropriate ASN1 string type varies by OID and is defined across\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\n # alternate types. This means when we see the sentinel value we need\n # to look up whether the OID has a non-UTF8 type. If it does, set it\n # to that. Otherwise, UTF8!\n if _type is None:\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\n\n if not isinstance(_type, _ASN1Type):\n raise TypeError(\"_type must be from the _ASN1Type enum\")\n\n self._oid = oid\n self._value = value\n self._type = _type\n\n @property\n def oid(self) -> ObjectIdentifier:\n return self._oid\n\n @property\n def value(self) -> typing.Union[str, bytes]:\n return self._value\n\n @property\n def rfc4514_attribute_name(self) -> str:\n \"\"\"\n The short attribute name (for example \"CN\") if available,\n otherwise the OID dotted string.\n \"\"\"\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Use short attribute name if available, otherwise fall back to OID\n dotted string.\n \"\"\"\n attr_name = (\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\n )\n if attr_name is None:\n attr_name = self.rfc4514_attribute_name\n\n return f\"{attr_name}={_escape_dn_value(self.value)}\"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return self.oid == other.oid and self.value == other.value\n\n def __hash__(self) -> int:\n return hash((self.oid, self.value))\n\n def __repr__(self) -> str:\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass RelativeDistinguishedName:\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\n attributes = list(attributes)\n if not attributes:\n raise ValueError(\"a relative distinguished name cannot be empty\")\n if not all(isinstance(x, NameAttribute) for x in attributes):\n raise TypeError(\"attributes must be an iterable of NameAttribute\")\n\n # Keep list and frozenset to preserve attribute order where it matters\n self._attributes = attributes\n self._attribute_set = frozenset(attributes)\n\n if len(self._attribute_set) != len(attributes):\n raise ValueError(\"duplicate attributes are not allowed\")\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Within each RDN, attributes are joined by '+', although that is rarely\n used in certificates.\n \"\"\"\n return \"+\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in 
self._attributes\n )\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, RelativeDistinguishedName):\n return NotImplemented\n\n return self._attribute_set == other._attribute_set\n\n def __hash__(self) -> int:\n return hash(self._attribute_set)\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n return iter(self._attributes)\n\n def __len__(self) -> int:\n return len(self._attributes)\n\n def __repr__(self) -> str:\n return \"<RelativeDistinguishedName({})>\".format(self.rfc4514_string())\n\n\nclass Name:\n @typing.overload\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\n ...\n\n @typing.overload\n def __init__(\n self, attributes: typing.Iterable[RelativeDistinguishedName]\n ) -> None:\n ...\n\n def __init__(\n self,\n attributes: typing.Iterable[\n typing.Union[NameAttribute, RelativeDistinguishedName]\n ],\n ) -> None:\n attributes = list(attributes)\n if all(isinstance(x, NameAttribute) for x in attributes):\n self._attributes = [\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\n for x in attributes\n ]\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\n self._attributes = typing.cast(\n typing.List[RelativeDistinguishedName], attributes\n )\n else:\n raise TypeError(\n \"attributes must be a list of NameAttribute\"\n \" or a list RelativeDistinguishedName\"\n )\n\n @classmethod\n def from_rfc4514_string(cls, data: str) -> \"Name\":\n return _RFC4514NameParser(data).parse()\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n For example 'CN=foobar.com,O=Foo Corp,C=US'\n\n An X.509 name is a two-level structure: a list of sets of attributes.\n Each list element is separated by ',' and within each list element, set\n elements are separated by '+'. The latter is almost never used in\n real world certificates. 
According to RFC4514 section 2.1 the\n RDNSequence must be reversed when converting to string representation.\n \"\"\"\n return \",\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in reversed(self._attributes)\n )\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n @property\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\n return self._attributes\n\n def public_bytes(self, backend: typing.Any = None) -> bytes:\n return rust_x509.encode_name_bytes(self)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __hash__(self) -> int:\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n for rdn in self._attributes:\n for ava in rdn:\n yield ava\n\n def __len__(self) -> int:\n return sum(len(rdn) for rdn in self._attributes)\n\n def __repr__(self) -> str:\n rdns = \",\".join(attr.rfc4514_string() for attr in self._attributes)\n return \"<Name({})>\".format(rdns)\n\n\nclass _RFC4514NameParser:\n _OID_RE = re.compile(r\"(0|([1-9]\\d*))(\\.(0|([1-9]\\d*)))+\")\n _DESCR_RE = re.compile(r\"[a-zA-Z][a-zA-Z\\d-]*\")\n\n _PAIR = r\"\\\\([\\\\ #=\\\"\\+,;<>]|[\\da-zA-Z]{2})\"\n _PAIR_RE = re.compile(_PAIR)\n _LUTF1 = r\"[\\x01-\\x1f\\x21\\x24-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _SUTF1 = r\"[\\x01-\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _TUTF1 = r\"[\\x01-\\x1F\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _UTFMB = rf\"[\\x80-{chr(sys.maxunicode)}]\"\n _LEADCHAR = rf\"{_LUTF1}|{_UTFMB}\"\n _STRINGCHAR = rf\"{_SUTF1}|{_UTFMB}\"\n _TRAILCHAR = rf\"{_TUTF1}|{_UTFMB}\"\n _STRING_RE = re.compile(\n rf\"\"\"\n (\n ({_LEADCHAR}|{_PAIR})\n (\n ({_STRINGCHAR}|{_PAIR})*\n ({_TRAILCHAR}|{_PAIR})\n )?\n )?\n \"\"\",\n re.VERBOSE,\n )\n _HEXSTRING_RE = re.compile(r\"#([\\da-zA-Z]{2})+\")\n\n def __init__(self, data: str) -> None:\n self._data = data\n self._idx = 0\n\n def _has_data(self) -> bool:\n return self._idx < len(self._data)\n\n def _peek(self) -> typing.Optional[str]:\n if self._has_data():\n return self._data[self._idx]\n return None\n\n def _read_char(self, ch: str) -> None:\n if self._peek() != ch:\n raise ValueError\n self._idx += 1\n\n def _read_re(self, pat) -> str:\n match = pat.match(self._data, pos=self._idx)\n if match is None:\n raise ValueError\n val = match.group()\n self._idx += len(val)\n return val\n\n def parse(self) -> Name:\n rdns = [self._parse_rdn()]\n\n while self._has_data():\n self._read_char(\",\")\n rdns.append(self._parse_rdn())\n\n return Name(rdns)\n\n def _parse_rdn(self) -> RelativeDistinguishedName:\n nas = [self._parse_na()]\n while self._peek() == \"+\":\n self._read_char(\"+\")\n nas.append(self._parse_na())\n\n return RelativeDistinguishedName(nas)\n\n def _parse_na(self) -> NameAttribute:\n try:\n oid_value = self._read_re(self._OID_RE)\n except ValueError:\n name = self._read_re(self._DESCR_RE)\n oid = _NAME_TO_NAMEOID.get(name)\n if oid is None:\n raise ValueError\n else:\n oid = ObjectIdentifier(oid_value)\n\n self._read_char(\"=\")\n if self._peek() == \"#\":\n value = self._read_re(self._HEXSTRING_RE)\n value = binascii.unhexlify(value[1:]).decode()\n else:\n raw_value = self._read_re(self._STRING_RE)\n value = _unescape_dn_value(raw_value)\n\n return 
NameAttribute(oid, value)\n",
"path": "src/cryptography/x509/name.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport binascii\nimport re\nimport sys\nimport typing\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.hazmat.bindings._rust import (\n x509 as rust_x509,\n)\nfrom cryptography.x509.oid import NameOID, ObjectIdentifier\n\n\nclass _ASN1Type(utils.Enum):\n BitString = 3\n OctetString = 4\n UTF8String = 12\n NumericString = 18\n PrintableString = 19\n T61String = 20\n IA5String = 22\n UTCTime = 23\n GeneralizedTime = 24\n VisibleString = 26\n UniversalString = 28\n BMPString = 30\n\n\n_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}\n_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {\n NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,\n NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,\n NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,\n NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,\n NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,\n}\n\n# Type alias\n_OidNameMap = typing.Mapping[ObjectIdentifier, str]\n\n#: Short attribute names from RFC 4514:\n#: https://tools.ietf.org/html/rfc4514#page-7\n_NAMEOID_TO_NAME: _OidNameMap = {\n NameOID.COMMON_NAME: \"CN\",\n NameOID.LOCALITY_NAME: \"L\",\n NameOID.STATE_OR_PROVINCE_NAME: \"ST\",\n NameOID.ORGANIZATION_NAME: \"O\",\n NameOID.ORGANIZATIONAL_UNIT_NAME: \"OU\",\n NameOID.COUNTRY_NAME: \"C\",\n NameOID.STREET_ADDRESS: \"STREET\",\n NameOID.DOMAIN_COMPONENT: \"DC\",\n NameOID.USER_ID: \"UID\",\n}\n_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}\n\n\ndef _escape_dn_value(val: typing.Union[str, bytes]) -> str:\n \"\"\"Escape special characters in RFC4514 Distinguished Name value.\"\"\"\n\n if not val:\n return \"\"\n\n # RFC 4514 Section 2.4 defines the value as being the # (U+0023) character\n # followed by the hexadecimal encoding of the octets.\n if isinstance(val, bytes):\n return \"#\" + binascii.hexlify(val).decode(\"utf8\")\n\n # See https://tools.ietf.org/html/rfc4514#section-2.4\n val = val.replace(\"\\\\\", \"\\\\\\\\\")\n val = val.replace('\"', '\\\\\"')\n val = val.replace(\"+\", \"\\\\+\")\n val = val.replace(\",\", \"\\\\,\")\n val = val.replace(\";\", \"\\\\;\")\n val = val.replace(\"<\", \"\\\\<\")\n val = val.replace(\">\", \"\\\\>\")\n val = val.replace(\"\\0\", \"\\\\00\")\n\n if val[0] in (\"#\", \" \"):\n val = \"\\\\\" + val\n if val[-1] == \" \":\n val = val[:-1] + \"\\\\ \"\n\n return val\n\n\ndef _unescape_dn_value(val: str) -> str:\n if not val:\n return \"\"\n\n # See https://tools.ietf.org/html/rfc4514#section-3\n\n # special = escaped / SPACE / SHARP / EQUALS\n # escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE\n def sub(m):\n val = m.group(1)\n # Regular escape\n if len(val) == 1:\n return val\n # Hex-value scape\n return chr(int(val, 16))\n\n return _RFC4514NameParser._PAIR_RE.sub(sub, val)\n\n\nclass NameAttribute:\n def __init__(\n self,\n oid: ObjectIdentifier,\n value: typing.Union[str, bytes],\n _type: typing.Optional[_ASN1Type] = None,\n *,\n _validate: bool = True,\n ) -> None:\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n if _type == _ASN1Type.BitString:\n if oid != NameOID.X500_UNIQUE_IDENTIFIER:\n raise TypeError(\n \"oid must be X500_UNIQUE_IDENTIFIER for BitString type.\"\n )\n if not isinstance(value, bytes):\n 
raise TypeError(\"value must be bytes for BitString\")\n else:\n if not isinstance(value, str):\n raise TypeError(\"value argument must be a str\")\n\n if (\n oid == NameOID.COUNTRY_NAME\n or oid == NameOID.JURISDICTION_COUNTRY_NAME\n ):\n assert isinstance(value, str)\n c_len = len(value.encode(\"utf8\"))\n if c_len != 2 and _validate is True:\n raise ValueError(\n \"Country name must be a 2 character country code\"\n )\n elif c_len != 2:\n warnings.warn(\n \"Country names should be two characters, but the \"\n \"attribute is {} characters in length.\".format(c_len),\n stacklevel=2,\n )\n\n # The appropriate ASN1 string type varies by OID and is defined across\n # multiple RFCs including 2459, 3280, and 5280. In general UTF8String\n # is preferred (2459), but 3280 and 5280 specify several OIDs with\n # alternate types. This means when we see the sentinel value we need\n # to look up whether the OID has a non-UTF8 type. If it does, set it\n # to that. Otherwise, UTF8!\n if _type is None:\n _type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)\n\n if not isinstance(_type, _ASN1Type):\n raise TypeError(\"_type must be from the _ASN1Type enum\")\n\n self._oid = oid\n self._value = value\n self._type = _type\n\n @property\n def oid(self) -> ObjectIdentifier:\n return self._oid\n\n @property\n def value(self) -> typing.Union[str, bytes]:\n return self._value\n\n @property\n def rfc4514_attribute_name(self) -> str:\n \"\"\"\n The short attribute name (for example \"CN\") if available,\n otherwise the OID dotted string.\n \"\"\"\n return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Use short attribute name if available, otherwise fall back to OID\n dotted string.\n \"\"\"\n attr_name = (\n attr_name_overrides.get(self.oid) if attr_name_overrides else None\n )\n if attr_name is None:\n attr_name = self.rfc4514_attribute_name\n\n return f\"{attr_name}={_escape_dn_value(self.value)}\"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return self.oid == other.oid and self.value == other.value\n\n def __hash__(self) -> int:\n return hash((self.oid, self.value))\n\n def __repr__(self) -> str:\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass RelativeDistinguishedName:\n def __init__(self, attributes: typing.Iterable[NameAttribute]):\n attributes = list(attributes)\n if not attributes:\n raise ValueError(\"a relative distinguished name cannot be empty\")\n if not all(isinstance(x, NameAttribute) for x in attributes):\n raise TypeError(\"attributes must be an iterable of NameAttribute\")\n\n # Keep list and frozenset to preserve attribute order where it matters\n self._attributes = attributes\n self._attribute_set = frozenset(attributes)\n\n if len(self._attribute_set) != len(attributes):\n raise ValueError(\"duplicate attributes are not allowed\")\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n\n Within each RDN, attributes are joined by '+', although that is rarely\n used in certificates.\n \"\"\"\n return \"+\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in 
self._attributes\n )\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, RelativeDistinguishedName):\n return NotImplemented\n\n return self._attribute_set == other._attribute_set\n\n def __hash__(self) -> int:\n return hash(self._attribute_set)\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n return iter(self._attributes)\n\n def __len__(self) -> int:\n return len(self._attributes)\n\n def __repr__(self) -> str:\n return \"<RelativeDistinguishedName({})>\".format(self.rfc4514_string())\n\n\nclass Name:\n @typing.overload\n def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:\n ...\n\n @typing.overload\n def __init__(\n self, attributes: typing.Iterable[RelativeDistinguishedName]\n ) -> None:\n ...\n\n def __init__(\n self,\n attributes: typing.Iterable[\n typing.Union[NameAttribute, RelativeDistinguishedName]\n ],\n ) -> None:\n attributes = list(attributes)\n if all(isinstance(x, NameAttribute) for x in attributes):\n self._attributes = [\n RelativeDistinguishedName([typing.cast(NameAttribute, x)])\n for x in attributes\n ]\n elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):\n self._attributes = typing.cast(\n typing.List[RelativeDistinguishedName], attributes\n )\n else:\n raise TypeError(\n \"attributes must be a list of NameAttribute\"\n \" or a list RelativeDistinguishedName\"\n )\n\n @classmethod\n def from_rfc4514_string(cls, data: str) -> \"Name\":\n return _RFC4514NameParser(data).parse()\n\n def rfc4514_string(\n self, attr_name_overrides: typing.Optional[_OidNameMap] = None\n ) -> str:\n \"\"\"\n Format as RFC4514 Distinguished Name string.\n For example 'CN=foobar.com,O=Foo Corp,C=US'\n\n An X.509 name is a two-level structure: a list of sets of attributes.\n Each list element is separated by ',' and within each list element, set\n elements are separated by '+'. The latter is almost never used in\n real world certificates. 
According to RFC4514 section 2.1 the\n RDNSequence must be reversed when converting to string representation.\n \"\"\"\n return \",\".join(\n attr.rfc4514_string(attr_name_overrides)\n for attr in reversed(self._attributes)\n )\n\n def get_attributes_for_oid(\n self, oid: ObjectIdentifier\n ) -> typing.List[NameAttribute]:\n return [i for i in self if i.oid == oid]\n\n @property\n def rdns(self) -> typing.List[RelativeDistinguishedName]:\n return self._attributes\n\n def public_bytes(self, backend: typing.Any = None) -> bytes:\n return rust_x509.encode_name_bytes(self)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __hash__(self) -> int:\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self) -> typing.Iterator[NameAttribute]:\n for rdn in self._attributes:\n for ava in rdn:\n yield ava\n\n def __len__(self) -> int:\n return sum(len(rdn) for rdn in self._attributes)\n\n def __repr__(self) -> str:\n rdns = \",\".join(attr.rfc4514_string() for attr in self._attributes)\n return \"<Name({})>\".format(rdns)\n\n\nclass _RFC4514NameParser:\n _OID_RE = re.compile(r\"(0|([1-9]\\d*))(\\.(0|([1-9]\\d*)))+\")\n _DESCR_RE = re.compile(r\"[a-zA-Z][a-zA-Z\\d-]*\")\n\n _PAIR = r\"\\\\([\\\\ #=\\\"\\+,;<>]|[\\da-zA-Z]{2})\"\n _PAIR_RE = re.compile(_PAIR)\n _LUTF1 = r\"[\\x01-\\x1f\\x21\\x24-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _SUTF1 = r\"[\\x01-\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _TUTF1 = r\"[\\x01-\\x1F\\x21\\x23-\\x2A\\x2D-\\x3A\\x3D\\x3F-\\x5B\\x5D-\\x7F]\"\n _UTFMB = rf\"[\\x80-{chr(sys.maxunicode)}]\"\n _LEADCHAR = rf\"{_LUTF1}|{_UTFMB}\"\n _STRINGCHAR = rf\"{_SUTF1}|{_UTFMB}\"\n _TRAILCHAR = rf\"{_TUTF1}|{_UTFMB}\"\n _STRING_RE = re.compile(\n rf\"\"\"\n (\n ({_LEADCHAR}|{_PAIR})\n (\n ({_STRINGCHAR}|{_PAIR})*\n ({_TRAILCHAR}|{_PAIR})\n )?\n )?\n \"\"\",\n re.VERBOSE,\n )\n _HEXSTRING_RE = re.compile(r\"#([\\da-zA-Z]{2})+\")\n\n def __init__(self, data: str) -> None:\n self._data = data\n self._idx = 0\n\n def _has_data(self) -> bool:\n return self._idx < len(self._data)\n\n def _peek(self) -> typing.Optional[str]:\n if self._has_data():\n return self._data[self._idx]\n return None\n\n def _read_char(self, ch: str) -> None:\n if self._peek() != ch:\n raise ValueError\n self._idx += 1\n\n def _read_re(self, pat) -> str:\n match = pat.match(self._data, pos=self._idx)\n if match is None:\n raise ValueError\n val = match.group()\n self._idx += len(val)\n return val\n\n def parse(self) -> Name:\n rdns = [self._parse_rdn()]\n\n while self._has_data():\n self._read_char(\",\")\n rdns.append(self._parse_rdn())\n\n return Name(rdns)\n\n def _parse_rdn(self) -> RelativeDistinguishedName:\n nas = [self._parse_na()]\n while self._peek() == \"+\":\n self._read_char(\"+\")\n nas.append(self._parse_na())\n\n return RelativeDistinguishedName(nas)\n\n def _parse_na(self) -> NameAttribute:\n try:\n oid_value = self._read_re(self._OID_RE)\n except ValueError:\n name = self._read_re(self._DESCR_RE)\n oid = _NAME_TO_NAMEOID.get(name)\n if oid is None:\n raise ValueError\n else:\n oid = ObjectIdentifier(oid_value)\n\n self._read_char(\"=\")\n if self._peek() == \"#\":\n value = self._read_re(self._HEXSTRING_RE)\n value = binascii.unhexlify(value[1:]).decode()\n else:\n raw_value = self._read_re(self._STRING_RE)\n value = _unescape_dn_value(raw_value)\n\n return 
NameAttribute(oid, value)\n",
"path": "src/cryptography/x509/name.py"
}
] | diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py
index 8b7e4ab430e4..4b32115781d1 100644
--- a/src/cryptography/x509/name.py
+++ b/src/cryptography/x509/name.py
@@ -17,6 +17,7 @@
class _ASN1Type(utils.Enum):
BitString = 3
+ OctetString = 4
UTF8String = 12
NumericString = 18
PrintableString = 19
|
liqd__a4-meinberlin-1740 | 2-link limit on nav items
This affects the footer, which needs more than two items.
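
For context (grounded in the model file quoted below, `meinberlin/apps/cms/models/navigation_menues.py`), the cap comes from Wagtail's `InlinePanel` on the menu snippet; a minimal sketch of the limiting panel definition and the change that lifts it:

```python
from wagtail.admin import edit_handlers

# Limiting version (what the model below currently has):
# max_num=2 caps the inline forms, i.e. at most two menu items per menu.
limited_panels = [
    edit_handlers.FieldPanel('title'),
    edit_handlers.InlinePanel('items', max_num=2),
]

# Without max_num, Wagtail places no cap on the number of menu items,
# so the footer menu can hold as many links as needed.
uncapped_panels = [
    edit_handlers.FieldPanel('title'),
    edit_handlers.InlinePanel('items'),
]
```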
| [
{
"content": "from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.core.models import Orderable\nfrom wagtail.snippets.models import register_snippet\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items', max_num=2)\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n",
"path": "meinberlin/apps/cms/models/navigation_menues.py"
}
] | [
{
"content": "from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.core.models import Orderable\nfrom wagtail.snippets.models import register_snippet\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n",
"path": "meinberlin/apps/cms/models/navigation_menues.py"
}
] | diff --git a/meinberlin/apps/cms/models/navigation_menues.py b/meinberlin/apps/cms/models/navigation_menues.py
index 87eec0169b..d8b2ad88ff 100644
--- a/meinberlin/apps/cms/models/navigation_menues.py
+++ b/meinberlin/apps/cms/models/navigation_menues.py
@@ -32,7 +32,7 @@ def __str__(self):
panels = [
edit_handlers.FieldPanel('title'),
- edit_handlers.InlinePanel('items', max_num=2)
+ edit_handlers.InlinePanel('items')
]
diff --git a/meinberlin/assets/scss/components/_footer.scss b/meinberlin/assets/scss/components/_footer.scss
index 17616dd3b9..a04f779997 100644
--- a/meinberlin/assets/scss/components/_footer.scss
+++ b/meinberlin/assets/scss/components/_footer.scss
@@ -20,7 +20,9 @@ body {
.footer-nav__link {
display: inline-block;
- padding: 0.7em;
+ padding-left: $padding;
+ padding-top: 0.7em;
+ padding-bottom: 0.7em;
color: inherit;
text-decoration: none;
@@ -43,3 +45,11 @@ body {
float: left;
}
}
+
+.beberlin__logo {
+ padding-right: $padding;
+}
+
+.berlin__logo {
+ padding-left: $padding;
+}
diff --git a/meinberlin/templates/footer.html b/meinberlin/templates/footer.html
index ba327ddc68..506015f099 100644
--- a/meinberlin/templates/footer.html
+++ b/meinberlin/templates/footer.html
@@ -3,11 +3,17 @@
<footer class="main-footer">
<div class="l-wrapper">
<div class="shariff" data-theme="white"></div>
- <nav class="footer-nav">
+ <nav class="footer-nav u-spacer-bottom-double">
{% get_menu "footer-nav" as footer_nav %}
{% for item in footer_nav %}
<a class="footer-nav__link" href="{{ item.url }}">{{ item.title }}</a>
{% endfor %}
</nav>
+ <div class="footer__logo">
+ <img class="beberlin__logo" src="{% static 'images/beberlin.svg' %}" alt="be Berlin" width="127.5" height="30"/>
+ <a href="https://www.berlin.de/" class="berlin__logo" rel="home">
+ <img src="{% static 'images/berlin_de.png' %}" alt="Zur Homepage von Berlin.de" width="125" height="31"/>
+ </a>
+ </div>
</div>
</footer>
|
pytorch__rl-1536 | [BUG] TruncatedNormal crashing when computing entropy
## Describe the bug
Calling `.entropy()` on a `TruncatedNormal` distribution causes the code to crash.
## To Reproduce
First crash happened using a PPO agent with entropy bonus turned on and actor parametrized with a `TruncatedNormal`.
A simple snippet to reproduce is the following:
```python
import torch
from torchrl.modules.distributions import IndependentNormal, TruncatedNormal
if __name__ == '__main__':
loc, scale = torch.zeros(1), torch.ones(1)
d1 = IndependentNormal(loc, scale)
print(d1.entropy())
d2 = TruncatedNormal(loc, scale)
print(d2.entropy())
```
```bash
tensor(1.4189)
Traceback (most recent call last):
File "/home/diego/Desktop/test.py", line 10, in <module>
print(d2.entropy())
File "/home/diego/miniconda3/envs/pytorch/lib/python3.10/site-packages/torch/distributions/independent.py", line 103, in entropy
entropy = self.base_dist.entropy()
TypeError: 'Tensor' object is not callable
```
## Expected behavior
The entropy value should be returned.
## System info
* Python 3.10.12
* torch 2.0.1
```python
import torchrl, numpy, sys
print(torchrl.__version__, numpy.__version__, sys.version, sys.platform)
```
```
0.1.1 1.25.1 3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0] linux
```
## Reason and Possible fixes
In the `TruncatedStandardNormal` class, `self._entropy` is a constant tensor computed at initialization, and `entropy` is exposed as a `@property` that returns it. Accessing `.entropy` therefore yields that tensor, and `.entropy()` then tries to call the tensor, unlike the callable `entropy()` method on other `torch.distributions` classes:
```python
import torch
from torchrl.modules.distributions.truncated_normal import TruncatedStandardNormal
loc, scale = torch.zeros(1), torch.ones(1)
print(TruncatedStandardNormal(loc, scale).entropy)
print(TruncatedStandardNormal(loc, scale).entropy())
```
```bash
tensor([-0.0104])
Traceback (most recent call last):
File "/home/diego/Desktop/test.py", line 5, in <module>
print(TruncatedStandardNormal(loc, scale).entropy())
TypeError: 'Tensor' object is not callable
```
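
A minimal sketch of one fix (the change applied in the diff further below): drop the `@property` decorator so `entropy` is an ordinary method, matching the `torch.distributions.Distribution` interface that `Independent.entropy()` relies on:

```python
from torch.distributions import Distribution

class TruncatedStandardNormal(Distribution):
    # ... __init__ precomputes self._entropy as before ...

    # Method, not property: Independent.entropy() calls base_dist.entropy(),
    # so the name must be callable.
    def entropy(self):
        return self._entropy
```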
## Checklist
- [x] I have checked that there is no similar issue in the repo (**required**)
- [x] I have read the [documentation](https://github.com/pytorch/rl/tree/main/docs/) (**required**)
- [x] I have provided a minimal working example to reproduce the bug (**required**)
| [
{
"content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# from https://github.com/toshas/torch_truncnorm\n\nimport math\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints, Distribution\nfrom torch.distributions.utils import broadcast_all\n\nCONST_SQRT_2 = math.sqrt(2)\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\n\n\nclass TruncatedStandardNormal(Distribution):\n \"\"\"Truncated Standard Normal distribution.\n\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n arg_constraints = {\n \"a\": constraints.real,\n \"b\": constraints.real,\n }\n has_rsample = True\n eps = 1e-6\n\n def __init__(self, a, b, validate_args=None):\n self.a, self.b = broadcast_all(a, b)\n if isinstance(a, Number) and isinstance(b, Number):\n batch_shape = torch.Size()\n else:\n batch_shape = self.a.size()\n super(TruncatedStandardNormal, self).__init__(\n batch_shape, validate_args=validate_args\n )\n if self.a.dtype != self.b.dtype:\n raise ValueError(\"Truncation bounds types are different\")\n if any(\n (self.a >= self.b)\n .view(\n -1,\n )\n .tolist()\n ):\n raise ValueError(\"Incorrect truncation range\")\n eps = self.eps\n self._dtype_min_gt_0 = eps\n self._dtype_max_lt_1 = 1 - eps\n self._little_phi_a = self._little_phi(self.a)\n self._little_phi_b = self._little_phi(self.b)\n self._big_phi_a = self._big_phi(self.a)\n self._big_phi_b = self._big_phi(self.b)\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)\n self._log_Z = self._Z.log()\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\n self._lpbb_m_lpaa_d_Z = (\n self._little_phi_b * little_phi_coeff_b\n - self._little_phi_a * little_phi_coeff_a\n ) / self._Z\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\n self._variance = (\n 1\n - self._lpbb_m_lpaa_d_Z\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\n )\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\n\n @constraints.dependent_property\n def support(self):\n return constraints.interval(self.a, self.b)\n\n @property\n def mean(self):\n return self._mean\n\n @property\n def variance(self):\n return self._variance\n\n @property\n def entropy(self):\n return self._entropy\n\n @property\n def auc(self):\n return self._Z\n\n @staticmethod\n def _little_phi(x):\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\n\n def _big_phi(self, x):\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\n return phi.clamp(self.eps, 1 - self.eps)\n\n @staticmethod\n def _inv_big_phi(x):\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\n\n def cdf(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\n\n def icdf(self, value):\n y = self._big_phi_a + value * self._Z\n y = y.clamp(self.eps, 1 - self.eps)\n return self._inv_big_phi(y)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\n\n def rsample(self, sample_shape=None):\n if sample_shape is None:\n sample_shape = torch.Size([])\n shape = 
self._extended_shape(sample_shape)\n p = torch.empty(shape, device=self.a.device).uniform_(\n self._dtype_min_gt_0, self._dtype_max_lt_1\n )\n return self.icdf(p)\n\n\nclass TruncatedNormal(TruncatedStandardNormal):\n \"\"\"Truncated Normal distribution.\n\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n has_rsample = True\n\n def __init__(self, loc, scale, a, b, validate_args=None):\n scale = scale.clamp_min(self.eps)\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\n self._non_std_a = a\n self._non_std_b = b\n a = (a - self.loc) / self.scale\n b = (b - self.loc) / self.scale\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\n self._log_scale = self.scale.log()\n self._mean = self._mean * self.scale + self.loc\n self._variance = self._variance * self.scale**2\n self._entropy += self._log_scale\n\n def _to_std_rv(self, value):\n return (value - self.loc) / self.scale\n\n def _from_std_rv(self, value):\n return value * self.scale + self.loc\n\n def cdf(self, value):\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\n\n def icdf(self, value):\n sample = self._from_std_rv(super().icdf(value))\n\n # clamp data but keep gradients\n sample_clip = torch.stack(\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\n ).max(0)[0]\n sample_clip = torch.stack(\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\n ).min(0)[0]\n sample.data.copy_(sample_clip)\n return sample\n\n def log_prob(self, value):\n value = self._to_std_rv(value)\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\n",
"path": "torchrl/modules/distributions/truncated_normal.py"
}
] | [
{
"content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# from https://github.com/toshas/torch_truncnorm\n\nimport math\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints, Distribution\nfrom torch.distributions.utils import broadcast_all\n\nCONST_SQRT_2 = math.sqrt(2)\nCONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)\nCONST_INV_SQRT_2 = 1 / math.sqrt(2)\nCONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)\nCONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)\n\n\nclass TruncatedStandardNormal(Distribution):\n \"\"\"Truncated Standard Normal distribution.\n\n Source: https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n arg_constraints = {\n \"a\": constraints.real,\n \"b\": constraints.real,\n }\n has_rsample = True\n eps = 1e-6\n\n def __init__(self, a, b, validate_args=None):\n self.a, self.b = broadcast_all(a, b)\n if isinstance(a, Number) and isinstance(b, Number):\n batch_shape = torch.Size()\n else:\n batch_shape = self.a.size()\n super(TruncatedStandardNormal, self).__init__(\n batch_shape, validate_args=validate_args\n )\n if self.a.dtype != self.b.dtype:\n raise ValueError(\"Truncation bounds types are different\")\n if any(\n (self.a >= self.b)\n .view(\n -1,\n )\n .tolist()\n ):\n raise ValueError(\"Incorrect truncation range\")\n eps = self.eps\n self._dtype_min_gt_0 = eps\n self._dtype_max_lt_1 = 1 - eps\n self._little_phi_a = self._little_phi(self.a)\n self._little_phi_b = self._little_phi(self.b)\n self._big_phi_a = self._big_phi(self.a)\n self._big_phi_b = self._big_phi(self.b)\n self._Z = (self._big_phi_b - self._big_phi_a).clamp(eps, 1 - eps)\n self._log_Z = self._Z.log()\n little_phi_coeff_a = torch.nan_to_num(self.a, nan=math.nan)\n little_phi_coeff_b = torch.nan_to_num(self.b, nan=math.nan)\n self._lpbb_m_lpaa_d_Z = (\n self._little_phi_b * little_phi_coeff_b\n - self._little_phi_a * little_phi_coeff_a\n ) / self._Z\n self._mean = -(self._little_phi_b - self._little_phi_a) / self._Z\n self._variance = (\n 1\n - self._lpbb_m_lpaa_d_Z\n - ((self._little_phi_b - self._little_phi_a) / self._Z) ** 2\n )\n self._entropy = CONST_LOG_SQRT_2PI_E + self._log_Z - 0.5 * self._lpbb_m_lpaa_d_Z\n\n @constraints.dependent_property\n def support(self):\n return constraints.interval(self.a, self.b)\n\n @property\n def mean(self):\n return self._mean\n\n @property\n def variance(self):\n return self._variance\n\n def entropy(self):\n return self._entropy\n\n @property\n def auc(self):\n return self._Z\n\n @staticmethod\n def _little_phi(x):\n return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI\n\n def _big_phi(self, x):\n phi = 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())\n return phi.clamp(self.eps, 1 - self.eps)\n\n @staticmethod\n def _inv_big_phi(x):\n return CONST_SQRT_2 * (2 * x - 1).erfinv()\n\n def cdf(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)\n\n def icdf(self, value):\n y = self._big_phi_a + value * self._Z\n y = y.clamp(self.eps, 1 - self.eps)\n return self._inv_big_phi(y)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5\n\n def rsample(self, sample_shape=None):\n if sample_shape is None:\n sample_shape = torch.Size([])\n shape = 
self._extended_shape(sample_shape)\n p = torch.empty(shape, device=self.a.device).uniform_(\n self._dtype_min_gt_0, self._dtype_max_lt_1\n )\n return self.icdf(p)\n\n\nclass TruncatedNormal(TruncatedStandardNormal):\n \"\"\"Truncated Normal distribution.\n\n https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n \"\"\"\n\n has_rsample = True\n\n def __init__(self, loc, scale, a, b, validate_args=None):\n scale = scale.clamp_min(self.eps)\n self.loc, self.scale, a, b = broadcast_all(loc, scale, a, b)\n self._non_std_a = a\n self._non_std_b = b\n a = (a - self.loc) / self.scale\n b = (b - self.loc) / self.scale\n super(TruncatedNormal, self).__init__(a, b, validate_args=validate_args)\n self._log_scale = self.scale.log()\n self._mean = self._mean * self.scale + self.loc\n self._variance = self._variance * self.scale**2\n self._entropy += self._log_scale\n\n def _to_std_rv(self, value):\n return (value - self.loc) / self.scale\n\n def _from_std_rv(self, value):\n return value * self.scale + self.loc\n\n def cdf(self, value):\n return super(TruncatedNormal, self).cdf(self._to_std_rv(value))\n\n def icdf(self, value):\n sample = self._from_std_rv(super().icdf(value))\n\n # clamp data but keep gradients\n sample_clip = torch.stack(\n [sample.detach(), self._non_std_a.detach().expand_as(sample)], 0\n ).max(0)[0]\n sample_clip = torch.stack(\n [sample_clip, self._non_std_b.detach().expand_as(sample)], 0\n ).min(0)[0]\n sample.data.copy_(sample_clip)\n return sample\n\n def log_prob(self, value):\n value = self._to_std_rv(value)\n return super(TruncatedNormal, self).log_prob(value) - self._log_scale\n",
"path": "torchrl/modules/distributions/truncated_normal.py"
}
] | diff --git a/test/test_distributions.py b/test/test_distributions.py
index c804dcf7e53..30bb0288dd4 100644
--- a/test/test_distributions.py
+++ b/test/test_distributions.py
@@ -30,40 +30,46 @@
@pytest.mark.skipif(torch.__version__ < "2.0", reason="torch 2.0 is required")
@pytest.mark.parametrize("device", get_default_devices())
[email protected]("div_up", [1, 2])
[email protected]("div_down", [1, 2])
-def test_delta(device, div_up, div_down):
- x = torch.randn(1000000, 4, device=device, dtype=torch.double)
- d = Delta(x)
- assert d.log_prob(d.mode).shape == x.shape[:-1]
- assert (d.log_prob(d.mode) == float("inf")).all()
-
- x = torch.randn(1000000, 4, device=device, dtype=torch.double)
- d = TanhDelta(x, -1 / div_down, 1.0 / div_up, atol=1e-4, rtol=1e-4)
- xinv = d.transforms[0].inv(d.mode)
- assert d.base_dist._is_equal(xinv).all()
- assert d.log_prob(d.mode).shape == x.shape[:-1]
- assert (d.log_prob(d.mode) == float("inf")).all()
-
- x = torch.randn(1000000, 4, device=device, dtype=torch.double)
- d = TanhDelta(
- x,
- -torch.ones_like(x) / div_down,
- torch.ones_like(x) / div_up,
- atol=1e-4,
- rtol=1e-4,
- )
- xinv = d.transforms[0].inv(d.mode)
- assert d.base_dist._is_equal(xinv).all()
- assert d.log_prob(d.mode).shape == x.shape[:-1]
- assert (d.log_prob(d.mode) == float("inf")).all()
+class TestDelta:
+ def test_delta_logprob(self, device):
+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)
+ d = Delta(x)
+ assert d.log_prob(d.mode).shape == x.shape[:-1]
+ assert (d.log_prob(d.mode) == float("inf")).all()
+
+ @pytest.mark.parametrize("div_up", [1, 2])
+ @pytest.mark.parametrize("div_down", [1, 2])
+ def test_tanhdelta_logprob(self, device, div_up, div_down):
+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)
+ d = TanhDelta(x, -1 / div_down, 1.0 / div_up, atol=1e-4, rtol=1e-4)
+ xinv = d.transforms[0].inv(d.mode)
+ assert d.base_dist._is_equal(xinv).all()
+ assert d.log_prob(d.mode).shape == x.shape[:-1]
+ assert (d.log_prob(d.mode) == float("inf")).all()
+
+ @pytest.mark.parametrize("div_up", [1, 2])
+ @pytest.mark.parametrize("div_down", [1, 2])
+ def test_tanhdelta_inv(self, device, div_up, div_down):
+ x = torch.randn(1000000, 4, device=device, dtype=torch.double)
+ d = TanhDelta(
+ x,
+ -torch.ones_like(x) / div_down,
+ torch.ones_like(x) / div_up,
+ atol=1e-4,
+ rtol=1e-4,
+ )
+ xinv = d.transforms[0].inv(d.mode)
+ assert d.base_dist._is_equal(xinv).all()
+ assert d.log_prob(d.mode).shape == x.shape[:-1]
+ assert (d.log_prob(d.mode) == float("inf")).all()
- x = torch.randn(1000000, 4, device=device)
- d = TanhDelta(x, -torch.ones_like(x), torch.ones_like(x), atol=1e-4, rtol=1e-4)
- xinv = d.transforms[0].inv(d.mode)
- assert d.base_dist._is_equal(xinv).all()
- assert d.log_prob(d.mode).shape == x.shape[:-1]
- assert (d.log_prob(d.mode) == float("inf")).all()
+ def test_tanhdelta_inv_ones(self, device):
+ x = torch.randn(1000000, 4, device=device)
+ d = TanhDelta(x, -torch.ones_like(x), torch.ones_like(x), atol=1e-4, rtol=1e-4)
+ xinv = d.transforms[0].inv(d.mode)
+ assert d.base_dist._is_equal(xinv).all()
+ assert d.log_prob(d.mode).shape == x.shape[:-1]
+ assert (d.log_prob(d.mode) == float("inf")).all()
def _map_all(*tensors_or_other, device):
@@ -74,42 +80,43 @@ def _map_all(*tensors_or_other, device):
yield t
[email protected](
- "min", [-torch.ones(3), -1, 3 * torch.tensor([-1.0, -2.0, -0.5]), -0.1]
-)
[email protected](
- "max", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 0.1]
-)
[email protected](
- "vecs",
- [
- (torch.tensor([0.1, 10.0, 5.0]), torch.tensor([0.1, 10.0, 5.0])),
- (torch.zeros(7, 3), torch.ones(7, 3)),
- ],
-)
[email protected](
- "upscale", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 3]
-)
[email protected]("shape", [torch.Size([]), torch.Size([3, 4])])
[email protected]("device", get_default_devices())
-def test_tanhnormal(min, max, vecs, upscale, shape, device):
- min, max, vecs, upscale, shape = _map_all(
- min, max, vecs, upscale, shape, device=device
+class TestTanhNormal:
+ @pytest.mark.parametrize(
+ "min", [-torch.ones(3), -1, 3 * torch.tensor([-1.0, -2.0, -0.5]), -0.1]
)
- torch.manual_seed(0)
- d = TanhNormal(
- *vecs,
- upscale=upscale,
- min=min,
- max=max,
+ @pytest.mark.parametrize(
+ "max", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 0.1]
)
- for _ in range(100):
- a = d.rsample(shape)
- assert a.shape[: len(shape)] == shape
- assert (a >= d.min).all()
- assert (a <= d.max).all()
- lp = d.log_prob(a)
- assert torch.isfinite(lp).all()
+ @pytest.mark.parametrize(
+ "vecs",
+ [
+ (torch.tensor([0.1, 10.0, 5.0]), torch.tensor([0.1, 10.0, 5.0])),
+ (torch.zeros(7, 3), torch.ones(7, 3)),
+ ],
+ )
+ @pytest.mark.parametrize(
+ "upscale", [torch.ones(3), 1, 3 * torch.tensor([1.0, 2.0, 0.5]), 3]
+ )
+ @pytest.mark.parametrize("shape", [torch.Size([]), torch.Size([3, 4])])
+ @pytest.mark.parametrize("device", get_default_devices())
+ def test_tanhnormal(self, min, max, vecs, upscale, shape, device):
+ min, max, vecs, upscale, shape = _map_all(
+ min, max, vecs, upscale, shape, device=device
+ )
+ torch.manual_seed(0)
+ d = TanhNormal(
+ *vecs,
+ upscale=upscale,
+ min=min,
+ max=max,
+ )
+ for _ in range(100):
+ a = d.rsample(shape)
+ assert a.shape[: len(shape)] == shape
+ assert (a >= d.min).all()
+ assert (a <= d.max).all()
+ lp = d.log_prob(a)
+ assert torch.isfinite(lp).all()
@pytest.mark.parametrize(
@@ -130,24 +137,40 @@ def test_tanhnormal(min, max, vecs, upscale, shape, device):
)
@pytest.mark.parametrize("shape", [torch.Size([]), torch.Size([3, 4])])
@pytest.mark.parametrize("device", get_default_devices())
-def test_truncnormal(min, max, vecs, upscale, shape, device):
- torch.manual_seed(0)
- min, max, vecs, upscale, shape = _map_all(
- min, max, vecs, upscale, shape, device=device
- )
- d = TruncatedNormal(
- *vecs,
- upscale=upscale,
- min=min,
- max=max,
- )
- for _ in range(100):
- a = d.rsample(shape)
- assert a.shape[: len(shape)] == shape
- assert (a >= d.min).all()
- assert (a <= d.max).all()
- lp = d.log_prob(a)
- assert torch.isfinite(lp).all()
+class TestTruncatedNormal:
+ def test_truncnormal(self, min, max, vecs, upscale, shape, device):
+ torch.manual_seed(0)
+ min, max, vecs, upscale, shape = _map_all(
+ min, max, vecs, upscale, shape, device=device
+ )
+ d = TruncatedNormal(
+ *vecs,
+ upscale=upscale,
+ min=min,
+ max=max,
+ )
+ for _ in range(100):
+ a = d.rsample(shape)
+ assert a.shape[: len(shape)] == shape
+ assert (a >= d.min).all()
+ assert (a <= d.max).all()
+ lp = d.log_prob(a)
+ assert torch.isfinite(lp).all()
+
+ def test_truncnormal_mode(self, min, max, vecs, upscale, shape, device):
+ torch.manual_seed(0)
+ min, max, vecs, upscale, shape = _map_all(
+ min, max, vecs, upscale, shape, device=device
+ )
+ d = TruncatedNormal(
+ *vecs,
+ upscale=upscale,
+ min=min,
+ max=max,
+ )
+ assert d.mode is not None
+ assert d.entropy() is not None
+ assert d.mean is not None
@pytest.mark.parametrize(
diff --git a/torchrl/modules/distributions/truncated_normal.py b/torchrl/modules/distributions/truncated_normal.py
index 1dfde393709..59b95658ea5 100644
--- a/torchrl/modules/distributions/truncated_normal.py
+++ b/torchrl/modules/distributions/truncated_normal.py
@@ -87,7 +87,6 @@ def mean(self):
def variance(self):
return self._variance
- @property
def entropy(self):
return self._entropy
|
conda__conda-build-3082 | Build variants do not work with source repo revision variations
With a `meta.yaml` that contains:
```
source:
svn_url: an_url
svn_rev: {{ version }}
```
and a `conda_build_config.yaml` with:
```
version:
- 1
- 2
```
Both packages end up built from the sources of the first version; the repo is only checked out once.
Am I doing this properly, or is it a bug?
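
One way to see whether the variant value actually reaches the source section is to render the recipe once per variant (a diagnostic sketch, not a fix; `conda_build.api.render` ships with conda-build, and `./my-recipe` is a placeholder path):

```python
# Diagnostic sketch: render the recipe for each variant in
# conda_build_config.yaml and inspect the resolved source section.
# svn_rev should differ between the two rendered outputs.
from conda_build import api

for meta, _, _ in api.render('./my-recipe'):
    print(meta.get_value('package/version'),
          meta.get_value('source/0/svn_rev'))
```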
| [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport contextlib\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport warnings\nfrom glob import glob\nfrom os.path import join, normpath\n\n# noqa here because PY3 is used only on windows, and trips up flake8 otherwise.\nfrom .conda_interface import text_type, PY3 # noqa\nfrom .conda_interface import (CondaError, LinkError, LockError, NoPackagesFoundError,\n PaddingError, UnsatisfiableError)\nfrom .conda_interface import display_actions, execute_actions, execute_plan, install_actions\nfrom .conda_interface import memoized\nfrom .conda_interface import package_cache, TemporaryDirectory\nfrom .conda_interface import pkgs_dirs, root_dir, symlink_conda\n\nfrom conda_build import utils\nfrom conda_build.exceptions import DependencyNeedsBuildingError\nfrom conda_build.features import feature_list\nfrom conda_build.index import get_build_index\nfrom conda_build.os_utils import external\nfrom conda_build.utils import ensure_list, prepend_bin_path\nfrom conda_build.variants import get_default_variant\n\n\n# these are things that we provide env vars for more explicitly. This list disables the\n# pass-through of variant values to env vars for these keys.\nLANGUAGES = ('PERL', 'LUA', 'R', \"NUMPY\", 'PYTHON')\n\n\ndef get_perl_ver(config):\n return '.'.join(config.variant.get('perl', get_default_variant(config)['perl']).split('.')[:2])\n\n\ndef get_lua_ver(config):\n return '.'.join(config.variant.get('lua', get_default_variant(config)['lua']).split('.')[:2])\n\n\ndef get_py_ver(config):\n py = config.variant.get('python', get_default_variant(config)['python'])\n if not hasattr(py, 'split'):\n py = py[0]\n return '.'.join(py.split('.')[:2])\n\n\ndef get_r_ver(config):\n return '.'.join(config.variant.get('r_base',\n get_default_variant(config)['r_base']).split('.')[:3])\n\n\ndef get_npy_ver(config):\n conda_npy = ''.join(str(config.variant.get('numpy') or\n get_default_variant(config)['numpy']).split('.'))\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n return conda_npy[0] + '.' + conda_npy[1:]\n\n\ndef get_lua_include_dir(config):\n return join(config.host_prefix, \"include\")\n\n\n@memoized\ndef verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=False,\n expected_rev='HEAD'):\n env = os.environ.copy()\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n if not expected_rev:\n return False\n\n OK = True\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit (minus our locally applied patches) matches expected commit\n current_commit = utils.check_output_env([git_exe,\n \"log\",\n \"-n1\",\n \"--format=%H\",\n \"HEAD\" + \"^\" * git_commits_since_tag],\n env=env, stderr=stderr)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = utils.check_output_env([git_exe, \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=stderr)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n if current_commit != expected_tag_commit:\n return False\n\n # Verify correct remote url. 
Need to find the git cache directory,\n # and check the remote from there.\n cache_details = utils.check_output_env([git_exe, \"remote\", \"-v\"], env=env,\n stderr=stderr)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n\n if not isinstance(cache_dir, str):\n # On Windows, subprocess env can't handle unicode.\n cache_dir = cache_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n try:\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n except subprocess.CalledProcessError:\n if sys.platform == 'win32' and cache_dir.startswith('/'):\n cache_dir = utils.convert_unix_path_to_win(cache_dir)\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n\n # on windows, remote URL comes back to us as cygwin or msys format. Python doesn't\n # know how to normalize it. Need to convert it to a windows path.\n if sys.platform == 'win32' and remote_url.startswith('/'):\n remote_url = utils.convert_unix_path_to_win(git_url)\n\n if os.path.exists(remote_url):\n # Local filepaths are allowed, but make sure we normalize them\n remote_url = normpath(remote_url)\n\n # If the current source directory in conda-bld/work doesn't match the user's\n # metadata git_url or git_rev, then we aren't looking at the right source.\n if not os.path.isdir(remote_url) and remote_url.lower() != git_url.lower():\n log.debug(\"remote does not match git_url\")\n log.debug(\"Remote: \" + remote_url.lower())\n log.debug(\"git_url: \" + git_url.lower())\n OK = False\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git information in verify_git_repo. Error was: \")\n log.debug(str(error))\n OK = False\n finally:\n if not debug:\n FNULL.close()\n return OK\n\n\n@memoized\ndef get_git_info(git_exe, repo, debug):\n \"\"\"\n Given a repo to a git repo, return a dictionary of:\n GIT_DESCRIBE_TAG\n GIT_DESCRIBE_NUMBER\n GIT_DESCRIBE_HASH\n GIT_FULL_HASH\n GIT_BUILD_STR\n from the output of git describe.\n :return:\n \"\"\"\n d = {}\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n # grab information from describe\n env = os.environ.copy()\n env['GIT_DIR'] = repo\n keys = [\"GIT_DESCRIBE_TAG\", \"GIT_DESCRIBE_NUMBER\", \"GIT_DESCRIBE_HASH\"]\n\n try:\n output = utils.check_output_env([git_exe, \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n if len(parts) == 3:\n d.update(dict(zip(keys, parts)))\n except subprocess.CalledProcessError:\n msg = (\n \"Failed to obtain git tag information.\\n\"\n \"Consider using annotated tags if you are not already \"\n \"as they are more reliable when used with git describe.\"\n )\n log.debug(msg)\n\n try:\n # get the _full_ hash of the current HEAD\n output = utils.check_output_env([git_exe, \"rev-parse\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n\n d['GIT_FULL_HASH'] = output\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git commit information. 
Error was: \")\n log.debug(str(error))\n\n # set up the build string\n if \"GIT_DESCRIBE_NUMBER\" in d and \"GIT_DESCRIBE_HASH\" in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[\"GIT_DESCRIBE_NUMBER\"],\n d[\"GIT_DESCRIBE_HASH\"])\n\n # issues on Windows with the next line of the command prompt being recorded here.\n assert not any(\"\\n\" in value for value in d.values())\n return d\n\n\ndef get_hg_build_info(repo):\n env = os.environ.copy()\n env['HG_DIR'] = repo\n env = {str(key): str(value) for key, value in env.items()}\n\n d = {}\n cmd = [\"hg\", \"log\", \"--template\",\n \"{rev}|{node|short}|{latesttag}|{latesttagdistance}|{branch}\",\n \"--rev\", \".\"]\n output = utils.check_output_env(cmd, env=env, cwd=os.path.dirname(repo))\n output = output.decode('utf-8')\n rev, short_id, tag, distance, branch = output.split('|')\n if tag != 'null':\n d['HG_LATEST_TAG'] = tag\n if branch == \"\":\n branch = 'default'\n d['HG_BRANCH'] = branch\n d['HG_NUM_ID'] = rev\n d['HG_LATEST_TAG_DISTANCE'] = distance\n d['HG_SHORT_ID'] = short_id\n d['HG_BUILD_STR'] = '{}_{}'.format(d['HG_NUM_ID'], d['HG_SHORT_ID'])\n return d\n\n\ndef get_dict(m, prefix=None, for_env=True, skip_build_id=False, escape_backslash=False):\n if not prefix:\n prefix = m.config.host_prefix\n\n # conda-build specific vars\n d = conda_build_vars(prefix, m.config)\n\n # languages\n d.update(python_vars(m, prefix, escape_backslash))\n d.update(perl_vars(m, prefix, escape_backslash))\n d.update(lua_vars(m, prefix, escape_backslash))\n d.update(r_vars(m, prefix, escape_backslash))\n\n if m:\n d.update(meta_vars(m, skip_build_id=skip_build_id))\n\n # system\n d.update(system_vars(d, m, prefix))\n\n # features\n d.update({feat.upper(): str(int(value)) for feat, value in\n feature_list})\n\n for k, v in m.config.variant.items():\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\n d[k] = v\n return d\n\n\ndef conda_build_vars(prefix, config):\n src_dir = config.test_dir if os.path.basename(prefix)[:2] == '_t' else config.work_dir\n return {\n 'CONDA_BUILD': '1',\n 'PYTHONNOUSERSITE': '1',\n 'CONDA_DEFAULT_ENV': config.host_prefix,\n 'ARCH': str(config.host_arch),\n # This is the one that is most important for where people put artifacts that get bundled.\n # It is fed from our function argument, and can be any of:\n # 1. Build prefix - when host requirements are not explicitly set,\n # then prefix = build prefix = host prefix\n # 2. Host prefix - when host requirements are explicitly set, prefix = host prefix\n # 3. Test prefix - during test runs, this points at the test prefix\n 'PREFIX': prefix,\n # This is for things that are specifically build tools. 
Things that run on the build\n # platform, but probably should not be linked against, since they may not run on the\n # destination host platform\n # It can be equivalent to config.host_prefix if the host section is not explicitly set.\n 'BUILD_PREFIX': config.build_prefix,\n 'SYS_PREFIX': sys.prefix,\n 'SYS_PYTHON': sys.executable,\n 'SUBDIR': config.host_subdir,\n 'SRC_DIR': src_dir,\n 'HTTPS_PROXY': os.getenv('HTTPS_PROXY', ''),\n 'HTTP_PROXY': os.getenv('HTTP_PROXY', ''),\n 'REQUESTS_CA_BUNDLE': os.getenv('REQUESTS_CA_BUNDLE', ''),\n 'DIRTY': '1' if config.dirty else '',\n 'ROOT': root_dir,\n }\n\n\ndef python_vars(metadata, prefix, escape_backslash):\n py_ver = get_py_ver(metadata.config)\n stdlib_dir = utils.get_stdlib_dir(prefix, py_ver)\n sp_dir = utils.get_site_packages(prefix, py_ver)\n\n if utils.on_win and escape_backslash:\n stdlib_dir = stdlib_dir.replace('\\\\', '\\\\\\\\')\n sp_dir = sp_dir.replace('\\\\', '\\\\\\\\')\n\n vars_ = {\n 'CONDA_PY': ''.join(py_ver.split('.')[:2]),\n 'PY3K': str(int(int(py_ver[0]) >= 3)),\n 'PY_VER': py_ver,\n 'STDLIB_DIR': stdlib_dir,\n 'SP_DIR': sp_dir,\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'python' in deps or metadata.name(fail_ok=True) == 'python':\n python_bin = metadata.config.python_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n python_bin = python_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PYTHON': python_bin,\n })\n\n np_ver = metadata.config.variant.get('numpy', get_default_variant(metadata.config)['numpy'])\n vars_['NPY_VER'] = '.'.join(np_ver.split('.')[:2])\n vars_['CONDA_NPY'] = ''.join(np_ver.split('.')[:2])\n vars_['NPY_DISTUTILS_APPEND_FLAGS'] = '1'\n return vars_\n\n\ndef perl_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'PERL_VER': get_perl_ver(metadata.config),\n 'CONDA_PERL': get_perl_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'perl' in deps or metadata.name(fail_ok=True) == 'perl':\n perl_bin = metadata.config.perl_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n perl_bin = perl_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PERL': perl_bin,\n })\n return vars_\n\n\ndef lua_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'LUA_VER': get_lua_ver(metadata.config),\n 'CONDA_LUA': get_lua_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'lua' in deps:\n lua_bin = metadata.config.lua_bin(prefix, metadata.config.host_subdir)\n lua_include_dir = get_lua_include_dir(metadata.config)\n\n if utils.on_win and escape_backslash:\n lua_bin = lua_bin.replace('\\\\', '\\\\\\\\')\n lua_include_dir = lua_include_dir.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'LUA': lua_bin,\n 'LUA_INCLUDE_DIR': lua_include_dir,\n })\n return vars_\n\n\ndef r_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'R_VER': get_r_ver(metadata.config),\n 'CONDA_R': get_r_ver(metadata.config),\n }\n\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'r-base' in deps or 
'mro-base' in deps or metadata.name(fail_ok=True) in (\n 'r-base', 'mro-base'):\n r_bin = metadata.config.r_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n r_bin = r_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'R': r_bin,\n })\n return vars_\n\n\ndef meta_vars(meta, skip_build_id=False):\n d = {}\n for var_name in ensure_list(meta.get_value('build/script_env', [])):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n warnings.warn(\n \"The environment variable '%s' is being passed through with value %s. \"\n \"If you are splitting build and test phases with --no-test, please ensure \"\n \"that this value is also set similarly at test time.\" % (var_name, value),\n UserWarning\n )\n\n folder = meta.get_value('source/0/folder', '')\n repo_dir = join(meta.config.work_dir, folder)\n git_dir = join(repo_dir, '.git')\n hg_dir = join(repo_dir, '.hg')\n\n if not isinstance(git_dir, str):\n # On Windows, subprocess env can't handle unicode.\n git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n git_exe = external.find_executable('git', meta.config.build_prefix)\n if git_exe and os.path.exists(git_dir):\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\n git_url = meta.get_value('source/0/git_url')\n\n if os.path.exists(git_url):\n if sys.platform == 'win32':\n git_url = utils.convert_unix_path_to_win(git_url)\n # If git_url is a relative path instead of a url, convert it to an abspath\n git_url = normpath(join(meta.path, git_url))\n\n _x = False\n\n if git_url:\n _x = verify_git_repo(git_exe,\n git_dir,\n git_url,\n meta.config.git_commits_since_tag,\n meta.config.debug,\n meta.get_value('source/0/git_rev', 'HEAD'))\n\n if _x or meta.get_value('source/0/path'):\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\n\n elif external.find_executable('hg', meta.config.build_prefix) and os.path.exists(hg_dir):\n d.update(get_hg_build_info(hg_dir))\n\n # use `get_value` to prevent early exit while name is still unresolved during rendering\n d['PKG_NAME'] = meta.get_value('package/name')\n d['PKG_VERSION'] = meta.version()\n d['PKG_BUILDNUM'] = str(meta.build_number() or 0)\n if meta.final and not skip_build_id:\n d['PKG_BUILD_STRING'] = str(meta.build_id())\n d['PKG_HASH'] = meta.hash_dependencies()\n else:\n d['PKG_BUILD_STRING'] = 'placeholder'\n d['PKG_HASH'] = '1234567'\n d['RECIPE_DIR'] = (meta.path if meta.path else\n meta.meta.get('extra', {}).get('parent_recipe', {}).get('path', ''))\n return d\n\n\n@memoized\ndef get_cpu_count():\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, _ = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=subprocess.PIPE).communicate()\n return out.decode('utf-8').strip()\n else:\n try:\n return str(multiprocessing.cpu_count())\n except NotImplementedError:\n return \"1\"\n\n\ndef get_shlib_ext():\n # Return the shared library extension.\n if sys.platform == 'win32':\n return '.dll'\n elif sys.platform == 'darwin':\n return '.dylib'\n elif sys.platform.startswith('linux'):\n return '.so'\n else:\n raise NotImplementedError(sys.platform)\n\n\ndef windows_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n # We have gone for the clang values here.\n win_arch = 'i386' if 
str(m.config.host_arch) == '32' else 'amd64'\n win_msvc = '19.0.0' if PY3 else '15.0.0'\n library_prefix = join(prefix, 'Library')\n drive, tail = m.config.host_prefix.split(':')\n get_default('SCRIPTS', join(prefix, 'Scripts'))\n get_default('LIBRARY_PREFIX', library_prefix)\n get_default('LIBRARY_BIN', join(library_prefix, 'bin'))\n get_default('LIBRARY_INC', join(library_prefix, 'include'))\n get_default('LIBRARY_LIB', join(library_prefix, 'lib'))\n get_default('CYGWIN_PREFIX', ''.join(('/cygdrive/', drive.lower(), tail.replace('\\\\', '/'))))\n # see https://en.wikipedia.org/wiki/Environment_variable#Default_values\n get_default('ALLUSERSPROFILE')\n get_default('APPDATA')\n get_default('CommonProgramFiles')\n get_default('CommonProgramFiles(x86)')\n get_default('CommonProgramW6432')\n get_default('COMPUTERNAME')\n get_default('ComSpec')\n get_default('HOMEDRIVE')\n get_default('HOMEPATH')\n get_default('LOCALAPPDATA')\n get_default('LOGONSERVER')\n get_default('NUMBER_OF_PROCESSORS')\n get_default('PATHEXT')\n get_default('ProgramData')\n get_default('ProgramFiles')\n get_default('ProgramFiles(x86)')\n get_default('ProgramW6432')\n get_default('PROMPT')\n get_default('PSModulePath')\n get_default('PUBLIC')\n get_default('SystemDrive')\n get_default('SystemRoot')\n get_default('TEMP')\n get_default('TMP')\n get_default('USERDOMAIN')\n get_default('USERNAME')\n get_default('USERPROFILE')\n get_default('windir')\n # CPU data, see https://github.com/conda/conda-build/issues/2064\n get_default('PROCESSOR_ARCHITEW6432')\n get_default('PROCESSOR_ARCHITECTURE')\n get_default('PROCESSOR_IDENTIFIER')\n get_default('BUILD', win_arch + '-pc-windows-' + win_msvc)\n for env_var in os.environ.keys():\n if re.match('VS[0-9]{2,3}COMNTOOLS', env_var):\n get_default(env_var)\n\n\ndef unix_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n get_default('HOME', 'UNKNOWN')\n get_default('PKG_CONFIG_PATH', join(prefix, 'lib', 'pkgconfig'))\n get_default('CMAKE_GENERATOR', 'Unix Makefiles')\n get_default('SSL_CERT_FILE')\n\n\ndef osx_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n OSX_ARCH = 'i386' if str(m.config.host_arch) == '32' else 'x86_64'\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n # d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n get_default('OSX_ARCH', OSX_ARCH)\n get_default('MACOSX_DEPLOYMENT_TARGET', '10.9')\n get_default('BUILD', OSX_ARCH + '-apple-darwin13.4.0')\n\n\n@memoized\ndef _machine_and_architecture():\n return platform.machine(), platform.architecture()\n\n\ndef linux_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n platform_machine, platform_architecture = _machine_and_architecture()\n build_arch = platform_machine\n # Python reports x86_64 when running a i686 Python binary on a 64-bit CPU\n # unless run through linux32. Issue a warning when we detect this.\n if build_arch == 'x86_64' and platform_architecture[0] == '32bit':\n print(\"Warning: You are running 32-bit Python on a 64-bit linux installation\")\n print(\" but have not launched it via linux32. Various qeuries *will*\")\n print(\" give unexpected results (uname -m, platform.machine() etc)\")\n build_arch = 'i686'\n # the GNU triplet is powerpc, not ppc. 
This matters.\n if build_arch.startswith('ppc'):\n build_arch = build_arch.replace('ppc', 'powerpc')\n if build_arch.startswith('powerpc'):\n build_distro = 'cos7'\n else:\n build_distro = 'cos6'\n # There is also QEMU_SET_ENV, but that needs to be\n # filtered so it only contains the result of `linux_vars`\n # which, before this change was empty, and after it only\n # contains other QEMU env vars.\n get_default('CFLAGS')\n get_default('CXXFLAGS')\n get_default('LDFLAGS')\n get_default('QEMU_LD_PREFIX')\n get_default('QEMU_UNAME')\n get_default('DEJAGNU')\n get_default('DISPLAY')\n get_default('LD_RUN_PATH', prefix + '/lib')\n get_default('BUILD', build_arch + '-conda_' + build_distro + '-linux-gnu')\n\n\ndef set_from_os_or_variant(out_dict, key, variant, default):\n value = os.getenv(key)\n if not value:\n value = variant.get(key, default)\n if value:\n out_dict[key] = value\n\n\n@memoized\ndef system_vars(env_dict, m, prefix):\n d = dict()\n # note the dictionary is passed in here - variables are set in that dict if they are non-null\n get_default = lambda key, default='': set_from_os_or_variant(d, key, m.config.variant, default)\n\n get_default('CPU_COUNT', get_cpu_count())\n get_default('LANG')\n get_default('LC_ALL')\n get_default('MAKEFLAGS')\n d['SHLIB_EXT'] = get_shlib_ext()\n d['PATH'] = os.environ.copy()['PATH']\n\n if not m.config.activate:\n d = prepend_bin_path(d, m.config.host_prefix)\n\n if sys.platform == 'win32':\n windows_vars(m, get_default, prefix)\n else:\n unix_vars(m, get_default, prefix)\n\n if sys.platform == 'darwin':\n osx_vars(m, get_default, prefix)\n elif sys.platform.startswith('linux'):\n linux_vars(m, get_default, prefix)\n\n return d\n\n\nclass InvalidEnvironment(Exception):\n pass\n\n\n# Stripped-down Environment class from conda-tools ( https://github.com/groutr/conda-tools )\n# Vendored here to avoid the whole dependency for just this bit.\ndef _load_json(path):\n with open(path, 'r') as fin:\n x = json.load(fin)\n return x\n\n\ndef _load_all_json(path):\n \"\"\"\n Load all json files in a directory. 
Return dictionary with filenames mapped to json\n dictionaries.\n \"\"\"\n root, _, files = next(utils.walk(path))\n result = {}\n for f in files:\n if f.endswith('.json'):\n result[f] = _load_json(join(root, f))\n return result\n\n\nclass Environment(object):\n def __init__(self, path):\n \"\"\"\n Initialize an Environment object.\n\n To reflect changes in the underlying environment, a new Environment object should be\n created.\n \"\"\"\n self.path = path\n self._meta = join(path, 'conda-meta')\n if os.path.isdir(path) and os.path.isdir(self._meta):\n self._packages = {}\n else:\n raise InvalidEnvironment('Unable to load environment {}'.format(path))\n\n def _read_package_json(self):\n if not self._packages:\n self._packages = _load_all_json(self._meta)\n\n def package_specs(self):\n \"\"\"\n List all package specs in the environment.\n \"\"\"\n self._read_package_json()\n json_objs = self._packages.values()\n specs = []\n for i in json_objs:\n p, v, b = i['name'], i['version'], i['build']\n specs.append('{} {} {}'.format(p, v, b))\n return specs\n\n\ncached_actions = {}\nlast_index_ts = 0\n\n\ndef get_install_actions(prefix, specs, env, retries=0, subdir=None,\n verbose=True, debug=False, locking=True,\n bldpkgs_dirs=None, timeout=90, disable_pip=False,\n max_env_retry=3, output_folder=None, channel_urls=None):\n global cached_actions\n global last_index_ts\n actions = {}\n log = utils.get_logger(__name__)\n conda_log_level = logging.WARN\n specs = list(specs)\n if verbose:\n capture = contextlib.contextmanager(lambda: (yield))\n elif debug:\n capture = contextlib.contextmanager(lambda: (yield))\n conda_log_level = logging.DEBUG\n else:\n capture = utils.capture\n for feature, value in feature_list:\n if value:\n specs.append('%s@' % feature)\n\n bldpkgs_dirs = ensure_list(bldpkgs_dirs)\n\n index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0], output_folder=output_folder,\n channel_urls=channel_urls, debug=debug, verbose=verbose,\n locking=locking, timeout=timeout)\n specs = tuple(utils.ensure_valid_spec(spec) for spec in specs if not str(spec).endswith('@'))\n\n if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and\n last_index_ts >= index_ts):\n actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()\n if \"PREFIX\" in actions:\n actions['PREFIX'] = prefix\n elif specs:\n # this is hiding output like:\n # Fetching package metadata ...........\n # Solving package specifications: ..........\n with utils.LoggingContext(conda_log_level):\n with capture():\n try:\n actions = install_actions(prefix, index, specs, force=True)\n except (NoPackagesFoundError, UnsatisfiableError) as exc:\n raise DependencyNeedsBuildingError(exc, subdir=subdir)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError, AssertionError) as exc:\n if 'lock' in str(exc):\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc) or\n isinstance(exc, AssertionError)):\n locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)\n with utils.try_acquire_locks(locks, timeout=timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. 
\"\n \"Removing the folder and retrying\", pkg_dir)\n if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retries < max_env_retry:\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n actions = get_install_actions(prefix, tuple(specs), env,\n retries=retries + 1,\n subdir=subdir,\n verbose=verbose,\n debug=debug,\n locking=locking,\n bldpkgs_dirs=tuple(bldpkgs_dirs),\n timeout=timeout,\n disable_pip=disable_pip,\n max_env_retry=max_env_retry,\n output_folder=output_folder,\n channel_urls=tuple(channel_urls))\n else:\n log.error(\"Failed to get install actions, max retries exceeded.\")\n raise\n if disable_pip:\n for pkg in ('pip', 'setuptools', 'wheel'):\n # specs are the raw specifications, not the conda-derived actual specs\n # We're testing that pip etc. are manually specified\n if not any(re.match('^%s(?:$|[\\s=].*)' % pkg, str(dep)) for dep in specs):\n actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]\n utils.trim_empty_keys(actions)\n cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()\n last_index_ts = index_ts\n return actions\n\n\ndef create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True, retry=0,\n locks=None, is_cross=False, is_conda=False):\n '''\n Create a conda envrionment for the given prefix and specs.\n '''\n if config.debug:\n external_logger_context = utils.LoggingContext(logging.DEBUG)\n else:\n external_logger_context = utils.LoggingContext(logging.WARN)\n\n with external_logger_context:\n log = utils.get_logger(__name__)\n\n # if os.path.isdir(prefix):\n # utils.rm_rf(prefix)\n\n if specs_or_actions: # Don't waste time if there is nothing to do\n log.debug(\"Creating environment in %s\", prefix)\n log.debug(str(specs_or_actions))\n\n with utils.path_prepended(prefix):\n if not locks:\n locks = utils.get_conda_operation_locks(config)\n try:\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n # input is a list - it's specs in MatchSpec format\n if not hasattr(specs_or_actions, 'keys'):\n specs = list(set(specs_or_actions))\n actions = get_install_actions(prefix, tuple(specs), env,\n subdir=subdir,\n verbose=config.verbose,\n debug=config.debug,\n locking=config.locking,\n bldpkgs_dirs=tuple(config.bldpkgs_dirs),\n timeout=config.timeout,\n disable_pip=config.disable_pip,\n max_env_retry=config.max_env_retry,\n output_folder=config.output_folder,\n channel_urls=tuple(config.channel_urls))\n else:\n actions = specs_or_actions\n index, index_ts = get_build_index(subdir=subdir,\n bldpkgs_dir=config.bldpkgs_dir,\n output_folder=config.output_folder,\n channel_urls=config.channel_urls,\n debug=config.debug,\n verbose=config.verbose,\n locking=config.locking,\n timeout=config.timeout)\n utils.trim_empty_keys(actions)\n display_actions(actions, index)\n if utils.on_win:\n for k, v in os.environ.items():\n os.environ[k] = str(v)\n execute_actions(actions, index, verbose=config.debug)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError) as exc:\n if ((\"too short in\" in str(exc) or\n re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl', str(exc)) or\n isinstance(exc, PaddingError)) and\n config.prefix_length > 80):\n if config.prefix_length_fallback:\n log.warn(\"Build prefix failed with prefix length %d\",\n config.prefix_length)\n log.warn(\"Error was: \")\n log.warn(str(exc))\n log.warn(\"One or more of your package dependencies needs to be rebuilt \"\n \"with a longer prefix 
length.\")\n log.warn(\"Falling back to legacy prefix length of 80 characters.\")\n log.warn(\"Your package will not install into prefixes > 80 characters.\")\n config.prefix_length = 80\n\n host = '_h_env' in prefix\n # Set this here and use to create environ\n # Setting this here is important because we use it below (symlink)\n prefix = config.host_prefix if host else config.build_prefix\n actions['PREFIX'] = prefix\n\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, is_cross=is_cross)\n else:\n raise\n elif 'lock' in str(exc):\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc)):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n else:\n raise\n # HACK: some of the time, conda screws up somehow and incomplete packages result.\n # Just retry.\n except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:\n if isinstance(exc, AssertionError):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = os.path.dirname(os.path.dirname(str(exc)))\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n\n if not is_conda:\n # Symlinking conda is critical here to make sure that activate scripts are not\n # accidentally included in packages.\n if utils.on_win:\n shell = \"cmd.exe\"\n else:\n shell = \"bash\"\n symlink_conda(prefix, sys.prefix, shell)\n\n\ndef clean_pkg_cache(dist, config):\n locks = []\n\n conda_log_level = logging.WARN\n if config.debug:\n conda_log_level = logging.DEBUG\n\n _pkgs_dirs = pkgs_dirs[:1]\n if config.locking:\n locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]\n with utils.LoggingContext(conda_log_level):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n rmplan = [\n 'RM_EXTRACTED {0} local::{0}'.format(dist),\n 'RM_FETCHED {0} local::{0}'.format(dist),\n ]\n execute_plan(rmplan)\n\n # Conda does not seem to do a complete cleanup sometimes. 
This is supplemental.\n # Conda's cleanup is still necessary - it keeps track of its own in-memory\n # list of downloaded things.\n for folder in pkgs_dirs:\n try:\n assert not os.path.exists(os.path.join(folder, dist))\n assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))\n for pkg_id in [dist, 'local::' + dist]:\n assert pkg_id not in package_cache()\n except AssertionError:\n log = utils.get_logger(__name__)\n log.debug(\"Conda caching error: %s package remains in cache after removal\",\n dist)\n log.debug(\"manually removing to compensate\")\n cache = package_cache()\n keys = [key for key in cache.keys() if dist in key]\n for pkg_id in keys:\n if pkg_id in cache:\n del cache[pkg_id]\n for entry in glob(os.path.join(folder, dist + '*')):\n utils.rm_rf(entry)\n\n\ndef get_pinned_deps(m, section):\n with TemporaryDirectory(prefix='_') as tmpdir:\n actions = get_install_actions(tmpdir,\n tuple(m.ms_depends(section)), section,\n subdir=m.config.target_subdir,\n debug=m.config.debug,\n verbose=m.config.verbose,\n locking=m.config.locking,\n bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),\n timeout=m.config.timeout,\n disable_pip=m.config.disable_pip,\n max_env_retry=m.config.max_env_retry,\n output_folder=m.config.output_folder,\n channel_urls=tuple(m.config.channel_urls))\n runtime_deps = [' '.join(link.dist_name.rsplit('-', 2)) for link in actions.get('LINK', [])]\n return runtime_deps\n",
"path": "conda_build/environ.py"
}
] | [
{
"content": "from __future__ import absolute_import, division, print_function\n\nimport contextlib\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport warnings\nfrom glob import glob\nfrom os.path import join, normpath\n\n# noqa here because PY3 is used only on windows, and trips up flake8 otherwise.\nfrom .conda_interface import text_type, PY3 # noqa\nfrom .conda_interface import (CondaError, LinkError, LockError, NoPackagesFoundError,\n PaddingError, UnsatisfiableError)\nfrom .conda_interface import display_actions, execute_actions, execute_plan, install_actions\nfrom .conda_interface import memoized\nfrom .conda_interface import package_cache, TemporaryDirectory\nfrom .conda_interface import pkgs_dirs, root_dir, symlink_conda\n\nfrom conda_build import utils\nfrom conda_build.exceptions import DependencyNeedsBuildingError\nfrom conda_build.features import feature_list\nfrom conda_build.index import get_build_index\nfrom conda_build.os_utils import external\nfrom conda_build.utils import ensure_list, prepend_bin_path\nfrom conda_build.variants import get_default_variant\n\n\n# these are things that we provide env vars for more explicitly. This list disables the\n# pass-through of variant values to env vars for these keys.\nLANGUAGES = ('PERL', 'LUA', 'R', \"NUMPY\", 'PYTHON')\n\n\ndef get_perl_ver(config):\n return '.'.join(config.variant.get('perl', get_default_variant(config)['perl']).split('.')[:2])\n\n\ndef get_lua_ver(config):\n return '.'.join(config.variant.get('lua', get_default_variant(config)['lua']).split('.')[:2])\n\n\ndef get_py_ver(config):\n py = config.variant.get('python', get_default_variant(config)['python'])\n if not hasattr(py, 'split'):\n py = py[0]\n return '.'.join(py.split('.')[:2])\n\n\ndef get_r_ver(config):\n return '.'.join(config.variant.get('r_base',\n get_default_variant(config)['r_base']).split('.')[:3])\n\n\ndef get_npy_ver(config):\n conda_npy = ''.join(str(config.variant.get('numpy') or\n get_default_variant(config)['numpy']).split('.'))\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n return conda_npy[0] + '.' + conda_npy[1:]\n\n\ndef get_lua_include_dir(config):\n return join(config.host_prefix, \"include\")\n\n\n@memoized\ndef verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=False,\n expected_rev='HEAD'):\n env = os.environ.copy()\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n if not expected_rev:\n return False\n\n OK = True\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit (minus our locally applied patches) matches expected commit\n current_commit = utils.check_output_env([git_exe,\n \"log\",\n \"-n1\",\n \"--format=%H\",\n \"HEAD\" + \"^\" * git_commits_since_tag],\n env=env, stderr=stderr)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = utils.check_output_env([git_exe, \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=stderr)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n if current_commit != expected_tag_commit:\n return False\n\n # Verify correct remote url. 
Need to find the git cache directory,\n # and check the remote from there.\n cache_details = utils.check_output_env([git_exe, \"remote\", \"-v\"], env=env,\n stderr=stderr)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n\n if not isinstance(cache_dir, str):\n # On Windows, subprocess env can't handle unicode.\n cache_dir = cache_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n try:\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n except subprocess.CalledProcessError:\n if sys.platform == 'win32' and cache_dir.startswith('/'):\n cache_dir = utils.convert_unix_path_to_win(cache_dir)\n remote_details = utils.check_output_env([git_exe, \"--git-dir\", cache_dir,\n \"remote\", \"-v\"],\n env=env, stderr=stderr)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n\n # on windows, remote URL comes back to us as cygwin or msys format. Python doesn't\n # know how to normalize it. Need to convert it to a windows path.\n if sys.platform == 'win32' and remote_url.startswith('/'):\n remote_url = utils.convert_unix_path_to_win(git_url)\n\n if os.path.exists(remote_url):\n # Local filepaths are allowed, but make sure we normalize them\n remote_url = normpath(remote_url)\n\n # If the current source directory in conda-bld/work doesn't match the user's\n # metadata git_url or git_rev, then we aren't looking at the right source.\n if not os.path.isdir(remote_url) and remote_url.lower() != git_url.lower():\n log.debug(\"remote does not match git_url\")\n log.debug(\"Remote: \" + remote_url.lower())\n log.debug(\"git_url: \" + git_url.lower())\n OK = False\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git information in verify_git_repo. Error was: \")\n log.debug(str(error))\n OK = False\n finally:\n if not debug:\n FNULL.close()\n return OK\n\n\ndef get_git_info(git_exe, repo, debug):\n \"\"\"\n Given a repo to a git repo, return a dictionary of:\n GIT_DESCRIBE_TAG\n GIT_DESCRIBE_NUMBER\n GIT_DESCRIBE_HASH\n GIT_FULL_HASH\n GIT_BUILD_STR\n from the output of git describe.\n :return:\n \"\"\"\n d = {}\n log = utils.get_logger(__name__)\n\n if debug:\n stderr = None\n else:\n FNULL = open(os.devnull, 'w')\n stderr = FNULL\n\n # grab information from describe\n env = os.environ.copy()\n env['GIT_DIR'] = repo\n keys = [\"GIT_DESCRIBE_TAG\", \"GIT_DESCRIBE_NUMBER\", \"GIT_DESCRIBE_HASH\"]\n\n try:\n output = utils.check_output_env([git_exe, \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n if len(parts) == 3:\n d.update(dict(zip(keys, parts)))\n except subprocess.CalledProcessError:\n msg = (\n \"Failed to obtain git tag information.\\n\"\n \"Consider using annotated tags if you are not already \"\n \"as they are more reliable when used with git describe.\"\n )\n log.debug(msg)\n\n try:\n # get the _full_ hash of the current HEAD\n output = utils.check_output_env([git_exe, \"rev-parse\", \"HEAD\"],\n env=env, cwd=os.path.dirname(repo),\n stderr=stderr).splitlines()[0]\n output = output.decode('utf-8')\n\n d['GIT_FULL_HASH'] = output\n except subprocess.CalledProcessError as error:\n log.debug(\"Error obtaining git commit information. 
Error was: \")\n log.debug(str(error))\n\n # set up the build string\n if \"GIT_DESCRIBE_NUMBER\" in d and \"GIT_DESCRIBE_HASH\" in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[\"GIT_DESCRIBE_NUMBER\"],\n d[\"GIT_DESCRIBE_HASH\"])\n\n # issues on Windows with the next line of the command prompt being recorded here.\n assert not any(\"\\n\" in value for value in d.values())\n return d\n\n\ndef get_hg_build_info(repo):\n env = os.environ.copy()\n env['HG_DIR'] = repo\n env = {str(key): str(value) for key, value in env.items()}\n\n d = {}\n cmd = [\"hg\", \"log\", \"--template\",\n \"{rev}|{node|short}|{latesttag}|{latesttagdistance}|{branch}\",\n \"--rev\", \".\"]\n output = utils.check_output_env(cmd, env=env, cwd=os.path.dirname(repo))\n output = output.decode('utf-8')\n rev, short_id, tag, distance, branch = output.split('|')\n if tag != 'null':\n d['HG_LATEST_TAG'] = tag\n if branch == \"\":\n branch = 'default'\n d['HG_BRANCH'] = branch\n d['HG_NUM_ID'] = rev\n d['HG_LATEST_TAG_DISTANCE'] = distance\n d['HG_SHORT_ID'] = short_id\n d['HG_BUILD_STR'] = '{}_{}'.format(d['HG_NUM_ID'], d['HG_SHORT_ID'])\n return d\n\n\ndef get_dict(m, prefix=None, for_env=True, skip_build_id=False, escape_backslash=False):\n if not prefix:\n prefix = m.config.host_prefix\n\n # conda-build specific vars\n d = conda_build_vars(prefix, m.config)\n\n # languages\n d.update(python_vars(m, prefix, escape_backslash))\n d.update(perl_vars(m, prefix, escape_backslash))\n d.update(lua_vars(m, prefix, escape_backslash))\n d.update(r_vars(m, prefix, escape_backslash))\n\n if m:\n d.update(meta_vars(m, skip_build_id=skip_build_id))\n\n # system\n d.update(system_vars(d, m, prefix))\n\n # features\n d.update({feat.upper(): str(int(value)) for feat, value in\n feature_list})\n\n for k, v in m.config.variant.items():\n if not for_env or (k.upper() not in d and k.upper() not in LANGUAGES):\n d[k] = v\n return d\n\n\ndef conda_build_vars(prefix, config):\n src_dir = config.test_dir if os.path.basename(prefix)[:2] == '_t' else config.work_dir\n return {\n 'CONDA_BUILD': '1',\n 'PYTHONNOUSERSITE': '1',\n 'CONDA_DEFAULT_ENV': config.host_prefix,\n 'ARCH': str(config.host_arch),\n # This is the one that is most important for where people put artifacts that get bundled.\n # It is fed from our function argument, and can be any of:\n # 1. Build prefix - when host requirements are not explicitly set,\n # then prefix = build prefix = host prefix\n # 2. Host prefix - when host requirements are explicitly set, prefix = host prefix\n # 3. Test prefix - during test runs, this points at the test prefix\n 'PREFIX': prefix,\n # This is for things that are specifically build tools. 
Things that run on the build\n # platform, but probably should not be linked against, since they may not run on the\n # destination host platform\n # It can be equivalent to config.host_prefix if the host section is not explicitly set.\n 'BUILD_PREFIX': config.build_prefix,\n 'SYS_PREFIX': sys.prefix,\n 'SYS_PYTHON': sys.executable,\n 'SUBDIR': config.host_subdir,\n 'SRC_DIR': src_dir,\n 'HTTPS_PROXY': os.getenv('HTTPS_PROXY', ''),\n 'HTTP_PROXY': os.getenv('HTTP_PROXY', ''),\n 'REQUESTS_CA_BUNDLE': os.getenv('REQUESTS_CA_BUNDLE', ''),\n 'DIRTY': '1' if config.dirty else '',\n 'ROOT': root_dir,\n }\n\n\ndef python_vars(metadata, prefix, escape_backslash):\n py_ver = get_py_ver(metadata.config)\n stdlib_dir = utils.get_stdlib_dir(prefix, py_ver)\n sp_dir = utils.get_site_packages(prefix, py_ver)\n\n if utils.on_win and escape_backslash:\n stdlib_dir = stdlib_dir.replace('\\\\', '\\\\\\\\')\n sp_dir = sp_dir.replace('\\\\', '\\\\\\\\')\n\n vars_ = {\n 'CONDA_PY': ''.join(py_ver.split('.')[:2]),\n 'PY3K': str(int(int(py_ver[0]) >= 3)),\n 'PY_VER': py_ver,\n 'STDLIB_DIR': stdlib_dir,\n 'SP_DIR': sp_dir,\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'python' in deps or metadata.name(fail_ok=True) == 'python':\n python_bin = metadata.config.python_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n python_bin = python_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PYTHON': python_bin,\n })\n\n np_ver = metadata.config.variant.get('numpy', get_default_variant(metadata.config)['numpy'])\n vars_['NPY_VER'] = '.'.join(np_ver.split('.')[:2])\n vars_['CONDA_NPY'] = ''.join(np_ver.split('.')[:2])\n vars_['NPY_DISTUTILS_APPEND_FLAGS'] = '1'\n return vars_\n\n\ndef perl_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'PERL_VER': get_perl_ver(metadata.config),\n 'CONDA_PERL': get_perl_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'perl' in deps or metadata.name(fail_ok=True) == 'perl':\n perl_bin = metadata.config.perl_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n perl_bin = perl_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n # host prefix is always fine, because it is the same as build when is_cross is False\n 'PERL': perl_bin,\n })\n return vars_\n\n\ndef lua_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'LUA_VER': get_lua_ver(metadata.config),\n 'CONDA_LUA': get_lua_ver(metadata.config),\n }\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'lua' in deps:\n lua_bin = metadata.config.lua_bin(prefix, metadata.config.host_subdir)\n lua_include_dir = get_lua_include_dir(metadata.config)\n\n if utils.on_win and escape_backslash:\n lua_bin = lua_bin.replace('\\\\', '\\\\\\\\')\n lua_include_dir = lua_include_dir.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'LUA': lua_bin,\n 'LUA_INCLUDE_DIR': lua_include_dir,\n })\n return vars_\n\n\ndef r_vars(metadata, prefix, escape_backslash):\n vars_ = {\n 'R_VER': get_r_ver(metadata.config),\n 'CONDA_R': get_r_ver(metadata.config),\n }\n\n build_or_host = 'host' if metadata.is_cross else 'build'\n deps = [str(ms.name) for ms in metadata.ms_depends(build_or_host)]\n if 'r-base' in deps or 
'mro-base' in deps or metadata.name(fail_ok=True) in (\n 'r-base', 'mro-base'):\n r_bin = metadata.config.r_bin(prefix, metadata.config.host_subdir)\n\n if utils.on_win and escape_backslash:\n r_bin = r_bin.replace('\\\\', '\\\\\\\\')\n\n vars_.update({\n 'R': r_bin,\n })\n return vars_\n\n\ndef meta_vars(meta, skip_build_id=False):\n d = {}\n for var_name in ensure_list(meta.get_value('build/script_env', [])):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n warnings.warn(\n \"The environment variable '%s' is being passed through with value %s. \"\n \"If you are splitting build and test phases with --no-test, please ensure \"\n \"that this value is also set similarly at test time.\" % (var_name, value),\n UserWarning\n )\n\n folder = meta.get_value('source/0/folder', '')\n repo_dir = join(meta.config.work_dir, folder)\n git_dir = join(repo_dir, '.git')\n hg_dir = join(repo_dir, '.hg')\n\n if not isinstance(git_dir, str):\n # On Windows, subprocess env can't handle unicode.\n git_dir = git_dir.encode(sys.getfilesystemencoding() or 'utf-8')\n\n git_exe = external.find_executable('git', meta.config.build_prefix)\n if git_exe and os.path.exists(git_dir):\n # We set all 'source' metavars using the FIRST source entry in meta.yaml.\n git_url = meta.get_value('source/0/git_url')\n\n if os.path.exists(git_url):\n if sys.platform == 'win32':\n git_url = utils.convert_unix_path_to_win(git_url)\n # If git_url is a relative path instead of a url, convert it to an abspath\n git_url = normpath(join(meta.path, git_url))\n\n _x = False\n\n if git_url:\n _x = verify_git_repo(git_exe,\n git_dir,\n git_url,\n meta.config.git_commits_since_tag,\n meta.config.debug,\n meta.get_value('source/0/git_rev', 'HEAD'))\n\n if _x or meta.get_value('source/0/path'):\n d.update(get_git_info(git_exe, git_dir, meta.config.debug))\n\n elif external.find_executable('hg', meta.config.build_prefix) and os.path.exists(hg_dir):\n d.update(get_hg_build_info(hg_dir))\n\n # use `get_value` to prevent early exit while name is still unresolved during rendering\n d['PKG_NAME'] = meta.get_value('package/name')\n d['PKG_VERSION'] = meta.version()\n d['PKG_BUILDNUM'] = str(meta.build_number() or 0)\n if meta.final and not skip_build_id:\n d['PKG_BUILD_STRING'] = str(meta.build_id())\n d['PKG_HASH'] = meta.hash_dependencies()\n else:\n d['PKG_BUILD_STRING'] = 'placeholder'\n d['PKG_HASH'] = '1234567'\n d['RECIPE_DIR'] = (meta.path if meta.path else\n meta.meta.get('extra', {}).get('parent_recipe', {}).get('path', ''))\n return d\n\n\n@memoized\ndef get_cpu_count():\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, _ = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=subprocess.PIPE).communicate()\n return out.decode('utf-8').strip()\n else:\n try:\n return str(multiprocessing.cpu_count())\n except NotImplementedError:\n return \"1\"\n\n\ndef get_shlib_ext():\n # Return the shared library extension.\n if sys.platform == 'win32':\n return '.dll'\n elif sys.platform == 'darwin':\n return '.dylib'\n elif sys.platform.startswith('linux'):\n return '.so'\n else:\n raise NotImplementedError(sys.platform)\n\n\ndef windows_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n # We have gone for the clang values here.\n win_arch = 'i386' if 
str(m.config.host_arch) == '32' else 'amd64'\n win_msvc = '19.0.0' if PY3 else '15.0.0'\n library_prefix = join(prefix, 'Library')\n drive, tail = m.config.host_prefix.split(':')\n get_default('SCRIPTS', join(prefix, 'Scripts'))\n get_default('LIBRARY_PREFIX', library_prefix)\n get_default('LIBRARY_BIN', join(library_prefix, 'bin'))\n get_default('LIBRARY_INC', join(library_prefix, 'include'))\n get_default('LIBRARY_LIB', join(library_prefix, 'lib'))\n get_default('CYGWIN_PREFIX', ''.join(('/cygdrive/', drive.lower(), tail.replace('\\\\', '/'))))\n # see https://en.wikipedia.org/wiki/Environment_variable#Default_values\n get_default('ALLUSERSPROFILE')\n get_default('APPDATA')\n get_default('CommonProgramFiles')\n get_default('CommonProgramFiles(x86)')\n get_default('CommonProgramW6432')\n get_default('COMPUTERNAME')\n get_default('ComSpec')\n get_default('HOMEDRIVE')\n get_default('HOMEPATH')\n get_default('LOCALAPPDATA')\n get_default('LOGONSERVER')\n get_default('NUMBER_OF_PROCESSORS')\n get_default('PATHEXT')\n get_default('ProgramData')\n get_default('ProgramFiles')\n get_default('ProgramFiles(x86)')\n get_default('ProgramW6432')\n get_default('PROMPT')\n get_default('PSModulePath')\n get_default('PUBLIC')\n get_default('SystemDrive')\n get_default('SystemRoot')\n get_default('TEMP')\n get_default('TMP')\n get_default('USERDOMAIN')\n get_default('USERNAME')\n get_default('USERPROFILE')\n get_default('windir')\n # CPU data, see https://github.com/conda/conda-build/issues/2064\n get_default('PROCESSOR_ARCHITEW6432')\n get_default('PROCESSOR_ARCHITECTURE')\n get_default('PROCESSOR_IDENTIFIER')\n get_default('BUILD', win_arch + '-pc-windows-' + win_msvc)\n for env_var in os.environ.keys():\n if re.match('VS[0-9]{2,3}COMNTOOLS', env_var):\n get_default(env_var)\n\n\ndef unix_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n get_default('HOME', 'UNKNOWN')\n get_default('PKG_CONFIG_PATH', join(prefix, 'lib', 'pkgconfig'))\n get_default('CMAKE_GENERATOR', 'Unix Makefiles')\n get_default('SSL_CERT_FILE')\n\n\ndef osx_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n OSX_ARCH = 'i386' if str(m.config.host_arch) == '32' else 'x86_64'\n # 10.7 install_name_tool -delete_rpath causes broken dylibs, I will revisit this ASAP.\n # rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n # d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n get_default('OSX_ARCH', OSX_ARCH)\n get_default('MACOSX_DEPLOYMENT_TARGET', '10.9')\n get_default('BUILD', OSX_ARCH + '-apple-darwin13.4.0')\n\n\n@memoized\ndef _machine_and_architecture():\n return platform.machine(), platform.architecture()\n\n\ndef linux_vars(m, get_default, prefix):\n \"\"\"This is setting variables on a dict that is part of the get_default function\"\"\"\n platform_machine, platform_architecture = _machine_and_architecture()\n build_arch = platform_machine\n # Python reports x86_64 when running a i686 Python binary on a 64-bit CPU\n # unless run through linux32. Issue a warning when we detect this.\n if build_arch == 'x86_64' and platform_architecture[0] == '32bit':\n print(\"Warning: You are running 32-bit Python on a 64-bit linux installation\")\n print(\" but have not launched it via linux32. Various qeuries *will*\")\n print(\" give unexpected results (uname -m, platform.machine() etc)\")\n build_arch = 'i686'\n # the GNU triplet is powerpc, not ppc. 
This matters.\n if build_arch.startswith('ppc'):\n build_arch = build_arch.replace('ppc', 'powerpc')\n if build_arch.startswith('powerpc'):\n build_distro = 'cos7'\n else:\n build_distro = 'cos6'\n # There is also QEMU_SET_ENV, but that needs to be\n # filtered so it only contains the result of `linux_vars`\n # which, before this change was empty, and after it only\n # contains other QEMU env vars.\n get_default('CFLAGS')\n get_default('CXXFLAGS')\n get_default('LDFLAGS')\n get_default('QEMU_LD_PREFIX')\n get_default('QEMU_UNAME')\n get_default('DEJAGNU')\n get_default('DISPLAY')\n get_default('LD_RUN_PATH', prefix + '/lib')\n get_default('BUILD', build_arch + '-conda_' + build_distro + '-linux-gnu')\n\n\ndef set_from_os_or_variant(out_dict, key, variant, default):\n value = os.getenv(key)\n if not value:\n value = variant.get(key, default)\n if value:\n out_dict[key] = value\n\n\n@memoized\ndef system_vars(env_dict, m, prefix):\n d = dict()\n # note the dictionary is passed in here - variables are set in that dict if they are non-null\n get_default = lambda key, default='': set_from_os_or_variant(d, key, m.config.variant, default)\n\n get_default('CPU_COUNT', get_cpu_count())\n get_default('LANG')\n get_default('LC_ALL')\n get_default('MAKEFLAGS')\n d['SHLIB_EXT'] = get_shlib_ext()\n d['PATH'] = os.environ.copy()['PATH']\n\n if not m.config.activate:\n d = prepend_bin_path(d, m.config.host_prefix)\n\n if sys.platform == 'win32':\n windows_vars(m, get_default, prefix)\n else:\n unix_vars(m, get_default, prefix)\n\n if sys.platform == 'darwin':\n osx_vars(m, get_default, prefix)\n elif sys.platform.startswith('linux'):\n linux_vars(m, get_default, prefix)\n\n return d\n\n\nclass InvalidEnvironment(Exception):\n pass\n\n\n# Stripped-down Environment class from conda-tools ( https://github.com/groutr/conda-tools )\n# Vendored here to avoid the whole dependency for just this bit.\ndef _load_json(path):\n with open(path, 'r') as fin:\n x = json.load(fin)\n return x\n\n\ndef _load_all_json(path):\n \"\"\"\n Load all json files in a directory. 
Return dictionary with filenames mapped to json\n dictionaries.\n \"\"\"\n root, _, files = next(utils.walk(path))\n result = {}\n for f in files:\n if f.endswith('.json'):\n result[f] = _load_json(join(root, f))\n return result\n\n\nclass Environment(object):\n def __init__(self, path):\n \"\"\"\n Initialize an Environment object.\n\n To reflect changes in the underlying environment, a new Environment object should be\n created.\n \"\"\"\n self.path = path\n self._meta = join(path, 'conda-meta')\n if os.path.isdir(path) and os.path.isdir(self._meta):\n self._packages = {}\n else:\n raise InvalidEnvironment('Unable to load environment {}'.format(path))\n\n def _read_package_json(self):\n if not self._packages:\n self._packages = _load_all_json(self._meta)\n\n def package_specs(self):\n \"\"\"\n List all package specs in the environment.\n \"\"\"\n self._read_package_json()\n json_objs = self._packages.values()\n specs = []\n for i in json_objs:\n p, v, b = i['name'], i['version'], i['build']\n specs.append('{} {} {}'.format(p, v, b))\n return specs\n\n\ncached_actions = {}\nlast_index_ts = 0\n\n\ndef get_install_actions(prefix, specs, env, retries=0, subdir=None,\n verbose=True, debug=False, locking=True,\n bldpkgs_dirs=None, timeout=90, disable_pip=False,\n max_env_retry=3, output_folder=None, channel_urls=None):\n global cached_actions\n global last_index_ts\n actions = {}\n log = utils.get_logger(__name__)\n conda_log_level = logging.WARN\n specs = list(specs)\n if verbose:\n capture = contextlib.contextmanager(lambda: (yield))\n elif debug:\n capture = contextlib.contextmanager(lambda: (yield))\n conda_log_level = logging.DEBUG\n else:\n capture = utils.capture\n for feature, value in feature_list:\n if value:\n specs.append('%s@' % feature)\n\n bldpkgs_dirs = ensure_list(bldpkgs_dirs)\n\n index, index_ts = get_build_index(subdir, list(bldpkgs_dirs)[0], output_folder=output_folder,\n channel_urls=channel_urls, debug=debug, verbose=verbose,\n locking=locking, timeout=timeout)\n specs = tuple(utils.ensure_valid_spec(spec) for spec in specs if not str(spec).endswith('@'))\n\n if ((specs, env, subdir, channel_urls, disable_pip) in cached_actions and\n last_index_ts >= index_ts):\n actions = cached_actions[(specs, env, subdir, channel_urls, disable_pip)].copy()\n if \"PREFIX\" in actions:\n actions['PREFIX'] = prefix\n elif specs:\n # this is hiding output like:\n # Fetching package metadata ...........\n # Solving package specifications: ..........\n with utils.LoggingContext(conda_log_level):\n with capture():\n try:\n actions = install_actions(prefix, index, specs, force=True)\n except (NoPackagesFoundError, UnsatisfiableError) as exc:\n raise DependencyNeedsBuildingError(exc, subdir=subdir)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError, AssertionError) as exc:\n if 'lock' in str(exc):\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc) or\n isinstance(exc, AssertionError)):\n locks = utils.get_conda_operation_locks(locking, bldpkgs_dirs, timeout)\n with utils.try_acquire_locks(locks, timeout=timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. 
\"\n \"Removing the folder and retrying\", pkg_dir)\n if pkg_dir in pkgs_dirs and os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retries < max_env_retry:\n log.warn(\"failed to get install actions, retrying. exception was: %s\",\n str(exc))\n actions = get_install_actions(prefix, tuple(specs), env,\n retries=retries + 1,\n subdir=subdir,\n verbose=verbose,\n debug=debug,\n locking=locking,\n bldpkgs_dirs=tuple(bldpkgs_dirs),\n timeout=timeout,\n disable_pip=disable_pip,\n max_env_retry=max_env_retry,\n output_folder=output_folder,\n channel_urls=tuple(channel_urls))\n else:\n log.error(\"Failed to get install actions, max retries exceeded.\")\n raise\n if disable_pip:\n for pkg in ('pip', 'setuptools', 'wheel'):\n # specs are the raw specifications, not the conda-derived actual specs\n # We're testing that pip etc. are manually specified\n if not any(re.match('^%s(?:$|[\\s=].*)' % pkg, str(dep)) for dep in specs):\n actions['LINK'] = [spec for spec in actions['LINK'] if spec.name != pkg]\n utils.trim_empty_keys(actions)\n cached_actions[(specs, env, subdir, channel_urls, disable_pip)] = actions.copy()\n last_index_ts = index_ts\n return actions\n\n\ndef create_env(prefix, specs_or_actions, env, config, subdir, clear_cache=True, retry=0,\n locks=None, is_cross=False, is_conda=False):\n '''\n Create a conda envrionment for the given prefix and specs.\n '''\n if config.debug:\n external_logger_context = utils.LoggingContext(logging.DEBUG)\n else:\n external_logger_context = utils.LoggingContext(logging.WARN)\n\n with external_logger_context:\n log = utils.get_logger(__name__)\n\n # if os.path.isdir(prefix):\n # utils.rm_rf(prefix)\n\n if specs_or_actions: # Don't waste time if there is nothing to do\n log.debug(\"Creating environment in %s\", prefix)\n log.debug(str(specs_or_actions))\n\n with utils.path_prepended(prefix):\n if not locks:\n locks = utils.get_conda_operation_locks(config)\n try:\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n # input is a list - it's specs in MatchSpec format\n if not hasattr(specs_or_actions, 'keys'):\n specs = list(set(specs_or_actions))\n actions = get_install_actions(prefix, tuple(specs), env,\n subdir=subdir,\n verbose=config.verbose,\n debug=config.debug,\n locking=config.locking,\n bldpkgs_dirs=tuple(config.bldpkgs_dirs),\n timeout=config.timeout,\n disable_pip=config.disable_pip,\n max_env_retry=config.max_env_retry,\n output_folder=config.output_folder,\n channel_urls=tuple(config.channel_urls))\n else:\n actions = specs_or_actions\n index, index_ts = get_build_index(subdir=subdir,\n bldpkgs_dir=config.bldpkgs_dir,\n output_folder=config.output_folder,\n channel_urls=config.channel_urls,\n debug=config.debug,\n verbose=config.verbose,\n locking=config.locking,\n timeout=config.timeout)\n utils.trim_empty_keys(actions)\n display_actions(actions, index)\n if utils.on_win:\n for k, v in os.environ.items():\n os.environ[k] = str(v)\n execute_actions(actions, index, verbose=config.debug)\n except (SystemExit, PaddingError, LinkError, DependencyNeedsBuildingError,\n CondaError) as exc:\n if ((\"too short in\" in str(exc) or\n re.search('post-link failed for: (?:[a-zA-Z]*::)?openssl', str(exc)) or\n isinstance(exc, PaddingError)) and\n config.prefix_length > 80):\n if config.prefix_length_fallback:\n log.warn(\"Build prefix failed with prefix length %d\",\n config.prefix_length)\n log.warn(\"Error was: \")\n log.warn(str(exc))\n log.warn(\"One or more of your package dependencies needs to be rebuilt \"\n \"with a longer prefix 
length.\")\n log.warn(\"Falling back to legacy prefix length of 80 characters.\")\n log.warn(\"Your package will not install into prefixes > 80 characters.\")\n config.prefix_length = 80\n\n host = '_h_env' in prefix\n # Set this here and use to create environ\n # Setting this here is important because we use it below (symlink)\n prefix = config.host_prefix if host else config.build_prefix\n actions['PREFIX'] = prefix\n\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, is_cross=is_cross)\n else:\n raise\n elif 'lock' in str(exc):\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n elif ('requires a minimum conda version' in str(exc) or\n 'link a source that does not' in str(exc)):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = str(exc)\n folder = 0\n while os.path.dirname(pkg_dir) not in pkgs_dirs and folder < 20:\n pkg_dir = os.path.dirname(pkg_dir)\n folder += 1\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n else:\n raise\n # HACK: some of the time, conda screws up somehow and incomplete packages result.\n # Just retry.\n except (AssertionError, IOError, ValueError, RuntimeError, LockError) as exc:\n if isinstance(exc, AssertionError):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n pkg_dir = os.path.dirname(os.path.dirname(str(exc)))\n log.warn(\"I think conda ended up with a partial extraction for %s. \"\n \"Removing the folder and retrying\", pkg_dir)\n if os.path.isdir(pkg_dir):\n utils.rm_rf(pkg_dir)\n if retry < config.max_env_retry:\n log.warn(\"failed to create env, retrying. exception was: %s\", str(exc))\n create_env(prefix, actions, config=config, subdir=subdir, env=env,\n clear_cache=clear_cache, retry=retry + 1, is_cross=is_cross)\n else:\n log.error(\"Failed to create env, max retries exceeded.\")\n raise\n\n if not is_conda:\n # Symlinking conda is critical here to make sure that activate scripts are not\n # accidentally included in packages.\n if utils.on_win:\n shell = \"cmd.exe\"\n else:\n shell = \"bash\"\n symlink_conda(prefix, sys.prefix, shell)\n\n\ndef clean_pkg_cache(dist, config):\n locks = []\n\n conda_log_level = logging.WARN\n if config.debug:\n conda_log_level = logging.DEBUG\n\n _pkgs_dirs = pkgs_dirs[:1]\n if config.locking:\n locks = [utils.get_lock(folder, timeout=config.timeout) for folder in _pkgs_dirs]\n with utils.LoggingContext(conda_log_level):\n with utils.try_acquire_locks(locks, timeout=config.timeout):\n rmplan = [\n 'RM_EXTRACTED {0} local::{0}'.format(dist),\n 'RM_FETCHED {0} local::{0}'.format(dist),\n ]\n execute_plan(rmplan)\n\n # Conda does not seem to do a complete cleanup sometimes. 
This is supplemental.\n # Conda's cleanup is still necessary - it keeps track of its own in-memory\n # list of downloaded things.\n for folder in pkgs_dirs:\n try:\n assert not os.path.exists(os.path.join(folder, dist))\n assert not os.path.exists(os.path.join(folder, dist + '.tar.bz2'))\n for pkg_id in [dist, 'local::' + dist]:\n assert pkg_id not in package_cache()\n except AssertionError:\n log = utils.get_logger(__name__)\n log.debug(\"Conda caching error: %s package remains in cache after removal\",\n dist)\n log.debug(\"manually removing to compensate\")\n cache = package_cache()\n keys = [key for key in cache.keys() if dist in key]\n for pkg_id in keys:\n if pkg_id in cache:\n del cache[pkg_id]\n for entry in glob(os.path.join(folder, dist + '*')):\n utils.rm_rf(entry)\n\n\ndef get_pinned_deps(m, section):\n with TemporaryDirectory(prefix='_') as tmpdir:\n actions = get_install_actions(tmpdir,\n tuple(m.ms_depends(section)), section,\n subdir=m.config.target_subdir,\n debug=m.config.debug,\n verbose=m.config.verbose,\n locking=m.config.locking,\n bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),\n timeout=m.config.timeout,\n disable_pip=m.config.disable_pip,\n max_env_retry=m.config.max_env_retry,\n output_folder=m.config.output_folder,\n channel_urls=tuple(m.config.channel_urls))\n runtime_deps = [' '.join(link.dist_name.rsplit('-', 2)) for link in actions.get('LINK', [])]\n return runtime_deps\n",
"path": "conda_build/environ.py"
}
] | diff --git a/conda_build/environ.py b/conda_build/environ.py
index e67982bfd6..9afe7a026f 100644
--- a/conda_build/environ.py
+++ b/conda_build/environ.py
@@ -154,7 +154,6 @@ def verify_git_repo(git_exe, git_dir, git_url, git_commits_since_tag, debug=Fals
return OK
-@memoized
def get_git_info(git_exe, repo, debug):
"""
Given a repo to a git repo, return a dictionary of:
diff --git a/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml b/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml
new file mode 100644
index 0000000000..b6f56ca4bd
--- /dev/null
+++ b/tests/test-recipes/variants/29_different_git_vars/conda_build_config.yaml
@@ -0,0 +1,3 @@
+git_tag:
+ - 1.21.11
+ - 1.20.0
diff --git a/tests/test-recipes/variants/29_different_git_vars/meta.yaml b/tests/test-recipes/variants/29_different_git_vars/meta.yaml
new file mode 100644
index 0000000000..0c0231ccb3
--- /dev/null
+++ b/tests/test-recipes/variants/29_different_git_vars/meta.yaml
@@ -0,0 +1,15 @@
+package:
+ name: conda-build-test-variant-git
+ version: {{ GIT_DESCRIBE_TAG }}
+
+source:
+ git_url: https://github.com/conda/conda_build_test_recipe
+ git_tag: {{ git_tag }}
+
+build:
+ number: {{ GIT_DESCRIBE_NUMBER }}
+
+requirements:
+ build:
+ - python
+
diff --git a/tests/test_variants.py b/tests/test_variants.py
index 58c360c2f4..4b2366ce36 100644
--- a/tests/test_variants.py
+++ b/tests/test_variants.py
@@ -441,3 +441,11 @@ def test_custom_compiler():
recipe = os.path.join(recipe_dir, '28_custom_compiler')
ms = api.render(recipe, permit_unsatisfiable_variants=True, finalize=False, bypass_env_check=True)
assert len(ms) == 3
+
+
+def test_different_git_vars():
+ recipe = os.path.join(recipe_dir, '29_different_git_vars')
+ ms = api.render(recipe)
+ versions = [m[0].version() for m in ms]
+ assert "1.20.0" in versions
+ assert "1.21.11" in versions
|
nltk__nltk-3156 | Class 'CharTokenizer' is missing attribute '_string'
I think the class `CharTokenizer` is missing a concrete `_string` attribute (e.g. `_string = ""`).
https://github.com/nltk/nltk/blob/fc53edbf6f0763971afca5855386a2a382da37ac/nltk/tokenize/simple.py#L68-L77
Without this attribute, when trying to use the class, I get the following error:
`TypeError: Can't instantiate abstract class CharTokenizer with abstract method _string`
Example code:
```python
from nltk.tokenize.simple import CharTokenizer
tokenizer = CharTokenizer()
```
Error:
```bash
Traceback (most recent call last):
File "/home/francis/.local/share/virtualenvs/cafa-challenge-bUqSu2Tm/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 3508, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-7-803c2e672729>", line 1, in <module>
tokenizer = CharTokenizer()
TypeError: Can't instantiate abstract class CharTokenizer with abstract method _string
```
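
As far as I can tell, the error comes from Python's ABC machinery: `StringTokenizer` evidently declares `_string` as abstract (the sketch below models it as an abstract property, which is an assumption), and any concrete class attribute with that name — even `None`, which is what the fixed source in this record adds — removes it from `__abstractmethods__` and makes the subclass instantiable. A minimal self-contained sketch of the mechanism and the fix; the `StringTokenizerSketch`, `BrokenCharTokenizer`, and `FixedCharTokenizer` names are hypothetical stand-ins, not the actual nltk source:

```python
from abc import ABC, abstractmethod


class StringTokenizerSketch(ABC):
    """Hypothetical stand-in for nltk.tokenize.api.StringTokenizer."""

    @property
    @abstractmethod
    def _string(self):
        ...

    def tokenize(self, s):
        return s.split(self._string)


class BrokenCharTokenizer(StringTokenizerSketch):
    # No concrete _string: the name stays in __abstractmethods__,
    # so instantiation raises the TypeError shown above.
    def tokenize(self, s):
        return list(s)


class FixedCharTokenizer(StringTokenizerSketch):
    _string = None  # any concrete attribute satisfies the abstract slot

    def tokenize(self, s):
        return list(s)


try:
    BrokenCharTokenizer()
except TypeError as exc:
    print(exc)  # Can't instantiate abstract class BrokenCharTokenizer ...

print(FixedCharTokenizer().tokenize("abc"))  # ['a', 'b', 'c']
```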
| [
{
"content": "# Natural Language Toolkit: Simple Tokenizers\n#\n# Copyright (C) 2001-2023 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <https://www.nltk.org>\n# For license information, see LICENSE.TXT\n\nr\"\"\"\nSimple Tokenizers\n\nThese tokenizers divide strings into substrings using the string\n``split()`` method.\nWhen tokenizing using a particular delimiter string, use\nthe string ``split()`` method directly, as this is more efficient.\n\nThe simple tokenizers are *not* available as separate functions;\ninstead, you should just use the string ``split()`` method directly:\n\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n >>> s.split('\\n') # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n\nThe simple tokenizers are mainly useful because they follow the\nstandard ``TokenizerI`` interface, and so can be used with any code\nthat expects a tokenizer. For example, these tokenizers can be used\nto specify the tokenization conventions when building a `CorpusReader`.\n\n\"\"\"\n\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\n\n\nclass SpaceTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string using the space character as a delimiter,\n which is the same as ``s.split(' ')``.\n\n >>> from nltk.tokenize import SpaceTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n \"\"\"\n\n _string = \" \"\n\n\nclass TabTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string use the tab character as a delimiter,\n the same as ``s.split('\\t')``.\n\n >>> from nltk.tokenize import TabTokenizer\n >>> TabTokenizer().tokenize('a\\tb c\\n\\t d')\n ['a', 'b c\\n', ' d']\n \"\"\"\n\n _string = \"\\t\"\n\n\nclass CharTokenizer(StringTokenizer):\n \"\"\"Tokenize a string into individual characters. If this functionality\n is ever required directly, use ``for char in string``.\n \"\"\"\n\n def tokenize(self, s):\n return list(s)\n\n def span_tokenize(self, s):\n yield from enumerate(range(1, len(s) + 1))\n\n\nclass LineTokenizer(TokenizerI):\n r\"\"\"Tokenize a string into its lines, optionally discarding blank lines.\n This is similar to ``s.split('\\n')``.\n\n >>> from nltk.tokenize import LineTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n >>> # same as [l for l in s.split('\\n') if l.strip()]:\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', 'Thanks.']\n\n :param blanklines: Indicates how blank lines should be handled. 
Valid values are:\n\n - ``discard``: strip blank lines out of the token list before returning it.\n A line is considered blank if it contains only whitespace characters.\n - ``keep``: leave all blank lines in the token list.\n - ``discard-eof``: if the string ends with a newline, then do not generate\n a corresponding token ``''`` after that newline.\n \"\"\"\n\n def __init__(self, blanklines=\"discard\"):\n valid_blanklines = (\"discard\", \"keep\", \"discard-eof\")\n if blanklines not in valid_blanklines:\n raise ValueError(\n \"Blank lines must be one of: %s\" % \" \".join(valid_blanklines)\n )\n\n self._blanklines = blanklines\n\n def tokenize(self, s):\n lines = s.splitlines()\n # If requested, strip off blank lines.\n if self._blanklines == \"discard\":\n lines = [l for l in lines if l.rstrip()]\n elif self._blanklines == \"discard-eof\":\n if lines and not lines[-1].strip():\n lines.pop()\n return lines\n\n # discard-eof not implemented\n def span_tokenize(self, s):\n if self._blanklines == \"keep\":\n yield from string_span_tokenize(s, r\"\\n\")\n else:\n yield from regexp_span_tokenize(s, r\"\\n(\\s+\\n)*\")\n\n\n######################################################################\n# { Tokenization Functions\n######################################################################\n# XXX: it is stated in module docs that there is no function versions\n\n\ndef line_tokenize(text, blanklines=\"discard\"):\n return LineTokenizer(blanklines).tokenize(text)\n",
"path": "nltk/tokenize/simple.py"
}
] | [
{
"content": "# Natural Language Toolkit: Simple Tokenizers\n#\n# Copyright (C) 2001-2023 NLTK Project\n# Author: Edward Loper <[email protected]>\n# Steven Bird <[email protected]>\n# URL: <https://www.nltk.org>\n# For license information, see LICENSE.TXT\n\nr\"\"\"\nSimple Tokenizers\n\nThese tokenizers divide strings into substrings using the string\n``split()`` method.\nWhen tokenizing using a particular delimiter string, use\nthe string ``split()`` method directly, as this is more efficient.\n\nThe simple tokenizers are *not* available as separate functions;\ninstead, you should just use the string ``split()`` method directly:\n\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> s.split() # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',\n 'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']\n >>> s.split(' ') # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n >>> s.split('\\n') # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n\nThe simple tokenizers are mainly useful because they follow the\nstandard ``TokenizerI`` interface, and so can be used with any code\nthat expects a tokenizer. For example, these tokenizers can be used\nto specify the tokenization conventions when building a `CorpusReader`.\n\n\"\"\"\n\nfrom nltk.tokenize.api import StringTokenizer, TokenizerI\nfrom nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize\n\n\nclass SpaceTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string using the space character as a delimiter,\n which is the same as ``s.split(' ')``.\n\n >>> from nltk.tokenize import SpaceTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> SpaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good', 'muffins', 'cost', '$3.88\\nin', 'New', 'York.', '',\n 'Please', 'buy', 'me\\ntwo', 'of', 'them.\\n\\nThanks.']\n \"\"\"\n\n _string = \" \"\n\n\nclass TabTokenizer(StringTokenizer):\n r\"\"\"Tokenize a string use the tab character as a delimiter,\n the same as ``s.split('\\t')``.\n\n >>> from nltk.tokenize import TabTokenizer\n >>> TabTokenizer().tokenize('a\\tb c\\n\\t d')\n ['a', 'b c\\n', ' d']\n \"\"\"\n\n _string = \"\\t\"\n\n\nclass CharTokenizer(StringTokenizer):\n \"\"\"Tokenize a string into individual characters. If this functionality\n is ever required directly, use ``for char in string``.\n \"\"\"\n\n _string = None\n\n def tokenize(self, s):\n return list(s)\n\n def span_tokenize(self, s):\n yield from enumerate(range(1, len(s) + 1))\n\n\nclass LineTokenizer(TokenizerI):\n r\"\"\"Tokenize a string into its lines, optionally discarding blank lines.\n This is similar to ``s.split('\\n')``.\n\n >>> from nltk.tokenize import LineTokenizer\n >>> s = \"Good muffins cost $3.88\\nin New York. Please buy me\\ntwo of them.\\n\\nThanks.\"\n >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. Please buy me',\n 'two of them.', '', 'Thanks.']\n >>> # same as [l for l in s.split('\\n') if l.strip()]:\n >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE\n ['Good muffins cost $3.88', 'in New York. 
Please buy me',\n 'two of them.', 'Thanks.']\n\n :param blanklines: Indicates how blank lines should be handled. Valid values are:\n\n - ``discard``: strip blank lines out of the token list before returning it.\n A line is considered blank if it contains only whitespace characters.\n - ``keep``: leave all blank lines in the token list.\n - ``discard-eof``: if the string ends with a newline, then do not generate\n a corresponding token ``''`` after that newline.\n \"\"\"\n\n def __init__(self, blanklines=\"discard\"):\n valid_blanklines = (\"discard\", \"keep\", \"discard-eof\")\n if blanklines not in valid_blanklines:\n raise ValueError(\n \"Blank lines must be one of: %s\" % \" \".join(valid_blanklines)\n )\n\n self._blanklines = blanklines\n\n def tokenize(self, s):\n lines = s.splitlines()\n # If requested, strip off blank lines.\n if self._blanklines == \"discard\":\n lines = [l for l in lines if l.rstrip()]\n elif self._blanklines == \"discard-eof\":\n if lines and not lines[-1].strip():\n lines.pop()\n return lines\n\n # discard-eof not implemented\n def span_tokenize(self, s):\n if self._blanklines == \"keep\":\n yield from string_span_tokenize(s, r\"\\n\")\n else:\n yield from regexp_span_tokenize(s, r\"\\n(\\s+\\n)*\")\n\n\n######################################################################\n# { Tokenization Functions\n######################################################################\n# XXX: it is stated in module docs that there is no function versions\n\n\ndef line_tokenize(text, blanklines=\"discard\"):\n return LineTokenizer(blanklines).tokenize(text)\n",
"path": "nltk/tokenize/simple.py"
}
] | diff --git a/nltk/test/unit/test_tokenize.py b/nltk/test/unit/test_tokenize.py
index 7688f52397..662b4562b2 100644
--- a/nltk/test/unit/test_tokenize.py
+++ b/nltk/test/unit/test_tokenize.py
@@ -16,6 +16,7 @@
sent_tokenize,
word_tokenize,
)
+from nltk.tokenize.simple import CharTokenizer
def load_stanford_segmenter():
@@ -865,3 +866,21 @@ class ExtLangVars(punkt.PunktLanguageVars):
)
def test_sent_tokenize(self, sentences: str, expected: List[str]):
assert sent_tokenize(sentences) == expected
+
+ def test_string_tokenizer(self) -> None:
+ sentence = "Hello there"
+ tokenizer = CharTokenizer()
+ assert tokenizer.tokenize(sentence) == list(sentence)
+ assert list(tokenizer.span_tokenize(sentence)) == [
+ (0, 1),
+ (1, 2),
+ (2, 3),
+ (3, 4),
+ (4, 5),
+ (5, 6),
+ (6, 7),
+ (7, 8),
+ (8, 9),
+ (9, 10),
+ (10, 11),
+ ]
diff --git a/nltk/tokenize/simple.py b/nltk/tokenize/simple.py
index 71a02d3098..54b2bf8440 100644
--- a/nltk/tokenize/simple.py
+++ b/nltk/tokenize/simple.py
@@ -70,6 +70,8 @@ class CharTokenizer(StringTokenizer):
is ever required directly, use ``for char in string``.
"""
+ _string = None
+
def tokenize(self, s):
return list(s)
|
wemake-services__wemake-python-styleguide-200
Feature: allow magic numbers in async function constructors
We already allow some magic numbers to be used as default values in function definitions, like so:
```python
def some_function(price, delta=0.1):
return price * delta
```
But we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21
What we need to do (see the sketch after this list) is:
1. Add `ast.AsyncFunctionDef` to the allowed list
2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
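A minimal sketch of the intended change, mirroring the existing `_ALLOWED_PARENTS` tuple in `wemake_python_styleguide/visitors/ast/numbers.py` (the names are copied from that module; this is an illustration of the one-line fix, not the final patch):
```python
import ast

# Parent nodes in which a number literal is not treated as "magic".
# The fix is to whitelist async function definitions next to regular
# ones, so a default like `delta=0.1` passes in both cases.
_ALLOWED_PARENTS = (
    ast.Assign,

    # Constructor usages:
    ast.FunctionDef,
    ast.AsyncFunctionDef,  # the addition requested by this issue

    ast.arguments,

    # Primitives:
    ast.List,
    ast.Dict,
    ast.Set,
    ast.Tuple,
)


# Example that should no longer be reported once the fix lands:
async def some_function(price, delta=0.1):
    return price * delta
```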
| [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n",
"path": "wemake_python_styleguide/visitors/ast/numbers.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.AsyncFunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n",
"path": "wemake_python_styleguide/visitors/ast/numbers.py"
}
] | diff --git a/tests/test_visitors/test_ast/test_general/test_magic_numbers.py b/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
index 0b1128a40..98d860c81 100644
--- a/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
+++ b/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
@@ -20,6 +20,11 @@ def function_name(param1, param2={0}):
return param1 / param2
"""
+async_function_definition = """
+async def function_name(param1, param2={0}):
+ return param1 / param2
+"""
+
list_definition = '[{0}]'
dict_definition_key = '{{{0}: "value"}}'
dict_definition_value = '{{"first": {0}}}'
@@ -47,6 +52,7 @@ def function_name(param1, param2={0}):
assignment,
assignment_unary,
function_definition,
+ async_function_definition,
list_definition,
dict_definition_key,
dict_definition_value,
diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
index 30a8859b9..1bb34a335 100644
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -18,6 +18,7 @@ class MagicNumberVisitor(BaseNodeVisitor):
# Constructor usages:
ast.FunctionDef,
+ ast.AsyncFunctionDef,
ast.arguments,
# Primitives:
|
learningequality__kolibri-8895 | Reports - Missing answered question data after upgrade from 0.14.7 to 0.15
## Observed behavior
This issue is a follow-up to https://github.com/learningequality/kolibri/pull/8818
When I'm logged in as a Coach and go to the reports immediately after upgrading Kolibri from 0.14.7 to 0.15, attempting to go through the completed lessons or quizzes does not show the actual questions; instead I see the following text: 'No attempts made on this question.'
## Expected behavior
All the completion details should be displayed.
## Steps to reproduce the issue
1. Install the 0.14.7 version of Kolibri from [here](https://learningequality.org/r/kolibri-windows-setup-latest).
2. Set up a facility; create classes, users, a lesson, etc.; and complete a lesson and a quiz using exercises such as CK12's 'Make 10 (grids and number bonds)'.
3. Upgrade to the 0.15 version by installing the following [build](https://buildkite.com/learningequality/kolibri-python-package/builds/4467).
4. Go to Coach > Reports and observe the reports for the completed lesson and quiz. (Note that after a restart of Kolibri the issue is observed only for the completed lesson, while the quiz data is displayed correctly.)
## Additional information

## Logs and DB files:
[UbuntuDBlogs.zip](https://github.com/learningequality/kolibri/files/7669547/UbuntuDBlogs.zip)
[WindowsDBLogs.zip](https://github.com/learningequality/kolibri/files/7669548/WindowsDBLogs.zip)
## Usage Details
- OS: Windows 10
- Browser: Chrome
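For context on where the 'No attempts made on this question' state comes from, here is a hedged sketch of the lookup path the reports rely on, based on the `AttemptFilter` visible in `kolibri/core/logger/api.py` (the helper function and its name are hypothetical, added only for illustration):
```python
# Hypothetical helper (not in the codebase) showing how attempts are resolved,
# per AttemptFilter: masterylog__summarylog__content_id.
from kolibri.core.logger.models import AttemptLog


def attempts_for_content(user_id, content_id):
    # Attempts are reachable only through masterylog -> summarylog, so attempt
    # logs that end up detached from the expected MasteryLog never match, and
    # the report falls back to "No attempts made on this question".
    return AttemptLog.objects.filter(
        user_id=user_id,
        masterylog__summarylog__content_id=content_id,
    )
```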
| [
{
"content": "import logging\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom random import randint\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.http import Http404\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import UUIDFilter\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom rest_framework import filters\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import MasteryLog\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.models import dataset_cache\nfrom kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.content.models import AssessmentMetaData\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.constants import interaction_types\nfrom kolibri.core.logger.constants.exercise_attempts import MAPPING\nfrom kolibri.core.notifications.api import create_summarylog\nfrom kolibri.core.notifications.api import parse_attemptslog\nfrom kolibri.core.notifications.api import parse_summarylog\nfrom kolibri.core.notifications.api import quiz_answered_notification\nfrom kolibri.core.notifications.api import quiz_completed_notification\nfrom kolibri.core.notifications.api import quiz_started_notification\nfrom kolibri.core.notifications.tasks import wrap_to_save_queue\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass HexStringUUIDField(serializers.UUIDField):\n def __init__(self, **kwargs):\n self.uuid_format = \"hex\"\n super(HexStringUUIDField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n return super(HexStringUUIDField, self).to_internal_value(data).hex\n\n\nclass StartSessionSerializer(serializers.Serializer):\n lesson_id = HexStringUUIDField(required=False)\n node_id = HexStringUUIDField(required=False)\n # Do this as a special way of handling our coach generated quizzes\n quiz_id = HexStringUUIDField(required=False)\n # A flag to indicate whether to start the session over again\n repeat = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if \"quiz_id\" in data and (\"lesson_id\" in data or \"node_id\" in data):\n raise ValidationError(\"quiz_id must not be mixed with other context\")\n if \"node_id\" not in data and \"quiz_id\" not in data:\n raise ValidationError(\"node_id is required if not a coach assigned quiz\")\n return data\n\n\nclass InteractionSerializer(serializers.Serializer):\n id = HexStringUUIDField(required=False)\n item = serializers.CharField()\n correct = serializers.FloatField(min_value=0, max_value=1)\n complete = serializers.BooleanField(required=False, default=False)\n time_spent = 
serializers.FloatField(min_value=0)\n\n answer = serializers.DictField(required=False)\n simple_answer = serializers.CharField(required=False, allow_blank=True)\n error = serializers.BooleanField(required=False, default=False)\n hinted = serializers.BooleanField(required=False, default=False)\n # Whether to replace the current answer with the new answer\n # this is a no-op if the attempt is being created.\n replace = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if not data[\"error\"] and \"answer\" not in data:\n raise ValidationError(\"Must provide an answer if not an error\")\n return data\n\n\nclass UpdateSessionSerializer(serializers.Serializer):\n progress_delta = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n progress = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n time_spent_delta = serializers.FloatField(min_value=0, required=False)\n extra_fields = serializers.DictField(required=False)\n interactions = InteractionSerializer(required=False, many=True)\n\n def validate(self, data):\n if \"progress_delta\" in data and \"progress\" in data:\n raise ValidationError(\n \"must not pass progress_delta and progress in the same request\"\n )\n return data\n\n\n# The lowest integer that can be encoded\n# in a Django IntegerField across all backends\nMIN_INTEGER = -2147483648\n\n\nattemptlog_fields = [\n \"id\",\n \"correct\",\n \"complete\",\n \"hinted\",\n \"error\",\n \"item\",\n \"answer\",\n \"time_spent\",\n]\n\n\nclass LogContext(object):\n \"\"\"\n Object used to provide a limited dict like interface for encoding the\n context that can be stored in the sessionlog, and which is then\n returned to the frontend as part of the initialization of a content\n session.\n node_id - represents a specific ContentNode in a topic tree, while the\n content_id for that node is recorded directly on the sessionlog.\n quiz_id - represents the id of the Exam Model object that this session\n is regarding (if any).\n lesson_id - represents the id of the lesson this node_id is being engaged\n with from within (if any).\n mastery_level - represents the current 'try' at an assessment, whether an exercise\n a practice quiz or a coach assigned quiz. 
Different mastery_level values\n indicate a different try at the assessment.\n\n This is used to encode the values that are sent when initializing a session\n (see its use in the _get_context method below)\n and then also used to hold the values from an existing sessionlog when\n updating a session (see _update_session method).\n \"\"\"\n\n __slots__ = \"node_id\", \"quiz_id\", \"lesson_id\", \"mastery_level\"\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n self[key] = value\n\n def __setitem__(self, key, value):\n if key not in self.__slots__:\n return\n setattr(self, key, value)\n\n def __getitem__(self, key):\n if key not in self.__slots__:\n return\n return getattr(self, key, None)\n\n def __contains__(self, key):\n return key in self.__slots__ and hasattr(self, key)\n\n def to_dict(self):\n \"\"\"\n Provide a dictionary of the keys stored in the context object.\n Used to serialize for inclusion in an API Response.\n \"\"\"\n output = {}\n for slot in self.__slots__:\n if hasattr(self, slot):\n output[slot] = getattr(self, slot)\n return output\n\n\nclass ProgressTrackingViewSet(viewsets.GenericViewSet):\n def _precache_dataset_id(self, user):\n if user is None or user.is_anonymous():\n return\n key = ContentSessionLog.get_related_dataset_cache_key(\n user.id, user._meta.db_table\n )\n dataset_cache.set(key, user.dataset_id)\n\n def _check_quiz_permissions(self, user, quiz_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a quiz if not logged in\")\n if not Exam.objects.filter(\n active=True,\n assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=quiz_id,\n ).exists():\n raise PermissionDenied(\"User does not have access to this quiz_id\")\n\n def _check_lesson_permissions(self, user, lesson_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a lesson if not logged in\")\n if not Lesson.objects.filter(\n lesson_assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=lesson_id,\n ).exists():\n raise ValidationError(\"Invalid lesson_id\")\n\n def _get_context(self, user, validated_data):\n node_id = validated_data.get(\"node_id\")\n quiz_id = validated_data.get(\"quiz_id\")\n lesson_id = validated_data.get(\"lesson_id\")\n\n context = LogContext()\n\n if node_id is not None:\n try:\n node = (\n ContentNode.objects.annotate(\n mastery_model=Subquery(\n AssessmentMetaData.objects.filter(\n contentnode_id=OuterRef(\"id\")\n ).values_list(\"mastery_model\", flat=True)[:1]\n )\n )\n .values(\"content_id\", \"channel_id\", \"kind\", \"mastery_model\")\n .get(id=node_id)\n )\n mastery_model = node[\"mastery_model\"]\n content_id = node[\"content_id\"]\n channel_id = node[\"channel_id\"]\n kind = node[\"kind\"]\n context[\"node_id\"] = node_id\n if lesson_id:\n self._check_lesson_permissions(user, lesson_id)\n context[\"lesson_id\"] = lesson_id\n except ContentNode.DoesNotExist:\n raise ValidationError(\"Invalid node_id\")\n elif quiz_id is not None:\n self._check_quiz_permissions(user, quiz_id)\n mastery_model = {\"type\": \"quiz\", \"coach_assigned\": True}\n content_id = quiz_id\n channel_id = None\n kind = content_kinds.QUIZ\n context[\"quiz_id\"] = quiz_id\n return content_id, channel_id, kind, mastery_model, context\n\n def _get_or_create_summarylog(\n self,\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n ):\n if not user:\n output = {\n \"progress\": 0,\n \"extra_fields\": {},\n \"time_spent\": 
0,\n \"complete\": False,\n }\n if mastery_model:\n output.update(\n {\n \"mastery_criterion\": mastery_model,\n \"pastattempts\": [],\n \"totalattempts\": 0,\n \"complete\": False,\n }\n )\n return output\n\n try:\n summarylog = ContentSummaryLog.objects.get(\n content_id=content_id,\n user=user,\n )\n updated_fields = (\"end_timestamp\", \"channel_id\", \"_morango_dirty_bit\")\n if repeat:\n summarylog.progress = 0\n updated_fields += (\"progress\",)\n summarylog.channel_id = channel_id\n summarylog.end_timestamp = start_timestamp\n summarylog.save(update_fields=updated_fields)\n except ContentSummaryLog.DoesNotExist:\n summarylog = ContentSummaryLog.objects.create(\n content_id=content_id,\n user=user,\n channel_id=channel_id,\n kind=kind,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n )\n self._process_created_notification(summarylog, context)\n\n output = {\n \"progress\": summarylog.progress,\n \"extra_fields\": summarylog.extra_fields,\n \"time_spent\": summarylog.time_spent,\n \"complete\": summarylog.progress >= 1,\n }\n if mastery_model:\n assessment_output, mastery_level = self._start_assessment_session(\n mastery_model,\n summarylog,\n user,\n start_timestamp,\n repeat,\n context,\n )\n output.update(assessment_output)\n context[\"mastery_level\"] = mastery_level\n return output\n\n def create(self, request):\n \"\"\"\n Make a POST request to start a content session.\n\n Requires one of either:\n - node_id: the pk of the resource\n - quiz_id: the pk of the quiz (Exam) object\n\n Optional parameters:\n - repeat: whether to reset previous progress on this content to zero and start fresh\n - lesson_id: if this is being engaged within a lesson\n\n Returns object with properties:\n - session_id: id of the session object that was created by this call\n - context: contains node_id, quiz_id, lesson_id, and mastery_level as appropriate\n - progress: any previous progress on this content resource\n - time_spent: any previous time spent on this content resource\n - extra_fields: any previously recorded additional data stored for this resource\n - complete: whether this resource is completed by this user\n\n If this is an assessment, return object will also include:\n - mastery_criterion: mastery criterion that should be applied to determine completion\n - pastattempts: serialized subset of recent responses, used to determine completion\n - totalattempts: total number of previous responses within this run of the assessment resource\n \"\"\"\n serializer = StartSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n start_timestamp = local_now()\n repeat = serializer.validated_data[\"repeat\"]\n\n content_id, channel_id, kind, mastery_model, context = self._get_context(\n request.user, serializer.validated_data\n )\n\n with transaction.atomic(), dataset_cache:\n\n user = None if request.user.is_anonymous() else request.user\n\n self._precache_dataset_id(user)\n\n output = self._get_or_create_summarylog(\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n )\n\n # Must ensure there is no user here to maintain user privacy for logging.\n visitor_id = (\n request.COOKIES.get(\"visitor_id\")\n if hasattr(request, \"COOKIES\") and not user\n else None\n )\n sessionlog = ContentSessionLog.objects.create(\n content_id=content_id,\n channel_id=channel_id,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n user=user,\n kind=kind,\n 
visitor_id=visitor_id,\n extra_fields={\"context\": context.to_dict()},\n )\n output.update({\"session_id\": sessionlog.id, \"context\": context.to_dict()})\n return Response(output)\n\n def _process_created_notification(self, summarylog, context):\n # dont create notifications upon creating a summary log for an exercise\n # notifications should only be triggered upon first attempting a question in the exercise\n if \"node_id\" in context and summarylog.kind != content_kinds.EXERCISE:\n # We have sufficient information to only trigger notifications for the specific\n # lesson that this is being engaged with, but until we can work out the exact\n # way that we want to match this with contextual progress tracking, we are\n # not changing this for now.\n wrap_to_save_queue(\n create_summarylog,\n summarylog,\n )\n\n def _process_masterylog_created_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_started_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _check_quiz_log_permissions(self, masterylog):\n if (\n masterylog\n and masterylog.complete\n and masterylog.mastery_criterion.get(\"type\") == \"quiz\"\n and masterylog.mastery_criterion.get(\"coach_assigned\")\n ):\n raise PermissionDenied(\"Cannot update a finished coach assigned quiz\")\n\n def _get_or_create_masterylog(\n self,\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n ):\n masterylog = (\n MasteryLog.objects.filter(\n summarylog=summarylog,\n user=user,\n )\n .order_by(\"-complete\", \"-end_timestamp\")\n .first()\n )\n\n if masterylog is None or (masterylog.complete and repeat):\n # There is no previous masterylog, or the previous masterylog\n # is complete, and the request is requesting a new attempt.\n # Here we generate a mastery_level value - this serves to disambiguate multiple\n # retries at an assessment (either an exercise, practice quiz, or coach assigned quiz).\n # Having the same mastery_level/summarylog (and hence user) pair will result in the same\n # identifier being created. 
So if the same user engages with the same assessment on different\n # devices, when the data synchronizes, if the mastery_level is the same, this data will be\n # unified under a single try.\n if mastery_model.get(\"coach_assigned\"):\n # To prevent coach assigned quiz mastery logs from propagating to older\n # Kolibri versions, we use negative mastery levels for these.\n # In older versions of Kolibri the mastery_level is validated to be\n # between 1 and 10 - so these values will fail validation and hence will\n # not be deserialized from the morango store.\n # We choose a random integer across the range of acceptable values,\n # in order to prevent collisions across multiple devices when users\n # start different tries of the same coach assigned quiz.\n # With a length of 9 digits for the decimal number, we would need approximately\n # 45 tries to have a 1 in a million chance of a collision.\n # Numbers derived using the formula for the generalized birthday problem:\n # https://en.wikipedia.org/wiki/Birthday_problem#The_generalized_birthday_problem\n # n=sqrt(2*d*ln(1/(1-p))\n # where d is the number of combinations of d digits, p is the probability\n # So for 9 digits, d = 10^9\n # p = 0.000001 for one in a million\n mastery_level = randint(MIN_INTEGER, -1)\n else:\n mastery_level = (\n masterylog.mastery_level + 1 if masterylog is not None else 1\n )\n\n masterylog = MasteryLog.objects.create(\n summarylog=summarylog,\n user=user,\n mastery_criterion=mastery_model,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n mastery_level=mastery_level,\n )\n self._process_masterylog_created_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog\n\n def _start_assessment_session(\n self, mastery_model, summarylog, user, start_timestamp, repeat, context\n ):\n masterylog = self._get_or_create_masterylog(\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n )\n\n mastery_criterion = masterylog.mastery_criterion\n exercise_type = mastery_criterion.get(\"type\")\n attemptlogs = masterylog.attemptlogs.values(*attemptlog_fields).order_by(\n \"-start_timestamp\"\n )\n\n # get the first x logs depending on the exercise type\n if exercise_type == exercises.M_OF_N:\n attemptlogs = attemptlogs[: mastery_criterion[\"n\"]]\n elif exercise_type in MAPPING:\n attemptlogs = attemptlogs[: MAPPING[exercise_type]]\n elif exercise_type == \"quiz\":\n attemptlogs = attemptlogs.order_by()\n else:\n attemptlogs = attemptlogs[:10]\n\n return {\n \"mastery_criterion\": mastery_criterion,\n \"pastattempts\": attemptlogs,\n \"totalattempts\": masterylog.attemptlogs.count(),\n \"complete\": masterylog.complete,\n }, masterylog.mastery_level\n\n def _generate_interaction_summary(self, validated_data):\n if validated_data[\"error\"]:\n return {\n \"type\": interaction_types.ERROR,\n }\n elif validated_data[\"hinted\"]:\n return {\n \"type\": interaction_types.HINT,\n \"answer\": validated_data[\"answer\"],\n }\n return {\n \"type\": interaction_types.ANSWER,\n \"answer\": validated_data[\"answer\"],\n \"correct\": validated_data[\"correct\"],\n }\n\n def _process_masterylog_completed_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_completed_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _update_and_return_mastery_log_id(\n self, user, complete, summarylog_id, end_timestamp, context\n ):\n if not user.is_anonymous() and context[\"mastery_level\"] is not None:\n 
try:\n masterylog = MasteryLog.objects.get(\n user=user,\n mastery_level=context[\"mastery_level\"],\n summarylog_id=summarylog_id,\n )\n if complete and not masterylog.complete:\n masterylog.complete = True\n masterylog.completion_timestamp = end_timestamp\n masterylog.save(\n update_fields=(\n \"complete\",\n \"completion_timestamp\",\n \"_morango_dirty_bit\",\n )\n )\n self._process_masterylog_completed_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog.id\n except MasteryLog.DoesNotExist:\n raise ValidationError(\n \"Invalid mastery_level value, this session has not been started.\"\n )\n\n def _update_attempt(self, attemptlog, interaction, update_fields, end_timestamp):\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n attemptlog.interaction_history += [interaction_summary]\n attemptlog.end_timestamp = end_timestamp\n attemptlog.time_spent = interaction[\"time_spent\"]\n\n if interaction[\"error\"] and not attemptlog.error:\n attemptlog.error = interaction[\"error\"]\n update_fields.add(\"error\")\n\n # Mark hinted only if it is not already correct, and don't undo previously hinted\n if interaction[\"hinted\"] and not attemptlog.hinted and not attemptlog.correct:\n attemptlog.hinted = interaction[\"hinted\"]\n update_fields.add(\"hinted\")\n\n if interaction[\"replace\"]:\n attemptlog.correct = interaction[\"correct\"]\n update_fields.add(\"correct\")\n\n if \"answer\" in interaction:\n attemptlog.answer = interaction[\"answer\"]\n update_fields.add(\"answer\")\n\n if \"simple_answer\" in interaction:\n attemptlog.simple_answer = interaction[\"simple_answer\"]\n update_fields.add(\"simple_answer\")\n\n if interaction[\"complete\"] and not attemptlog.complete:\n attemptlog.complete = interaction[\"complete\"]\n attemptlog.completion_timestamp = end_timestamp\n update_fields.update({\"complete\", \"completion_timestamp\"})\n\n def _create_attempt(\n self, session_id, masterylog_id, user, interaction, end_timestamp\n ):\n start_timestamp = end_timestamp - timedelta(seconds=interaction[\"time_spent\"])\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n del interaction[\"replace\"]\n\n return AttemptLog(\n sessionlog_id=session_id,\n masterylog_id=masterylog_id,\n interaction_history=[interaction_summary],\n user=user,\n start_timestamp=start_timestamp,\n completion_timestamp=end_timestamp if interaction[\"complete\"] else None,\n end_timestamp=end_timestamp,\n **interaction\n )\n\n def _update_or_create_attempts(\n self, session_id, masterylog_id, user, interactions, end_timestamp, context\n ):\n user = None if user.is_anonymous() else user\n\n output = []\n\n for _, item_interactions in groupby(interactions, lambda x: x[\"item\"]):\n created = False\n update_fields = {\n \"interaction_history\",\n \"end_timestamp\",\n \"time_spent\",\n \"_morango_dirty_bit\",\n }\n item_interactions = list(item_interactions)\n if \"id\" in item_interactions[0]:\n try:\n attemptlog = AttemptLog.objects.get(\n id=item_interactions[0][\"id\"],\n masterylog_id=masterylog_id,\n user=user,\n )\n except AttemptLog.DoesNotExist:\n raise ValidationError(\"Invalid attemptlog id specified\")\n else:\n attemptlog = self._create_attempt(\n session_id,\n masterylog_id,\n user,\n item_interactions[0],\n end_timestamp,\n )\n created = True\n item_interactions = item_interactions[1:]\n updated = bool(item_interactions)\n\n for response in item_interactions:\n self._update_attempt(attemptlog, response, 
update_fields, end_timestamp)\n\n self._process_attempt_notifications(\n attemptlog, context, user, created, updated\n )\n attemptlog.save(\n update_fields=None if created else update_fields, force_insert=created\n )\n attempt = {}\n for field in attemptlog_fields:\n attempt[field] = getattr(attemptlog, field)\n output.append(attempt)\n return {\"attempts\": output}\n\n def _process_attempt_notifications(\n self, attemptlog, context, user, created, updated\n ):\n if user is None:\n return\n if \"lesson_id\" in context:\n wrap_to_save_queue(parse_attemptslog, attemptlog)\n if created and \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_answered_notification, attemptlog, context[\"quiz_id\"]\n )\n\n def _get_session_log(self, session_id, user):\n try:\n if user.is_anonymous():\n return ContentSessionLog.objects.get(id=session_id, user__isnull=True)\n else:\n return ContentSessionLog.objects.get(id=session_id, user=user)\n except ContentSessionLog.DoesNotExist:\n raise Http404(\n \"ContentSessionLog with id {} does not exist\".format(session_id)\n )\n\n def _normalize_progress(self, progress):\n return max(0, min(1.0, progress))\n\n def _update_content_log(self, log, end_timestamp, validated_data):\n update_fields = (\"end_timestamp\", \"_morango_dirty_bit\")\n\n log.end_timestamp = end_timestamp\n if \"progress_delta\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(\n log.progress + validated_data[\"progress_delta\"]\n )\n elif \"progress\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(validated_data[\"progress\"])\n if \"time_spent_delta\" in validated_data:\n update_fields += (\"time_spent\",)\n log.time_spent += validated_data[\"time_spent_delta\"]\n return update_fields\n\n def _update_summary_log(\n self, user, sessionlog, end_timestamp, validated_data, context\n ):\n if user.is_anonymous():\n return\n summarylog = ContentSummaryLog.objects.get(\n content_id=sessionlog.content_id, user=user\n )\n was_complete = summarylog.progress >= 1\n\n update_fields = self._update_content_log(\n summarylog, end_timestamp, validated_data\n )\n\n if summarylog.progress >= 1 and not was_complete:\n summarylog.completion_timestamp = end_timestamp\n update_fields += (\"completion_timestamp\",)\n self._process_completed_notification(summarylog, context)\n if \"extra_fields\" in validated_data:\n update_fields += (\"extra_fields\",)\n summarylog.extra_fields = validated_data[\"extra_fields\"]\n\n summarylog.save(update_fields=update_fields)\n return summarylog\n\n def _update_session(self, session_id, user, end_timestamp, validated_data):\n sessionlog = self._get_session_log(session_id, user)\n\n context = LogContext(**sessionlog.extra_fields.get(\"context\", {}))\n\n if \"quiz_id\" in context:\n self._check_quiz_permissions(user, context[\"quiz_id\"])\n\n update_fields = self._update_content_log(\n sessionlog, end_timestamp, validated_data\n )\n sessionlog.save(update_fields=update_fields)\n\n summarylog = self._update_summary_log(\n user, sessionlog, end_timestamp, validated_data, context\n )\n\n if summarylog is not None:\n complete = summarylog.progress >= 1\n else:\n complete = sessionlog.progress >= 1\n\n return {\"complete\": complete}, summarylog.id if summarylog else None, context\n\n def _process_completed_notification(self, summarylog, context):\n if \"node_id\" in context:\n wrap_to_save_queue(\n parse_summarylog,\n summarylog,\n )\n\n def update(self, request, pk=None):\n \"\"\"\n Make a PUT 
request to update the current session\n\n Requires one of either:\n - progress_delta: increase the progress by this amount\n - progress: set the progress to this amount\n\n Can also update time spent recorded with a delta:\n - time_spent_delta: number of seconds to increase time_spent by\n\n And update the extra_fields value stored:\n - extra_fields: the complete representation to set extra_fields to\n\n If creating or updating attempts for an assessment must include:\n - interactions: an array of objects, if updating an existing attempt, must include attempt_id\n\n Returns an object with the properties:\n - complete: boolean indicating if the resource is completed\n\n If an attempt at an assessment was included, then this parameter will be included:\n - attempts: serialized form of the attempt, equivalent to that returned in pastattempts from\n session initialization\n \"\"\"\n if pk is None:\n raise Http404\n serializer = UpdateSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n end_timestamp = local_now()\n validated_data = serializer.validated_data\n\n with transaction.atomic(), dataset_cache:\n self._precache_dataset_id(request.user)\n\n output, summarylog_id, context = self._update_session(\n pk, request.user, end_timestamp, validated_data\n )\n masterylog_id = self._update_and_return_mastery_log_id(\n request.user, output[\"complete\"], summarylog_id, end_timestamp, context\n )\n if \"interactions\" in validated_data:\n attempt_output = self._update_or_create_attempts(\n pk,\n masterylog_id,\n request.user,\n validated_data[\"interactions\"],\n end_timestamp,\n context,\n )\n output.update(attempt_output)\n return Response(output)\n\n\nclass TotalContentProgressViewSet(viewsets.GenericViewSet):\n def retrieve(self, request, pk=None):\n if request.user.is_anonymous() or pk != request.user.id:\n raise PermissionDenied(\"Can only access progress data for self\")\n progress = (\n request.user.contentsummarylog_set.filter(progress=1)\n .aggregate(Sum(\"progress\"))\n .get(\"progress__sum\")\n )\n return Response(\n {\n \"id\": pk,\n \"progress\": progress,\n }\n )\n\n\nclass BaseLogFilter(FilterSet):\n facility = UUIDFilter(method=\"filter_facility\")\n classroom = UUIDFilter(method=\"filter_classroom\")\n learner_group = UUIDFilter(method=\"filter_learner_group\")\n\n # Only a superuser can filter by facilities\n def filter_facility(self, queryset, name, value):\n return queryset.filter(user__facility=value)\n\n def filter_classroom(self, queryset, name, value):\n return queryset.filter(\n Q(user__memberships__collection_id=value)\n | Q(user__memberships__collection__parent_id=value)\n )\n\n def filter_learner_group(self, queryset, name, value):\n return queryset.filter(user__memberships__collection_id=value)\n\n\nclass MasteryFilter(BaseLogFilter):\n content = UUIDFilter(name=\"summarylog__content_id\")\n\n class Meta:\n model = MasteryLog\n fields = [\"content\"]\n\n\nclass MasteryLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n values = (\n \"user\",\n \"summarylog\",\n \"mastery_criterion\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"mastery_level\",\n \"complete\",\n )\n\n\nclass AttemptFilter(BaseLogFilter):\n content = 
CharFilter(method=\"filter_content\")\n\n def filter_content(self, queryset, name, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = [\"masterylog\", \"complete\", \"user\", \"content\", \"item\"]\n\n\nclass AttemptLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = AttemptLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = (\"end_timestamp\",)\n ordering = (\"end_timestamp\",)\n\n values = (\n \"item\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"time_spent\",\n \"complete\",\n \"correct\",\n \"hinted\",\n \"answer\",\n \"simple_answer\",\n \"interaction_history\",\n \"user\",\n \"error\",\n \"masterylog\",\n \"sessionlog\",\n )\n",
"path": "kolibri/core/logger/api.py"
}
] | [
{
"content": "import logging\nfrom datetime import timedelta\nfrom itertools import groupby\nfrom random import randint\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.http import Http404\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom django_filters.rest_framework import UUIDFilter\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom rest_framework import filters\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import MasteryLog\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.models import dataset_cache\nfrom kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.content.models import AssessmentMetaData\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.constants import interaction_types\nfrom kolibri.core.logger.constants.exercise_attempts import MAPPING\nfrom kolibri.core.notifications.api import create_summarylog\nfrom kolibri.core.notifications.api import parse_attemptslog\nfrom kolibri.core.notifications.api import parse_summarylog\nfrom kolibri.core.notifications.api import quiz_answered_notification\nfrom kolibri.core.notifications.api import quiz_completed_notification\nfrom kolibri.core.notifications.api import quiz_started_notification\nfrom kolibri.core.notifications.tasks import wrap_to_save_queue\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass HexStringUUIDField(serializers.UUIDField):\n def __init__(self, **kwargs):\n self.uuid_format = \"hex\"\n super(HexStringUUIDField, self).__init__(**kwargs)\n\n def to_internal_value(self, data):\n return super(HexStringUUIDField, self).to_internal_value(data).hex\n\n\nclass StartSessionSerializer(serializers.Serializer):\n lesson_id = HexStringUUIDField(required=False)\n node_id = HexStringUUIDField(required=False)\n # Do this as a special way of handling our coach generated quizzes\n quiz_id = HexStringUUIDField(required=False)\n # A flag to indicate whether to start the session over again\n repeat = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if \"quiz_id\" in data and (\"lesson_id\" in data or \"node_id\" in data):\n raise ValidationError(\"quiz_id must not be mixed with other context\")\n if \"node_id\" not in data and \"quiz_id\" not in data:\n raise ValidationError(\"node_id is required if not a coach assigned quiz\")\n return data\n\n\nclass InteractionSerializer(serializers.Serializer):\n id = HexStringUUIDField(required=False)\n item = serializers.CharField()\n correct = serializers.FloatField(min_value=0, max_value=1)\n complete = serializers.BooleanField(required=False, default=False)\n time_spent = 
serializers.FloatField(min_value=0)\n\n answer = serializers.DictField(required=False)\n simple_answer = serializers.CharField(required=False, allow_blank=True)\n error = serializers.BooleanField(required=False, default=False)\n hinted = serializers.BooleanField(required=False, default=False)\n # Whether to replace the current answer with the new answer\n # this is a no-op if the attempt is being created.\n replace = serializers.BooleanField(required=False, default=False)\n\n def validate(self, data):\n if not data[\"error\"] and \"answer\" not in data:\n raise ValidationError(\"Must provide an answer if not an error\")\n return data\n\n\nclass UpdateSessionSerializer(serializers.Serializer):\n progress_delta = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n progress = serializers.FloatField(min_value=0, max_value=1.0, required=False)\n time_spent_delta = serializers.FloatField(min_value=0, required=False)\n extra_fields = serializers.DictField(required=False)\n interactions = InteractionSerializer(required=False, many=True)\n\n def validate(self, data):\n if \"progress_delta\" in data and \"progress\" in data:\n raise ValidationError(\n \"must not pass progress_delta and progress in the same request\"\n )\n return data\n\n\n# The lowest integer that can be encoded\n# in a Django IntegerField across all backends\nMIN_INTEGER = -2147483648\n\n\nattemptlog_fields = [\n \"id\",\n \"correct\",\n \"complete\",\n \"hinted\",\n \"error\",\n \"item\",\n \"answer\",\n \"time_spent\",\n]\n\n\nclass LogContext(object):\n \"\"\"\n Object used to provide a limited dict like interface for encoding the\n context that can be stored in the sessionlog, and which is then\n returned to the frontend as part of the initialization of a content\n session.\n node_id - represents a specific ContentNode in a topic tree, while the\n content_id for that node is recorded directly on the sessionlog.\n quiz_id - represents the id of the Exam Model object that this session\n is regarding (if any).\n lesson_id - represents the id of the lesson this node_id is being engaged\n with from within (if any).\n mastery_level - represents the current 'try' at an assessment, whether an exercise\n a practice quiz or a coach assigned quiz. 
Different mastery_level values\n indicate a different try at the assessment.\n\n This is used to encode the values that are sent when initializing a session\n (see its use in the _get_context method below)\n and then also used to hold the values from an existing sessionlog when\n updating a session (see _update_session method).\n \"\"\"\n\n __slots__ = \"node_id\", \"quiz_id\", \"lesson_id\", \"mastery_level\"\n\n def __init__(self, **kwargs):\n for key, value in kwargs.items():\n self[key] = value\n\n def __setitem__(self, key, value):\n if key not in self.__slots__:\n return\n setattr(self, key, value)\n\n def __getitem__(self, key):\n if key not in self.__slots__:\n return\n return getattr(self, key, None)\n\n def __contains__(self, key):\n return key in self.__slots__ and hasattr(self, key)\n\n def to_dict(self):\n \"\"\"\n Provide a dictionary of the keys stored in the context object.\n Used to serialize for inclusion in an API Response.\n \"\"\"\n output = {}\n for slot in self.__slots__:\n if hasattr(self, slot):\n output[slot] = getattr(self, slot)\n return output\n\n\nclass ProgressTrackingViewSet(viewsets.GenericViewSet):\n def _precache_dataset_id(self, user):\n if user is None or user.is_anonymous():\n return\n key = ContentSessionLog.get_related_dataset_cache_key(\n user.id, user._meta.db_table\n )\n dataset_cache.set(key, user.dataset_id)\n\n def _check_quiz_permissions(self, user, quiz_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a quiz if not logged in\")\n if not Exam.objects.filter(\n active=True,\n assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=quiz_id,\n ).exists():\n raise PermissionDenied(\"User does not have access to this quiz_id\")\n\n def _check_lesson_permissions(self, user, lesson_id):\n if user.is_anonymous():\n raise PermissionDenied(\"Cannot access a lesson if not logged in\")\n if not Lesson.objects.filter(\n lesson_assignments__collection_id__in=user.memberships.all().values(\n \"collection_id\"\n ),\n id=lesson_id,\n ).exists():\n raise ValidationError(\"Invalid lesson_id\")\n\n def _get_context(self, user, validated_data):\n node_id = validated_data.get(\"node_id\")\n quiz_id = validated_data.get(\"quiz_id\")\n lesson_id = validated_data.get(\"lesson_id\")\n\n context = LogContext()\n\n if node_id is not None:\n try:\n node = (\n ContentNode.objects.annotate(\n mastery_model=Subquery(\n AssessmentMetaData.objects.filter(\n contentnode_id=OuterRef(\"id\")\n ).values_list(\"mastery_model\", flat=True)[:1]\n )\n )\n .values(\"content_id\", \"channel_id\", \"kind\", \"mastery_model\")\n .get(id=node_id)\n )\n mastery_model = node[\"mastery_model\"]\n content_id = node[\"content_id\"]\n channel_id = node[\"channel_id\"]\n kind = node[\"kind\"]\n context[\"node_id\"] = node_id\n if lesson_id:\n self._check_lesson_permissions(user, lesson_id)\n context[\"lesson_id\"] = lesson_id\n except ContentNode.DoesNotExist:\n raise ValidationError(\"Invalid node_id\")\n elif quiz_id is not None:\n self._check_quiz_permissions(user, quiz_id)\n mastery_model = {\"type\": \"quiz\", \"coach_assigned\": True}\n content_id = quiz_id\n channel_id = None\n kind = content_kinds.QUIZ\n context[\"quiz_id\"] = quiz_id\n return content_id, channel_id, kind, mastery_model, context\n\n def _get_or_create_summarylog(\n self,\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n ):\n if not user:\n output = {\n \"progress\": 0,\n \"extra_fields\": {},\n \"time_spent\": 
0,\n \"complete\": False,\n }\n if mastery_model:\n output.update(\n {\n \"mastery_criterion\": mastery_model,\n \"pastattempts\": [],\n \"totalattempts\": 0,\n \"complete\": False,\n }\n )\n return output\n\n try:\n summarylog = ContentSummaryLog.objects.get(\n content_id=content_id,\n user=user,\n )\n updated_fields = (\"end_timestamp\", \"channel_id\", \"_morango_dirty_bit\")\n if repeat:\n summarylog.progress = 0\n updated_fields += (\"progress\",)\n summarylog.channel_id = channel_id\n summarylog.end_timestamp = start_timestamp\n summarylog.save(update_fields=updated_fields)\n except ContentSummaryLog.DoesNotExist:\n summarylog = ContentSummaryLog.objects.create(\n content_id=content_id,\n user=user,\n channel_id=channel_id,\n kind=kind,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n )\n self._process_created_notification(summarylog, context)\n\n output = {\n \"progress\": summarylog.progress,\n \"extra_fields\": summarylog.extra_fields,\n \"time_spent\": summarylog.time_spent,\n \"complete\": summarylog.progress >= 1,\n }\n if mastery_model:\n assessment_output, mastery_level = self._start_assessment_session(\n mastery_model,\n summarylog,\n user,\n start_timestamp,\n repeat,\n context,\n )\n output.update(assessment_output)\n context[\"mastery_level\"] = mastery_level\n return output\n\n def create(self, request):\n \"\"\"\n Make a POST request to start a content session.\n\n Requires one of either:\n - node_id: the pk of the resource\n - quiz_id: the pk of the quiz (Exam) object\n\n Optional parameters:\n - repeat: whether to reset previous progress on this content to zero and start fresh\n - lesson_id: if this is being engaged within a lesson\n\n Returns object with properties:\n - session_id: id of the session object that was created by this call\n - context: contains node_id, quiz_id, lesson_id, and mastery_level as appropriate\n - progress: any previous progress on this content resource\n - time_spent: any previous time spent on this content resource\n - extra_fields: any previously recorded additional data stored for this resource\n - complete: whether this resource is completed by this user\n\n If this is an assessment, return object will also include:\n - mastery_criterion: mastery criterion that should be applied to determine completion\n - pastattempts: serialized subset of recent responses, used to determine completion\n - totalattempts: total number of previous responses within this run of the assessment resource\n \"\"\"\n serializer = StartSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n start_timestamp = local_now()\n repeat = serializer.validated_data[\"repeat\"]\n\n content_id, channel_id, kind, mastery_model, context = self._get_context(\n request.user, serializer.validated_data\n )\n\n with transaction.atomic(), dataset_cache:\n\n user = None if request.user.is_anonymous() else request.user\n\n self._precache_dataset_id(user)\n\n output = self._get_or_create_summarylog(\n user,\n content_id,\n channel_id,\n kind,\n mastery_model,\n start_timestamp,\n repeat,\n context,\n )\n\n # Must ensure there is no user here to maintain user privacy for logging.\n visitor_id = (\n request.COOKIES.get(\"visitor_id\")\n if hasattr(request, \"COOKIES\") and not user\n else None\n )\n sessionlog = ContentSessionLog.objects.create(\n content_id=content_id,\n channel_id=channel_id,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n user=user,\n kind=kind,\n 
visitor_id=visitor_id,\n extra_fields={\"context\": context.to_dict()},\n )\n output.update({\"session_id\": sessionlog.id, \"context\": context.to_dict()})\n return Response(output)\n\n def _process_created_notification(self, summarylog, context):\n # dont create notifications upon creating a summary log for an exercise\n # notifications should only be triggered upon first attempting a question in the exercise\n if \"node_id\" in context and summarylog.kind != content_kinds.EXERCISE:\n # We have sufficient information to only trigger notifications for the specific\n # lesson that this is being engaged with, but until we can work out the exact\n # way that we want to match this with contextual progress tracking, we are\n # not changing this for now.\n wrap_to_save_queue(\n create_summarylog,\n summarylog,\n )\n\n def _process_masterylog_created_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_started_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _check_quiz_log_permissions(self, masterylog):\n if (\n masterylog\n and masterylog.complete\n and masterylog.mastery_criterion.get(\"type\") == \"quiz\"\n and masterylog.mastery_criterion.get(\"coach_assigned\")\n ):\n raise PermissionDenied(\"Cannot update a finished coach assigned quiz\")\n\n def _get_or_create_masterylog(\n self,\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n ):\n masterylog = (\n MasteryLog.objects.filter(\n summarylog=summarylog,\n user=user,\n )\n .order_by(\"-complete\", \"-end_timestamp\")\n .first()\n )\n\n if masterylog is None or (masterylog.complete and repeat):\n # There is no previous masterylog, or the previous masterylog\n # is complete, and the request is requesting a new attempt.\n # Here we generate a mastery_level value - this serves to disambiguate multiple\n # retries at an assessment (either an exercise, practice quiz, or coach assigned quiz).\n # Having the same mastery_level/summarylog (and hence user) pair will result in the same\n # identifier being created. 
So if the same user engages with the same assessment on different\n # devices, when the data synchronizes, if the mastery_level is the same, this data will be\n # unified under a single try.\n if mastery_model.get(\"coach_assigned\"):\n # To prevent coach assigned quiz mastery logs from propagating to older\n # Kolibri versions, we use negative mastery levels for these.\n # In older versions of Kolibri the mastery_level is validated to be\n # between 1 and 10 - so these values will fail validation and hence will\n # not be deserialized from the morango store.\n # We choose a random integer across the range of acceptable values,\n # in order to prevent collisions across multiple devices when users\n # start different tries of the same coach assigned quiz.\n # With a length of 9 digits for the decimal number, we would need approximately\n # 45 tries to have a 1 in a million chance of a collision.\n # Numbers derived using the formula for the generalized birthday problem:\n # https://en.wikipedia.org/wiki/Birthday_problem#The_generalized_birthday_problem\n # n=sqrt(2*d*ln(1/(1-p))\n # where d is the number of combinations of d digits, p is the probability\n # So for 9 digits, d = 10^9\n # p = 0.000001 for one in a million\n mastery_level = randint(MIN_INTEGER, -1)\n else:\n mastery_level = (\n masterylog.mastery_level + 1 if masterylog is not None else 1\n )\n\n masterylog = MasteryLog.objects.create(\n summarylog=summarylog,\n user=user,\n mastery_criterion=mastery_model,\n start_timestamp=start_timestamp,\n end_timestamp=start_timestamp,\n mastery_level=mastery_level,\n )\n self._process_masterylog_created_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog\n\n def _start_assessment_session(\n self, mastery_model, summarylog, user, start_timestamp, repeat, context\n ):\n masterylog = self._get_or_create_masterylog(\n user,\n summarylog,\n repeat,\n mastery_model,\n start_timestamp,\n context,\n )\n\n mastery_criterion = masterylog.mastery_criterion\n exercise_type = mastery_criterion.get(\"type\")\n attemptlogs = masterylog.attemptlogs.values(*attemptlog_fields).order_by(\n \"-start_timestamp\"\n )\n\n # get the first x logs depending on the exercise type\n if exercise_type == exercises.M_OF_N:\n attemptlogs = attemptlogs[: mastery_criterion[\"n\"]]\n elif exercise_type in MAPPING:\n attemptlogs = attemptlogs[: MAPPING[exercise_type]]\n elif exercise_type == \"quiz\":\n attemptlogs = attemptlogs.order_by()\n else:\n attemptlogs = attemptlogs[:10]\n\n return {\n \"mastery_criterion\": mastery_criterion,\n \"pastattempts\": attemptlogs,\n \"totalattempts\": masterylog.attemptlogs.count(),\n \"complete\": masterylog.complete,\n }, masterylog.mastery_level\n\n def _generate_interaction_summary(self, validated_data):\n if validated_data[\"error\"]:\n return {\n \"type\": interaction_types.ERROR,\n }\n elif validated_data[\"hinted\"]:\n return {\n \"type\": interaction_types.HINT,\n \"answer\": validated_data[\"answer\"],\n }\n return {\n \"type\": interaction_types.ANSWER,\n \"answer\": validated_data[\"answer\"],\n \"correct\": validated_data[\"correct\"],\n }\n\n def _process_masterylog_completed_notification(self, masterylog, context):\n if \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_completed_notification, masterylog, context[\"quiz_id\"]\n )\n\n def _update_and_return_mastery_log_id(\n self, user, complete, summarylog_id, end_timestamp, context\n ):\n if not user.is_anonymous() and context[\"mastery_level\"] is not None:\n 
try:\n masterylog = MasteryLog.objects.get(\n user=user,\n mastery_level=context[\"mastery_level\"],\n summarylog_id=summarylog_id,\n )\n if complete and not masterylog.complete:\n masterylog.complete = True\n masterylog.completion_timestamp = end_timestamp\n masterylog.save(\n update_fields=(\n \"complete\",\n \"completion_timestamp\",\n \"_morango_dirty_bit\",\n )\n )\n self._process_masterylog_completed_notification(masterylog, context)\n else:\n self._check_quiz_log_permissions(masterylog)\n return masterylog.id\n except MasteryLog.DoesNotExist:\n raise ValidationError(\n \"Invalid mastery_level value, this session has not been started.\"\n )\n\n def _update_attempt(self, attemptlog, interaction, update_fields, end_timestamp):\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n attemptlog.interaction_history += [interaction_summary]\n attemptlog.end_timestamp = end_timestamp\n attemptlog.time_spent = interaction[\"time_spent\"]\n\n if interaction[\"error\"] and not attemptlog.error:\n attemptlog.error = interaction[\"error\"]\n update_fields.add(\"error\")\n\n # Mark hinted only if it is not already correct, and don't undo previously hinted\n if interaction[\"hinted\"] and not attemptlog.hinted and not attemptlog.correct:\n attemptlog.hinted = interaction[\"hinted\"]\n update_fields.add(\"hinted\")\n\n if interaction[\"replace\"]:\n attemptlog.correct = interaction[\"correct\"]\n update_fields.add(\"correct\")\n\n if \"answer\" in interaction:\n attemptlog.answer = interaction[\"answer\"]\n update_fields.add(\"answer\")\n\n if \"simple_answer\" in interaction:\n attemptlog.simple_answer = interaction[\"simple_answer\"]\n update_fields.add(\"simple_answer\")\n\n if interaction[\"complete\"] and not attemptlog.complete:\n attemptlog.complete = interaction[\"complete\"]\n attemptlog.completion_timestamp = end_timestamp\n update_fields.update({\"complete\", \"completion_timestamp\"})\n\n def _create_attempt(\n self, session_id, masterylog_id, user, interaction, end_timestamp\n ):\n start_timestamp = end_timestamp - timedelta(seconds=interaction[\"time_spent\"])\n\n interaction_summary = self._generate_interaction_summary(interaction)\n\n del interaction[\"replace\"]\n\n return AttemptLog(\n sessionlog_id=session_id,\n masterylog_id=masterylog_id,\n interaction_history=[interaction_summary],\n user=user,\n start_timestamp=start_timestamp,\n completion_timestamp=end_timestamp if interaction[\"complete\"] else None,\n end_timestamp=end_timestamp,\n **interaction\n )\n\n def _update_or_create_attempts(\n self, session_id, masterylog_id, user, interactions, end_timestamp, context\n ):\n user = None if user.is_anonymous() else user\n\n output = []\n\n for _, item_interactions in groupby(interactions, lambda x: x[\"item\"]):\n created = False\n update_fields = {\n \"interaction_history\",\n \"end_timestamp\",\n \"time_spent\",\n \"_morango_dirty_bit\",\n }\n item_interactions = list(item_interactions)\n if \"id\" in item_interactions[0]:\n try:\n attemptlog = AttemptLog.objects.get(\n id=item_interactions[0][\"id\"],\n masterylog_id=masterylog_id,\n user=user,\n )\n except AttemptLog.DoesNotExist:\n raise ValidationError(\"Invalid attemptlog id specified\")\n else:\n attemptlog = self._create_attempt(\n session_id,\n masterylog_id,\n user,\n item_interactions[0],\n end_timestamp,\n )\n created = True\n item_interactions = item_interactions[1:]\n updated = bool(item_interactions)\n\n for response in item_interactions:\n self._update_attempt(attemptlog, response, 
update_fields, end_timestamp)\n\n self._process_attempt_notifications(\n attemptlog, context, user, created, updated\n )\n attemptlog.save(\n update_fields=None if created else update_fields, force_insert=created\n )\n attempt = {}\n for field in attemptlog_fields:\n attempt[field] = getattr(attemptlog, field)\n output.append(attempt)\n return {\"attempts\": output}\n\n def _process_attempt_notifications(\n self, attemptlog, context, user, created, updated\n ):\n if user is None:\n return\n if \"lesson_id\" in context:\n wrap_to_save_queue(parse_attemptslog, attemptlog)\n if created and \"quiz_id\" in context:\n wrap_to_save_queue(\n quiz_answered_notification, attemptlog, context[\"quiz_id\"]\n )\n\n def _get_session_log(self, session_id, user):\n try:\n if user.is_anonymous():\n return ContentSessionLog.objects.get(id=session_id, user__isnull=True)\n else:\n return ContentSessionLog.objects.get(id=session_id, user=user)\n except ContentSessionLog.DoesNotExist:\n raise Http404(\n \"ContentSessionLog with id {} does not exist\".format(session_id)\n )\n\n def _normalize_progress(self, progress):\n return max(0, min(1.0, progress))\n\n def _update_content_log(self, log, end_timestamp, validated_data):\n update_fields = (\"end_timestamp\", \"_morango_dirty_bit\")\n\n log.end_timestamp = end_timestamp\n if \"progress_delta\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(\n log.progress + validated_data[\"progress_delta\"]\n )\n elif \"progress\" in validated_data:\n update_fields += (\"progress\",)\n log.progress = self._normalize_progress(validated_data[\"progress\"])\n if \"time_spent_delta\" in validated_data:\n update_fields += (\"time_spent\",)\n log.time_spent += validated_data[\"time_spent_delta\"]\n return update_fields\n\n def _update_summary_log(\n self, user, sessionlog, end_timestamp, validated_data, context\n ):\n if user.is_anonymous():\n return\n summarylog = ContentSummaryLog.objects.get(\n content_id=sessionlog.content_id, user=user\n )\n was_complete = summarylog.progress >= 1\n\n update_fields = self._update_content_log(\n summarylog, end_timestamp, validated_data\n )\n\n if summarylog.progress >= 1 and not was_complete:\n summarylog.completion_timestamp = end_timestamp\n update_fields += (\"completion_timestamp\",)\n self._process_completed_notification(summarylog, context)\n if \"extra_fields\" in validated_data:\n update_fields += (\"extra_fields\",)\n summarylog.extra_fields = validated_data[\"extra_fields\"]\n\n summarylog.save(update_fields=update_fields)\n return summarylog\n\n def _update_session(self, session_id, user, end_timestamp, validated_data):\n sessionlog = self._get_session_log(session_id, user)\n\n context = LogContext(**sessionlog.extra_fields.get(\"context\", {}))\n\n if \"quiz_id\" in context:\n self._check_quiz_permissions(user, context[\"quiz_id\"])\n\n update_fields = self._update_content_log(\n sessionlog, end_timestamp, validated_data\n )\n sessionlog.save(update_fields=update_fields)\n\n summarylog = self._update_summary_log(\n user, sessionlog, end_timestamp, validated_data, context\n )\n\n if summarylog is not None:\n complete = summarylog.progress >= 1\n else:\n complete = sessionlog.progress >= 1\n\n return {\"complete\": complete}, summarylog.id if summarylog else None, context\n\n def _process_completed_notification(self, summarylog, context):\n if \"node_id\" in context:\n wrap_to_save_queue(\n parse_summarylog,\n summarylog,\n )\n\n def update(self, request, pk=None):\n \"\"\"\n Make a PUT 
request to update the current session\n\n Requires one of either:\n - progress_delta: increase the progress by this amount\n - progress: set the progress to this amount\n\n Can also update time spent recorded with a delta:\n - time_spent_delta: number of seconds to increase time_spent by\n\n And update the extra_fields value stored:\n - extra_fields: the complete representation to set extra_fields to\n\n If creating or updating attempts for an assessment must include:\n - interactions: an array of objects, if updating an existing attempt, must include attempt_id\n\n Returns an object with the properties:\n - complete: boolean indicating if the resource is completed\n\n If an attempt at an assessment was included, then this parameter will be included:\n - attempts: serialized form of the attempt, equivalent to that returned in pastattempts from\n session initialization\n \"\"\"\n if pk is None:\n raise Http404\n serializer = UpdateSessionSerializer(\n data=request.data, context={\"request\": request}\n )\n serializer.is_valid(raise_exception=True)\n end_timestamp = local_now()\n validated_data = serializer.validated_data\n\n with transaction.atomic(), dataset_cache:\n self._precache_dataset_id(request.user)\n\n output, summarylog_id, context = self._update_session(\n pk, request.user, end_timestamp, validated_data\n )\n masterylog_id = self._update_and_return_mastery_log_id(\n request.user, output[\"complete\"], summarylog_id, end_timestamp, context\n )\n if \"interactions\" in validated_data:\n attempt_output = self._update_or_create_attempts(\n pk,\n masterylog_id,\n request.user,\n validated_data[\"interactions\"],\n end_timestamp,\n context,\n )\n output.update(attempt_output)\n return Response(output)\n\n\nclass TotalContentProgressViewSet(viewsets.GenericViewSet):\n def retrieve(self, request, pk=None):\n if request.user.is_anonymous() or pk != request.user.id:\n raise PermissionDenied(\"Can only access progress data for self\")\n progress = (\n request.user.contentsummarylog_set.filter(progress=1)\n .aggregate(Sum(\"progress\"))\n .get(\"progress__sum\")\n )\n return Response(\n {\n \"id\": pk,\n \"progress\": progress,\n }\n )\n\n\nclass BaseLogFilter(FilterSet):\n facility = UUIDFilter(method=\"filter_facility\")\n classroom = UUIDFilter(method=\"filter_classroom\")\n learner_group = UUIDFilter(method=\"filter_learner_group\")\n\n # Only a superuser can filter by facilities\n def filter_facility(self, queryset, name, value):\n return queryset.filter(user__facility=value)\n\n def filter_classroom(self, queryset, name, value):\n return queryset.filter(\n Q(user__memberships__collection_id=value)\n | Q(user__memberships__collection__parent_id=value)\n )\n\n def filter_learner_group(self, queryset, name, value):\n return queryset.filter(user__memberships__collection_id=value)\n\n\nclass MasteryFilter(BaseLogFilter):\n content = UUIDFilter(name=\"summarylog__content_id\")\n\n class Meta:\n model = MasteryLog\n fields = [\"content\"]\n\n\nclass MasteryLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n values = (\n \"user\",\n \"summarylog\",\n \"mastery_criterion\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"mastery_level\",\n \"complete\",\n )\n\n\nclass AttemptFilter(BaseLogFilter):\n content = 
CharFilter(method=\"filter_content\")\n\n def filter_content(self, queryset, name, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = [\"masterylog\", \"complete\", \"user\", \"content\", \"item\"]\n\n\nclass AttemptLogViewSet(ReadOnlyValuesViewset):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = AttemptLog.objects.all()\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = (\"end_timestamp\",)\n ordering = (\"end_timestamp\",)\n\n values = (\n \"id\",\n \"item\",\n \"start_timestamp\",\n \"end_timestamp\",\n \"completion_timestamp\",\n \"time_spent\",\n \"complete\",\n \"correct\",\n \"hinted\",\n \"answer\",\n \"simple_answer\",\n \"interaction_history\",\n \"user\",\n \"error\",\n \"masterylog\",\n \"sessionlog\",\n )\n",
"path": "kolibri/core/logger/api.py"
}
] | diff --git a/kolibri/core/logger/api.py b/kolibri/core/logger/api.py
index e5b55a5c8fa..a2f040fac14 100644
--- a/kolibri/core/logger/api.py
+++ b/kolibri/core/logger/api.py
@@ -918,6 +918,7 @@ class AttemptLogViewSet(ReadOnlyValuesViewset):
ordering = ("end_timestamp",)
values = (
+ "id",
"item",
"start_timestamp",
"end_timestamp",
diff --git a/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue b/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
index e62a7b30123..8b9a0c33c1f 100644
--- a/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
+++ b/kolibri/plugins/coach/assets/src/views/reports/ReportsLearnerReportLessonPage.vue
@@ -68,10 +68,7 @@
<KRouterLink
v-if="showLink(tableRow)"
:text="tableRow.title"
- :to="classRoute(
- 'ReportsLearnerReportLessonExercisePage',
- { exerciseId: tableRow.content_id }
- )"
+ :to="tableRow.link"
/>
<template v-else>
{{ tableRow.title }}
@@ -99,6 +96,7 @@
<script>
import commonCoreStrings from 'kolibri.coreVue.mixins.commonCoreStrings';
+ import { PageNames } from '../../constants';
import commonCoach from '../common';
import CSVExporter from '../../csv/exporter';
import * as csvFields from '../../csv/fields';
@@ -125,6 +123,10 @@
return contentArray.map(content => {
const tableRow = {
statusObj: this.getContentStatusObjForLearner(content.content_id, this.learner.id),
+ link: this.classRoute(PageNames.REPORTS_LEARNER_REPORT_LESSON_EXERCISE_PAGE_ROOT, {
+ exerciseId: content.content_id,
+ learnerId: this.learner.id,
+ }),
};
Object.assign(tableRow, content);
return tableRow;
|
pypa__pipenv-3424 | NameError: name 'unicode' is not defined
### Issue description
```
[hadoop@ip-192-168-0-84 sales-forecast]$ pipenv install --python 3
Virtualenv already exists!
Removing existing virtualenv...
Creating a virtualenv for this project...
Pipfile: /home/hadoop/sales-forecast/Pipfile
Using /usr/bin/python3.4 (3.4.8) to create virtualenv...
⠼ Creating virtual environment...Using base prefix '/usr'
New python executable in /home/hadoop/.local/share/virtualenvs/sales-forecast-ZYWIRznP/bin/python3.4
Also creating executable in /home/hadoop/.local/share/virtualenvs/sales-forecast-ZYWIRznP/bin/python
Installing setuptools, pip, wheel...done.
Running virtualenv with interpreter /usr/bin/python3.4
✔ Successfully created virtual environment!
Virtualenv location: /home/hadoop/.local/share/virtualenvs/sales-forecast-ZYWIRznP
requirements.txt found, instead of Pipfile! Converting...
✔ Success!
Warning: Your Pipfile now contains pinned versions, if your requirements.txt did.
We recommend updating your Pipfile to specify the "*" version, instead.
Pipfile.lock not found, creating...
Locking [dev-packages] dependencies...
Locking [packages] dependencies...
✘ Locking Failed!
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 126, in <module>
main()
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 119, in main
parsed.requirements_dir, parsed.packages)
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 85, in _main
requirements_dir=requirements_dir,
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 69, in resolve
req_dir=requirements_dir
File "/usr/local/lib/python2.7/site-packages/pipenv/utils.py", line 700, in resolve_deps
from .vendor.requirementslib.models.requirements import Requirement
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/__init__.py", line 14, in <module>
from .models.pipfile import Pipfile
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/models/pipfile.py", line 23, in <module>
from ..environment import MYPY_RUNNING
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/environment.py", line 17, in <module>
MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/environment.py", line 10, in is_type_checking
from typing import TYPE_CHECKING
File "/usr/local/lib/python2.7/site-packages/typing.py", line 624, in <module>
AnyStr = TypeVar('AnyStr', bytes, unicode)
NameError: name 'unicode' is not defined
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 126, in <module>
main()
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 119, in main
parsed.requirements_dir, parsed.packages)
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 85, in _main
requirements_dir=requirements_dir,
File "/usr/local/lib/python2.7/site-packages/pipenv/resolver.py", line 69, in resolve
req_dir=requirements_dir
File "/usr/local/lib/python2.7/site-packages/pipenv/utils.py", line 700, in resolve_deps
from .vendor.requirementslib.models.requirements import Requirement
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/__init__.py", line 14, in <module>
from .models.pipfile import Pipfile
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/models/pipfile.py", line 23, in <module>
from ..environment import MYPY_RUNNING
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/environment.py", line 17, in <module>
MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
File "/usr/local/lib/python2.7/site-packages/pipenv/vendor/requirementslib/environment.py", line 10, in is_type_checking
from typing import TYPE_CHECKING
File "/usr/local/lib/python2.7/site-packages/typing.py", line 624, in <module>
AnyStr = TypeVar('AnyStr', bytes, unicode)
NameError: name 'unicode' is not defined
```
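
For context on the failing frames: the resolver dies inside pipenv's vendored `requirementslib` while it probes for the `typing` module. A simplified sketch of that probe, reconstructed from the `environment.py` frames in the traceback (not the verbatim vendored source), shows why the crash escapes containment: the guard only catches `ImportError`, while the `typing` backport that pipenv itself pulls in (via its `'typing; python_version<"3.5"'` requirement) raises `NameError` when imported by an interpreter whose builtins it was not written for — the traceback's line 624, `AnyStr = TypeVar('AnyStr', bytes, unicode)`, references the `unicode` builtin, which only exists on Python 2.

```python
# Simplified sketch of pipenv/vendor/requirementslib/environment.py,
# reconstructed from the traceback frames above -- not the verbatim source.
import os


def is_type_checking():
    try:
        # Importing typing executes the installed typing.py backport. Its
        # Python-2 branch references the builtin `unicode`, so under an
        # interpreter without that builtin the import raises NameError,
        # which the ImportError guard below cannot catch.
        from typing import TYPE_CHECKING
    except ImportError:
        return False
    return TYPE_CHECKING


MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
```

This is consistent with the fix in the accompanying diff: dropping the `typing` requirement from pipenv's `install_requires` means the backport is never installed alongside pipenv and so no longer shadows whatever `typing` the resolving interpreter provides.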
-------------------------------------------------------------------------------
<details><summary>$ pipenv --support</summary>
Pipenv version: `'2018.11.26'`
Pipenv location: `'/usr/local/lib/python2.7/site-packages/pipenv'`
Python location: `'/usr/bin/python2.7'`
Python installations found:
- `3.4.8`: `/usr/bin/python3`
- `2.7.14`: `/usr/bin/python2.7`
- `3.4.8`: `/usr/bin/python3.4m`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '0',
'os_name': 'posix',
'platform_machine': 'x86_64',
'platform_python_implementation': 'CPython',
'platform_release': '4.14.72-68.55.amzn1.x86_64',
'platform_system': 'Linux',
'platform_version': '#1 SMP Fri Sep 28 21:14:54 UTC 2018',
'python_full_version': '2.7.14',
'python_version': '2.7',
'sys_platform': 'linux2'}
```
System environment variables:
- `AWS_PATH`
- `PYTHONDONTWRITEBYTECODE`
- `LESSOPEN`
- `SSH_CLIENT`
- `LOGNAME`
- `USER`
- `HOME`
- `PATH`
- `PS1`
- `LANG`
- `LESS_TERMCAP_se`
- `TERM`
- `SHELL`
- `EC2_AMITOOL_HOME`
- `LESS_TERMCAP_me`
- `LESS_TERMCAP_md`
- `LESS_TERMCAP_mb`
- `HISTSIZE`
- `PYTHONFINDER_IGNORE_UNSUPPORTED`
- `AWS_ELB_HOME`
- `JAVA_HOME`
- `EC2_HOME`
- `LESS_TERMCAP_ue`
- `AWS_AUTO_SCALING_HOME`
- `PIP_PYTHON_PATH`
- `_`
- `AWS_DEFAULT_REGION`
- `SSH_CONNECTION`
- `AWS_CLOUDWATCH_HOME`
- `SSH_TTY`
- `OLDPWD`
- `HOSTNAME`
- `HISTCONTROL`
- `SHLVL`
- `PWD`
- `LESS_TERMCAP_us`
- `PIP_SHIMS_BASE_MODULE`
- `PIP_DISABLE_PIP_VERSION_CHECK`
- `MAIL`
- `LS_COLORS`
Pipenv–specific environment variables:
Debug–specific environment variables:
- `PATH`: `/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/opt/aws/bin`
- `SHELL`: `/bin/bash`
- `LANG`: `en_US.UTF-8`
- `PWD`: `/home/hadoop/sales-forecast`
---------------------------
Contents of `Pipfile` ('/home/hadoop/sales-forecast/Pipfile'):
```toml
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[dev-packages]
[packages]
numpy = "*"
scipy = "*"
scikit-learn = "==0.19.1"
statsmodels = "*"
xgboost = "*"
pystan = "*"
fbprophet = "*"
matplotlib = "==2.2.3"
seaborn = "*"
redis = "*"
sqlalchemy = "*"
mysqlclient = "*"
pyarrow = "==0.11.1"
hdfs = "*"
hdfs3 = "*"
s3fs = "*"
python-dateutil = "*"
chinesecalendar = "*"
pandas = "*"
h5py = "*"
lxml = "*"
openpyxl = "*"
xlrd = "*"
click = "*"
pathlib2 = "*"
python-dotenv = "*"
environs = "*"
joblib = "*"
tqdm = "*"
dask = {extras = ["complete"],version = "*"}
[requires]
python_version = "3.4"
```
</details>
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport codecs\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\nabout = {}\n\nwith open(os.path.join(here, \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\n\nrequired = [\n \"pip>=9.0.1\",\n \"certifi\",\n \"setuptools>=36.2.1\",\n \"virtualenv-clone>=0.2.5\",\n \"virtualenv\",\n 'enum34; python_version<\"3\"',\n 'typing; python_version<\"3.5\"'\n]\n\n\n# https://pypi.python.org/pypi/stdeb/0.8.5#quickstart-2-just-tell-me-the-fastest-way-to-make-a-deb\nclass DebCommand(Command):\n \"\"\"Support for setup.py deb\"\"\"\n\n description = \"Build and publish the .deb package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds…\")\n rmtree(os.path.join(here, \"deb_dist\"))\n except FileNotFoundError:\n pass\n self.status(u\"Creating debian mainfest…\")\n os.system(\n \"python setup.py --command-packages=stdeb.command sdist_dsc -z artful --package3=pipenv --depends3=python3-virtualenv-clone\"\n )\n self.status(u\"Building .deb…\")\n os.chdir(\"deb_dist/pipenv-{0}\".format(about[\"__version__\"]))\n os.system(\"dpkg-buildpackage -rfakeroot -uc -us\")\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds…\")\n rmtree(os.path.join(here, \"dist\"))\n except FileNotFoundError:\n pass\n self.status(\"Building Source distribution…\")\n os.system(\"{0} setup.py sdist bdist_wheel\".format(sys.executable))\n self.status(\"Uploading the package to PyPI via Twine…\")\n os.system(\"twine upload dist/*\")\n self.status(\"Pushing git tags…\")\n os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n os.system(\"git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"pipenv\",\n version=about[\"__version__\"],\n description=\"Python Development Workflow for Humans.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"Kenneth Reitz\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pypa/pipenv\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tasks\", \"tasks.*\"]),\n entry_points={\n \"console_scripts\": [\n \"pipenv=pipenv:cli\",\n \"pipenv-resolver=pipenv.resolver:main\",\n ]\n },\n package_data={\n \"\": [\"LICENSE\", \"NOTICES\"],\n \"pipenv.vendor.requests\": [\"*.pem\"],\n \"pipenv.vendor.certifi\": [\"*.pem\"],\n \"pipenv.vendor.click_completion\": [\"*.j2\"],\n \"pipenv.patched.notpip._vendor.certifi\": [\"*.pem\"],\n \"pipenv.patched.notpip._vendor.requests\": [\"*.pem\"],\n \"pipenv.patched.notpip._vendor.distlib._backport\": [\"sysconfig.cfg\"],\n \"pipenv.patched.notpip._vendor.distlib\": [\n \"t32.exe\",\n \"t64.exe\",\n \"w32.exe\",\n 
\"w64.exe\",\n ],\n },\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n setup_requires=[\"invoke\", \"parver\"],\n install_requires=required,\n extras_require={},\n include_package_data=True,\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n cmdclass={\"upload\": UploadCommand, \"deb\": DebCommand},\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport codecs\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\n\nabout = {}\n\nwith open(os.path.join(here, \"pipenv\", \"__version__.py\")) as f:\n exec(f.read(), about)\n\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\n\nrequired = [\n \"pip>=9.0.1\",\n \"certifi\",\n \"setuptools>=36.2.1\",\n \"virtualenv-clone>=0.2.5\",\n \"virtualenv\",\n 'enum34; python_version<\"3\"'\n]\n\n\n# https://pypi.python.org/pypi/stdeb/0.8.5#quickstart-2-just-tell-me-the-fastest-way-to-make-a-deb\nclass DebCommand(Command):\n \"\"\"Support for setup.py deb\"\"\"\n\n description = \"Build and publish the .deb package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds…\")\n rmtree(os.path.join(here, \"deb_dist\"))\n except FileNotFoundError:\n pass\n self.status(u\"Creating debian mainfest…\")\n os.system(\n \"python setup.py --command-packages=stdeb.command sdist_dsc -z artful --package3=pipenv --depends3=python3-virtualenv-clone\"\n )\n self.status(u\"Building .deb…\")\n os.chdir(\"deb_dist/pipenv-{0}\".format(about[\"__version__\"]))\n os.system(\"dpkg-buildpackage -rfakeroot -uc -us\")\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds…\")\n rmtree(os.path.join(here, \"dist\"))\n except FileNotFoundError:\n pass\n self.status(\"Building Source distribution…\")\n os.system(\"{0} setup.py sdist bdist_wheel\".format(sys.executable))\n self.status(\"Uploading the package to PyPI via Twine…\")\n os.system(\"twine upload dist/*\")\n self.status(\"Pushing git tags…\")\n os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n os.system(\"git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"pipenv\",\n version=about[\"__version__\"],\n description=\"Python Development Workflow for Humans.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"Kenneth Reitz\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pypa/pipenv\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tasks\", \"tasks.*\"]),\n entry_points={\n \"console_scripts\": [\n \"pipenv=pipenv:cli\",\n \"pipenv-resolver=pipenv.resolver:main\",\n ]\n },\n package_data={\n \"\": [\"LICENSE\", \"NOTICES\"],\n \"pipenv.vendor.requests\": [\"*.pem\"],\n \"pipenv.vendor.certifi\": [\"*.pem\"],\n \"pipenv.vendor.click_completion\": [\"*.j2\"],\n \"pipenv.patched.notpip._vendor.certifi\": [\"*.pem\"],\n \"pipenv.patched.notpip._vendor.requests\": [\"*.pem\"],\n \"pipenv.patched.notpip._vendor.distlib._backport\": [\"sysconfig.cfg\"],\n \"pipenv.patched.notpip._vendor.distlib\": [\n \"t32.exe\",\n \"t64.exe\",\n \"w32.exe\",\n \"w64.exe\",\n ],\n },\n 
python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n setup_requires=[\"invoke\", \"parver\"],\n install_requires=required,\n extras_require={},\n include_package_data=True,\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n cmdclass={\"upload\": UploadCommand, \"deb\": DebCommand},\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 069a5252ff..4c848c4794 100644
--- a/setup.py
+++ b/setup.py
@@ -27,8 +27,7 @@
"setuptools>=36.2.1",
"virtualenv-clone>=0.2.5",
"virtualenv",
- 'enum34; python_version<"3"',
- 'typing; python_version<"3.5"'
+ 'enum34; python_version<"3"'
]
|
bokeh__bokeh-8067 | Add an option to disable column titles in DataTable
This issue is being reported for bokeh version 0.12.6.
DataTable widgets do not include an option to hide column titles. It would be nice to have that option.
Incidentally, the `row_headers` property is poorly named: it toggles the index column on and off, but its name suggests it would toggle column titles. Perhaps that option should be redefined.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
from datetime import date
from random import randint

from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn

data = dict(
    dates=[date(2014, 3, i + 1) for i in range(10)],
    downloads=[randint(0, 100) for i in range(10)],
)
source = ColumnDataSource(data)
columns = [
    TableColumn(field="dates", title="Date", formatter=DateFormatter()),
    TableColumn(field="downloads", title="Downloads"),
]
data_table = DataTable(source=source, columns=columns, row_headers=False, editable=True)
```
#### Screenshots or screencasts of the bug in action
<img width="463" alt="screenshot" src="https://user-images.githubusercontent.com/1383911/27066925-73e0f548-4fbc-11e7-90c7-3761de80b990.png">
| [
{
"content": "''' Various kinds of data table (data grid) widgets.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ...core.enums import DateFormat, FontStyle, NumeralLanguage, TextAlign, RoundingFunction\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Color, Either, Enum, Float, Instance, Int, List, Override, String\nfrom ...model import Model\n\nfrom ..sources import DataSource, CDSView\n\nfrom .widget import Widget\n\n@abstract\nclass CellFormatter(Model):\n ''' Abstract base class for data table's cell formatters.\n\n '''\n\n@abstract\nclass CellEditor(Model):\n ''' Abstract base class for data table's cell editors.\n\n '''\n\nclass StringFormatter(CellFormatter):\n ''' Basic string cell formatter.\n\n '''\n\n font_style = Enum(FontStyle, default=\"normal\", help=\"\"\"\n An optional text font style, e.g. bold, italic.\n \"\"\")\n\n text_align = Enum(TextAlign, default=\"left\", help=\"\"\"\n An optional text align, i.e. left, center or right.\n \"\"\")\n\n text_color = Color(help=\"\"\"\n An optional text color. See :class:`bokeh.core.properties.Color` for\n details.\n \"\"\")\n\nclass NumberFormatter(StringFormatter):\n ''' Number cell formatter.\n\n '''\n\n format = String(\"0,0\", help=\"\"\"\n The number format, as defined in the following tables:\n\n **NUMBERS**:\n\n ============ ============== ===============\n Number Format String\n ============ ============== ===============\n 10000 '0,0.0000' 10,000.0000\n 10000.23 '0,0' 10,000\n 10000.23 '+0,0' +10,000\n -10000 '0,0.0' -10,000.0\n 10000.1234 '0.000' 10000.123\n 10000.1234 '0[.]00000' 10000.12340\n -10000 '(0,0.0000)' (10,000.0000)\n -0.23 '.00' -.23\n -0.23 '(.00)' (.23)\n 0.23 '0.00000' 0.23000\n 0.23 '0.0[0000]' 0.23\n 1230974 '0.0a' 1.2m\n 1460 '0 a' 1 k\n -104000 '0a' -104k\n 1 '0o' 1st\n 52 '0o' 52nd\n 23 '0o' 23rd\n 100 '0o' 100th\n ============ ============== ===============\n\n **CURRENCY**:\n\n =========== =============== =============\n Number Format String\n =========== =============== =============\n 1000.234 '$0,0.00' $1,000.23\n 1000.2 '0,0[.]00 $' 1,000.20 $\n 1001 '$ 0,0[.]00' $ 1,001\n -1000.234 '($0,0)' ($1,000)\n -1000.234 '$0.00' -$1000.23\n 1230974 '($ 0.00 a)' $ 1.23 m\n =========== =============== =============\n\n **BYTES**:\n\n =============== =========== ============\n Number Format String\n =============== =========== ============\n 100 '0b' 100B\n 2048 '0 b' 2 KB\n 7884486213 '0.0b' 7.3GB\n 3467479682787 '0.000 b' 3.154 TB\n =============== =========== ============\n\n **PERCENTAGES**:\n\n ============= ============= ===========\n Number Format String\n ============= ============= ===========\n 1 '0%' 100%\n 0.974878234 '0.000%' 97.488%\n -0.43 '0 %' -43 %\n 0.43 '(0.000 %)' 43.000 %\n ============= ============= ===========\n\n **TIME**:\n\n ============ ============== ============\n Number Format String\n ============ ============== ============\n 25 '00:00:00' 0:00:25\n 238 '00:00:00' 0:03:58\n 63846 '00:00:00' 17:44:06\n ============ ============== ============\n\n For the complete specification, see http://numbrojs.com/format.html\n \"\"\")\n\n language = Enum(NumeralLanguage, default=\"en\", help=\"\"\"\n The language to use for formatting language-specific features (e.g. 
thousands separator).\n \"\"\")\n\n rounding = Enum(RoundingFunction, help=\"\"\"\n Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).\n \"\"\")\n\nclass BooleanFormatter(CellFormatter):\n ''' Boolean (check mark) cell formatter.\n\n '''\n\n icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help=\"\"\"\n The icon visualizing the check mark.\n \"\"\")\n\nclass DateFormatter(CellFormatter):\n ''' Date cell formatter.\n\n '''\n\n format = Either(Enum(DateFormat), String, default='ISO-8601', help=\"\"\"\n The date format can be any standard `strftime`_ format string, as well\n as any of the following predefined format names:\n\n ================================================ ================== ===================\n Format name(s) Format string Example Output\n ================================================ ================== ===================\n ``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``\"%Y-%m-%d\"`` 2014-03-01\n ``COOKIE`` ``\"%a, %d %b %Y\"`` Sat, 01 Mar 2014\n ``RFC-850`` ``\"%A, %d-%b-%y\"`` Saturday, 01-Mar-14\n ``RFC-1123`` / ``RFC-2822`` ``\"%a, %e %b %Y\"`` Sat, 1 Mar 2014\n ``RSS`` / ``RFC-822`` / ``RFC-1036`` ``\"%a, %e %b %y\"`` Sat, 1 Mar 14\n ``TIMESTAMP`` (ms since epoch) 1393632000000\n ================================================ ================== ===================\n\n Note that in the table some of the format names are synonymous, with\n identical format names separated by slashes.\n\n This list of supported `strftime`_ format codes is reproduced below.\n\n %a\n The abbreviated name of the day of the week according to the\n current locale.\n\n %A\n The full name of the day of the week according to the current\n locale.\n\n %b\n The abbreviated month name according to the current locale.\n\n %B\n The full month name according to the current locale.\n\n %c\n The preferred date and time representation for the current\n locale.\n\n %C\n The century number (year/100) as a 2-digit integer.\n\n %d\n The day of the month as a decimal number (range 01 to 31).\n\n %D\n Equivalent to %m/%d/%y. (Americans should note that in many\n other countries %d/%m/%y is rather common. This means that in\n international context this format is ambiguous and should not\n be used.)\n\n %e\n Like %d, the day of the month as a decimal number, but a\n leading zero is replaced by a space.\n\n %f\n Microsecond as a decimal number, zero-padded on the left (range\n 000000-999999). This is an extension to the set of directives\n available to `timezone`_.\n\n %F\n Equivalent to %Y-%m-%d (the ISO 8601 date format).\n\n %G\n The ISO 8601 week-based year with century as a decimal number.\n The 4-digit year corresponding to the ISO week number (see %V).\n This has the same format and value as %Y, except that if the\n ISO week number belongs to the previous or next year, that year\n is used instead.\n\n %g\n Like %G, but without century, that is, with a 2-digit year (00-99).\n\n %h\n Equivalent to %b.\n\n %H\n The hour as a decimal number using a 24-hour clock (range 00\n to 23).\n\n %I\n The hour as a decimal number using a 12-hour clock (range 01\n to 12).\n\n %j\n The day of the year as a decimal number (range 001 to 366).\n\n %k\n The hour (24-hour clock) as a decimal number (range 0 to 23).\n Single digits are preceded by a blank. (See also %H.)\n\n %l\n The hour (12-hour clock) as a decimal number (range 1 to 12).\n Single digits are preceded by a blank. (See also %I.) 
(TZ)\n\n %m\n The month as a decimal number (range 01 to 12).\n\n %M\n The minute as a decimal number (range 00 to 59).\n\n %n\n A newline character. Bokeh text does not currently support\n newline characters.\n\n %N\n Nanosecond as a decimal number, zero-padded on the left (range\n 000000000-999999999). Supports a padding width specifier, i.e.\n %3N displays 3 leftmost digits. However, this is only accurate\n to the millisecond level of precision due to limitations of\n `timezone`_.\n\n %p\n Either \"AM\" or \"PM\" according to the given time value, or the\n corresponding strings for the current locale. Noon is treated\n as \"PM\" and midnight as \"AM\".\n\n %P\n Like %p but in lowercase: \"am\" or \"pm\" or a corresponding\n string for the current locale.\n\n %r\n The time in a.m. or p.m. notation. In the POSIX locale this\n is equivalent to %I:%M:%S %p.\n\n %R\n The time in 24-hour notation (%H:%M). For a version including\n the seconds, see %T below.\n\n %s\n The number of seconds since the Epoch, 1970-01-01 00:00:00\n +0000 (UTC).\n\n %S\n The second as a decimal number (range 00 to 60). (The range\n is up to 60 to allow for occasional leap seconds.)\n\n %t\n A tab character. Bokeh text does not currently support tab\n characters.\n\n %T\n The time in 24-hour notation (%H:%M:%S).\n\n %u\n The day of the week as a decimal, range 1 to 7, Monday being 1.\n See also %w.\n\n %U\n The week number of the current year as a decimal number, range\n 00 to 53, starting with the first Sunday as the first day of\n week 01. See also %V and %W.\n\n %V\n The ISO 8601 week number (see NOTES) of the current year as a\n decimal number, range 01 to 53, where week 1 is the first week\n that has at least 4 days in the new year. See also %U and %W.\n\n %w\n The day of the week as a decimal, range 0 to 6, Sunday being 0.\n See also %u.\n\n %W\n The week number of the current year as a decimal number, range\n 00 to 53, starting with the first Monday as the first day of\n week 01.\n\n %x\n The preferred date representation for the current locale\n without the time.\n\n %X\n The preferred time representation for the current locale\n without the date.\n\n %y\n The year as a decimal number without a century (range 00 to 99).\n\n %Y\n The year as a decimal number including the century.\n\n %z\n The +hhmm or -hhmm numeric timezone (that is, the hour and\n minute offset from UTC).\n\n %Z\n The timezone name or abbreviation.\n\n %%\n A literal '%' character.\n\n .. warning::\n The client library BokehJS uses the `timezone`_ library to\n format datetimes. The inclusion of the list below is based on the\n claim that `timezone`_ makes to support \"the full compliment\n of GNU date format specifiers.\" However, this claim has not\n been tested exhaustively against this list. If you find formats\n that do not function as expected, please submit a `github issue`_,\n so that the documentation can be updated appropriately.\n\n .. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html\n .. _timezone: http://bigeasy.github.io/timezone/\n .. _github issue: https://github.com/bokeh/bokeh/issues\n\n \"\"\")\n\nclass HTMLTemplateFormatter(CellFormatter):\n ''' HTML formatter using a template.\n This uses Underscore's `template` method and syntax. http://underscorejs.org/#template\n The formatter has access other items in the row via the `dataContext` object passed to the formatter.\n So, for example, if another column in the datasource was named `url`, the template could access it as:\n\n .. 
code-block:: jinja\n\n <a href=\"<%= url %>\"><%= value %></a>\n\n To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate',\n or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template\n\n Example: Simple HTML template to format the column value as code.\n\n .. code-block:: python\n\n HTMLTemplateFormatter(template='<code><%= value %></code>')\n\n Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.\n\n .. code-block:: python\n\n HTMLTemplateFormatter(template=\n '<a href=\"https:/www.google.com/search?q=<%= manufacturer %>+<%= model %>\" target=\"_blank\"><%= value %></a>'\n )\n\n '''\n template = String('<%= value %>', help=\"\"\"\n Template string to be used by Underscore's template method.\n \"\"\")\n\nclass StringEditor(CellEditor):\n ''' Basic string cell editor with auto-completion.\n\n '''\n\n completions = List(String, help=\"\"\"\n An optional list of completion strings.\n \"\"\")\n\nclass TextEditor(CellEditor):\n ''' Multi-line string cell editor.\n\n '''\n\nclass SelectEditor(CellEditor):\n ''' Select cell editor.\n\n '''\n\n options = List(String, help=\"\"\"\n The list of options to select from.\n \"\"\")\n\nclass PercentEditor(CellEditor):\n ''' ``IntEditor`` optimized for editing percentages.\n\n '''\n\nclass CheckboxEditor(CellEditor):\n ''' Boolean value cell editor.\n\n '''\n\nclass IntEditor(CellEditor):\n ''' Spinner-based integer cell editor.\n\n '''\n\n step = Int(1, help=\"\"\"\n The major step value.\n \"\"\")\n\nclass NumberEditor(CellEditor):\n ''' Spinner-based number cell editor.\n\n '''\n\n step = Float(0.01, help=\"\"\"\n The major step value.\n \"\"\")\n\nclass TimeEditor(CellEditor):\n ''' Spinner-based time cell editor.\n\n '''\n\nclass DateEditor(CellEditor):\n ''' Calendar-based date cell editor.\n\n '''\n\nclass TableColumn(Model):\n ''' Table column widget.\n\n '''\n\n field = String(help=\"\"\"\n The name of the field mapping to a column in the data source.\n \"\"\")\n\n title = String(help=\"\"\"\n The title of this column. If not set, column's data field is\n used instead.\n \"\"\")\n\n width = Int(300, help=\"\"\"\n The width or maximum width (depending on data table's configuration)\n in pixels of this column.\n \"\"\")\n\n formatter = Instance(CellFormatter, lambda: StringFormatter(), help=\"\"\"\n The cell formatter for this column. By default, a simple string\n formatter is used.\n \"\"\")\n\n editor = Instance(CellEditor, lambda: StringEditor(), help=\"\"\"\n The cell editor for this column. By default, a simple string editor\n is used.\n \"\"\")\n\n sortable = Bool(True, help=\"\"\"\n Whether this column is sortable or not. Note that data table has\n to have sorting enabled to allow sorting in general.\n \"\"\")\n\n default_sort = Enum(\"ascending\", \"descending\", help=\"\"\"\n The default sorting order. By default ``ascending`` order is used.\n \"\"\")\n\n@abstract\nclass TableWidget(Widget):\n ''' Abstract base class for data table (data grid) widgets.\n\n '''\n\n source = Instance(DataSource, help=\"\"\"\n The source of data for the widget.\n \"\"\")\n\n view = Instance(CDSView, help=\"\"\"\n A view into the data source to use when rendering table rows. 
A default view\n of the entire data source is created if a view is not passed in during\n initialization.\n \"\"\")\n\n def __init__(self, **kw):\n super(TableWidget, self).__init__(**kw)\n if \"view\" not in kw:\n self.view = CDSView(source=self.source)\n\nclass DataTable(TableWidget):\n ''' Two dimensional grid for visualisation and editing large amounts\n of data.\n\n '''\n\n columns = List(Instance(TableColumn), help=\"\"\"\n The list of child column widgets.\n \"\"\")\n\n fit_columns = Bool(True, help=\"\"\"\n Whether columns should be fit to the available width. This results in no\n horizontal scrollbar showing up, but data can get unreadable if there is\n no enough space available. If set to ``True``, columns' width is\n understood as maximum width.\n \"\"\")\n\n sortable = Bool(True, help=\"\"\"\n Allows to sort table's contents. By default natural order is preserved.\n To sort a column, click on it's header. Clicking one more time changes\n sort direction. Use Ctrl + click to return to natural order. Use\n Shift + click to sort multiple columns simultaneously.\n \"\"\")\n\n reorderable = Bool(True, help=\"\"\"\n Allows the reordering of a tables's columns. To reorder a column,\n click and drag a table's header to the desired location in the table.\n The columns on either side will remain in their previous order.\n \"\"\")\n\n editable = Bool(False, help=\"\"\"\n Allows to edit table's contents. Needs cell editors to be configured on\n columns that are required to be editable.\n \"\"\")\n\n selectable = Either(Bool(True), Enum(\"checkbox\"), help=\"\"\"\n Whether a table's rows can be selected or not. Using ``checkbox`` is\n equivalent to ``True``, but makes selection visible through a checkbox\n for each row, instead of highlighting rows. Multiple selection is\n allowed and can be achieved by either clicking multiple checkboxes (if\n enabled) or using Shift + click on rows.\n \"\"\")\n\n index_position = Int(0, help=\"\"\"\n Where among the list of columns to insert a column displaying the row\n index. Negative indices are supported, and specify an index position\n from the end of the list of columns (i.e. standard Python behaviour).\n\n To prevent the index column from being added, set to None.\n\n If the absolute value of index_position is larger than the length of\n the columns, then the index will appear at the beginning or end, depending\n on the sign.\n \"\"\")\n\n index_header = String(\"#\", help=\"\"\"\n The column header to display for the index column, if it is present.\n \"\"\")\n\n index_width = Int(40, help=\"\"\"\n The width of the index column, if present.\n \"\"\")\n\n scroll_to_selection = Bool(True, help=\"\"\"\n Whenever a selection is made on the data source, scroll the selected\n rows into the table's viewport if none of the selected rows are already\n in the viewport.\n \"\"\")\n\n height = Override(default=400)\n",
"path": "bokeh/models/widgets/tables.py"
}
] | [
{
"content": "''' Various kinds of data table (data grid) widgets.\n\n'''\nfrom __future__ import absolute_import\n\nfrom ...core.enums import DateFormat, FontStyle, NumeralLanguage, TextAlign, RoundingFunction\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Color, Either, Enum, Float, Instance, Int, List, Override, String\nfrom ...model import Model\n\nfrom ..sources import DataSource, CDSView\n\nfrom .widget import Widget\n\n@abstract\nclass CellFormatter(Model):\n ''' Abstract base class for data table's cell formatters.\n\n '''\n\n@abstract\nclass CellEditor(Model):\n ''' Abstract base class for data table's cell editors.\n\n '''\n\nclass StringFormatter(CellFormatter):\n ''' Basic string cell formatter.\n\n '''\n\n font_style = Enum(FontStyle, default=\"normal\", help=\"\"\"\n An optional text font style, e.g. bold, italic.\n \"\"\")\n\n text_align = Enum(TextAlign, default=\"left\", help=\"\"\"\n An optional text align, i.e. left, center or right.\n \"\"\")\n\n text_color = Color(help=\"\"\"\n An optional text color. See :class:`bokeh.core.properties.Color` for\n details.\n \"\"\")\n\nclass NumberFormatter(StringFormatter):\n ''' Number cell formatter.\n\n '''\n\n format = String(\"0,0\", help=\"\"\"\n The number format, as defined in the following tables:\n\n **NUMBERS**:\n\n ============ ============== ===============\n Number Format String\n ============ ============== ===============\n 10000 '0,0.0000' 10,000.0000\n 10000.23 '0,0' 10,000\n 10000.23 '+0,0' +10,000\n -10000 '0,0.0' -10,000.0\n 10000.1234 '0.000' 10000.123\n 10000.1234 '0[.]00000' 10000.12340\n -10000 '(0,0.0000)' (10,000.0000)\n -0.23 '.00' -.23\n -0.23 '(.00)' (.23)\n 0.23 '0.00000' 0.23000\n 0.23 '0.0[0000]' 0.23\n 1230974 '0.0a' 1.2m\n 1460 '0 a' 1 k\n -104000 '0a' -104k\n 1 '0o' 1st\n 52 '0o' 52nd\n 23 '0o' 23rd\n 100 '0o' 100th\n ============ ============== ===============\n\n **CURRENCY**:\n\n =========== =============== =============\n Number Format String\n =========== =============== =============\n 1000.234 '$0,0.00' $1,000.23\n 1000.2 '0,0[.]00 $' 1,000.20 $\n 1001 '$ 0,0[.]00' $ 1,001\n -1000.234 '($0,0)' ($1,000)\n -1000.234 '$0.00' -$1000.23\n 1230974 '($ 0.00 a)' $ 1.23 m\n =========== =============== =============\n\n **BYTES**:\n\n =============== =========== ============\n Number Format String\n =============== =========== ============\n 100 '0b' 100B\n 2048 '0 b' 2 KB\n 7884486213 '0.0b' 7.3GB\n 3467479682787 '0.000 b' 3.154 TB\n =============== =========== ============\n\n **PERCENTAGES**:\n\n ============= ============= ===========\n Number Format String\n ============= ============= ===========\n 1 '0%' 100%\n 0.974878234 '0.000%' 97.488%\n -0.43 '0 %' -43 %\n 0.43 '(0.000 %)' 43.000 %\n ============= ============= ===========\n\n **TIME**:\n\n ============ ============== ============\n Number Format String\n ============ ============== ============\n 25 '00:00:00' 0:00:25\n 238 '00:00:00' 0:03:58\n 63846 '00:00:00' 17:44:06\n ============ ============== ============\n\n For the complete specification, see http://numbrojs.com/format.html\n \"\"\")\n\n language = Enum(NumeralLanguage, default=\"en\", help=\"\"\"\n The language to use for formatting language-specific features (e.g. 
thousands separator).\n \"\"\")\n\n rounding = Enum(RoundingFunction, help=\"\"\"\n Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).\n \"\"\")\n\nclass BooleanFormatter(CellFormatter):\n ''' Boolean (check mark) cell formatter.\n\n '''\n\n icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help=\"\"\"\n The icon visualizing the check mark.\n \"\"\")\n\nclass DateFormatter(CellFormatter):\n ''' Date cell formatter.\n\n '''\n\n format = Either(Enum(DateFormat), String, default='ISO-8601', help=\"\"\"\n The date format can be any standard `strftime`_ format string, as well\n as any of the following predefined format names:\n\n ================================================ ================== ===================\n Format name(s) Format string Example Output\n ================================================ ================== ===================\n ``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``\"%Y-%m-%d\"`` 2014-03-01\n ``COOKIE`` ``\"%a, %d %b %Y\"`` Sat, 01 Mar 2014\n ``RFC-850`` ``\"%A, %d-%b-%y\"`` Saturday, 01-Mar-14\n ``RFC-1123`` / ``RFC-2822`` ``\"%a, %e %b %Y\"`` Sat, 1 Mar 2014\n ``RSS`` / ``RFC-822`` / ``RFC-1036`` ``\"%a, %e %b %y\"`` Sat, 1 Mar 14\n ``TIMESTAMP`` (ms since epoch) 1393632000000\n ================================================ ================== ===================\n\n Note that in the table some of the format names are synonymous, with\n identical format names separated by slashes.\n\n This list of supported `strftime`_ format codes is reproduced below.\n\n %a\n The abbreviated name of the day of the week according to the\n current locale.\n\n %A\n The full name of the day of the week according to the current\n locale.\n\n %b\n The abbreviated month name according to the current locale.\n\n %B\n The full month name according to the current locale.\n\n %c\n The preferred date and time representation for the current\n locale.\n\n %C\n The century number (year/100) as a 2-digit integer.\n\n %d\n The day of the month as a decimal number (range 01 to 31).\n\n %D\n Equivalent to %m/%d/%y. (Americans should note that in many\n other countries %d/%m/%y is rather common. This means that in\n international context this format is ambiguous and should not\n be used.)\n\n %e\n Like %d, the day of the month as a decimal number, but a\n leading zero is replaced by a space.\n\n %f\n Microsecond as a decimal number, zero-padded on the left (range\n 000000-999999). This is an extension to the set of directives\n available to `timezone`_.\n\n %F\n Equivalent to %Y-%m-%d (the ISO 8601 date format).\n\n %G\n The ISO 8601 week-based year with century as a decimal number.\n The 4-digit year corresponding to the ISO week number (see %V).\n This has the same format and value as %Y, except that if the\n ISO week number belongs to the previous or next year, that year\n is used instead.\n\n %g\n Like %G, but without century, that is, with a 2-digit year (00-99).\n\n %h\n Equivalent to %b.\n\n %H\n The hour as a decimal number using a 24-hour clock (range 00\n to 23).\n\n %I\n The hour as a decimal number using a 12-hour clock (range 01\n to 12).\n\n %j\n The day of the year as a decimal number (range 001 to 366).\n\n %k\n The hour (24-hour clock) as a decimal number (range 0 to 23).\n Single digits are preceded by a blank. (See also %H.)\n\n %l\n The hour (12-hour clock) as a decimal number (range 1 to 12).\n Single digits are preceded by a blank. (See also %I.) 
(TZ)\n\n %m\n The month as a decimal number (range 01 to 12).\n\n %M\n The minute as a decimal number (range 00 to 59).\n\n %n\n A newline character. Bokeh text does not currently support\n newline characters.\n\n %N\n Nanosecond as a decimal number, zero-padded on the left (range\n 000000000-999999999). Supports a padding width specifier, i.e.\n %3N displays 3 leftmost digits. However, this is only accurate\n to the millisecond level of precision due to limitations of\n `timezone`_.\n\n %p\n Either \"AM\" or \"PM\" according to the given time value, or the\n corresponding strings for the current locale. Noon is treated\n as \"PM\" and midnight as \"AM\".\n\n %P\n Like %p but in lowercase: \"am\" or \"pm\" or a corresponding\n string for the current locale.\n\n %r\n The time in a.m. or p.m. notation. In the POSIX locale this\n is equivalent to %I:%M:%S %p.\n\n %R\n The time in 24-hour notation (%H:%M). For a version including\n the seconds, see %T below.\n\n %s\n The number of seconds since the Epoch, 1970-01-01 00:00:00\n +0000 (UTC).\n\n %S\n The second as a decimal number (range 00 to 60). (The range\n is up to 60 to allow for occasional leap seconds.)\n\n %t\n A tab character. Bokeh text does not currently support tab\n characters.\n\n %T\n The time in 24-hour notation (%H:%M:%S).\n\n %u\n The day of the week as a decimal, range 1 to 7, Monday being 1.\n See also %w.\n\n %U\n The week number of the current year as a decimal number, range\n 00 to 53, starting with the first Sunday as the first day of\n week 01. See also %V and %W.\n\n %V\n The ISO 8601 week number (see NOTES) of the current year as a\n decimal number, range 01 to 53, where week 1 is the first week\n that has at least 4 days in the new year. See also %U and %W.\n\n %w\n The day of the week as a decimal, range 0 to 6, Sunday being 0.\n See also %u.\n\n %W\n The week number of the current year as a decimal number, range\n 00 to 53, starting with the first Monday as the first day of\n week 01.\n\n %x\n The preferred date representation for the current locale\n without the time.\n\n %X\n The preferred time representation for the current locale\n without the date.\n\n %y\n The year as a decimal number without a century (range 00 to 99).\n\n %Y\n The year as a decimal number including the century.\n\n %z\n The +hhmm or -hhmm numeric timezone (that is, the hour and\n minute offset from UTC).\n\n %Z\n The timezone name or abbreviation.\n\n %%\n A literal '%' character.\n\n .. warning::\n The client library BokehJS uses the `timezone`_ library to\n format datetimes. The inclusion of the list below is based on the\n claim that `timezone`_ makes to support \"the full compliment\n of GNU date format specifiers.\" However, this claim has not\n been tested exhaustively against this list. If you find formats\n that do not function as expected, please submit a `github issue`_,\n so that the documentation can be updated appropriately.\n\n .. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html\n .. _timezone: http://bigeasy.github.io/timezone/\n .. _github issue: https://github.com/bokeh/bokeh/issues\n\n \"\"\")\n\nclass HTMLTemplateFormatter(CellFormatter):\n ''' HTML formatter using a template.\n This uses Underscore's `template` method and syntax. http://underscorejs.org/#template\n The formatter has access other items in the row via the `dataContext` object passed to the formatter.\n So, for example, if another column in the datasource was named `url`, the template could access it as:\n\n .. 
code-block:: jinja\n\n <a href=\"<%= url %>\"><%= value %></a>\n\n To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate',\n or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template\n\n Example: Simple HTML template to format the column value as code.\n\n .. code-block:: python\n\n HTMLTemplateFormatter(template='<code><%= value %></code>')\n\n Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.\n\n .. code-block:: python\n\n HTMLTemplateFormatter(template=\n '<a href=\"https:/www.google.com/search?q=<%= manufacturer %>+<%= model %>\" target=\"_blank\"><%= value %></a>'\n )\n\n '''\n template = String('<%= value %>', help=\"\"\"\n Template string to be used by Underscore's template method.\n \"\"\")\n\nclass StringEditor(CellEditor):\n ''' Basic string cell editor with auto-completion.\n\n '''\n\n completions = List(String, help=\"\"\"\n An optional list of completion strings.\n \"\"\")\n\nclass TextEditor(CellEditor):\n ''' Multi-line string cell editor.\n\n '''\n\nclass SelectEditor(CellEditor):\n ''' Select cell editor.\n\n '''\n\n options = List(String, help=\"\"\"\n The list of options to select from.\n \"\"\")\n\nclass PercentEditor(CellEditor):\n ''' ``IntEditor`` optimized for editing percentages.\n\n '''\n\nclass CheckboxEditor(CellEditor):\n ''' Boolean value cell editor.\n\n '''\n\nclass IntEditor(CellEditor):\n ''' Spinner-based integer cell editor.\n\n '''\n\n step = Int(1, help=\"\"\"\n The major step value.\n \"\"\")\n\nclass NumberEditor(CellEditor):\n ''' Spinner-based number cell editor.\n\n '''\n\n step = Float(0.01, help=\"\"\"\n The major step value.\n \"\"\")\n\nclass TimeEditor(CellEditor):\n ''' Spinner-based time cell editor.\n\n '''\n\nclass DateEditor(CellEditor):\n ''' Calendar-based date cell editor.\n\n '''\n\nclass TableColumn(Model):\n ''' Table column widget.\n\n '''\n\n field = String(help=\"\"\"\n The name of the field mapping to a column in the data source.\n \"\"\")\n\n title = String(help=\"\"\"\n The title of this column. If not set, column's data field is\n used instead.\n \"\"\")\n\n width = Int(300, help=\"\"\"\n The width or maximum width (depending on data table's configuration)\n in pixels of this column.\n \"\"\")\n\n formatter = Instance(CellFormatter, lambda: StringFormatter(), help=\"\"\"\n The cell formatter for this column. By default, a simple string\n formatter is used.\n \"\"\")\n\n editor = Instance(CellEditor, lambda: StringEditor(), help=\"\"\"\n The cell editor for this column. By default, a simple string editor\n is used.\n \"\"\")\n\n sortable = Bool(True, help=\"\"\"\n Whether this column is sortable or not. Note that data table has\n to have sorting enabled to allow sorting in general.\n \"\"\")\n\n default_sort = Enum(\"ascending\", \"descending\", help=\"\"\"\n The default sorting order. By default ``ascending`` order is used.\n \"\"\")\n\n@abstract\nclass TableWidget(Widget):\n ''' Abstract base class for data table (data grid) widgets.\n\n '''\n\n source = Instance(DataSource, help=\"\"\"\n The source of data for the widget.\n \"\"\")\n\n view = Instance(CDSView, help=\"\"\"\n A view into the data source to use when rendering table rows. 
A default view\n of the entire data source is created if a view is not passed in during\n initialization.\n \"\"\")\n\n def __init__(self, **kw):\n super(TableWidget, self).__init__(**kw)\n if \"view\" not in kw:\n self.view = CDSView(source=self.source)\n\nclass DataTable(TableWidget):\n ''' Two dimensional grid for visualisation and editing large amounts\n of data.\n\n '''\n\n columns = List(Instance(TableColumn), help=\"\"\"\n The list of child column widgets.\n \"\"\")\n\n fit_columns = Bool(True, help=\"\"\"\n Whether columns should be fit to the available width. This results in no\n horizontal scrollbar showing up, but data can get unreadable if there is\n no enough space available. If set to ``True``, columns' width is\n understood as maximum width.\n \"\"\")\n\n sortable = Bool(True, help=\"\"\"\n Allows to sort table's contents. By default natural order is preserved.\n To sort a column, click on it's header. Clicking one more time changes\n sort direction. Use Ctrl + click to return to natural order. Use\n Shift + click to sort multiple columns simultaneously.\n \"\"\")\n\n reorderable = Bool(True, help=\"\"\"\n Allows the reordering of a tables's columns. To reorder a column,\n click and drag a table's header to the desired location in the table.\n The columns on either side will remain in their previous order.\n \"\"\")\n\n editable = Bool(False, help=\"\"\"\n Allows to edit table's contents. Needs cell editors to be configured on\n columns that are required to be editable.\n \"\"\")\n\n selectable = Either(Bool(True), Enum(\"checkbox\"), help=\"\"\"\n Whether a table's rows can be selected or not. Using ``checkbox`` is\n equivalent to ``True``, but makes selection visible through a checkbox\n for each row, instead of highlighting rows. Multiple selection is\n allowed and can be achieved by either clicking multiple checkboxes (if\n enabled) or using Shift + click on rows.\n \"\"\")\n\n index_position = Int(0, help=\"\"\"\n Where among the list of columns to insert a column displaying the row\n index. Negative indices are supported, and specify an index position\n from the end of the list of columns (i.e. standard Python behaviour).\n\n To prevent the index column from being added, set to None.\n\n If the absolute value of index_position is larger than the length of\n the columns, then the index will appear at the beginning or end, depending\n on the sign.\n \"\"\")\n\n index_header = String(\"#\", help=\"\"\"\n The column header to display for the index column, if it is present.\n \"\"\")\n\n index_width = Int(40, help=\"\"\"\n The width of the index column, if present.\n \"\"\")\n\n scroll_to_selection = Bool(True, help=\"\"\"\n Whenever a selection is made on the data source, scroll the selected\n rows into the table's viewport if none of the selected rows are already\n in the viewport.\n \"\"\")\n\n header_row = Bool(True, help=\"\"\"\n Whether to show a header row with column names at the top of the table.\n \"\"\")\n\n height = Override(default=400)\n",
"path": "bokeh/models/widgets/tables.py"
}
] | diff --git a/bokeh/models/widgets/tables.py b/bokeh/models/widgets/tables.py
index c13e80236d2..dffeb9baac2 100644
--- a/bokeh/models/widgets/tables.py
+++ b/bokeh/models/widgets/tables.py
@@ -571,4 +571,8 @@ class DataTable(TableWidget):
in the viewport.
""")
+ header_row = Bool(True, help="""
+ Whether to show a header row with column names at the top of the table.
+ """)
+
height = Override(default=400)
diff --git a/bokehjs/src/lib/models/widgets/tables/data_table.ts b/bokehjs/src/lib/models/widgets/tables/data_table.ts
index ab6c8abfb9e..49da6ed501a 100644
--- a/bokehjs/src/lib/models/widgets/tables/data_table.ts
+++ b/bokehjs/src/lib/models/widgets/tables/data_table.ts
@@ -247,6 +247,9 @@ export class DataTableView extends WidgetView {
this.grid.invalidate()
this.updateSelection()
this.grid.render()
+ if (!this.model.header_row) {
+ this._hide_header()
+ }
})
if (this.model.selectable !== false) {
@@ -263,7 +266,19 @@ export class DataTableView extends WidgetView {
})
this.updateSelection()
+
+ if (!this.model.header_row) {
+ this._hide_header()
+ }
+
+ }
+ }
+
+ _hide_header(): void {
+ for (const el of Array.from(this.el.querySelectorAll('.slick-header-columns'))) {
+ (el as HTMLElement).style.height = "0px"
}
+ this.grid.resizeCanvas()
}
}
@@ -279,6 +294,7 @@ export namespace DataTable {
index_header: string
index_width: number
scroll_to_selection: boolean
+ header_row: boolean
}
export interface Props extends TableWidget.Props {}
@@ -309,6 +325,7 @@ export class DataTable extends TableWidget {
index_header: [ p.String, "#" ],
index_width: [ p.Int, 40 ],
scroll_to_selection: [ p.Bool, true ],
+ header_row: [ p.Bool, true ],
})
this.override({
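For context on the diff above, here is a minimal usage sketch of the new `header_row` option, assuming a Bokeh build that includes this change; the data and column names are made up for illustration:

```python
from bokeh.io import show
from bokeh.models import ColumnDataSource, DataTable, TableColumn

# Made-up data purely for illustration.
source = ColumnDataSource(data=dict(downloads=[100, 27, 54]))
columns = [TableColumn(field="downloads", title="Downloads")]

# header_row=False hides the column-name header row that this PR
# makes optional; the default (True) keeps the previous behaviour.
table = DataTable(source=source, columns=columns, header_row=False, width=400)
show(table)
```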
|
ocf__ocfweb-48 | Have next 7 days in hours sidebar
This means all of our normal hours are visible in a normal week.
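A minimal sketch of the pattern the home view uses, assuming `get_hours(day)` (as in `ocflib.lab.hours`) returns that day's hours; the `upcoming_hours` helper name is made up for illustration:

```python
from datetime import date, timedelta

def upcoming_hours(get_hours, days=7):
    # Collect hours for today plus the following (days - 1) days,
    # so a full week of normal hours can show up in the sidebar.
    today = date.today()
    return [get_hours(today + timedelta(days=i)) for i in range(days)]
```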
| [
{
"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(7)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n",
"path": "ocfweb/main/home.py"
}
] | [
{
"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(5)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n",
"path": "ocfweb/main/home.py"
}
] | diff --git a/Makefile b/Makefile
index 5098a575a..e1b0d8d9d 100644
--- a/Makefile
+++ b/Makefile
@@ -32,11 +32,11 @@ gunicorn: venv
scss: venv
$(PYTHON) setup.py build_sass
-watch-scss: scss
+watch-scss: scss venv
while :; do \
find ocfweb/static -type f -name '*.scss' | \
inotifywait --fromfile - -e modify; \
- make scss; \
+ $(PYTHON) setup.py build_sass; \
done
update-requirements:
diff --git a/ocfweb/main/home.py b/ocfweb/main/home.py
index 9d51ce57d..6c5d63494 100644
--- a/ocfweb/main/home.py
+++ b/ocfweb/main/home.py
@@ -13,7 +13,7 @@
def home(request):
hours = [
- get_hours(date.today() + timedelta(days=i)) for i in range(7)
+ get_hours(date.today() + timedelta(days=i)) for i in range(5)
]
blog_posts = [
diff --git a/ocfweb/main/templates/home.html b/ocfweb/main/templates/home.html
index 392ceacfc..1c4e80c37 100644
--- a/ocfweb/main/templates/home.html
+++ b/ocfweb/main/templates/home.html
@@ -67,7 +67,7 @@ <h4>News from the staff team</h4>
<h3>Upcoming Lab Hours</h3>
<div class="ocf-hours">
{% for hour in hours %}
- <div class="ocf-hour row">
+ <div class="ocf-hour row {% cycle 'odd' 'even' %}">
<div class="col-sm-3 ocf-hour-title">
{{hour.weekday}}<br />
</div>
@@ -81,7 +81,7 @@ <h3>Upcoming Lab Hours</h3>
</div>
{% endfor %}
</div>
- <p><a href="{% url 'doc' 'services/lab' %}">See more lab hours »</a></p>
+ <p><a href="{% url 'doc' 'services/lab' %}" class="more-hours">more lab hours »</a></p>
<p>{% google_map '100%' '250px' %}</p>
diff --git a/ocfweb/static/scss/pages/home.scss b/ocfweb/static/scss/pages/home.scss
index 513d9643a..846ccd535 100644
--- a/ocfweb/static/scss/pages/home.scss
+++ b/ocfweb/static/scss/pages/home.scss
@@ -13,13 +13,31 @@
.ocf-hours {
font-size: 14px;
+ margin: 5px 15px;
.ocf-hour {
- padding: 10px 0;
- border-bottom: solid 1px #222;
+ border-bottom: solid 1px #ccc;
+ padding: 10px;
+
+ &.odd {
+ background-color: #f5f5f5;
+ }
+
+ .ocf-hour-title {
+ padding: 0 5px;
+ font-weight: bold;
+ }
+
+ .ocf-hour-hours {
+ padding: 0 5px;
+ }
}
}
+ .more-hours {
+ font-size: 14px;
+ }
+
.ocf-staffhours {
p {
margin: 0;
|
Lightning-Universe__lightning-bolts-319 | docs: Add Bolts logo
## 🚀 Feature
Not a strong opinion, but I would like to suggest a change to the logo image located at the top left of https://pytorch-lightning-bolts.readthedocs.io/en/. Currently, both the `pytorch-lightning` and `pytorch-lightning-bolts` docs use the same logo image, and I personally think that makes them somewhat confusing to tell apart. It would be much easier to distinguish them if the `pytorch-lightning-bolts` logo were different from the `pytorch-lightning` logo.
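One low-effort way to do this is to point Sphinx at a Bolts-specific image in `docs/source/conf.py`; a minimal sketch, assuming a `bolts_logo.png` asset is added under the docs images folder:

```python
# docs/source/conf.py (sketch)
# Use a project-specific logo so the Bolts docs are visually
# distinct from the core pytorch-lightning docs.
html_logo = '_images/logos/bolts_logo.png'
html_favicon = '_images/logos/lightning_icon.svg'
```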
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# import m2r\nimport builtins\nimport glob\nimport inspect\nimport os\nimport re\nimport shutil\nimport sys\n\nimport pt_lightning_sphinx_theme\nfrom sphinx.ext import apidoc\n\nPATH_HERE = os.path.abspath(os.path.dirname(__file__))\nPATH_ROOT = os.path.join(PATH_HERE, '..', '..')\nsys.path.insert(0, os.path.abspath(PATH_ROOT))\n\nbuiltins.__LIGHTNING_BOLT_SETUP__: bool = True\n\nSPHINX_MOCK_REQUIREMENTS = int(os.environ.get('SPHINX_MOCK_REQUIREMENTS', True))\n\nimport pl_bolts # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\n# this name shall match the project name in Github as it is used for linking to code\nproject = 'PyTorch-Lightning-Bolts'\ncopyright = pl_bolts.__copyright__\nauthor = pl_bolts.__author__\n\n# The short X.Y version\nversion = pl_bolts.__version__\n# The full version, including alpha/beta/rc tags\nrelease = pl_bolts.__version__\n\n# Options for the linkcode extension\n# ----------------------------------\ngithub_user = 'PyTorchLightning'\ngithub_repo = project\n\n\n# -- Project documents -------------------------------------------------------\n# export the READme\nwith open(os.path.join(PATH_ROOT, 'README.md'), 'r') as fp:\n readme = fp.read()\n# TODO: temp fix removing SVG badges and GIF, because PDF cannot show them\nreadme = re.sub(r'(\\[!\\[.*\\))', '', readme)\nreadme = re.sub(r'(!\\[.*.gif\\))', '', readme)\nfor dir_name in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*')) if os.path.isdir(p)):\n readme = readme.replace('](%s/' % dir_name, '](%s/%s/' % (PATH_ROOT, dir_name))\nwith open('readme.md', 'w') as fp:\n fp.write(readme)\n\n# copy all documents from GH templates like contribution guide\nfor md in glob.glob(os.path.join(PATH_ROOT, '.github', '*.md')):\n shutil.copy(md, os.path.join(PATH_HERE, os.path.basename(md)))\n\n# export the changelog\nwith open(os.path.join(PATH_ROOT, 'CHANGELOG.md'), 'r') as fp:\n chlog_lines = fp.readlines()\n# enrich short subsub-titles to be unique\nchlog_ver = ''\nfor i, ln in enumerate(chlog_lines):\n if ln.startswith('## '):\n chlog_ver = ln[2:].split('-')[0].strip()\n elif ln.startswith('### '):\n ln = ln.replace('###', f'### {chlog_ver} -')\n chlog_lines[i] = ln\nwith open(os.path.join(PATH_HERE, 'CHANGELOG.md'), 'w') as fp:\n fp.writelines(chlog_lines)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n\nneeds_sphinx = '2.4'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n # 'sphinxcontrib.mockautodoc', # raises error: directive 'automodule' is already registered ...\n # 'sphinxcontrib.fulltoc', # breaks pytorch-theme with unexpected kw argument 'titles_only'\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.linkcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.imgmath',\n 'recommonmark',\n 'sphinx.ext.autosectionlabel',\n # 'm2r',\n # 'nbsphinx', # it seems some sphinx issue\n 'sphinx_autodoc_typehints',\n 'sphinx_copybutton',\n 'sphinx_paramlinks',\n 'sphinx_togglebutton',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)\n# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document\n# I execute the notebooks manually in advance. If notebooks test the code,\n# they should be run at build time.\nnbsphinx_execute = 'never'\nnbsphinx_allow_errors = True\nnbsphinx_requirejs_path = ''\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n '.ipynb': 'nbsphinx',\n}\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n 'api/pl_bolts.rst',\n 'api/modules.rst',\n 'api/pl_bolts.submit.rst',\n 'api/pl_bolts.utils.*',\n 'PULL_REQUEST_TEMPLATE.md',\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pt_lightning_sphinx_theme'\nhtml_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n 'pytorch_project': pl_bolts.__homepage__,\n 'canonical_url': pl_bolts.__homepage__,\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': False,\n}\n\nhtml_logo = '_images/logos/lightning_logo-name.svg'\n\nhtml_favicon = '_images/logos/lightning_icon.svg'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_images', '_templates', '_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = project + '-doc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, project + '.tex', project + ' Documentation', author, 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, project, project + ' Documentation', [author], 1)\n]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, project, project + ' Documentation', author, project,\n 'The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.',\n 'Miscellaneous'),\n]\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'pytorch_lightning': ('https://pytorch-lightning.readthedocs.io/en/stable/', None),\n 'python': ('https://docs.python.org/3', None),\n 'torch': ('https://pytorch.org/docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'PIL': ('https://pillow.readthedocs.io/en/stable/', None),\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# https://github.com/rtfd/readthedocs.org/issues/1139\n# I use sphinx-apidoc to auto-generate API documentation for my project.\n# Right now I have to commit these auto-generated files to my repository\n# so that RTD can build them into HTML docs. 
It'd be cool if RTD could run\n# sphinx-apidoc for me, since it's easy to forget to regen API docs\n# and commit them to my repo after making changes to my code.\n\n# packages for which sphinx-apidoc should generate the docs (.rst files)\nPACKAGES = [\n pl_bolts.__name__,\n]\n\napidoc_output_folder = os.path.join(PATH_HERE, 'api')\n\n\ndef run_apidoc(_):\n sys.path.insert(0, apidoc_output_folder)\n\n # delete api-doc files before generating them\n if os.path.exists(apidoc_output_folder):\n shutil.rmtree(apidoc_output_folder)\n\n for pkg in PACKAGES:\n argv = ['-e',\n '-o', apidoc_output_folder,\n os.path.join(PATH_ROOT, pkg),\n '**/test_*',\n '--force',\n '--private',\n '--module-first']\n\n apidoc.main(argv)\n\n\ndef setup(app):\n # this is for hiding doctest decoration,\n # see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/\n app.add_javascript('copybutton.js')\n app.connect('builder-inited', run_apidoc)\n\n\n# copy all notebooks to local folder\npath_nbs = os.path.join(PATH_HERE, 'notebooks')\nif not os.path.isdir(path_nbs):\n os.mkdir(path_nbs)\nfor path_ipynb in glob.glob(os.path.join(PATH_ROOT, 'notebooks', '*.ipynb')):\n path_ipynb2 = os.path.join(path_nbs, os.path.basename(path_ipynb))\n shutil.copy(path_ipynb, path_ipynb2)\n\n\n# Ignoring Third-party packages\n# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule\ndef package_list_from_file(file):\n mocked_packages = []\n with open(file, 'r') as fp:\n for ln in fp.readlines():\n found = [ln.index(ch) for ch in list(',=<>#') if ch in ln]\n pkg = ln[:min(found)] if found else ln\n if pkg.strip():\n mocked_packages.append(pkg.strip())\n return mocked_packages\n\n\n# define mapping from PyPI names to python imports\nPACKAGE_MAPPING = {\n 'pytorch-lightning': 'pytorch_lightning',\n 'scikit-learn': 'sklearn',\n 'Pillow': 'PIL',\n 'opencv-python': 'cv2',\n}\nMOCK_PACKAGES = []\nif SPHINX_MOCK_REQUIREMENTS:\n # mock also base packages when we are on RTD since we don't install them there\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements.txt'))\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'models.txt'))\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'loggers.txt'))\n# replace PyPI packages by importing ones\nMOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]\n\nautodoc_mock_imports = MOCK_PACKAGES\n# for mod_name in MOCK_REQUIRE_PACKAGES:\n# sys.modules[mod_name] = mock.Mock()\n\n\n# Resolve function\n# This function is used to populate the (source) links in the API\ndef linkcode_resolve(domain, info):\n def find_source():\n # try to find the file and line number, based on code from numpy:\n # https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286\n obj = sys.modules[info['module']]\n for part in info['fullname'].split('.'):\n obj = getattr(obj, part)\n fname = inspect.getsourcefile(obj)\n # https://github.com/rtfd/readthedocs.org/issues/5735\n if any([s in fname for s in ('readthedocs', 'rtfd', 'checkouts')]):\n # /home/docs/checkouts/readthedocs.org/user_builds/pytorch_lightning/checkouts/\n # devel/pytorch_lightning/utilities/cls_experiment.py#L26-L176\n path_top = os.path.abspath(os.path.join('..', '..', '..'))\n fname = os.path.relpath(fname, start=path_top)\n else:\n # Local build, imitate master\n fname = 'master/' + os.path.relpath(fname, start=os.path.abspath('..'))\n source, lineno = inspect.getsourcelines(obj)\n return fname, lineno, 
lineno + len(source) - 1\n\n if domain != 'py' or not info['module']:\n return None\n try:\n filename = '%s#L%d-L%d' % find_source()\n except Exception:\n filename = info['module'].replace('.', '/') + '.py'\n # import subprocess\n # tag = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,\n # universal_newlines=True).communicate()[0][:-1]\n branch = filename.split('/')[0]\n # do mapping from latest tags to master\n branch = {'latest': 'master', 'stable': 'master'}.get(branch, branch)\n filename = '/'.join([branch] + filename.split('/')[1:])\n return \"https://github.com/%s/%s/blob/%s\" \\\n % (github_user, github_repo, filename)\n\n\nautodoc_member_order = 'groupwise'\nautoclass_content = 'both'\n# the options are fixed and will be soon in release,\n# see https://github.com/sphinx-doc/sphinx/issues/5459\nautodoc_default_options = {\n 'members': True,\n 'methods': True,\n 'special-members': '__call__',\n 'exclude-members': '_abc_impl',\n 'show-inheritance': True,\n 'noindex': True,\n}\n\n# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that\n# become visible when the mouse hovers over them.\n# This value determines the text for the permalink; it defaults to \"¶\". Set it to None or the empty\n# string to disable permalinks.\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_add_permalinks\nhtml_add_permalinks = \"¶\"\n\n# True to prefix each section label with the name of the document it is in, followed by a colon.\n# For example, index:Introduction for a section called Introduction that appears in document index.rst.\n# Useful for avoiding ambiguity when the same section heading appears in different documents.\n# http://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html\nautosectionlabel_prefix_document = True\n\n# only run doctests marked with a \".. doctest::\" directive\ndoctest_test_doctest_blocks = ''\ndoctest_global_setup = \"\"\"\n\nimport importlib\nimport os\nimport torch\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer, LightningModule\n\n\"\"\"\ncoverage_skip_undoc_in_source = True\n",
"path": "docs/source/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# import m2r\nimport builtins\nimport glob\nimport inspect\nimport os\nimport re\nimport shutil\nimport sys\n\nimport pt_lightning_sphinx_theme\nfrom sphinx.ext import apidoc\n\nPATH_HERE = os.path.abspath(os.path.dirname(__file__))\nPATH_ROOT = os.path.join(PATH_HERE, '..', '..')\nsys.path.insert(0, os.path.abspath(PATH_ROOT))\n\nbuiltins.__LIGHTNING_BOLT_SETUP__: bool = True\n\nSPHINX_MOCK_REQUIREMENTS = int(os.environ.get('SPHINX_MOCK_REQUIREMENTS', True))\n\nimport pl_bolts # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\n# this name shall match the project name in Github as it is used for linking to code\nproject = 'PyTorch-Lightning-Bolts'\ncopyright = pl_bolts.__copyright__\nauthor = pl_bolts.__author__\n\n# The short X.Y version\nversion = pl_bolts.__version__\n# The full version, including alpha/beta/rc tags\nrelease = pl_bolts.__version__\n\n# Options for the linkcode extension\n# ----------------------------------\ngithub_user = 'PyTorchLightning'\ngithub_repo = project\n\n\n# -- Project documents -------------------------------------------------------\n# export the READme\nwith open(os.path.join(PATH_ROOT, 'README.md'), 'r') as fp:\n readme = fp.read()\n# TODO: temp fix removing SVG badges and GIF, because PDF cannot show them\nreadme = re.sub(r'(\\[!\\[.*\\))', '', readme)\nreadme = re.sub(r'(!\\[.*.gif\\))', '', readme)\nfor dir_name in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*')) if os.path.isdir(p)):\n readme = readme.replace('](%s/' % dir_name, '](%s/%s/' % (PATH_ROOT, dir_name))\nwith open('readme.md', 'w') as fp:\n fp.write(readme)\n\n# copy all documents from GH templates like contribution guide\nfor md in glob.glob(os.path.join(PATH_ROOT, '.github', '*.md')):\n shutil.copy(md, os.path.join(PATH_HERE, os.path.basename(md)))\n\n# export the changelog\nwith open(os.path.join(PATH_ROOT, 'CHANGELOG.md'), 'r') as fp:\n chlog_lines = fp.readlines()\n# enrich short subsub-titles to be unique\nchlog_ver = ''\nfor i, ln in enumerate(chlog_lines):\n if ln.startswith('## '):\n chlog_ver = ln[2:].split('-')[0].strip()\n elif ln.startswith('### '):\n ln = ln.replace('###', f'### {chlog_ver} -')\n chlog_lines[i] = ln\nwith open(os.path.join(PATH_HERE, 'CHANGELOG.md'), 'w') as fp:\n fp.writelines(chlog_lines)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n\nneeds_sphinx = '2.4'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n # 'sphinxcontrib.mockautodoc', # raises error: directive 'automodule' is already registered ...\n # 'sphinxcontrib.fulltoc', # breaks pytorch-theme with unexpected kw argument 'titles_only'\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.linkcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.imgmath',\n 'recommonmark',\n 'sphinx.ext.autosectionlabel',\n # 'm2r',\n # 'nbsphinx', # it seems some sphinx issue\n 'sphinx_autodoc_typehints',\n 'sphinx_copybutton',\n 'sphinx_paramlinks',\n 'sphinx_togglebutton',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)\n# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document\n# I execute the notebooks manually in advance. If notebooks test the code,\n# they should be run at build time.\nnbsphinx_execute = 'never'\nnbsphinx_allow_errors = True\nnbsphinx_requirejs_path = ''\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n '.ipynb': 'nbsphinx',\n}\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\n 'api/pl_bolts.rst',\n 'api/modules.rst',\n 'api/pl_bolts.submit.rst',\n 'api/pl_bolts.utils.*',\n 'PULL_REQUEST_TEMPLATE.md',\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pt_lightning_sphinx_theme'\nhtml_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n 'pytorch_project': pl_bolts.__homepage__,\n 'canonical_url': pl_bolts.__homepage__,\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': False,\n}\n\nhtml_logo = '_images/logos/bolts_logo.png'\n\nhtml_favicon = '_images/logos/lightning_icon.svg'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_images', '_templates', '_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = project + '-doc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, project + '.tex', project + ' Documentation', author, 'manual'),\n]\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, project, project + ' Documentation', [author], 1)\n]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, project, project + ' Documentation', author, project,\n 'The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.',\n 'Miscellaneous'),\n]\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'pytorch_lightning': ('https://pytorch-lightning.readthedocs.io/en/stable/', None),\n 'python': ('https://docs.python.org/3', None),\n 'torch': ('https://pytorch.org/docs/stable/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'PIL': ('https://pillow.readthedocs.io/en/stable/', None),\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# https://github.com/rtfd/readthedocs.org/issues/1139\n# I use sphinx-apidoc to auto-generate API documentation for my project.\n# Right now I have to commit these auto-generated files to my repository\n# so that RTD can build them into HTML docs. 
It'd be cool if RTD could run\n# sphinx-apidoc for me, since it's easy to forget to regen API docs\n# and commit them to my repo after making changes to my code.\n\n# packages for which sphinx-apidoc should generate the docs (.rst files)\nPACKAGES = [\n pl_bolts.__name__,\n]\n\napidoc_output_folder = os.path.join(PATH_HERE, 'api')\n\n\ndef run_apidoc(_):\n sys.path.insert(0, apidoc_output_folder)\n\n # delete api-doc files before generating them\n if os.path.exists(apidoc_output_folder):\n shutil.rmtree(apidoc_output_folder)\n\n for pkg in PACKAGES:\n argv = ['-e',\n '-o', apidoc_output_folder,\n os.path.join(PATH_ROOT, pkg),\n '**/test_*',\n '--force',\n '--private',\n '--module-first']\n\n apidoc.main(argv)\n\n\ndef setup(app):\n # this is for hiding doctest decoration,\n # see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/\n app.add_javascript('copybutton.js')\n app.connect('builder-inited', run_apidoc)\n\n\n# copy all notebooks to local folder\npath_nbs = os.path.join(PATH_HERE, 'notebooks')\nif not os.path.isdir(path_nbs):\n os.mkdir(path_nbs)\nfor path_ipynb in glob.glob(os.path.join(PATH_ROOT, 'notebooks', '*.ipynb')):\n path_ipynb2 = os.path.join(path_nbs, os.path.basename(path_ipynb))\n shutil.copy(path_ipynb, path_ipynb2)\n\n\n# Ignoring Third-party packages\n# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule\ndef package_list_from_file(file):\n mocked_packages = []\n with open(file, 'r') as fp:\n for ln in fp.readlines():\n found = [ln.index(ch) for ch in list(',=<>#') if ch in ln]\n pkg = ln[:min(found)] if found else ln\n if pkg.strip():\n mocked_packages.append(pkg.strip())\n return mocked_packages\n\n\n# define mapping from PyPI names to python imports\nPACKAGE_MAPPING = {\n 'pytorch-lightning': 'pytorch_lightning',\n 'scikit-learn': 'sklearn',\n 'Pillow': 'PIL',\n 'opencv-python': 'cv2',\n}\nMOCK_PACKAGES = []\nif SPHINX_MOCK_REQUIREMENTS:\n # mock also base packages when we are on RTD since we don't install them there\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements.txt'))\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'models.txt'))\n MOCK_PACKAGES += package_list_from_file(os.path.join(PATH_ROOT, 'requirements', 'loggers.txt'))\n# replace PyPI packages by importing ones\nMOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]\n\nautodoc_mock_imports = MOCK_PACKAGES\n# for mod_name in MOCK_REQUIRE_PACKAGES:\n# sys.modules[mod_name] = mock.Mock()\n\n\n# Resolve function\n# This function is used to populate the (source) links in the API\ndef linkcode_resolve(domain, info):\n def find_source():\n # try to find the file and line number, based on code from numpy:\n # https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286\n obj = sys.modules[info['module']]\n for part in info['fullname'].split('.'):\n obj = getattr(obj, part)\n fname = inspect.getsourcefile(obj)\n # https://github.com/rtfd/readthedocs.org/issues/5735\n if any([s in fname for s in ('readthedocs', 'rtfd', 'checkouts')]):\n # /home/docs/checkouts/readthedocs.org/user_builds/pytorch_lightning/checkouts/\n # devel/pytorch_lightning/utilities/cls_experiment.py#L26-L176\n path_top = os.path.abspath(os.path.join('..', '..', '..'))\n fname = os.path.relpath(fname, start=path_top)\n else:\n # Local build, imitate master\n fname = 'master/' + os.path.relpath(fname, start=os.path.abspath('..'))\n source, lineno = inspect.getsourcelines(obj)\n return fname, lineno, 
lineno + len(source) - 1\n\n if domain != 'py' or not info['module']:\n return None\n try:\n filename = '%s#L%d-L%d' % find_source()\n except Exception:\n filename = info['module'].replace('.', '/') + '.py'\n # import subprocess\n # tag = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,\n # universal_newlines=True).communicate()[0][:-1]\n branch = filename.split('/')[0]\n # do mapping from latest tags to master\n branch = {'latest': 'master', 'stable': 'master'}.get(branch, branch)\n filename = '/'.join([branch] + filename.split('/')[1:])\n return \"https://github.com/%s/%s/blob/%s\" \\\n % (github_user, github_repo, filename)\n\n\nautodoc_member_order = 'groupwise'\nautoclass_content = 'both'\n# the options are fixed and will be soon in release,\n# see https://github.com/sphinx-doc/sphinx/issues/5459\nautodoc_default_options = {\n 'members': True,\n 'methods': True,\n 'special-members': '__call__',\n 'exclude-members': '_abc_impl',\n 'show-inheritance': True,\n 'noindex': True,\n}\n\n# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that\n# become visible when the mouse hovers over them.\n# This value determines the text for the permalink; it defaults to \"¶\". Set it to None or the empty\n# string to disable permalinks.\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_add_permalinks\nhtml_add_permalinks = \"¶\"\n\n# True to prefix each section label with the name of the document it is in, followed by a colon.\n# For example, index:Introduction for a section called Introduction that appears in document index.rst.\n# Useful for avoiding ambiguity when the same section heading appears in different documents.\n# http://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html\nautosectionlabel_prefix_document = True\n\n# only run doctests marked with a \".. doctest::\" directive\ndoctest_test_doctest_blocks = ''\ndoctest_global_setup = \"\"\"\n\nimport importlib\nimport os\nimport torch\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer, LightningModule\n\n\"\"\"\ncoverage_skip_undoc_in_source = True\n",
"path": "docs/source/conf.py"
}
] | diff --git a/README.md b/README.md
index 33e2c43a5e..fd7a57d447 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
<div align="center">
-
+
# PyTorch Lightning Bolts
diff --git a/docs/source/_images/logos/bolts_logo.png b/docs/source/_images/logos/bolts_logo.png
new file mode 100644
index 0000000000..841505feb3
Binary files /dev/null and b/docs/source/_images/logos/bolts_logo.png differ
diff --git a/docs/source/_images/logos/lightning_icon.svg b/docs/source/_images/logos/lightning_icon.svg
index 5ab3512c04..c2213e4f9e 100644
--- a/docs/source/_images/logos/lightning_icon.svg
+++ b/docs/source/_images/logos/lightning_icon.svg
@@ -1,62 +1,8 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- id="svg"
- version="1.1"
- width="16.000004"
- height="15.999986"
- viewBox="0 0 16.000004 15.999986"
- sodipodi:docname="lightning_icon.svg"
- inkscape:version="0.92.3 (2405546, 2018-03-11)">
- <metadata
- id="metadata13">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs11" />
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="1028"
- id="namedview9"
- showgrid="false"
- inkscape:zoom="0.59"
- inkscape:cx="-669.05062"
- inkscape:cy="373.84245"
- inkscape:window-x="0"
- inkscape:window-y="0"
- inkscape:window-maximized="1"
- inkscape:current-layer="svg" />
- <path
- style="fill:#fbfbfb;fill-rule:evenodd;stroke:none;stroke-width:0.04002798"
- inkscape:connector-curvature="0"
- d="m 8.987101,1.723485 c -0.05588,0.03422 -4.121881,4.096544 -4.184645,4.180924 -0.02317,0.0311 -0.04587,0.06016 -0.05044,0.06456 -0.0087,0.0084 -0.07477,0.145063 -0.09679,0.20014 -0.05848,0.146583 -0.05804,0.44387 0.001,0.592413 0.08426,0.21243 0.08826,0.216754 1.576864,1.706274 0.779463,0.779947 1.41719,1.426877 1.41719,1.437604 0,0.0232 -0.253177,0.79848 -0.273873,0.838707 -0.0079,0.0153 -0.01433,0.04087 -0.01433,0.05684 0,0.01597 -0.0059,0.03587 -0.01313,0.04423 -0.0072,0.0084 -0.03678,0.09086 -0.06568,0.18333 -0.02893,0.09246 -0.05904,0.180647 -0.06693,0.195937 -0.0079,0.0153 -0.01437,0.04087 -0.01437,0.05684 0,0.01597 -0.0059,0.03586 -0.01313,0.04423 -0.0072,0.0084 -0.03679,0.09086 -0.06569,0.18333 -0.02893,0.09246 -0.05904,0.180643 -0.06693,0.195937 -0.0079,0.0153 -0.01437,0.04187 -0.01437,0.05908 0,0.0172 -0.0072,0.03574 -0.016,0.04119 -0.0088,0.0054 -0.016,0.02607 -0.016,0.04579 0,0.01973 -0.006,0.04271 -0.0134,0.05108 -0.0074,0.0084 -0.04439,0.112477 -0.08222,0.23136 -0.03787,0.118884 -0.151103,0.461124 -0.251693,0.760534 -0.489984,1.45874 -0.462444,1.36155 -0.413611,1.45938 0.06917,0.138657 0.23128,0.199741 0.358251,0.134974 0.07057,-0.03602 4.143298,-4.099985 4.245368,-4.236242 0.03382,-0.04515 0.09094,-0.165796 0.109916,-0.232123 0.0088,-0.03083 0.0243,-0.08498 0.03442,-0.120363 0.03346,-0.11668 0.0068,-0.361134 -0.0566,-0.520084 C 10.880518,9.229614 10.738898,9.079187 9.372744,7.714673 8.601524,6.944416 7.970523,6.302806 7.970523,6.288916 c 0,-0.01393 0.02817,-0.107833 0.0626,-0.208663 0.03442,-0.100834 0.07881,-0.237367 0.09859,-0.303414 0.0198,-0.06605 0.04207,-0.12693 0.04947,-0.135293 0.0074,-0.0084 0.0135,-0.03133 0.0135,-0.05108 0,-0.01973 0.0072,-0.04035 0.016,-0.04579 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04804 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04804 0.0088,-0.0054 0.016,-0.02707 0.016,-0.04803 0,-0.02097 0.0072,-0.04259 0.016,-0.04803 0.0088,-0.0054 0.016,-0.02397 0.016,-0.04119 0,-0.0172 0.0065,-0.04379 0.0144,-0.05908 0.0079,-0.0153 0.119204,-0.34484 0.247334,-0.73231 C 9.064507,2.979766 9.220177,2.513319 9.28226,2.330632 9.408267,1.960092 9.41367,1.921146 9.35255,1.826839 9.27225,1.703032 9.099973,1.654399 8.986893,1.723566"
- id="path0" />
- <path
- style="fill:#540c8c;fill-rule:evenodd;stroke:none;stroke-width:0.04002798"
- inkscape:connector-curvature="0"
- d="m 0.07719102,0.01733399 c -0.02187,0.0111 -0.04875,0.03799 -0.05984,0.05984 -0.0161,0.03173 -0.01937,1.62421701 -0.01633,7.94479601 l 0.0038,7.905086 0.03647,0.03646 0.03646,0.03647 H 8.00241 15.927073 l 0.03646,-0.03647 0.03647,-0.03646 V 8.002393 0.07773399 l -0.03647,-0.03646 -0.03646,-0.03647 -7.905086,-0.0038 c -6.320579,-0.003 -7.91305298,2.4e-4 -7.94479598,0.01633 M 9.193764,1.668208 c 0.259903,0.09046 0.275193,0.212427 0.09363,0.74628 C 8.845834,3.776859 8.388843,5.102846 7.991127,6.302606 L 9.415644,7.72492 c 1.24415,1.242111 1.51682,1.523547 1.51682,1.565414 0,0.0051 0.0133,0.03987 0.02953,0.07718 0.12913,0.296607 0.0877,0.664983 -0.103314,0.91872 -0.141456,0.187933 -4.207341,4.228478 -4.273468,4.246848 -0.139417,0.03871 -0.248653,-0.006 -0.34324,-0.140417 -0.07665,-0.108996 -0.06985,-0.137256 0.287004,-1.194633 0.34663,-1.101761 0.75901,-2.243218 1.08916,-3.290661 0,-0.0078 -0.636164,-0.650377 -1.413707,-1.427921 C 4.877658,7.152643 4.728155,6.995813 4.673718,6.87361 4.661948,6.84718 4.645988,6.81305 4.638168,6.79776 4.630368,6.78246 4.624038,6.75689 4.624038,6.74092 c 0,-0.01597 -0.0076,-0.03659 -0.01687,-0.04587 -0.02253,-0.02253 -0.02253,-0.436904 0,-0.45944 0.0093,-0.0093 0.01687,-0.0327 0.01687,-0.05204 0,-0.0363 0.06917,-0.178363 0.130414,-0.267907 0.07965,-0.1164 4.221831,-4.237681 4.259458,-4.237921 0.02047,-1.2e-4 0.04803,-0.0072 0.06124,-0.01577 0.03147,-0.02033 0.04415,-0.01967 0.118603,0.0062"
- id="path1"
- sodipodi:nodetypes="ccscccccccccccscccccscccccccccsssscccc" />
+<svg width="36" height="42" viewBox="0 0 36 42" fill="none" xmlns="http://www.w3.org/2000/svg">
+<mask id="mask0" mask-type="alpha" maskUnits="userSpaceOnUse" x="0" y="0" width="36" height="42">
+<path d="M17.9368 0L0 10.5V31.5L17.9375 42L35.875 31.5V10.5L17.9368 0ZM14.8314 31.5014L16.7203 23.3499L12.4729 19.1555L21.0692 10.4993L19.1768 18.6634L23.4014 22.8354L14.8314 31.5014Z" fill="#792EE5"/>
+</mask>
+<g mask="url(#mask0)">
+<rect x="-6.03564" y="-6.38623" width="47.8901" height="54.6798" fill="#792EE5"/>
+</g>
</svg>
diff --git a/docs/source/conf.py b/docs/source/conf.py
index af7696c8f9..838abf291f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -179,7 +179,7 @@
'logo_only': False,
}
-html_logo = '_images/logos/lightning_logo-name.svg'
+html_logo = '_images/logos/bolts_logo.png'
html_favicon = '_images/logos/lightning_icon.svg'
|
pulp__pulpcore-119 | [noissue]: Update aiohttp requirement from <3.9.1,>=3.8.1 to >=3.8.1,<3.9.2
Updates the requirements on [aiohttp](https://github.com/aio-libs/aiohttp) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/aio-libs/aiohttp/releases">aiohttp's releases</a>.</em></p>
<blockquote>
<h2>3.9.1</h2>
<h2>Bugfixes</h2>
<ul>
<li>
<p>Fixed importing aiohttp under PyPy on Windows.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7848">#7848</a>)</p>
</li>
<li>
<p>Fixed async concurrency safety in websocket compressor.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7865">#7865</a>)</p>
</li>
<li>
<p>Fixed <code>ClientResponse.close()</code> releasing the connection instead of closing.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7869">#7869</a>)</p>
</li>
<li>
<p>Fixed a regression where a connection could get closed during upgrade -- by @Dreamsorcerer.</p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7879">#7879</a>)</p>
</li>
<li>
<p>Fixed messages being reported as upgraded without an Upgrade header in Python parser. -- by :user:<code>Dreamsorcerer</code></p>
<p>(<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7895">#7895</a>)</p>
</li>
</ul>
<hr />
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst">aiohttp's changelog</a>.</em></p>
<blockquote>
<h1>3.9.1 (2023-11-26)</h1>
<h2>Bugfixes</h2>
<ul>
<li>
<p>Fixed importing aiohttp under PyPy on Windows.</p>
<p><code>[#7848](https://github.com/aio-libs/aiohttp/issues/7848) <https://github.com/aio-libs/aiohttp/issues/7848></code>_</p>
</li>
<li>
<p>Fixed async concurrency safety in websocket compressor.</p>
<p><code>[#7865](https://github.com/aio-libs/aiohttp/issues/7865) <https://github.com/aio-libs/aiohttp/issues/7865></code>_</p>
</li>
<li>
<p>Fixed <code>ClientResponse.close()</code> releasing the connection instead of closing.</p>
<p><code>[#7869](https://github.com/aio-libs/aiohttp/issues/7869) <https://github.com/aio-libs/aiohttp/issues/7869></code>_</p>
</li>
<li>
<p>Fixed a regression where connection may get closed during upgrade. -- by :user:<code>Dreamsorcerer</code></p>
<p><code>[#7879](https://github.com/aio-libs/aiohttp/issues/7879) <https://github.com/aio-libs/aiohttp/issues/7879></code>_</p>
</li>
<li>
<p>Fixed messages being reported as upgraded without an Upgrade header in Python parser. -- by :user:<code>Dreamsorcerer</code></p>
<p><code>[#7895](https://github.com/aio-libs/aiohttp/issues/7895) <https://github.com/aio-libs/aiohttp/issues/7895></code>_</p>
</li>
</ul>
<hr />
<h1>3.9.0 (2023-11-18)</h1>
<h2>Features</h2>
<ul>
<li>
<p>Introduced <code>AppKey</code> for static typing support of <code>Application</code> storage.
See <a href="https://docs.aiohttp.org/en/stable/web_advanced.html#application-s-config">https://docs.aiohttp.org/en/stable/web_advanced.html#application-s-config</a></p>
<p><code>[#5864](https://github.com/aio-libs/aiohttp/issues/5864) <https://github.com/aio-libs/aiohttp/issues/5864></code>_</p>
</li>
<li>
<p>Added a graceful shutdown period which allows pending tasks to complete before the application's cleanup is called.
The period can be adjusted with the <code>shutdown_timeout</code> parameter. -- by :user:<code>Dreamsorcerer</code>.
See <a href="https://docs.aiohttp.org/en/latest/web_advanced.html#graceful-shutdown">https://docs.aiohttp.org/en/latest/web_advanced.html#graceful-shutdown</a></p>
<p><code>[#7188](https://github.com/aio-libs/aiohttp/issues/7188) <https://github.com/aio-libs/aiohttp/issues/7188></code>_</p>
</li>
<li>
<p>Added <code>handler_cancellation <https://docs.aiohttp.org/en/stable/web_advanced.html#web-handler-cancellation></code>_ parameter to cancel web handler on client disconnection. -- by :user:<code>mosquito</code>
This (optionally) reintroduces a feature removed in a previous release.</p>
</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
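Of the features listed in the 3.9.0 changelog above, `AppKey` is the main typing-related change. A minimal usage sketch, assuming aiohttp >= 3.9 (the key name, stored value, and handler below are illustrative, not from the PR):

```python
from aiohttp import web

db_key = web.AppKey("db", str)  # typed key for Application storage

async def handler(request: web.Request) -> web.Response:
    db = request.app[db_key]  # type checkers now infer `db: str`
    return web.Response(text=db)

app = web.Application()
app[db_key] = "sqlite:///demo.db"
app.router.add_get("/", handler)

# The graceful-shutdown period from the same changelog is exposed via
# run_app's `shutdown_timeout` parameter:
# web.run_app(app, shutdown_timeout=60.0)
```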
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/aio-libs/aiohttp/commit/6333c026422c6b0fe57ff63cde4104e2d00f47f4"><code>6333c02</code></a> Release v3.9.1 (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7911">#7911</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/9dbd273093d6af6f5e1481816b05a7192860b440"><code>9dbd273</code></a> [PR <a href="https://redirect.github.com/aio-libs/aiohttp/issues/7673">#7673</a>/aa7d1a8f backport][3.9] Document release process (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7909">#7909</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/dd175b6b89564dc74fba0692a8a5f9a9b38e528a"><code>dd175b6</code></a> Fix regression with connection upgrade (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7879">#7879</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7908">#7908</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/946523d6380bd79e13146557432f46f6f9bbd53f"><code>946523d</code></a> Fix flaky websocket test (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7902">#7902</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7904">#7904</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/ddc2a26c9e0c43fd1229e4424f2a30d1b10ced13"><code>ddc2a26</code></a> [PR <a href="https://redirect.github.com/aio-libs/aiohttp/issues/7896">#7896</a>/9a7cfe77 backport][3.9] Fix some flaky tests (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7900">#7900</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/2ae4d6ffdd015f622bfb75dee98ad629240cccc4"><code>2ae4d6f</code></a> Message is not upgraded if Upgrade header is missing (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7895">#7895</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7898">#7898</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/bb111012706d3ef9edc525be3d8d4df410ad847f"><code>bb11101</code></a> Restore async concurrency safety to websocket compressor (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7865">#7865</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7889">#7889</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/6dd0122417f00ed4b2b353226a1b164b6463a245"><code>6dd0122</code></a> Update dependabot.yml (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7888">#7888</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/41a9f1f5b9b2630b1f4b971811c7ef8f016262fb"><code>41a9f1f</code></a> Bump mypy from 1.7.0 to 1.7.1 (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7882">#7882</a>)</li>
<li><a href="https://github.com/aio-libs/aiohttp/commit/a04970150c6ce9fda22c9f63d947845f79148b4c"><code>a049701</code></a> Fix usage of proxy.py in test_proxy_functional (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7773">#7773</a>) (<a href="https://redirect.github.com/aio-libs/aiohttp/issues/7876">#7876</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/aio-libs/aiohttp/compare/v3.8.1...v3.9.1">compare view</a></li>
</ul>
</details>
<br />
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details>
| [
{
"content": "from gettext import gettext as _\nimport hashlib\n\nfrom django.db import transaction\nfrom drf_chunked_upload.serializers import ChunkedUploadSerializer\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom pulpcore.app import models, files\nfrom pulpcore.app.serializers import base, fields\n\n\nUNIQUE_ALGORITHMS = ['sha256', 'sha384', 'sha512']\n\n\nclass BaseContentSerializer(base.MasterModelSerializer):\n _href = base.DetailIdentityField()\n\n class Meta:\n model = models.Content\n fields = base.MasterModelSerializer.Meta.fields\n\n\nclass NoArtifactContentSerializer(BaseContentSerializer):\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields\n\n\nclass SingleArtifactContentSerializer(BaseContentSerializer):\n _artifact = fields.SingleContentArtifactField(\n help_text=_(\"Artifact file representing the physical content\"),\n )\n\n _relative_path = serializers.CharField(\n help_text=_(\"Path where the artifact is located relative to distributions base_path\"),\n validators=[fields.relative_path_validator],\n write_only=True,\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with its Artifact.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop('_artifact')\n relative_path = validated_data.pop('_relative_path')\n content = self.Meta.model.objects.create(**validated_data)\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('_artifact', '_relative_path')\n\n\nclass MultipleArtifactContentSerializer(BaseContentSerializer):\n _artifacts = fields.ContentArtifactsField(\n help_text=_(\"A dict mapping relative paths inside the Content to the corresponding\"\n \"Artifact URLs. 
E.g.: {'relative/path': \"\n \"'/artifacts/1/'\"),\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with all its Artifacts.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n _artifacts = validated_data.pop('_artifacts')\n content = self.Meta.model.objects.create(**validated_data)\n for relative_path, artifact in _artifacts.items():\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('_artifacts',)\n\n\nclass ArtifactSerializer(base.ModelSerializer):\n _href = base.IdentityField(\n view_name='artifacts-detail',\n )\n\n file = serializers.FileField(\n help_text=_(\"The stored file.\"),\n required=False\n )\n\n upload = serializers.HyperlinkedRelatedField(\n view_name=\"upload-detail\",\n write_only=True,\n required=False,\n queryset=models.Upload.objects.filter(status=models.Upload.COMPLETE)\n )\n\n size = serializers.IntegerField(\n help_text=_(\"The size of the file in bytes.\"),\n required=False\n )\n\n md5 = serializers.CharField(\n help_text=_(\"The MD5 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha1 = serializers.CharField(\n help_text=_(\"The SHA-1 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha224 = serializers.CharField(\n help_text=_(\"The SHA-224 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha256 = serializers.CharField(\n help_text=_(\"The SHA-256 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha384 = serializers.CharField(\n help_text=_(\"The SHA-384 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha512 = serializers.CharField(\n help_text=_(\"The SHA-512 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n def validate(self, data):\n \"\"\"\n Validate file by size and by all checksums provided.\n\n Args:\n data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their\n values\n\n Raises:\n :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any\n of the checksums don't match their actual values.\n \"\"\"\n super().validate(data)\n\n if ('file' not in data and 'upload' not in data) or \\\n ('file' in data and 'upload' in data):\n raise serializers.ValidationError(_(\"Either 'file' or 'upload' parameter must be \"\n \"supplied but not both.\"))\n\n if 'upload' in data:\n self.upload = data.pop('upload')\n data['file'] = files.PulpTemporaryUploadedFile.from_file(self.upload.file.file)\n\n if 'size' in data:\n if data['file'].size != int(data['size']):\n raise serializers.ValidationError(_(\"The size did not match actual size of file.\"))\n else:\n data['size'] = data['file'].size\n\n for algorithm in hashlib.algorithms_guaranteed:\n if algorithm in models.Artifact.DIGEST_FIELDS:\n digest = data['file'].hashers[algorithm].hexdigest()\n\n if algorithm in data and digest != data[algorithm]:\n raise serializers.ValidationError(_(\"The %s checksum did not match.\")\n % algorithm)\n else:\n data[algorithm] = digest\n if algorithm in UNIQUE_ALGORITHMS:\n validator = UniqueValidator(models.Artifact.objects.all(),\n message=_(\"{0} checksum must be \"\n \"unique.\").format(algorithm))\n validator.field_name = algorithm\n validator.instance = None\n 
validator(digest)\n return data\n\n def create(self, validated_data):\n \"\"\"\n Create the artifact and delete its associated upload (if there is one)\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = super().create(validated_data)\n if hasattr(self, 'upload'):\n # creating an artifact will move the upload file so we need to delete the db record\n self.upload.delete()\n return artifact\n\n class Meta:\n model = models.Artifact\n fields = base.ModelSerializer.Meta.fields + ('file', 'size', 'md5', 'sha1', 'sha224',\n 'sha256', 'sha384', 'sha512', 'upload')\n\n\nclass UploadSerializer(base.ModelSerializer):\n \"\"\"Serializer for chunked uploads.\"\"\"\n viewname = 'uploads:upload-detail'\n\n _href = base.IdentityField(\n view_name='upload-detail',\n )\n\n file = serializers.FileField(\n write_only=True,\n )\n\n class Meta(ChunkedUploadSerializer.Meta):\n model = models.Upload\n fields = ('_href', 'file', 'offset', 'expires_at')\n",
"path": "pulpcore/app/serializers/content.py"
}
] | [
{
"content": "from gettext import gettext as _\nimport hashlib\n\nfrom django.db import transaction\nfrom drf_chunked_upload.serializers import ChunkedUploadSerializer\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom pulpcore.app import models, files\nfrom pulpcore.app.serializers import base, fields\n\n\nUNIQUE_ALGORITHMS = ['sha256', 'sha384', 'sha512']\n\n\nclass BaseContentSerializer(base.MasterModelSerializer):\n _href = base.DetailIdentityField()\n\n class Meta:\n model = models.Content\n fields = base.MasterModelSerializer.Meta.fields\n\n\nclass NoArtifactContentSerializer(BaseContentSerializer):\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields\n\n\nclass SingleArtifactContentSerializer(BaseContentSerializer):\n _artifact = fields.SingleContentArtifactField(\n help_text=_(\"Artifact file representing the physical content\"),\n )\n\n _relative_path = serializers.CharField(\n help_text=_(\"Path where the artifact is located relative to distributions base_path\"),\n validators=[fields.relative_path_validator],\n write_only=True,\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with its Artifact.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop('_artifact')\n relative_path = validated_data.pop('_relative_path')\n content = self.Meta.model.objects.create(**validated_data)\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('_artifact', '_relative_path')\n\n\nclass MultipleArtifactContentSerializer(BaseContentSerializer):\n _artifacts = fields.ContentArtifactsField(\n help_text=_(\"A dict mapping relative paths inside the Content to the corresponding\"\n \"Artifact URLs. 
E.g.: {'relative/path': \"\n \"'/artifacts/1/'\"),\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with all its Artifacts.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n _artifacts = validated_data.pop('_artifacts')\n content = self.Meta.model.objects.create(**validated_data)\n for relative_path, artifact in _artifacts.items():\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('_artifacts',)\n\n\nclass ArtifactSerializer(base.ModelSerializer):\n _href = base.IdentityField(\n view_name='artifacts-detail',\n )\n\n file = serializers.FileField(\n help_text=_(\"The stored file.\"),\n allow_empty_file=True,\n required=False\n )\n\n upload = serializers.HyperlinkedRelatedField(\n view_name=\"upload-detail\",\n write_only=True,\n required=False,\n queryset=models.Upload.objects.filter(status=models.Upload.COMPLETE)\n )\n\n size = serializers.IntegerField(\n help_text=_(\"The size of the file in bytes.\"),\n required=False\n )\n\n md5 = serializers.CharField(\n help_text=_(\"The MD5 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha1 = serializers.CharField(\n help_text=_(\"The SHA-1 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha224 = serializers.CharField(\n help_text=_(\"The SHA-224 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha256 = serializers.CharField(\n help_text=_(\"The SHA-256 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha384 = serializers.CharField(\n help_text=_(\"The SHA-384 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n sha512 = serializers.CharField(\n help_text=_(\"The SHA-512 checksum of the file if available.\"),\n required=False,\n allow_blank=True\n )\n\n def validate(self, data):\n \"\"\"\n Validate file by size and by all checksums provided.\n\n Args:\n data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their\n values\n\n Raises:\n :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any\n of the checksums don't match their actual values.\n \"\"\"\n super().validate(data)\n\n if ('file' not in data and 'upload' not in data) or \\\n ('file' in data and 'upload' in data):\n raise serializers.ValidationError(_(\"Either 'file' or 'upload' parameter must be \"\n \"supplied but not both.\"))\n\n if 'upload' in data:\n self.upload = data.pop('upload')\n data['file'] = files.PulpTemporaryUploadedFile.from_file(self.upload.file.file)\n\n if 'size' in data:\n if data['file'].size != int(data['size']):\n raise serializers.ValidationError(_(\"The size did not match actual size of file.\"))\n else:\n data['size'] = data['file'].size\n\n for algorithm in hashlib.algorithms_guaranteed:\n if algorithm in models.Artifact.DIGEST_FIELDS:\n digest = data['file'].hashers[algorithm].hexdigest()\n\n if algorithm in data and digest != data[algorithm]:\n raise serializers.ValidationError(_(\"The %s checksum did not match.\")\n % algorithm)\n else:\n data[algorithm] = digest\n if algorithm in UNIQUE_ALGORITHMS:\n validator = UniqueValidator(models.Artifact.objects.all(),\n message=_(\"{0} checksum must be \"\n \"unique.\").format(algorithm))\n validator.field_name = algorithm\n 
validator.instance = None\n validator(digest)\n return data\n\n def create(self, validated_data):\n \"\"\"\n Create the artifact and delete its associated upload (if there is one)\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = super().create(validated_data)\n if hasattr(self, 'upload'):\n # creating an artifact will move the upload file so we need to delete the db record\n self.upload.delete()\n return artifact\n\n class Meta:\n model = models.Artifact\n fields = base.ModelSerializer.Meta.fields + ('file', 'size', 'md5', 'sha1', 'sha224',\n 'sha256', 'sha384', 'sha512', 'upload')\n\n\nclass UploadSerializer(base.ModelSerializer):\n \"\"\"Serializer for chunked uploads.\"\"\"\n viewname = 'uploads:upload-detail'\n\n _href = base.IdentityField(\n view_name='upload-detail',\n )\n\n file = serializers.FileField(\n write_only=True,\n )\n\n class Meta(ChunkedUploadSerializer.Meta):\n model = models.Upload\n fields = ('_href', 'file', 'offset', 'expires_at')\n",
"path": "pulpcore/app/serializers/content.py"
}
] | diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py
index c92f386f6d..69588fce10 100644
--- a/pulpcore/app/serializers/content.py
+++ b/pulpcore/app/serializers/content.py
@@ -99,6 +99,7 @@ class ArtifactSerializer(base.ModelSerializer):
file = serializers.FileField(
help_text=_("The stored file."),
+ allow_empty_file=True,
required=False
)
diff --git a/pulpcore/tests/functional/api/test_crd_artifacts.py b/pulpcore/tests/functional/api/test_crd_artifacts.py
index b96b98fe05..886cd9ceb4 100644
--- a/pulpcore/tests/functional/api/test_crd_artifacts.py
+++ b/pulpcore/tests/functional/api/test_crd_artifacts.py
@@ -54,11 +54,31 @@ def test_upload_valid_attrs(self):
for keys in itertools.combinations(file_attrs, i):
data = {key: file_attrs[key] for key in keys}
with self.subTest(data=data):
- self._do_upload_valid_attrs(data)
+ self._do_upload_valid_attrs(data, self.file)
- def _do_upload_valid_attrs(self, data):
+ def test_upload_empty_file(self):
+ """Upload an empty file.
+
+ For each possible combination of ``sha256`` and ``size`` (including
+ neither), do the following:
+
+ 1. Upload a file with the chosen combination of attributes.
+ 2. Verify that an artifact has been created, and that it has valid
+ attributes.
+ 3. Delete the artifact, and verify that its attributes are
+ inaccessible.
+ """
+ empty_file = b''
+ file_attrs = {'sha256': hashlib.sha256(empty_file).hexdigest(), 'size': 0}
+ for i in range(len(file_attrs) + 1):
+ for keys in itertools.combinations(file_attrs, i):
+ data = {key: file_attrs[key] for key in keys}
+ with self.subTest(data=data):
+ self._do_upload_valid_attrs(data, files={'file': empty_file})
+
+ def _do_upload_valid_attrs(self, data, files):
"""Upload a file with the given attributes."""
- artifact = self.client.post(ARTIFACTS_PATH, data=data, files=self.file)
+ artifact = self.client.post(ARTIFACTS_PATH, data=data, files=files)
self.addCleanup(self.client.delete, artifact['_href'])
read_artifact = self.client.get(artifact['_href'])
for key, val in artifact.items():
|
pypa__setuptools-4007 | Extra directories exposed by editable wheel for `setuptools` project itself
I noticed in `main` (5169a9b1f) that some extra directories of the `setuptools` project ended up exposed by the editable wheel. Specifically `launcher` and `newsfragments`. This is a quick reproducer:
```bash
git clone --depth 1 https://github.com/pypa/setuptools /tmp/test-setuptools
cd /tmp/test-setuptools
git reset --hard 5169a9b
python3.11 -m venv .venv
.venv/bin/python -m pip install -U 'pip==23.2.1'
.venv/bin/python -m pip install -e .
cat .venv/lib/python3.11/site-packages/__editable___setuptools_*_finder.py | head
```
```console
import sys
from importlib.machinery import ModuleSpec
from importlib.machinery import all_suffixes as module_suffixes
from importlib.util import spec_from_file_location
from itertools import chain
from pathlib import Path
MAPPING = {'_distutils_hack': '/tmp/test-setuptools/_distutils_hack', 'launcher': '/tmp/test-setuptools/launcher', 'newsfragments': '/tmp/test-setuptools/newsfragments', 'pkg_resources': '/tmp/test-setuptools/pkg_resources', 'setuptools': '/tmp/test-setuptools/setuptools'}
NAMESPACES = {'launcher': ['/tmp/test-setuptools/launcher'], 'newsfragments': ['/tmp/test-setuptools/newsfragments']}
PATH_PLACEHOLDER = '__editable__.setuptools-68.0.0.post20230808.finder' + ".__path_hook__"
```
This is likely caused by the way `packages = find_namespace:` is configured in `setup.cfg`.
We can check that by running:
```python
# Still inside /tmp/test-setuptools from the previous example
.venv/bin/python
Python 3.11.4 (main, Jun 7 2023, 12:45:49) [GCC 9.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import setuptools
>>> from distutils.core import run_setup
>>> dist = run_setup("setup.py", stop_after="commandline")
>>> dist.packages
['launcher', 'newsfragments', 'pkg_resources', 'setuptools', '_distutils_hack', 'pkg_resources.extern', 'pkg_resources._vendor', 'pkg_resources._vendor.importlib_resources', 'pkg_resources._vendor.jaraco', 'pkg_resources._vendor.more_itertools', 'pkg_resources._vendor.packaging', 'pkg_resources._vendor.platformdirs', 'pkg_resources._vendor.jaraco.text', 'setuptools.command', 'setuptools.config', 'setuptools.extern', 'setuptools._distutils', 'setuptools._vendor', 'setuptools.config._validate_pyproject', 'setuptools._distutils.command', 'setuptools._vendor.importlib_metadata', 'setuptools._vendor.importlib_resources', 'setuptools._vendor.jaraco', 'setuptools._vendor.more_itertools', 'setuptools._vendor.packaging', 'setuptools._vendor.tomli', 'setuptools._vendor.jaraco.text']
>>> "newsfragments" in dist.packages
True
>>> "launcher" in dist.packages
True
```
I imagine the reason `newsfragments` and `launcher` are not included in the regular wheel is that `setup.cfg`/`setup.py` has neither `package_data = [...]` nor `include_package_data = True`, so the `.rst` and `.exe` files are excluded from the wheel. The `newsfragments` and `launcher` directories therefore end up empty during the build, which automatically removes them from the zip (zip archives cannot represent empty directories).
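A quick, self-contained check of that zip behaviour (the `pkg/module.py` name is just illustrative): directories only exist in a zip archive as prefixes of file names, so a package that ends up with no files simply disappears.

```python
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("pkg/module.py", "x = 1")  # only files get written

with zipfile.ZipFile(buf) as zf:
    print(zf.namelist())  # ['pkg/module.py'] -- no standalone directory entries
```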
The expectation would be that the `packages` configuration excludes these directories by default (even though they don't contain `.py` files) to prevent errors; e.g. in the future we could end up adding files to these directories that are captured by setuptools' `package_data` or `include_package_data`.
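One way to express that expectation explicitly today is to pass an `exclude` list to `find_namespace_packages`; the patterns below are illustrative rather than the project's actual configuration:

```python
from setuptools import find_namespace_packages

# Illustrative sketch: exclude the tool-only directories so they never
# reach `dist.packages`, regardless of whether they contain .py files.
packages = find_namespace_packages(
    exclude=["launcher", "launcher.*", "newsfragments", "newsfragments.*"],
)
```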
| [
{
"content": "\"\"\"Automatic discovery of Python modules and packages (for inclusion in the\ndistribution) and other config values.\n\nFor the purposes of this module, the following nomenclature is used:\n\n- \"src-layout\": a directory representing a Python project that contains a \"src\"\n folder. Everything under the \"src\" folder is meant to be included in the\n distribution when packaging the project. Example::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── src/\n └── mypkg/\n ├── __init__.py\n ├── mymodule.py\n └── my_data_file.txt\n\n- \"flat-layout\": a Python project that does not use \"src-layout\" but instead\n have a directory under the project root for each package::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── mypkg/\n ├── __init__.py\n ├── mymodule.py\n └── my_data_file.txt\n\n- \"single-module\": a project that contains a single Python script direct under\n the project root (no directory used)::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── mymodule.py\n\n\"\"\"\n\nimport itertools\nimport os\nfrom fnmatch import fnmatchcase\nfrom glob import glob\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Dict,\n Iterable,\n Iterator,\n List,\n Mapping,\n Optional,\n Tuple,\n Union,\n)\n\nimport _distutils_hack.override # noqa: F401\n\nfrom distutils import log\nfrom distutils.util import convert_path\n\n_Path = Union[str, os.PathLike]\nStrIter = Iterator[str]\n\nchain_iter = itertools.chain.from_iterable\n\nif TYPE_CHECKING:\n from setuptools import Distribution # noqa\n\n\ndef _valid_name(path: _Path) -> bool:\n # Ignore invalid names that cannot be imported directly\n return os.path.basename(path).isidentifier()\n\n\nclass _Filter:\n \"\"\"\n Given a list of patterns, create a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n\n def __init__(self, *patterns: str):\n self._patterns = dict.fromkeys(patterns)\n\n def __call__(self, item: str) -> bool:\n return any(fnmatchcase(item, pat) for pat in self._patterns)\n\n def __contains__(self, item: str) -> bool:\n return item in self._patterns\n\n\nclass _Finder:\n \"\"\"Base class that exposes functionality for module/package finders\"\"\"\n\n ALWAYS_EXCLUDE: Tuple[str, ...] = ()\n DEFAULT_EXCLUDE: Tuple[str, ...] = ()\n\n @classmethod\n def find(\n cls,\n where: _Path = '.',\n exclude: Iterable[str] = (),\n include: Iterable[str] = ('*',),\n ) -> List[str]:\n \"\"\"Return a list of all Python items (packages or modules, depending on\n the finder implementation) found within directory 'where'.\n\n 'where' is the root directory which will be searched.\n It should be supplied as a \"cross-platform\" (i.e. 
URL-style) path;\n it will be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of names to exclude; '*' can be used\n as a wildcard in the names.\n When finding packages, 'foo.*' will exclude all subpackages of 'foo'\n (but not 'foo' itself).\n\n 'include' is a sequence of names to include.\n If it's specified, only the named items will be included.\n If it's not specified, all found items will be included.\n 'include' can contain shell style wildcard patterns just like\n 'exclude'.\n \"\"\"\n\n exclude = exclude or cls.DEFAULT_EXCLUDE\n return list(\n cls._find_iter(\n convert_path(str(where)),\n _Filter(*cls.ALWAYS_EXCLUDE, *exclude),\n _Filter(*include),\n )\n )\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n raise NotImplementedError\n\n\nclass PackageFinder(_Finder):\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n ALWAYS_EXCLUDE = (\"ez_setup\", \"*__pycache__\")\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(str(where), followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if '.' in dir or not cls._looks_like_package(full_path, package):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Early pruning if there is nothing else to be scanned\n if f\"{package}*\" in exclude or f\"{package}.*\" in exclude:\n continue\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path: _Path, _package_name: str) -> bool:\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(_path: _Path, _package_name: str) -> bool:\n return True\n\n\nclass ModuleFinder(_Finder):\n \"\"\"Find isolated Python modules.\n This function will **not** recurse subdirectories.\n \"\"\"\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n for file in glob(os.path.join(where, \"*.py\")):\n module, _ext = os.path.splitext(os.path.basename(file))\n\n if not cls._looks_like_module(module):\n continue\n\n if include(module) and not exclude(module):\n yield module\n\n _looks_like_module = staticmethod(_valid_name)\n\n\n# We have to be extra careful in the case of flat layout to not include files\n# and directories not meant for distribution (e.g. 
tool-related)\n\n\nclass FlatLayoutPackageFinder(PEP420PackageFinder):\n _EXCLUDE = (\n \"ci\",\n \"bin\",\n \"debian\",\n \"doc\",\n \"docs\",\n \"documentation\",\n \"manpages\",\n \"news\",\n \"changelog\",\n \"test\",\n \"tests\",\n \"unit_test\",\n \"unit_tests\",\n \"example\",\n \"examples\",\n \"scripts\",\n \"tools\",\n \"util\",\n \"utils\",\n \"python\",\n \"build\",\n \"dist\",\n \"venv\",\n \"env\",\n \"requirements\",\n # ---- Task runners / Build tools ----\n \"tasks\", # invoke\n \"fabfile\", # fabric\n \"site_scons\", # SCons\n # ---- Other tools ----\n \"benchmark\",\n \"benchmarks\",\n \"exercise\",\n \"exercises\",\n \"htmlcov\", # Coverage.py\n # ---- Hidden directories/Private packages ----\n \"[._]*\",\n )\n\n DEFAULT_EXCLUDE = tuple(chain_iter((p, f\"{p}.*\") for p in _EXCLUDE))\n \"\"\"Reserved package names\"\"\"\n\n @staticmethod\n def _looks_like_package(_path: _Path, package_name: str) -> bool:\n names = package_name.split('.')\n # Consider PEP 561\n root_pkg_is_valid = names[0].isidentifier() or names[0].endswith(\"-stubs\")\n return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])\n\n\nclass FlatLayoutModuleFinder(ModuleFinder):\n DEFAULT_EXCLUDE = (\n \"setup\",\n \"conftest\",\n \"test\",\n \"tests\",\n \"example\",\n \"examples\",\n \"build\",\n # ---- Task runners ----\n \"toxfile\",\n \"noxfile\",\n \"pavement\",\n \"dodo\",\n \"tasks\",\n \"fabfile\",\n # ---- Other tools ----\n \"[Ss][Cc]onstruct\", # SCons\n \"conanfile\", # Connan: C/C++ build tool\n \"manage\", # Django\n \"benchmark\",\n \"benchmarks\",\n \"exercise\",\n \"exercises\",\n # ---- Hidden files/Private modules ----\n \"[._]*\",\n )\n \"\"\"Reserved top-level module names\"\"\"\n\n\ndef _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:\n nested = PEP420PackageFinder.find(pkg_dir)\n return [root_pkg] + [\".\".join((root_pkg, n)) for n in nested]\n\n\nclass ConfigDiscovery:\n \"\"\"Fill-in metadata and options that can be automatically derived\n (from other metadata/options, the file system or conventions)\n \"\"\"\n\n def __init__(self, distribution: \"Distribution\"):\n self.dist = distribution\n self._called = False\n self._disabled = False\n self._skip_ext_modules = False\n\n def _disable(self):\n \"\"\"Internal API to disable automatic discovery\"\"\"\n self._disabled = True\n\n def _ignore_ext_modules(self):\n \"\"\"Internal API to disregard ext_modules.\n\n Normally auto-discovery would not be triggered if ``ext_modules`` are set\n (this is done for backward compatibility with existing packages relying on\n ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function\n to ignore given ``ext_modules`` and proceed with the auto-discovery if\n ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml\n metadata).\n \"\"\"\n self._skip_ext_modules = True\n\n @property\n def _root_dir(self) -> _Path:\n # The best is to wait until `src_root` is set in dist, before using _root_dir.\n return self.dist.src_root or os.curdir\n\n @property\n def _package_dir(self) -> Dict[str, str]:\n if self.dist.package_dir is None:\n return {}\n return self.dist.package_dir\n\n def __call__(self, force=False, name=True, ignore_ext_modules=False):\n \"\"\"Automatically discover missing configuration fields\n and modifies the given ``distribution`` object in-place.\n\n Note that by default this will only have an effect the first time the\n ``ConfigDiscovery`` object is called.\n\n To repeatedly invoke automatic discovery (e.g. 
when the project\n directory changes), please use ``force=True`` (or create a new\n ``ConfigDiscovery`` instance).\n \"\"\"\n if force is False and (self._called or self._disabled):\n # Avoid overhead of multiple calls\n return\n\n self._analyse_package_layout(ignore_ext_modules)\n if name:\n self.analyse_name() # depends on ``packages`` and ``py_modules``\n\n self._called = True\n\n def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:\n \"\"\"``True`` if the user has specified some form of package/module listing\"\"\"\n ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules\n ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)\n return (\n self.dist.packages is not None\n or self.dist.py_modules is not None\n or ext_modules\n or hasattr(self.dist, \"configuration\")\n and self.dist.configuration\n # ^ Some projects use numpy.distutils.misc_util.Configuration\n )\n\n def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:\n if self._explicitly_specified(ignore_ext_modules):\n # For backward compatibility, just try to find modules/packages\n # when nothing is given\n return True\n\n log.debug(\n \"No `packages` or `py_modules` configuration, performing \"\n \"automatic discovery.\"\n )\n\n return (\n self._analyse_explicit_layout()\n or self._analyse_src_layout()\n # flat-layout is the trickiest for discovery so it should be last\n or self._analyse_flat_layout()\n )\n\n def _analyse_explicit_layout(self) -> bool:\n \"\"\"The user can explicitly give a package layout via ``package_dir``\"\"\"\n package_dir = self._package_dir.copy() # don't modify directly\n package_dir.pop(\"\", None) # This falls under the \"src-layout\" umbrella\n root_dir = self._root_dir\n\n if not package_dir:\n return False\n\n log.debug(f\"`explicit-layout` detected -- analysing {package_dir}\")\n pkgs = chain_iter(\n _find_packages_within(pkg, os.path.join(root_dir, parent_dir))\n for pkg, parent_dir in package_dir.items()\n )\n self.dist.packages = list(pkgs)\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n return True\n\n def _analyse_src_layout(self) -> bool:\n \"\"\"Try to find all packages or modules under the ``src`` directory\n (or anything pointed by ``package_dir[\"\"]``).\n\n The \"src-layout\" is relatively safe for automatic discovery.\n We assume that everything within is meant to be included in the\n distribution.\n\n If ``package_dir[\"\"]`` is not given, but the ``src`` directory exists,\n this function will set ``package_dir[\"\"] = \"src\"``.\n \"\"\"\n package_dir = self._package_dir\n src_dir = os.path.join(self._root_dir, package_dir.get(\"\", \"src\"))\n if not os.path.isdir(src_dir):\n return False\n\n log.debug(f\"`src-layout` detected -- analysing {src_dir}\")\n package_dir.setdefault(\"\", os.path.basename(src_dir))\n self.dist.package_dir = package_dir # persist eventual modifications\n self.dist.packages = PEP420PackageFinder.find(src_dir)\n self.dist.py_modules = ModuleFinder.find(src_dir)\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n log.debug(f\"discovered py_modules -- {self.dist.py_modules}\")\n return True\n\n def _analyse_flat_layout(self) -> bool:\n \"\"\"Try to find all packages and modules under the project root.\n\n Since the ``flat-layout`` is more dangerous in terms of accidentally including\n extra files/directories, this function is more conservative and will raise an\n error if multiple packages or modules are found.\n\n This assumes that multi-package dists are uncommon and refuse 
to support that\n use case in order to be able to prevent unintended errors.\n \"\"\"\n log.debug(f\"`flat-layout` detected -- analysing {self._root_dir}\")\n return self._analyse_flat_packages() or self._analyse_flat_modules()\n\n def _analyse_flat_packages(self) -> bool:\n self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)\n top_level = remove_nested_packages(remove_stubs(self.dist.packages))\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n self._ensure_no_accidental_inclusion(top_level, \"packages\")\n return bool(top_level)\n\n def _analyse_flat_modules(self) -> bool:\n self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)\n log.debug(f\"discovered py_modules -- {self.dist.py_modules}\")\n self._ensure_no_accidental_inclusion(self.dist.py_modules, \"modules\")\n return bool(self.dist.py_modules)\n\n def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):\n if len(detected) > 1:\n from inspect import cleandoc\n\n from setuptools.errors import PackageDiscoveryError\n\n msg = f\"\"\"Multiple top-level {kind} discovered in a flat-layout: {detected}.\n\n To avoid accidental inclusion of unwanted files or directories,\n setuptools will not proceed with this build.\n\n If you are trying to create a single distribution with multiple {kind}\n on purpose, you should not rely on automatic discovery.\n Instead, consider the following options:\n\n 1. set up custom discovery (`find` directive with `include` or `exclude`)\n 2. use a `src-layout`\n 3. explicitly set `py_modules` or `packages` with a list of names\n\n To find more information, look for \"package discovery\" on setuptools docs.\n \"\"\"\n raise PackageDiscoveryError(cleandoc(msg))\n\n def analyse_name(self):\n \"\"\"The packages/modules are the essential contribution of the author.\n Therefore the name of the distribution can be derived from them.\n \"\"\"\n if self.dist.metadata.name or self.dist.name:\n # get_name() is not reliable (can return \"UNKNOWN\")\n return None\n\n log.debug(\"No `name` configuration, performing automatic discovery\")\n\n name = (\n self._find_name_single_package_or_module()\n or self._find_name_from_packages()\n )\n if name:\n self.dist.metadata.name = name\n\n def _find_name_single_package_or_module(self) -> Optional[str]:\n \"\"\"Exactly one module or package\"\"\"\n for field in ('packages', 'py_modules'):\n items = getattr(self.dist, field, None) or []\n if items and len(items) == 1:\n log.debug(f\"Single module/package detected, name: {items[0]}\")\n return items[0]\n\n return None\n\n def _find_name_from_packages(self) -> Optional[str]:\n \"\"\"Try to find the root package that is not a PEP 420 namespace\"\"\"\n if not self.dist.packages:\n return None\n\n packages = remove_stubs(sorted(self.dist.packages, key=len))\n package_dir = self.dist.package_dir or {}\n\n parent_pkg = find_parent_package(packages, package_dir, self._root_dir)\n if parent_pkg:\n log.debug(f\"Common parent package detected, name: {parent_pkg}\")\n return parent_pkg\n\n log.warn(\"No parent package detected, impossible to derive `name`\")\n return None\n\n\ndef remove_nested_packages(packages: List[str]) -> List[str]:\n \"\"\"Remove nested packages from a list of packages.\n\n >>> remove_nested_packages([\"a\", \"a.b1\", \"a.b2\", \"a.b1.c1\"])\n ['a']\n >>> remove_nested_packages([\"a\", \"b\", \"c.d\", \"c.d.e.f\", \"g.h\", \"a.a1\"])\n ['a', 'b', 'c.d', 'g.h']\n \"\"\"\n pkgs = sorted(packages, key=len)\n top_level = pkgs[:]\n size = len(pkgs)\n for i, name in 
enumerate(reversed(pkgs)):\n if any(name.startswith(f\"{other}.\") for other in top_level):\n top_level.pop(size - i - 1)\n\n return top_level\n\n\ndef remove_stubs(packages: List[str]) -> List[str]:\n \"\"\"Remove type stubs (:pep:`561`) from a list of packages.\n\n >>> remove_stubs([\"a\", \"a.b\", \"a-stubs\", \"a-stubs.b.c\", \"b\", \"c-stubs\"])\n ['a', 'a.b', 'b']\n \"\"\"\n return [pkg for pkg in packages if not pkg.split(\".\")[0].endswith(\"-stubs\")]\n\n\ndef find_parent_package(\n packages: List[str], package_dir: Mapping[str, str], root_dir: _Path\n) -> Optional[str]:\n \"\"\"Find the parent package that is not a namespace.\"\"\"\n packages = sorted(packages, key=len)\n common_ancestors = []\n for i, name in enumerate(packages):\n if not all(n.startswith(f\"{name}.\") for n in packages[i + 1 :]):\n # Since packages are sorted by length, this condition is able\n # to find a list of all common ancestors.\n # When there is divergence (e.g. multiple root packages)\n # the list will be empty\n break\n common_ancestors.append(name)\n\n for name in common_ancestors:\n pkg_path = find_package_path(name, package_dir, root_dir)\n init = os.path.join(pkg_path, \"__init__.py\")\n if os.path.isfile(init):\n return name\n\n return None\n\n\ndef find_package_path(\n name: str, package_dir: Mapping[str, str], root_dir: _Path\n) -> str:\n \"\"\"Given a package name, return the path where it should be found on\n disk, considering the ``package_dir`` option.\n\n >>> path = find_package_path(\"my.pkg\", {\"\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested/my/pkg'\n\n >>> path = find_package_path(\"my.pkg\", {\"my\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested/pkg'\n\n >>> path = find_package_path(\"my.pkg\", {\"my.pkg\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested'\n\n >>> path = find_package_path(\"other.pkg\", {\"my.pkg\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './other/pkg'\n \"\"\"\n parts = name.split(\".\")\n for i in range(len(parts), 0, -1):\n # Look backwards, the most specific package_dir first\n partial_name = \".\".join(parts[:i])\n if partial_name in package_dir:\n parent = package_dir[partial_name]\n return os.path.join(root_dir, parent, *parts[i:])\n\n parent = package_dir.get(\"\") or \"\"\n return os.path.join(root_dir, *parent.split(\"/\"), *parts)\n\n\ndef construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:\n parent_pkgs = remove_nested_packages(packages)\n prefix = Path(package_path).parts\n return {pkg: \"/\".join([*prefix, *pkg.split(\".\")]) for pkg in parent_pkgs}\n",
"path": "setuptools/discovery.py"
}
] | [
{
"content": "\"\"\"Automatic discovery of Python modules and packages (for inclusion in the\ndistribution) and other config values.\n\nFor the purposes of this module, the following nomenclature is used:\n\n- \"src-layout\": a directory representing a Python project that contains a \"src\"\n folder. Everything under the \"src\" folder is meant to be included in the\n distribution when packaging the project. Example::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── src/\n └── mypkg/\n ├── __init__.py\n ├── mymodule.py\n └── my_data_file.txt\n\n- \"flat-layout\": a Python project that does not use \"src-layout\" but instead\n have a directory under the project root for each package::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── mypkg/\n ├── __init__.py\n ├── mymodule.py\n └── my_data_file.txt\n\n- \"single-module\": a project that contains a single Python script direct under\n the project root (no directory used)::\n\n .\n ├── tox.ini\n ├── pyproject.toml\n └── mymodule.py\n\n\"\"\"\n\nimport itertools\nimport os\nfrom fnmatch import fnmatchcase\nfrom glob import glob\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Dict,\n Iterable,\n Iterator,\n List,\n Mapping,\n Optional,\n Tuple,\n Union,\n)\n\nimport _distutils_hack.override # noqa: F401\n\nfrom distutils import log\nfrom distutils.util import convert_path\n\n_Path = Union[str, os.PathLike]\nStrIter = Iterator[str]\n\nchain_iter = itertools.chain.from_iterable\n\nif TYPE_CHECKING:\n from setuptools import Distribution # noqa\n\n\ndef _valid_name(path: _Path) -> bool:\n # Ignore invalid names that cannot be imported directly\n return os.path.basename(path).isidentifier()\n\n\nclass _Filter:\n \"\"\"\n Given a list of patterns, create a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n\n def __init__(self, *patterns: str):\n self._patterns = dict.fromkeys(patterns)\n\n def __call__(self, item: str) -> bool:\n return any(fnmatchcase(item, pat) for pat in self._patterns)\n\n def __contains__(self, item: str) -> bool:\n return item in self._patterns\n\n\nclass _Finder:\n \"\"\"Base class that exposes functionality for module/package finders\"\"\"\n\n ALWAYS_EXCLUDE: Tuple[str, ...] = ()\n DEFAULT_EXCLUDE: Tuple[str, ...] = ()\n\n @classmethod\n def find(\n cls,\n where: _Path = '.',\n exclude: Iterable[str] = (),\n include: Iterable[str] = ('*',),\n ) -> List[str]:\n \"\"\"Return a list of all Python items (packages or modules, depending on\n the finder implementation) found within directory 'where'.\n\n 'where' is the root directory which will be searched.\n It should be supplied as a \"cross-platform\" (i.e. 
URL-style) path;\n it will be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of names to exclude; '*' can be used\n as a wildcard in the names.\n When finding packages, 'foo.*' will exclude all subpackages of 'foo'\n (but not 'foo' itself).\n\n 'include' is a sequence of names to include.\n If it's specified, only the named items will be included.\n If it's not specified, all found items will be included.\n 'include' can contain shell style wildcard patterns just like\n 'exclude'.\n \"\"\"\n\n exclude = exclude or cls.DEFAULT_EXCLUDE\n return list(\n cls._find_iter(\n convert_path(str(where)),\n _Filter(*cls.ALWAYS_EXCLUDE, *exclude),\n _Filter(*include),\n )\n )\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n raise NotImplementedError\n\n\nclass PackageFinder(_Finder):\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n ALWAYS_EXCLUDE = (\"ez_setup\", \"*__pycache__\")\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(str(where), followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if '.' in dir or not cls._looks_like_package(full_path, package):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Early pruning if there is nothing else to be scanned\n if f\"{package}*\" in exclude or f\"{package}.*\" in exclude:\n continue\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path: _Path, _package_name: str) -> bool:\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(_path: _Path, _package_name: str) -> bool:\n return True\n\n\nclass ModuleFinder(_Finder):\n \"\"\"Find isolated Python modules.\n This function will **not** recurse subdirectories.\n \"\"\"\n\n @classmethod\n def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:\n for file in glob(os.path.join(where, \"*.py\")):\n module, _ext = os.path.splitext(os.path.basename(file))\n\n if not cls._looks_like_module(module):\n continue\n\n if include(module) and not exclude(module):\n yield module\n\n _looks_like_module = staticmethod(_valid_name)\n\n\n# We have to be extra careful in the case of flat layout to not include files\n# and directories not meant for distribution (e.g. 
tool-related)\n\n\nclass FlatLayoutPackageFinder(PEP420PackageFinder):\n _EXCLUDE = (\n \"ci\",\n \"bin\",\n \"debian\",\n \"doc\",\n \"docs\",\n \"documentation\",\n \"manpages\",\n \"news\",\n \"newsfragments\",\n \"changelog\",\n \"test\",\n \"tests\",\n \"unit_test\",\n \"unit_tests\",\n \"example\",\n \"examples\",\n \"scripts\",\n \"tools\",\n \"util\",\n \"utils\",\n \"python\",\n \"build\",\n \"dist\",\n \"venv\",\n \"env\",\n \"requirements\",\n # ---- Task runners / Build tools ----\n \"tasks\", # invoke\n \"fabfile\", # fabric\n \"site_scons\", # SCons\n # ---- Other tools ----\n \"benchmark\",\n \"benchmarks\",\n \"exercise\",\n \"exercises\",\n \"htmlcov\", # Coverage.py\n # ---- Hidden directories/Private packages ----\n \"[._]*\",\n )\n\n DEFAULT_EXCLUDE = tuple(chain_iter((p, f\"{p}.*\") for p in _EXCLUDE))\n \"\"\"Reserved package names\"\"\"\n\n @staticmethod\n def _looks_like_package(_path: _Path, package_name: str) -> bool:\n names = package_name.split('.')\n # Consider PEP 561\n root_pkg_is_valid = names[0].isidentifier() or names[0].endswith(\"-stubs\")\n return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])\n\n\nclass FlatLayoutModuleFinder(ModuleFinder):\n DEFAULT_EXCLUDE = (\n \"setup\",\n \"conftest\",\n \"test\",\n \"tests\",\n \"example\",\n \"examples\",\n \"build\",\n # ---- Task runners ----\n \"toxfile\",\n \"noxfile\",\n \"pavement\",\n \"dodo\",\n \"tasks\",\n \"fabfile\",\n # ---- Other tools ----\n \"[Ss][Cc]onstruct\", # SCons\n \"conanfile\", # Connan: C/C++ build tool\n \"manage\", # Django\n \"benchmark\",\n \"benchmarks\",\n \"exercise\",\n \"exercises\",\n # ---- Hidden files/Private modules ----\n \"[._]*\",\n )\n \"\"\"Reserved top-level module names\"\"\"\n\n\ndef _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:\n nested = PEP420PackageFinder.find(pkg_dir)\n return [root_pkg] + [\".\".join((root_pkg, n)) for n in nested]\n\n\nclass ConfigDiscovery:\n \"\"\"Fill-in metadata and options that can be automatically derived\n (from other metadata/options, the file system or conventions)\n \"\"\"\n\n def __init__(self, distribution: \"Distribution\"):\n self.dist = distribution\n self._called = False\n self._disabled = False\n self._skip_ext_modules = False\n\n def _disable(self):\n \"\"\"Internal API to disable automatic discovery\"\"\"\n self._disabled = True\n\n def _ignore_ext_modules(self):\n \"\"\"Internal API to disregard ext_modules.\n\n Normally auto-discovery would not be triggered if ``ext_modules`` are set\n (this is done for backward compatibility with existing packages relying on\n ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function\n to ignore given ``ext_modules`` and proceed with the auto-discovery if\n ``packages`` and ``py_modules`` are not given (e.g. 
when using pyproject.toml\n metadata).\n \"\"\"\n self._skip_ext_modules = True\n\n @property\n def _root_dir(self) -> _Path:\n # The best is to wait until `src_root` is set in dist, before using _root_dir.\n return self.dist.src_root or os.curdir\n\n @property\n def _package_dir(self) -> Dict[str, str]:\n if self.dist.package_dir is None:\n return {}\n return self.dist.package_dir\n\n def __call__(self, force=False, name=True, ignore_ext_modules=False):\n \"\"\"Automatically discover missing configuration fields\n and modifies the given ``distribution`` object in-place.\n\n Note that by default this will only have an effect the first time the\n ``ConfigDiscovery`` object is called.\n\n To repeatedly invoke automatic discovery (e.g. when the project\n directory changes), please use ``force=True`` (or create a new\n ``ConfigDiscovery`` instance).\n \"\"\"\n if force is False and (self._called or self._disabled):\n # Avoid overhead of multiple calls\n return\n\n self._analyse_package_layout(ignore_ext_modules)\n if name:\n self.analyse_name() # depends on ``packages`` and ``py_modules``\n\n self._called = True\n\n def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:\n \"\"\"``True`` if the user has specified some form of package/module listing\"\"\"\n ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules\n ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)\n return (\n self.dist.packages is not None\n or self.dist.py_modules is not None\n or ext_modules\n or hasattr(self.dist, \"configuration\")\n and self.dist.configuration\n # ^ Some projects use numpy.distutils.misc_util.Configuration\n )\n\n def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:\n if self._explicitly_specified(ignore_ext_modules):\n # For backward compatibility, just try to find modules/packages\n # when nothing is given\n return True\n\n log.debug(\n \"No `packages` or `py_modules` configuration, performing \"\n \"automatic discovery.\"\n )\n\n return (\n self._analyse_explicit_layout()\n or self._analyse_src_layout()\n # flat-layout is the trickiest for discovery so it should be last\n or self._analyse_flat_layout()\n )\n\n def _analyse_explicit_layout(self) -> bool:\n \"\"\"The user can explicitly give a package layout via ``package_dir``\"\"\"\n package_dir = self._package_dir.copy() # don't modify directly\n package_dir.pop(\"\", None) # This falls under the \"src-layout\" umbrella\n root_dir = self._root_dir\n\n if not package_dir:\n return False\n\n log.debug(f\"`explicit-layout` detected -- analysing {package_dir}\")\n pkgs = chain_iter(\n _find_packages_within(pkg, os.path.join(root_dir, parent_dir))\n for pkg, parent_dir in package_dir.items()\n )\n self.dist.packages = list(pkgs)\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n return True\n\n def _analyse_src_layout(self) -> bool:\n \"\"\"Try to find all packages or modules under the ``src`` directory\n (or anything pointed by ``package_dir[\"\"]``).\n\n The \"src-layout\" is relatively safe for automatic discovery.\n We assume that everything within is meant to be included in the\n distribution.\n\n If ``package_dir[\"\"]`` is not given, but the ``src`` directory exists,\n this function will set ``package_dir[\"\"] = \"src\"``.\n \"\"\"\n package_dir = self._package_dir\n src_dir = os.path.join(self._root_dir, package_dir.get(\"\", \"src\"))\n if not os.path.isdir(src_dir):\n return False\n\n log.debug(f\"`src-layout` detected -- analysing {src_dir}\")\n package_dir.setdefault(\"\", 
os.path.basename(src_dir))\n self.dist.package_dir = package_dir # persist eventual modifications\n self.dist.packages = PEP420PackageFinder.find(src_dir)\n self.dist.py_modules = ModuleFinder.find(src_dir)\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n log.debug(f\"discovered py_modules -- {self.dist.py_modules}\")\n return True\n\n def _analyse_flat_layout(self) -> bool:\n \"\"\"Try to find all packages and modules under the project root.\n\n Since the ``flat-layout`` is more dangerous in terms of accidentally including\n extra files/directories, this function is more conservative and will raise an\n error if multiple packages or modules are found.\n\n This assumes that multi-package dists are uncommon and refuse to support that\n use case in order to be able to prevent unintended errors.\n \"\"\"\n log.debug(f\"`flat-layout` detected -- analysing {self._root_dir}\")\n return self._analyse_flat_packages() or self._analyse_flat_modules()\n\n def _analyse_flat_packages(self) -> bool:\n self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)\n top_level = remove_nested_packages(remove_stubs(self.dist.packages))\n log.debug(f\"discovered packages -- {self.dist.packages}\")\n self._ensure_no_accidental_inclusion(top_level, \"packages\")\n return bool(top_level)\n\n def _analyse_flat_modules(self) -> bool:\n self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)\n log.debug(f\"discovered py_modules -- {self.dist.py_modules}\")\n self._ensure_no_accidental_inclusion(self.dist.py_modules, \"modules\")\n return bool(self.dist.py_modules)\n\n def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):\n if len(detected) > 1:\n from inspect import cleandoc\n\n from setuptools.errors import PackageDiscoveryError\n\n msg = f\"\"\"Multiple top-level {kind} discovered in a flat-layout: {detected}.\n\n To avoid accidental inclusion of unwanted files or directories,\n setuptools will not proceed with this build.\n\n If you are trying to create a single distribution with multiple {kind}\n on purpose, you should not rely on automatic discovery.\n Instead, consider the following options:\n\n 1. set up custom discovery (`find` directive with `include` or `exclude`)\n 2. use a `src-layout`\n 3. 
explicitly set `py_modules` or `packages` with a list of names\n\n To find more information, look for \"package discovery\" on setuptools docs.\n \"\"\"\n raise PackageDiscoveryError(cleandoc(msg))\n\n def analyse_name(self):\n \"\"\"The packages/modules are the essential contribution of the author.\n Therefore the name of the distribution can be derived from them.\n \"\"\"\n if self.dist.metadata.name or self.dist.name:\n # get_name() is not reliable (can return \"UNKNOWN\")\n return None\n\n log.debug(\"No `name` configuration, performing automatic discovery\")\n\n name = (\n self._find_name_single_package_or_module()\n or self._find_name_from_packages()\n )\n if name:\n self.dist.metadata.name = name\n\n def _find_name_single_package_or_module(self) -> Optional[str]:\n \"\"\"Exactly one module or package\"\"\"\n for field in ('packages', 'py_modules'):\n items = getattr(self.dist, field, None) or []\n if items and len(items) == 1:\n log.debug(f\"Single module/package detected, name: {items[0]}\")\n return items[0]\n\n return None\n\n def _find_name_from_packages(self) -> Optional[str]:\n \"\"\"Try to find the root package that is not a PEP 420 namespace\"\"\"\n if not self.dist.packages:\n return None\n\n packages = remove_stubs(sorted(self.dist.packages, key=len))\n package_dir = self.dist.package_dir or {}\n\n parent_pkg = find_parent_package(packages, package_dir, self._root_dir)\n if parent_pkg:\n log.debug(f\"Common parent package detected, name: {parent_pkg}\")\n return parent_pkg\n\n log.warn(\"No parent package detected, impossible to derive `name`\")\n return None\n\n\ndef remove_nested_packages(packages: List[str]) -> List[str]:\n \"\"\"Remove nested packages from a list of packages.\n\n >>> remove_nested_packages([\"a\", \"a.b1\", \"a.b2\", \"a.b1.c1\"])\n ['a']\n >>> remove_nested_packages([\"a\", \"b\", \"c.d\", \"c.d.e.f\", \"g.h\", \"a.a1\"])\n ['a', 'b', 'c.d', 'g.h']\n \"\"\"\n pkgs = sorted(packages, key=len)\n top_level = pkgs[:]\n size = len(pkgs)\n for i, name in enumerate(reversed(pkgs)):\n if any(name.startswith(f\"{other}.\") for other in top_level):\n top_level.pop(size - i - 1)\n\n return top_level\n\n\ndef remove_stubs(packages: List[str]) -> List[str]:\n \"\"\"Remove type stubs (:pep:`561`) from a list of packages.\n\n >>> remove_stubs([\"a\", \"a.b\", \"a-stubs\", \"a-stubs.b.c\", \"b\", \"c-stubs\"])\n ['a', 'a.b', 'b']\n \"\"\"\n return [pkg for pkg in packages if not pkg.split(\".\")[0].endswith(\"-stubs\")]\n\n\ndef find_parent_package(\n packages: List[str], package_dir: Mapping[str, str], root_dir: _Path\n) -> Optional[str]:\n \"\"\"Find the parent package that is not a namespace.\"\"\"\n packages = sorted(packages, key=len)\n common_ancestors = []\n for i, name in enumerate(packages):\n if not all(n.startswith(f\"{name}.\") for n in packages[i + 1 :]):\n # Since packages are sorted by length, this condition is able\n # to find a list of all common ancestors.\n # When there is divergence (e.g. 
multiple root packages)\n # the list will be empty\n break\n common_ancestors.append(name)\n\n for name in common_ancestors:\n pkg_path = find_package_path(name, package_dir, root_dir)\n init = os.path.join(pkg_path, \"__init__.py\")\n if os.path.isfile(init):\n return name\n\n return None\n\n\ndef find_package_path(\n name: str, package_dir: Mapping[str, str], root_dir: _Path\n) -> str:\n \"\"\"Given a package name, return the path where it should be found on\n disk, considering the ``package_dir`` option.\n\n >>> path = find_package_path(\"my.pkg\", {\"\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested/my/pkg'\n\n >>> path = find_package_path(\"my.pkg\", {\"my\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested/pkg'\n\n >>> path = find_package_path(\"my.pkg\", {\"my.pkg\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './root/is/nested'\n\n >>> path = find_package_path(\"other.pkg\", {\"my.pkg\": \"root/is/nested\"}, \".\")\n >>> path.replace(os.sep, \"/\")\n './other/pkg'\n \"\"\"\n parts = name.split(\".\")\n for i in range(len(parts), 0, -1):\n # Look backwards, the most specific package_dir first\n partial_name = \".\".join(parts[:i])\n if partial_name in package_dir:\n parent = package_dir[partial_name]\n return os.path.join(root_dir, parent, *parts[i:])\n\n parent = package_dir.get(\"\") or \"\"\n return os.path.join(root_dir, *parent.split(\"/\"), *parts)\n\n\ndef construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:\n parent_pkgs = remove_nested_packages(packages)\n prefix = Path(package_path).parts\n return {pkg: \"/\".join([*prefix, *pkg.split(\".\")]) for pkg in parent_pkgs}\n",
"path": "setuptools/discovery.py"
}
] | diff --git a/newsfragments/4007.misc.rst b/newsfragments/4007.misc.rst
new file mode 100644
index 0000000000..5d57bf9d47
--- /dev/null
+++ b/newsfragments/4007.misc.rst
@@ -0,0 +1 @@
+Ignore ``newsfragments`` directories in the source tree when performing automatic discovery of packages.
diff --git a/setup.cfg b/setup.cfg
index d201e9d22c..f8ca0998b7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -38,6 +38,8 @@ exclude =
*.tests.*
tools*
debian*
+ launcher*
+ newsfragments*
[options.extras_require]
testing =
diff --git a/setuptools/discovery.py b/setuptools/discovery.py
index ec817ab45f..25962863b9 100644
--- a/setuptools/discovery.py
+++ b/setuptools/discovery.py
@@ -219,6 +219,7 @@ class FlatLayoutPackageFinder(PEP420PackageFinder):
"documentation",
"manpages",
"news",
+ "newsfragments",
"changelog",
"test",
"tests",
|
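For context on the setuptools change above, a minimal sketch of how the new exclusion surfaces during flat-layout auto-discovery (the directory layout is illustrative; `FlatLayoutPackageFinder` is the class patched in `setuptools/discovery.py`):

```python
from setuptools.discovery import FlatLayoutPackageFinder

# With `newsfragments` on the exclusion list, a flat-layout project that
# keeps towncrier fragments in ./newsfragments no longer trips the
# "Multiple top-level packages discovered" error during auto-discovery.
packages = FlatLayoutPackageFinder.find(".")
print(packages)  # e.g. ['mypkg'] -- 'newsfragments' is filtered out
```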
freedomofpress__securedrop-6492 | Initial messages containing non-ascii characters fail if codename filtering is enabled.
## Description
Codename filtering was introduced in 2.3.0, allowing admins to block initial submissions containing only the user's codename, as codenames should not be shared with journalists. The filter uses the `compare_digest()` function to ensure a constant-time comparison, but that function throws a `TypeError` if either of the strings being compared contains non-ASCII characters.
## Steps to Reproduce
- start up `make dev` on 2.4.0
- visit the Journalist Interface (JI) and enable codename filtering under Admin > Instance Config
- visit the Source Interface (SI), create a new source, and submit an initial message containing non-ASCII characters, e.g. `Hallo! ö, ü, ä, or ß`
## Expected Behavior
- Message is submitted
## Actual Behavior
- A 500 error and, in dev, a stack trace due to the `TypeError`
## Comments
`compare_digest()` only accepts bytes-like objects or ASCII-only `str` arguments, so encoding both sides to UTF-8 bytes before the comparison avoids the `TypeError` while keeping the constant-time guarantee.
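A runnable sketch of the failure and of the fix applied in the diff below (the sample message and codename values are illustrative):

```python
from hmac import compare_digest

message = "Hallo! ö, ü, ä, or ß"
codename = "illustrative codename"  # hypothetical value

try:
    compare_digest(message.strip(), codename)  # the pre-fix comparison
except TypeError as exc:
    print(exc)  # comparing strings with non-ASCII characters is not supported

# Encoding both sides to bytes keeps the constant-time property and
# accepts arbitrary Unicode input:
print(compare_digest(message.strip().encode("utf-8"), codename.encode("utf-8")))  # False
```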
| [
{
"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip(), codename)\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. 
Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n",
"path": "securedrop/source_app/utils.py"
}
] | [
{
"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip().encode(\"utf-8\"), codename.encode(\"utf-8\"))\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. 
Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n",
"path": "securedrop/source_app/utils.py"
}
] | diff --git a/securedrop/source_app/utils.py b/securedrop/source_app/utils.py
index 5dbcb5d675..589f416c91 100644
--- a/securedrop/source_app/utils.py
+++ b/securedrop/source_app/utils.py
@@ -23,7 +23,7 @@ def codename_detected(message: str, codename: str) -> bool:
"""
message = message.strip()
- return compare_digest(message.strip(), codename)
+ return compare_digest(message.strip().encode("utf-8"), codename.encode("utf-8"))
def flash_msg(
diff --git a/securedrop/tests/test_source.py b/securedrop/tests/test_source.py
index 6a57d37489..53efdbd1a9 100644
--- a/securedrop/tests/test_source.py
+++ b/securedrop/tests/test_source.py
@@ -356,7 +356,10 @@ def _dummy_submission(app):
"""
return app.post(
url_for("main.submit"),
- data=dict(msg="Pay no attention to the man behind the curtain.", fh=(BytesIO(b""), "")),
+ data=dict(
+ msg="Hallo! ö, ü, ä, or ß...Pay no attention to the man behind the curtain.",
+ fh=(BytesIO(b""), ""),
+ ),
follow_redirects=True,
)
|
automl__auto-sklearn-1361 | Check if test requirement `flaky` can be removed
We currently have a test dependency, [flaky](https://pypi.org/project/flaky/), used only to annotate the test `KernelPCAComponentTest::test_default_configuration_classify()`. This is the only place it is used.
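Since the annotated test already loops over five attempts internally (see the diff below), the marker can likely be dropped without a substitute; a runnable sketch of that manual-retry pattern, with all names illustrative:

```python
import random
import unittest


def nondeterministic_step() -> bool:
    """Stand-in for the flaky classification check (illustrative only)."""
    return random.random() > 0.3


class RetryExample(unittest.TestCase):
    def test_without_flaky_plugin(self):
        # Retry manually instead of relying on the `flaky` plugin's
        # rerun-on-failure behaviour.
        for attempt in range(5):
            if nondeterministic_step():
                return
        self.fail("step failed on all 5 attempts")


if __name__ == "__main__":
    unittest.main()
```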
| [
{
"content": "# -*- encoding: utf-8 -*-\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\n\n# Check if Auto-sklearn *could* run on the given system\nif os.name != 'posix':\n raise ValueError(\n 'Detected unsupported operating system: %s. Please check '\n 'the compability information of auto-sklearn: https://automl.github.io'\n '/auto-sklearn/master/installation.html#windows-osx-compatibility' %\n sys.platform\n )\n\nif sys.version_info < (3, 7):\n raise ValueError(\n 'Unsupported Python version %d.%d.%d found. Auto-sklearn requires Python '\n '3.7 or higher.' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n )\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(HERE, 'requirements.txt')) as fp:\n install_reqs = [r.rstrip() for r in fp.readlines()\n if not r.startswith('#') and not r.startswith('git+')]\n\nextras_reqs={\n \"test\": [\n \"pytest>=4.6\",\n \"mypy\",\n \"pytest-xdist\",\n \"pytest-timeout\",\n \"flaky\",\n \"openml\",\n \"pre-commit\",\n \"pytest-cov\",\n ],\n \"examples\": [\n \"matplotlib\",\n \"jupyter\",\n \"notebook\",\n \"seaborn\",\n ],\n \"docs\": [\n \"sphinx<4.3\",\n \"sphinx-gallery\",\n \"sphinx_bootstrap_theme\",\n \"numpydoc\",\n \"sphinx_toolbox\",\n \"docutils==0.16\"\n ],\n}\n\nwith open(os.path.join(HERE, 'autosklearn', '__version__.py')) as fh:\n version = fh.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nwith open(os.path.join(HERE, 'README.md')) as fh:\n long_description = fh.read()\n\n\nsetup(\n name='auto-sklearn',\n author='Matthias Feurer',\n author_email='[email protected]',\n description='Automated machine learning.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version=version,\n packages=find_packages(exclude=['test', 'scripts', 'examples']),\n extras_require=extras_reqs,\n install_requires=install_reqs,\n include_package_data=True,\n license='BSD3',\n platforms=['Linux'],\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires='>=3.7',\n url='https://automl.github.io/auto-sklearn',\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# -*- encoding: utf-8 -*-\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\n\n# Check if Auto-sklearn *could* run on the given system\nif os.name != 'posix':\n raise ValueError(\n 'Detected unsupported operating system: %s. Please check '\n 'the compability information of auto-sklearn: https://automl.github.io'\n '/auto-sklearn/master/installation.html#windows-osx-compatibility' %\n sys.platform\n )\n\nif sys.version_info < (3, 7):\n raise ValueError(\n 'Unsupported Python version %d.%d.%d found. Auto-sklearn requires Python '\n '3.7 or higher.' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro)\n )\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(HERE, 'requirements.txt')) as fp:\n install_reqs = [r.rstrip() for r in fp.readlines()\n if not r.startswith('#') and not r.startswith('git+')]\n\nextras_reqs={\n \"test\": [\n \"pytest>=4.6\",\n \"mypy\",\n \"pytest-xdist\",\n \"pytest-timeout\",\n \"openml\",\n \"pre-commit\",\n \"pytest-cov\",\n ],\n \"examples\": [\n \"matplotlib\",\n \"jupyter\",\n \"notebook\",\n \"seaborn\",\n ],\n \"docs\": [\n \"sphinx<4.3\",\n \"sphinx-gallery\",\n \"sphinx_bootstrap_theme\",\n \"numpydoc\",\n \"sphinx_toolbox\",\n \"docutils==0.16\"\n ],\n}\n\nwith open(os.path.join(HERE, 'autosklearn', '__version__.py')) as fh:\n version = fh.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nwith open(os.path.join(HERE, 'README.md')) as fh:\n long_description = fh.read()\n\n\nsetup(\n name='auto-sklearn',\n author='Matthias Feurer',\n author_email='[email protected]',\n description='Automated machine learning.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version=version,\n packages=find_packages(exclude=['test', 'scripts', 'examples']),\n extras_require=extras_reqs,\n install_requires=install_reqs,\n include_package_data=True,\n license='BSD3',\n platforms=['Linux'],\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires='>=3.7',\n url='https://automl.github.io/auto-sklearn',\n)\n",
"path": "setup.py"
}
] | diff --git a/Dockerfile b/Dockerfile
index d9f73b2c83..e2a74c04f6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -32,7 +32,7 @@ ADD . /auto-sklearn/
# Upgrade pip then install dependencies
RUN pip3 install --upgrade pip
-RUN pip3 install pytest==4.6.* pep8 codecov pytest-cov flake8 flaky openml
+RUN pip3 install pytest==4.6.* pep8 codecov pytest-cov flake8 openml
RUN cat /auto-sklearn/requirements.txt | xargs -n 1 -L 1 pip3 install
RUN pip3 install jupyter
diff --git a/setup.py b/setup.py
index ac284efcf6..6107e60321 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,6 @@
"mypy",
"pytest-xdist",
"pytest-timeout",
- "flaky",
"openml",
"pre-commit",
"pytest-cov",
diff --git a/test/test_pipeline/components/feature_preprocessing/test_kernel_pca.py b/test/test_pipeline/components/feature_preprocessing/test_kernel_pca.py
index 839b0df947..19b1368a49 100644
--- a/test/test_pipeline/components/feature_preprocessing/test_kernel_pca.py
+++ b/test/test_pipeline/components/feature_preprocessing/test_kernel_pca.py
@@ -1,7 +1,5 @@
import unittest
-import pytest
-
from sklearn.linear_model import RidgeClassifier
from autosklearn.pipeline.components.feature_preprocessing.kernel_pca import \
KernelPCA
@@ -25,7 +23,6 @@ def test_default_configuration_sparse(self):
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
- @pytest.mark.flaky()
def test_default_configuration_classify(self):
for i in range(5):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
|
python__python-docs-es-1712 | Translate 'library/base64.po'
This file needs to be 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/library/base64.html once translated.
Meanwhile, the English version is shown.
Current stats for `library/base64.po`:
* Fuzzy: 4
* Percent translated: 90.9%
* Entries: 50 / 55
* Untranslated: 5
Please comment here if you want this file to be assigned to you; a member will assign it as soon as possible so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
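The record's `scripts/translate.py` (below) machine-translates `.po` entries while protecting Sphinx roles behind placeholder tokens; a runnable sketch of that protect/translate/restore round-trip, with the translation step stubbed out (the `:rfc:` pattern mirrors the entry added in the diff, and the `XASDF` token prefix follows the script):

```python
import re

# Protect :rfc:`...` roles behind opaque tokens before machine translation.
pattern = re.compile(r":rfc:`[^`]+`")

text = "See :rfc:`4648` for details."
placeholders = {}
for i, match in enumerate(pattern.findall(text)):
    token = f"XASDF{str(i).zfill(2)}"
    placeholders[token] = match
    text = text.replace(match, token)

# Stand-in for the GoogleTranslator(source="en", target="es") call:
translated = text.replace("See", "Vea").replace("for details", "para más detalles")

# Restore the protected roles after translation.
for token, original in placeholders.items():
    translated = translated.replace(token, original)
print(translated)  # Vea :rfc:`4648` para más detalles.
```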
| [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*.+\\*\\*\", # bold text between **\n \"\\*.+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*.+\\*\\*\", # bold text between **\n \"\\*.+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | diff --git a/library/base64.po b/library/base64.po
index 1a27805120..f3ee62a0e7 100644
--- a/library/base64.po
+++ b/library/base64.po
@@ -13,12 +13,12 @@ msgstr ""
"POT-Creation-Date: 2021-10-16 21:42+0200\n"
"PO-Revision-Date: 2020-06-29 21:32+0200\n"
"Last-Translator: Cristián Maureira-Fredes <[email protected]>\n"
-"Language: es\n"
"Language-Team: python-doc-es\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Language: es\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"Generated-By: Babel 2.9.1\n"
#: ../Doc/library/base64.rst:2
@@ -31,7 +31,6 @@ msgid "**Source code:** :source:`Lib/base64.py`"
msgstr "**Código fuente:** :source:`Lib/base64.py`"
#: ../Doc/library/base64.rst:16
-#, fuzzy
msgid ""
"This module provides functions for encoding binary data to printable ASCII "
"characters and decoding such encodings back to binary data. It provides "
@@ -42,25 +41,24 @@ msgstr ""
"Este módulo proporciona funciones para codificar datos binarios en "
"caracteres ASCII imprimibles y decodificar dichas codificaciones en datos "
"binarios. Proporciona funciones de codificación y decodificación para las "
-"codificaciones especificadas en :rfc:`3548`, que define los algoritmos "
+"codificaciones especificadas en :rfc:`4648`, que define los algoritmos "
"Base16, Base32 y Base64, y para las codificaciones estándar de facto Ascii85 "
"y Base85."
#: ../Doc/library/base64.rst:22
-#, fuzzy
msgid ""
"The :rfc:`4648` encodings are suitable for encoding binary data so that it "
"can be safely sent by email, used as parts of URLs, or included as part of "
"an HTTP POST request. The encoding algorithm is not the same as the :"
"program:`uuencode` program."
msgstr ""
-"Las codificaciones :rfc:`3548` son adecuadas para codificar datos binarios "
+"Las codificaciones :rfc:`4648` son adecuadas para codificar datos binarios "
"para que puedan enviarse de forma segura por correo electrónico, usarse como "
-"partes de URL o incluirse como parte de una solicitud HTTP POST. El "
-"algoritmo de codificación no es el mismo que el programa :program:`uuencode`."
+"parte de las URL o incluirse como parte de una solicitud HTTP POST. El "
+"algoritmo de codificación no es el mismo que el del programa :program:"
+"`uuencode`."
#: ../Doc/library/base64.rst:27
-#, fuzzy
msgid ""
"There are two interfaces provided by this module. The modern interface "
"supports encoding :term:`bytes-like objects <bytes-like object>` to ASCII :"
@@ -70,10 +68,10 @@ msgid ""
msgstr ""
"Hay dos interfaces proporcionadas por este módulo. La interfaz moderna "
"admite la codificación de :term:`objetos similares a bytes <bytes-like "
-"object>` a ASCII :class:`bytes`, y decodificación :term:`objetos similares a "
-"bytes <bytes-like object>` o cadenas de caracteres que contienen ASCII a :"
-"class:`bytes`. Ambos alfabetos de base 64 definidos en :rfc:`3548` (normal y "
-"seguro para URL y sistema de archivos) son compatibles."
+"object>` a ASCII :class:`bytes` y la decodificación de :term:`objetos "
+"similares a bytes <bytes-like object>` o cadenas que contienen ASCII a :"
+"class:`bytes`. Se admiten los dos alfabetos base 64 definidos en :rfc:`4648` "
+"(normal y seguro para URL y sistema de archivos)."
#: ../Doc/library/base64.rst:33
msgid ""
@@ -246,7 +244,6 @@ msgstr ""
"es ``Falso``."
#: ../Doc/library/base64.rst:127
-#, fuzzy
msgid ""
":rfc:`4648` allows for optional mapping of the digit 0 (zero) to the letter "
"O (oh), and for optional mapping of the digit 1 (one) to either the letter I "
@@ -256,13 +253,13 @@ msgid ""
"purposes the default is ``None``, so that 0 and 1 are not allowed in the "
"input."
msgstr ""
-":rfc:`3548` permite el mapeo opcional del dígito 0 (cero) a la letra O (oh), "
-"y el mapeo opcional del dígito 1 (uno) a la letra I (*eye*) o la letra L "
-"(el) . El argumento opcional *map01* cuando no es ``None``, especifica a qué "
-"letra se debe asignar el dígito 1 (cuando *map01* no es ``None``, el dígito "
-"0 siempre se asigna a la letra O). Por motivos de seguridad, el valor "
-"predeterminado es ``None``, por lo que 0 y 1 no están permitidos en la "
-"entrada."
+":rfc:`4648` permite la asignación opcional del dígito 0 (cero) a la letra O "
+"(oh), y la asignación opcional del dígito 1 (uno) a la letra I (ojo) o la "
+"letra L (el) . El argumento opcional *map01* cuando no es ``None``, "
+"especifica la letra a la cual el dígito 1 debería mapearse(cuando *map01* no "
+"es ``None``, el dígito 0 siempre se asigna a la letra O). Por motivos de "
+"seguridad, el valor predeterminado es ``None``, por lo que 0 y 1 no están "
+"permitidos en la entrada."
#: ../Doc/library/base64.rst:134 ../Doc/library/base64.rst:175
msgid ""
@@ -277,12 +274,16 @@ msgid ""
"Similar to :func:`b32encode` but uses the Extended Hex Alphabet, as defined "
"in :rfc:`4648`."
msgstr ""
+"Similar a :func:`b32encode` pero usa el Alfabeto Hexagonal Extendido, como "
+"se define en :rfc:`4648`."
#: ../Doc/library/base64.rst:149
msgid ""
"Similar to :func:`b32decode` but uses the Extended Hex Alphabet, as defined "
"in :rfc:`4648`."
msgstr ""
+"Similar a :func:`b32decode` pero usa el Alfabeto Hexagonal Extendido, como "
+"se define en :rfc:`4648`."
#: ../Doc/library/base64.rst:152
msgid ""
@@ -291,6 +292,10 @@ msgid ""
"these characters are included in the Extended Hex Alphabet and are not "
"interchangeable."
msgstr ""
+"Esta versión no permite el dígito 0 (cero) a la letra O (oh) y el dígito 1 "
+"(uno) a las asignaciones de la letra I (ojo) o la letra L (el), todos estos "
+"caracteres están incluidos en el Alfabeto Hexagonal Extendido y no son "
+"intercambiables."
#: ../Doc/library/base64.rst:162
msgid ""
@@ -482,7 +487,7 @@ msgstr "Un ejemplo de uso del módulo:"
#: ../Doc/library/base64.rst:293
msgid "Security Considerations"
-msgstr ""
+msgstr "Consideraciones de Seguridad"
#: ../Doc/library/base64.rst:295
msgid ""
@@ -490,6 +495,9 @@ msgid ""
"it's recommended to review the security section for any code deployed to "
"production."
msgstr ""
+"Se agregó una nueva sección de consideraciones de seguridad a :rfc:`4648` "
+"(sección 12); se recomienda revisar la sección de seguridad para cualquier "
+"código implementado en producción."
#: ../Doc/library/base64.rst:301
msgid "Module :mod:`binascii`"
diff --git a/scripts/translate.py b/scripts/translate.py
index 7b9b131374..9a589214ef 100644
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -42,6 +42,7 @@
":program:`[^`]+`",
":keyword:`[^`]+`",
":RFC:`[^`]+`",
+ ":rfc:`[^`]+`",
":doc:`[^`]+`",
"``[^`]+``",
"`[^`]+`__",
|
holoviz__holoviews-5924 | Unpin matplotlib 3.8
In https://github.com/holoviz/holoviews/pull/5895 I pinned matplotlib to `<3.8` because 3.8 broke tests; this pin should be removed before the next release.
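Unpinning means the test suite must accommodate matplotlib 3.8's behaviour changes (e.g. `QuadMesh.get_array()` keeping its 2-D shape, as the test updates in the diff below show); a minimal sketch of version-gated expectations, with the sample array illustrative:

```python
import matplotlib
import numpy as np
from packaging.version import Version

mpl38 = Version(matplotlib.__version__) >= Version("3.8")

arr = np.array([[0, 1, 2], [3, 4, 5]])

# Pre-3.8 mesh arrays came back flattened; 3.8 preserves the 2-D shape.
expected = arr if mpl38 else arr.flatten()
print(expected.shape)
```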
| [
{
"content": "#!/usr/bin/env python\n\nimport json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\nsetup_args = {}\ninstall_requires = [\n \"param >=1.12.0,<3.0\",\n \"numpy >=1.0\",\n \"pyviz_comms >=0.7.4\",\n \"panel >=1.0\",\n \"colorcet\",\n \"packaging\",\n \"pandas >=0.20.0\",\n]\n\nextras_require = {}\n\nextras_require['lint'] = [\n 'ruff',\n 'pre-commit',\n]\n\n# Test requirements\nextras_require['tests_core'] = [\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'flaky',\n 'matplotlib >=3, <3.8', # 3.8 breaks tests\n 'nbconvert',\n 'bokeh >=3.1',\n 'pillow',\n 'plotly >=4.0',\n 'ipython >=5.4.0',\n 'contourpy',\n]\n\n# Optional tests dependencies, i.e. one should be able\n# to run and pass the test suite without installing any\n# of those.\nextras_require['tests'] = extras_require['tests_core'] + [\n 'dask',\n 'ibis-framework', # Mapped to ibis-sqlite in setup.cfg for conda\n 'xarray >=0.10.4',\n 'networkx',\n 'shapely',\n 'ffmpeg',\n 'cftime',\n 'scipy',\n 'selenium',\n 'spatialpandas',\n 'datashader >=0.11.1',\n 'dash >=1.16',\n]\n\nextras_require['tests_ci'] = [\n 'codecov',\n \"pytest-github-actions-annotate-failures\",\n]\n\nextras_require['tests_gpu'] = extras_require['tests'] + [\n 'cudf',\n]\n\nextras_require['tests_nb'] = ['nbval']\nextras_require['ui'] = ['playwright', 'pytest-playwright']\n\n# Notebook dependencies\nextras_require[\"notebook\"] = [\"ipython >=5.4.0\", \"notebook\"]\n\n# IPython Notebook + pandas + matplotlib + bokeh\nextras_require[\"recommended\"] = extras_require[\"notebook\"] + [\n \"matplotlib >=3\",\n \"bokeh >=3.1\",\n]\n\n# Requirements to run all examples\nextras_require[\"examples\"] = extras_require[\"recommended\"] + [\n \"networkx\",\n \"pillow\",\n \"xarray >=0.10.4\",\n \"plotly >=4.0\",\n 'dash >=1.16',\n \"streamz >=0.5.0\",\n \"ffmpeg\",\n \"cftime\",\n \"netcdf4\",\n \"dask\",\n \"scipy\",\n \"shapely\",\n \"scikit-image\",\n \"pyarrow\",\n \"pooch\",\n \"datashader >=0.11.1\",\n]\n\n\nextras_require[\"examples_tests\"] = extras_require[\"examples\"] + extras_require['tests_nb']\n\n# Extra third-party libraries\nextras_require[\"extras\"] = extras_require[\"examples\"] + [\n \"pscript ==0.7.1\",\n]\n\n# Not used in tox.ini or elsewhere, kept for backwards compatibility.\nextras_require[\"unit_tests\"] = extras_require[\"examples\"] + extras_require[\"tests\"] + extras_require['lint']\n\nextras_require['doc'] = extras_require['examples'] + [\n 'nbsite >=0.8.2,<0.9.0',\n 'mpl_sample_data >=3.1.3',\n 'pscript',\n 'graphviz',\n 'bokeh >=3.1',\n 'pooch',\n 'selenium',\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextras_require[\"build\"] = [\n \"param >=1.7.0\",\n \"setuptools >=30.3.0\",\n \"pyct >=0.4.4\",\n]\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, \".version\")\n try:\n from param import version\n except ImportError:\n version = None\n if version is not None:\n return version.Version.setup_version(\n basepath, reponame, archive_commit=\"$Format:%h$\"\n )\n else:\n print(\n \"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. 
If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\"\n )\n return json.load(open(version_file_path))[\"version_string\"]\n\n\nsetup_args.update(\n dict(\n name=\"holoviews\",\n version=get_setup_version(\"holoviews\"),\n python_requires=\">=3.9\",\n install_requires=install_requires,\n extras_require=extras_require,\n description=\"Stop plotting your data - annotate your data and let it visualize itself.\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Jean-Luc Stevens and Philipp Rudiger\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz Developers\",\n maintainer_email=\"[email protected]\",\n platforms=[\"Windows\", \"Mac OS X\", \"Linux\"],\n license=\"BSD\",\n url=\"https://www.holoviews.org\",\n project_urls={\n \"Source\": \"https://github.com/holoviz/holoviews\",\n },\n entry_points={\"console_scripts\": [\"holoviews = holoviews.util.command:main\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Framework :: Matplotlib\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\",\n ],\n )\n)\n\n\nif __name__ == \"__main__\":\n example_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"holoviews/examples\"\n )\n\n if \"develop\" not in sys.argv and \"egg_info\" not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n if \"install\" in sys.argv:\n header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n bars = \"=\" * len(header)\n\n extras = \"\\n\".join(\"holoviews[%s]\" % e for e in setup_args[\"extras_require\"])\n\n print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n\n print(\"\\nHoloViews supports the following installation types:\\n\")\n print(\"%s\\n\" % extras)\n print(\"Users should consider using one of these options.\\n\")\n print(\"By default only a core installation is performed and \")\n print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n print(\"For more information please visit http://holoviews.org/install.html\\n\")\n print(bars + \"\\n\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\nimport json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\nsetup_args = {}\ninstall_requires = [\n \"param >=1.12.0,<3.0\",\n \"numpy >=1.0\",\n \"pyviz_comms >=0.7.4\",\n \"panel >=1.0\",\n \"colorcet\",\n \"packaging\",\n \"pandas >=0.20.0\",\n]\n\nextras_require = {}\n\nextras_require['lint'] = [\n 'ruff',\n 'pre-commit',\n]\n\n# Test requirements\nextras_require['tests_core'] = [\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'flaky',\n 'matplotlib >=3',\n 'nbconvert',\n 'bokeh >=3.1',\n 'pillow',\n 'plotly >=4.0',\n 'ipython >=5.4.0',\n 'contourpy',\n]\n\n# Optional tests dependencies, i.e. one should be able\n# to run and pass the test suite without installing any\n# of those.\nextras_require['tests'] = extras_require['tests_core'] + [\n 'dask',\n 'ibis-framework', # Mapped to ibis-sqlite in setup.cfg for conda\n 'xarray >=0.10.4',\n 'networkx',\n 'shapely',\n 'ffmpeg',\n 'cftime',\n 'scipy',\n 'selenium',\n 'spatialpandas',\n 'datashader >=0.11.1',\n 'dash >=1.16',\n]\n\nextras_require['tests_ci'] = [\n 'codecov',\n \"pytest-github-actions-annotate-failures\",\n]\n\nextras_require['tests_gpu'] = extras_require['tests'] + [\n 'cudf',\n]\n\nextras_require['tests_nb'] = ['nbval']\nextras_require['ui'] = ['playwright', 'pytest-playwright']\n\n# Notebook dependencies\nextras_require[\"notebook\"] = [\"ipython >=5.4.0\", \"notebook\"]\n\n# IPython Notebook + pandas + matplotlib + bokeh\nextras_require[\"recommended\"] = extras_require[\"notebook\"] + [\n \"matplotlib >=3\",\n \"bokeh >=3.1\",\n]\n\n# Requirements to run all examples\nextras_require[\"examples\"] = extras_require[\"recommended\"] + [\n \"networkx\",\n \"pillow\",\n \"xarray >=0.10.4\",\n \"plotly >=4.0\",\n 'dash >=1.16',\n \"streamz >=0.5.0\",\n \"ffmpeg\",\n \"cftime\",\n \"netcdf4\",\n \"dask\",\n \"scipy\",\n \"shapely\",\n \"scikit-image\",\n \"pyarrow\",\n \"pooch\",\n \"datashader >=0.11.1\",\n]\n\n\nextras_require[\"examples_tests\"] = extras_require[\"examples\"] + extras_require['tests_nb']\n\n# Extra third-party libraries\nextras_require[\"extras\"] = extras_require[\"examples\"] + [\n \"pscript ==0.7.1\",\n]\n\n# Not used in tox.ini or elsewhere, kept for backwards compatibility.\nextras_require[\"unit_tests\"] = extras_require[\"examples\"] + extras_require[\"tests\"] + extras_require['lint']\n\nextras_require['doc'] = extras_require['examples'] + [\n 'nbsite >=0.8.2,<0.9.0',\n 'mpl_sample_data >=3.1.3',\n 'pscript',\n 'graphviz',\n 'bokeh >=3.1',\n 'pooch',\n 'selenium',\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextras_require[\"build\"] = [\n \"param >=1.7.0\",\n \"setuptools >=30.3.0\",\n \"pyct >=0.4.4\",\n]\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, \".version\")\n try:\n from param import version\n except ImportError:\n version = None\n if version is not None:\n return version.Version.setup_version(\n basepath, reponame, archive_commit=\"$Format:%h$\"\n )\n else:\n print(\n \"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. 
If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\"\n )\n return json.load(open(version_file_path))[\"version_string\"]\n\n\nsetup_args.update(\n dict(\n name=\"holoviews\",\n version=get_setup_version(\"holoviews\"),\n python_requires=\">=3.9\",\n install_requires=install_requires,\n extras_require=extras_require,\n description=\"Stop plotting your data - annotate your data and let it visualize itself.\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Jean-Luc Stevens and Philipp Rudiger\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz Developers\",\n maintainer_email=\"[email protected]\",\n platforms=[\"Windows\", \"Mac OS X\", \"Linux\"],\n license=\"BSD\",\n url=\"https://www.holoviews.org\",\n project_urls={\n \"Source\": \"https://github.com/holoviz/holoviews\",\n },\n entry_points={\"console_scripts\": [\"holoviews = holoviews.util.command:main\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Framework :: Matplotlib\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\",\n ],\n )\n)\n\n\nif __name__ == \"__main__\":\n example_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"holoviews/examples\"\n )\n\n if \"develop\" not in sys.argv and \"egg_info\" not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n if \"install\" in sys.argv:\n header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n bars = \"=\" * len(header)\n\n extras = \"\\n\".join(\"holoviews[%s]\" % e for e in setup_args[\"extras_require\"])\n\n print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n\n print(\"\\nHoloViews supports the following installation types:\\n\")\n print(\"%s\\n\" % extras)\n print(\"Users should consider using one of these options.\\n\")\n print(\"By default only a core installation is performed and \")\n print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n print(\"For more information please visit http://holoviews.org/install.html\\n\")\n print(bars + \"\\n\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n",
"path": "setup.py"
}
] | diff --git a/examples/gallery/demos/bokeh/bachelors_degrees_by_gender.ipynb b/examples/gallery/demos/bokeh/bachelors_degrees_by_gender.ipynb
index 7a682ea46c..b1463b1a63 100644
--- a/examples/gallery/demos/bokeh/bachelors_degrees_by_gender.ipynb
+++ b/examples/gallery/demos/bokeh/bachelors_degrees_by_gender.ipynb
@@ -29,17 +29,6 @@
"## Define data"
]
},
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This example uses the matplotlib sample data that conda users can fetch using:\n",
- "\n",
- "```\n",
- "conda install -c conda-forge mpl_sample_data\n",
- "```"
- ]
- },
{
"cell_type": "code",
"execution_count": null,
@@ -47,10 +36,8 @@
"outputs": [],
"source": [
"import pandas as pd\n",
- "from matplotlib.cbook import get_sample_data\n",
"\n",
- "fname = get_sample_data('percent_bachelors_degrees_women_usa.csv')\n",
- "gender_degree_data = pd.read_csv(fname)\n",
+ "gender_degree_data = pd.read_csv(\"https://datasets.holoviz.org/bachelor_women/v1/percent_bachelors_degrees_women_usa.csv\")\n",
"\n",
"title = ('Percentage of Bachelor\\'s degrees conferred to women in '\n",
" 'the U.S.A. by major (1970-2011)\\n')\n",
@@ -130,5 +117,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/examples/gallery/demos/bokeh/bars_economic.ipynb b/examples/gallery/demos/bokeh/bars_economic.ipynb
index 4c08fefe11..559dbd3ded 100644
--- a/examples/gallery/demos/bokeh/bars_economic.ipynb
+++ b/examples/gallery/demos/bokeh/bars_economic.ipynb
@@ -68,5 +68,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/examples/gallery/demos/bokeh/topographic_hillshading.ipynb b/examples/gallery/demos/bokeh/topographic_hillshading.ipynb
index 511b5c025e..bf7f972366 100644
--- a/examples/gallery/demos/bokeh/topographic_hillshading.ipynb
+++ b/examples/gallery/demos/bokeh/topographic_hillshading.ipynb
@@ -40,7 +40,7 @@
"from matplotlib.cbook import get_sample_data\n",
"from matplotlib.colors import LightSource\n",
"\n",
- "dem = np.load(get_sample_data('jacksboro_fault_dem.npz'))\n",
+ "dem = get_sample_data('jacksboro_fault_dem.npz')\n",
"z = dem['elevation']\n",
"\n",
"dx, dy = dem['dx'], dem['dy']\n",
@@ -89,5 +89,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/examples/gallery/demos/matplotlib/bachelors_degrees_by_gender.ipynb b/examples/gallery/demos/matplotlib/bachelors_degrees_by_gender.ipynb
index 129d2031d1..4277bf24db 100644
--- a/examples/gallery/demos/matplotlib/bachelors_degrees_by_gender.ipynb
+++ b/examples/gallery/demos/matplotlib/bachelors_degrees_by_gender.ipynb
@@ -37,10 +37,8 @@
"outputs": [],
"source": [
"import pandas as pd\n",
- "from matplotlib.cbook import get_sample_data\n",
"\n",
- "fname = get_sample_data('percent_bachelors_degrees_women_usa.csv')\n",
- "gender_degree_data = pd.read_csv(fname)\n",
+ "gender_degree_data = pd.read_csv(\"https://datasets.holoviz.org/bachelor_women/v1/percent_bachelors_degrees_women_usa.csv\")\n",
"\n",
"title = ('Percentage of Bachelor\\'s degrees conferred to women in '\n",
" 'the U.S.A. by major (1970-2011)\\n')\n",
@@ -123,5 +121,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/examples/gallery/demos/matplotlib/topographic_hillshading.ipynb b/examples/gallery/demos/matplotlib/topographic_hillshading.ipynb
index 92e4a3c126..3c03b418cc 100644
--- a/examples/gallery/demos/matplotlib/topographic_hillshading.ipynb
+++ b/examples/gallery/demos/matplotlib/topographic_hillshading.ipynb
@@ -42,7 +42,7 @@
"from matplotlib.cbook import get_sample_data\n",
"from matplotlib.colors import LightSource\n",
"\n",
- "dem = np.load(get_sample_data('jacksboro_fault_dem.npz'))\n",
+ "dem = get_sample_data('jacksboro_fault_dem.npz')\n",
"z = dem['elevation']\n",
"\n",
"dx, dy = dem['dx'], dem['dy']\n",
@@ -92,5 +92,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/holoviews/tests/plotting/matplotlib/test_heatmapplot.py b/holoviews/tests/plotting/matplotlib/test_heatmapplot.py
index 0b13711669..f33bc90f30 100644
--- a/holoviews/tests/plotting/matplotlib/test_heatmapplot.py
+++ b/holoviews/tests/plotting/matplotlib/test_heatmapplot.py
@@ -2,7 +2,7 @@
from holoviews.element import HeatMap, Image
-from .test_plot import TestMPLPlot, mpl_renderer
+from .test_plot import TestMPLPlot, mpl38, mpl_renderer
class TestHeatMapPlot(TestMPLPlot):
@@ -12,25 +12,35 @@ def test_heatmap_invert_axes(self):
hm = HeatMap(Image(arr)).opts(invert_axes=True)
plot = mpl_renderer.get_plot(hm)
artist = plot.handles['artist']
- self.assertEqual(artist.get_array().data, arr.T[::-1].flatten())
+ if mpl38:
+ np.testing.assert_equal(artist.get_array().data, arr.T[::-1])
+ else:
+ np.testing.assert_equal(artist.get_array().data, arr.T[::-1].flatten())
def test_heatmap_extents(self):
hmap = HeatMap([('A', 50, 1), ('B', 2, 2), ('C', 50, 1)])
plot = mpl_renderer.get_plot(hmap)
- self.assertEqual(plot.get_extents(hmap, {}), (-.5, -22, 2.5, 74))
+ assert plot.get_extents(hmap, {}) == (-.5, -22, 2.5, 74)
def test_heatmap_invert_xaxis(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(invert_xaxis=True)
plot = mpl_renderer.get_plot(hmap)
array = plot.handles['artist'].get_array()
- expected = np.array([1, np.inf, np.inf, 2])
+ if mpl38:
+ expected = np.array([[1, np.inf], [np.inf, 2]])
+ else:
+ expected = np.array([1, np.inf, np.inf, 2])
masked = np.ma.array(expected, mask=np.logical_not(np.isfinite(expected)))
- self.assertEqual(array, masked)
+ np.testing.assert_equal(array, masked)
def test_heatmap_invert_yaxis(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(invert_yaxis=True)
plot = mpl_renderer.get_plot(hmap)
array = plot.handles['artist'].get_array()
expected = np.array([1, np.inf, np.inf, 2])
+ if mpl38:
+ expected = np.array([[1, np.inf], [np.inf, 2]])
+ else:
+ expected = np.array([1, np.inf, np.inf, 2])
masked = np.ma.array(expected, mask=np.logical_not(np.isfinite(expected)))
- self.assertEqual(array, masked)
+ np.testing.assert_equal(array, masked)
diff --git a/holoviews/tests/plotting/matplotlib/test_plot.py b/holoviews/tests/plotting/matplotlib/test_plot.py
index 666e08f6f3..bd00393461 100644
--- a/holoviews/tests/plotting/matplotlib/test_plot.py
+++ b/holoviews/tests/plotting/matplotlib/test_plot.py
@@ -1,14 +1,17 @@
import matplotlib.pyplot as plt
import pyviz_comms as comms
+from packaging.version import Version
from param import concrete_descendents
from holoviews.core.options import Store
from holoviews.element.comparison import ComparisonTestCase
+from holoviews.plotting.mpl import mpl_version
from holoviews.plotting.mpl.element import ElementPlot
from .. import option_intersections
mpl_renderer = Store.renderers['matplotlib']
+mpl38 = mpl_version >= Version("3.8")
class TestPlotDefinitions(ComparisonTestCase):
diff --git a/holoviews/tests/plotting/matplotlib/test_quadmeshplot.py b/holoviews/tests/plotting/matplotlib/test_quadmeshplot.py
index 7a59868fc6..1d3096b658 100644
--- a/holoviews/tests/plotting/matplotlib/test_quadmeshplot.py
+++ b/holoviews/tests/plotting/matplotlib/test_quadmeshplot.py
@@ -2,49 +2,53 @@
from holoviews.element import Dataset, Image, QuadMesh
-from .test_plot import TestMPLPlot, mpl_renderer
+from .test_plot import TestMPLPlot, mpl38, mpl_renderer
class TestQuadMeshPlot(TestMPLPlot):
def test_quadmesh_invert_axes(self):
- arr = np.array([[0, 1, 2], [3, 4, 5]])
+ arr = np.array([[0, 1, 2], [3, 4, 5]])
qmesh = QuadMesh(Image(arr)).opts(invert_axes=True)
plot = mpl_renderer.get_plot(qmesh)
artist = plot.handles['artist']
- self.assertEqual(artist.get_array().data, arr.T[:, ::-1].flatten())
+ if mpl38:
+ np.testing.assert_equal(artist.get_array().data, arr[::-1].T)
+ else:
+ np.testing.assert_equal(artist.get_array().data, arr.T[::-1].flatten())
def test_quadmesh_nodata(self):
- arr = np.array([[0, 1, 2], [3, 4, 5]])
+ arr = np.array([[0, 1, 2], [3, 4, 5]])
qmesh = QuadMesh(Image(arr)).opts(nodata=0)
plot = mpl_renderer.get_plot(qmesh)
artist = plot.handles['artist']
- self.assertEqual(artist.get_array().data,
- np.array([3, 4, 5, np.nan, 1, 2]))
+ if mpl38:
+ expected = np.array([[3, 4, 5], [np.nan, 1, 2]])
+ else:
+ expected = np.array([3, 4, 5, np.nan, 1, 2])
+
+ np.testing.assert_equal(artist.get_array().data, expected)
def test_quadmesh_nodata_uint(self):
- arr = np.array([[0, 1, 2], [3, 4, 5]], dtype='uint32')
+ arr = np.array([[0, 1, 2], [3, 4, 5]], dtype='uint32')
qmesh = QuadMesh(Image(arr)).opts(nodata=0)
plot = mpl_renderer.get_plot(qmesh)
artist = plot.handles['artist']
- self.assertEqual(artist.get_array().data,
- np.array([3, 4, 5, np.nan, 1, 2]))
+ if mpl38:
+ expected = np.array([[3, 4, 5], [np.nan, 1, 2]])
+ else:
+ expected = np.array([3, 4, 5, np.nan, 1, 2])
+ np.testing.assert_equal(artist.get_array().data, expected)
def test_quadmesh_update_cbar(self):
xs = ys = np.linspace(0, 6, 10)
zs = np.linspace(1, 2, 5)
- XS, YS, ZS = np.meshgrid(xs, ys, zs)
+ XS, _YS, ZS = np.meshgrid(xs, ys, zs)
values = np.sin(XS) * ZS
ds = Dataset((xs, ys, zs, values.T), ['x', 'y', 'z'], 'values')
hmap = ds.to(QuadMesh).opts(colorbar=True, framewise=True)
plot = mpl_renderer.get_plot(hmap)
cbar = plot.handles['cbar']
- self.assertEqual(
- np.allclose([cbar.vmin, cbar.vmax], [-0.9989549170979283, 0.9719379013633128]),
- True
- )
+ np.testing.assert_allclose([cbar.vmin, cbar.vmax], [-0.9989549170979283, 0.9719379013633128])
plot.update(3)
- self.assertEqual(
- np.allclose([cbar.vmin, cbar.vmax], [-1.7481711049213744, 1.7008913273857975]),
- True
- )
+ np.testing.assert_allclose([cbar.vmin, cbar.vmax], [-1.7481711049213744, 1.7008913273857975])
diff --git a/setup.py b/setup.py
index 2c4370e82d..3185442f03 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
'pytest-cov',
'pytest-xdist',
'flaky',
- 'matplotlib >=3, <3.8', # 3.8 breaks tests
+ 'matplotlib >=3',
'nbconvert',
'bokeh >=3.1',
'pillow',
|
bokeh__bokeh-8651 | grid_axis_alignment_no_toolbar is broken in Windows (32-bit)
`examples/integration/layout/grid_axis_alignment_no_toolbar`:

All axes should be in the 0 to 10, 10^3, 10^6, and 10^9 ranges. All major browsers are affected in the same way. I suspect this may be due to the 32-bit system architecture. It would be good if someone with 64-bit Windows could run this example for comparison.
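
For context, here is a minimal sketch (mine, not from the report) of why a 32-bit build would misbehave: NumPy's default integer type is the platform C long, which is 32 bits on win32, so `V * xcoeff` overflows once the coefficient reaches 10^9. The fix recorded below requests `dtype="int64"` explicitly.

```python
import numpy as np

# Emulate the 32-bit Windows default, where np.arange() yields int32
# because the platform C long is 32 bits there.
V = np.arange(10, dtype="int32")
wrapped = V * np.int32(10**9)   # int32 arithmetic wraps silently
print(wrapped)                  # garbage for every entry >= 3 * 10**9

# Requesting 64-bit integers up front, as the fix below does, avoids it.
V64 = np.arange(10, dtype="int64")
print(V64 * 10**9)              # [0, 1e9, ..., 9e9] as expected
```
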
| [
{
"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10)\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n",
"path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10, dtype=\"int64\")\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n",
"path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"
}
] | diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
index 3bc06696e11..05cefe38274 100644
--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py
+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
@@ -6,7 +6,7 @@
from bokeh.layouts import gridplot
coeffs = [10**0, 10**3, 10**6, 10**9]
-V = np.arange(10)
+V = np.arange(10, dtype="int64")
figs = []
|
fidals__shopelectro-209 | Move CI to our server
Travis doesn't build well on the free server.
Right now a single Travis CI run takes ~20 min. Given that we'll be operating on tasks estimated at 15-30 min each, that's too long.
Besides, Travis often falls over due to resource limits, which breeds random errors in our CI. And that, of course, is very costly.
Research the different off-the-shelf options, e.g. travis, jenkins, team-city, etc.
We need something simple; later we'll deploy the chosen tool on our own server.
The result of this task: a research report here in the comments, a decision made on the tool, and a new task created for rolling the tool out.
Get the access details for our host from @duker33
| [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n",
"path": "shopelectro/settings/base.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = 'Логотип компании Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', 'Наличные'),\n ('cashless', 'Безналичные и денежные переводы'),\n ('AC', 'Банковская карта'),\n ('PC', 'Яндекс.Деньги'),\n ('GP', 'Связной (терминал)'),\n ('AB', 'Альфа-Клик'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': 'Обратный звонок',\n 'order': 'Заказ №{0.fake_order_number}',\n 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',\n 'one_click': 'Заказ в один клик №{0.fake_order_number}',\n 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': 'Интернет-магазин элементов питания \"ShopElectro\"',\n 'menu_title': 'Главная',\n 'title': 'Интернет-магазин Элементов питания с доставкой по России',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': 'Карта сайта',\n 'name': 'Карта сайта',\n },\n 'order': {\n 'slug': 'order',\n 'name': 'Оформление заказа',\n 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',\n },\n 'search': {\n 'slug': 'search',\n 'name': 'Результаты поиска',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': 'Каталог товаров',\n 'menu_title': 'Каталог',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': 'Заказ принят',\n }\n}\n",
"path": "shopelectro/settings/base.py"
}
] | diff --git a/.drone.yml b/.drone.yml
index 7244fa27..3b79a3cc 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -1,7 +1,83 @@
-# @todo #178 Напиши конфиг для Drone CI
+# @todo #208 Определи оптимальное количество параллельных тестов
pipeline:
- build:
- image: alpine
+ pip-install:
+ image: python
commands:
- - echo "Hello!"
+ - pip install -r requirements.txt
+ - rm -rf /usr/app/deps/site-packages
+ - mv /usr/local/lib/python3.6/site-packages /usr/app/deps
+ volumes:
+ - /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}:/usr/app/deps
+
+ npm:
+ image: node
+ environment:
+ - DEPS_DIR=/usr/app/deps
+ commands:
+ - npm install
+ - npm install -g gulp-cli
+ - gulp build
+ volumes:
+ - /tmp/cache/drone/shopelectro/node_modules:/drone/src/github.com/fidals/shopelectro/commit/${DRONE_COMMIT_SHA}/node_modules
+ - /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}/site-packages:/usr/app/deps
+
+ test:
+ image: python
+ environment:
+ - TEST_ENV=true
+ - DJANGO_SETTINGS_MODULE=shopelectro.settings.dev
+ - DATABASE_URL=postgres://postgres:test@postgres/test
+ - BROCKER_URL=amqp://rabbitmq:test@rabbitmq:5672/
+ - SELENIUM_URL=http://selenium:4444/wd/hub
+ - FTP_IP=${FTP_IP}
+ - FTP_USER=${FTP_USER}
+ - FTP_PASS=${FTP_PASS}
+ commands:
+ - python manage.py migrate
+ - python manage.py excel
+ - python manage.py price
+ - python manage.py collectstatic --noinput
+ - python manage.py test -v 3 --parallel --liveserver=test:8020-8030
+ volumes:
+ - /tmp/cache/drone/shopelectro/site-packages/${DRONE_COMMIT_SHA}/site-packages:/usr/local/lib/python3.6/site-packages
+ secrets: [ FTP_IP, FTP_USER, FTP_PASS ]
+
+ notify:
+ image: plugins/slack
+ channel: drone
+ webhook: ${SLACK_WEBHOOK}
+ when:
+ status: [ success, failure ]
+ secrets: [ SLACK_WEBHOOK ]
+
+ pip-clean:
+ image: busybox
+ commands:
+ - rm -rf /site-packages/${DRONE_COMMIT_SHA}
+ volumes:
+ - /tmp/cache/drone/shopelectro/site-packages/:/site-packages/
+ when:
+ status: [ success, failure ]
+
+services:
+ postgres:
+ image: postgres
+ environment:
+ - POSTGRES_USER=postgres
+ - POSTGRES_PASSWORD=test
+ - POSTGRES_DB=test
+
+ rabbitmq:
+ image: rabbitmq
+ environment:
+ - RABBITMQ_DEFAULT_USER=rabbitmq
+ - RABBITMQ_DEFAULT_PASS=test
+
+ selenium:
+ image: selenium/standalone-chrome-debug:3.3.0
+ environment: # https://github.com/SeleniumHQ/docker-selenium/issues/392
+ - DBUS_SESSION_BUS_ADDRESS=/dev/null
+ shm_size: 4G
+ volumes: # https://github.com/SeleniumHQ/docker-selenium#running-the-images
+ - /dev/shm:/dev/shm
diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
index 3f3942b0..41ba2c75 100644
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -182,6 +182,8 @@
},
}
+SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')
+
SITE_CREATED = datetime(2013, 1, 1)
LOCALHOST = 'http://127.0.0.1:8000/'
diff --git a/shopelectro/tests/helpers.py b/shopelectro/tests/helpers.py
index 0130a3a9..1cb67768 100644
--- a/shopelectro/tests/helpers.py
+++ b/shopelectro/tests/helpers.py
@@ -1,3 +1,4 @@
+from django.conf import settings
from django.test import LiveServerTestCase, override_settings
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.action_chains import ActionChains
@@ -44,7 +45,7 @@ def setUpClass(cls):
"""Instantiate browser instance."""
super(SeleniumTestCase, cls).setUpClass()
cls.browser = Remote(
- command_executor='http://se-selenium:4444/wd/hub',
+ command_executor=settings.SELENIUM_URL,
desired_capabilities=DesiredCapabilities.CHROME
)
cls.wait = WebDriverWait(cls.browser, 120)
diff --git a/shopelectro/tests/tests_selenium_mobile.py b/shopelectro/tests/tests_selenium_mobile.py
index 9b9abf84..40a52d33 100644
--- a/shopelectro/tests/tests_selenium_mobile.py
+++ b/shopelectro/tests/tests_selenium_mobile.py
@@ -4,6 +4,7 @@
If you need to create new test-suite, subclass it from SeleniumTestCase class.
Every Selenium-based test suite uses fixture called dump.json.
"""
+from django.conf import settings
from django.test import LiveServerTestCase
from django.urls import reverse
from selenium import webdriver
@@ -30,7 +31,7 @@ def setUpClass(cls):
},
}
cls.browser = webdriver.Remote(
- command_executor='http://se-selenium:4444/wd/hub',
+ command_executor=settings.SELENIUM_URL,
desired_capabilities=capabilities
)
cls.wait = WebDriverWait(cls.browser, 120)
|
rucio__rucio-2062 | Documentation build fails
Motivation
----------
Currently the documentation builds fail with
```
Running Sphinx v1.6.5
making output directory...
loading translations [en]... done
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/rucio/envs/latest/local/lib/python2.7/site-packages/sphinx/cmdline.py", line 305, in main
opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)
File "/home/docs/checkouts/readthedocs.org/user_builds/rucio/envs/latest/local/lib/python2.7/site-packages/sphinx/application.py", line 196, in __init__
self.setup_extension(extension)
File "/home/docs/checkouts/readthedocs.org/user_builds/rucio/envs/latest/local/lib/python2.7/site-packages/sphinx/application.py", line 456, in setup_extension
self.registry.load_extension(self, extname)
File "/home/docs/checkouts/readthedocs.org/user_builds/rucio/envs/latest/local/lib/python2.7/site-packages/sphinx/registry.py", line 199, in load_extension
raise ExtensionError(__('Could not import extension %s') % extname, err)
ExtensionError: Could not import extension rucio.common.doc.argparse.ext (exception: No module named rucio.common.doc.argparse.ext)
Extension error:
Could not import extension rucio.common.doc.argparse.ext (exception: No module named rucio.common.doc.argparse.ext)
```
I did not look too much into the issue yet, but I wonder why we added this `rucio.common.doc.argparse.ext` to the repository instead of using `sphinx-argparse`? @vingar do you maybe remember?
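
The merged fix (visible in the diff below) keeps the in-tree extension and instead makes it importable, by pointing Sphinx at the repository's `lib/` directory. A condensed sketch of the relevant `conf.py` lines, assuming `conf.py` sits in `doc/source/` and the package in `lib/`:

```python
# doc/source/conf.py -- path fix so the in-tree Sphinx extension resolves.
import os
import sys

# Put <repo>/lib on sys.path so Sphinx can import the extension without
# the rucio package being pip-installed in the docs build environment.
sys.path.insert(len(sys.path), os.path.abspath('../../lib'))

extensions = [
    'sphinx.ext.autodoc',
    'rucio.common.doc.argparse.ext',  # now importable from lib/
]
```
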
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Rucio documentation build configuration file, created by\n# sphinx-quickstart on Fri Oct 27 14:25:40 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom mock import Mock as MagicMock\n\nsys.path.insert(len(sys.path) - 1, os.path.abspath('.'))\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return Mock()\n\n @classmethod\n def __getitem__(cls, name):\n return Mock()\n\n\nMOCK_MODULES = ['pycurl', 'M2Crypto']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'rucio.common.doc.argparse.ext',\n 'sphinxcontrib.httpdomain',\n 'sphinxcontrib.autohttp.flask',\n 'sphinxcontrib.autohttp.flaskqref']\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Rucio'\ncopyright = u'2012-2018 CERN for the benefit of the ATLAS collaboration'\nauthor = u'[email protected]'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u''\n# The full version, including alpha/beta/rc tags.\nrelease = u'1.2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\n# html_sidebars = {\n# '**': [\n# 'about.html',\n# 'navigation.html',\n# 'relations.html', # needs 'show_related': True theme option to display\n# 'searchbox.html',\n# 'donate.html',\n# ]\n# }\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Ruciodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Rucio.tex', u'Rucio Documentation',\n u'[email protected]', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'rucio', u'Rucio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Rucio', u'Rucio Documentation',\n author, 'Rucio', 'One line description of project.',\n 'Miscellaneous'),\n]\n",
"path": "doc/source/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Rucio documentation build configuration file, created by\n# sphinx-quickstart on Fri Oct 27 14:25:40 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom mock import Mock as MagicMock\n\nsys.path.insert(len(sys.path), os.path.abspath('../../lib'))\n\n\nclass Mock(MagicMock):\n @classmethod\n def __getattr__(cls, name):\n return Mock()\n\n @classmethod\n def __getitem__(cls, name):\n return Mock()\n\n\nMOCK_MODULES = ['pycurl', 'M2Crypto']\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'rucio.common.doc.argparse.ext',\n 'sphinxcontrib.httpdomain',\n 'sphinxcontrib.autohttp.flask',\n 'sphinxcontrib.autohttp.flaskqref']\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Rucio'\ncopyright = u'2012-2018 CERN for the benefit of the ATLAS collaboration'\nauthor = u'[email protected]'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u''\n# The full version, including alpha/beta/rc tags.\nrelease = u'1.2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\n# html_sidebars = {\n# '**': [\n# 'about.html',\n# 'navigation.html',\n# 'relations.html', # needs 'show_related': True theme option to display\n# 'searchbox.html',\n# 'donate.html',\n# ]\n# }\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Ruciodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Rucio.tex', u'Rucio Documentation',\n u'[email protected]', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'rucio', u'Rucio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Rucio', u'Rucio Documentation',\n author, 'Rucio', 'One line description of project.',\n 'Miscellaneous'),\n]\n",
"path": "doc/source/conf.py"
}
] | diff --git a/doc/source/conf.py b/doc/source/conf.py
index 60e19bdfaf..99bdb9c1fc 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -20,7 +20,7 @@
import sys
from mock import Mock as MagicMock
-sys.path.insert(len(sys.path) - 1, os.path.abspath('.'))
+sys.path.insert(len(sys.path), os.path.abspath('../../lib'))
class Mock(MagicMock):
|
pyro-ppl__pyro-2345 | Switch from pillow to pillow-simd
This attempts to fix some `pillow`-`torchvision` breakage blocking #2239 by switching to the alternative torchvision-recommended library `pillow-simd`.
| [
{
"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nVERSION = \"\"\"\n# This file is auto-generated with the version information during setup.py installation.\n\n__version__ = '{}'\n\"\"\"\n\n# Find pyro version.\nfor line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):\n if line.startswith('version_prefix = '):\n version = line.strip().split()[2][1:-1]\n\n# Append current commit sha to version\ncommit_sha = ''\ntry:\n current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n # only add sha if HEAD does not point to the release tag\n if not current_tag == version:\n commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n# catch all exception to be safe\nexcept Exception:\n pass # probably not a git repo\n\n# Write version to _version.py\nif commit_sha:\n version += '+{}'.format(commit_sha)\nwith open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:\n f.write(VERSION.format(version))\n\n\n# READ README.md for long description on PyPi.\n# This requires uploading via twine, e.g.:\n# $ python setup.py sdist bdist_wheel\n# $ twine upload --repository-url https://test.pypi.org/legacy/ dist/* # test version\n# $ twine upload dist/*\ntry:\n long_description = open('README.md', encoding='utf-8').read()\nexcept Exception as e:\n sys.stderr.write('Failed to read README.md\\n'.format(e))\n sys.stderr.flush()\n long_description = ''\n\n# Remove badges since they will always be obsolete.\n# This assumes the first 12 lines contain badge info.\nlong_description = '\\n'.join([str(line) for line in long_description.split('\\n')[12:]])\n\n# examples/tutorials\nEXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'graphviz>=0.8',\n 'matplotlib>=1.3',\n 'pillow-simd',\n 'torchvision>=0.5.0',\n 'visdom>=0.1.4',\n 'pandas',\n 'seaborn',\n 'wget',\n]\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=find_packages(include=['pyro', 'pyro.*']),\n package_data={\"pyro.distributions\": [\"*.cpp\"]},\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n # if you add any additional libraries, please also\n # add them to `docs/requirements.txt`\n # numpy is necessary for some functionality of PyTorch\n 'numpy>=1.7',\n 'opt_einsum>=2.3.2',\n 'pyro-api>=0.1.1',\n 'torch>=1.4.0',\n 'tqdm>=4.36',\n ],\n extras_require={\n 'extras': EXTRAS_REQUIRE,\n 'test': EXTRAS_REQUIRE + [\n 'nbval',\n 'pytest>=4.1',\n 'pytest-cov',\n 'scipy>=1.1',\n ],\n 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],\n 'dev': EXTRAS_REQUIRE + [\n 'flake8',\n 'isort',\n 'nbformat',\n 'nbsphinx>=0.3.2',\n 'nbstripout',\n 'nbval',\n 'ninja',\n 'pypandoc',\n 'pytest>=4.1',\n 'pytest-xdist',\n 'scipy>=1.1',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n ],\n },\n python_requires='>=3.5',\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='Apache 2.0',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating 
System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n # yapf\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nVERSION = \"\"\"\n# This file is auto-generated with the version information during setup.py installation.\n\n__version__ = '{}'\n\"\"\"\n\n# Find pyro version.\nfor line in open(os.path.join(PROJECT_PATH, 'pyro', '__init__.py')):\n if line.startswith('version_prefix = '):\n version = line.strip().split()[2][1:-1]\n\n# Append current commit sha to version\ncommit_sha = ''\ntry:\n current_tag = subprocess.check_output(['git', 'tag', '--points-at', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n # only add sha if HEAD does not point to the release tag\n if not current_tag == version:\n commit_sha = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],\n cwd=PROJECT_PATH).decode('ascii').strip()\n# catch all exception to be safe\nexcept Exception:\n pass # probably not a git repo\n\n# Write version to _version.py\nif commit_sha:\n version += '+{}'.format(commit_sha)\nwith open(os.path.join(PROJECT_PATH, 'pyro', '_version.py'), 'w') as f:\n f.write(VERSION.format(version))\n\n\n# READ README.md for long description on PyPi.\n# This requires uploading via twine, e.g.:\n# $ python setup.py sdist bdist_wheel\n# $ twine upload --repository-url https://test.pypi.org/legacy/ dist/* # test version\n# $ twine upload dist/*\ntry:\n long_description = open('README.md', encoding='utf-8').read()\nexcept Exception as e:\n sys.stderr.write('Failed to read README.md\\n'.format(e))\n sys.stderr.flush()\n long_description = ''\n\n# Remove badges since they will always be obsolete.\n# This assumes the first 12 lines contain badge info.\nlong_description = '\\n'.join([str(line) for line in long_description.split('\\n')[12:]])\n\n# examples/tutorials\nEXTRAS_REQUIRE = [\n 'jupyter>=1.0.0',\n 'graphviz>=0.8',\n 'matplotlib>=1.3',\n 'torchvision>=0.5.0',\n 'visdom>=0.1.4',\n 'pandas',\n 'seaborn',\n 'wget',\n]\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=find_packages(include=['pyro', 'pyro.*']),\n package_data={\"pyro.distributions\": [\"*.cpp\"]},\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n # if you add any additional libraries, please also\n # add them to `docs/requirements.txt`\n # numpy is necessary for some functionality of PyTorch\n 'numpy>=1.7',\n 'opt_einsum>=2.3.2',\n 'pyro-api>=0.1.1',\n 'torch>=1.4.0',\n 'tqdm>=4.36',\n ],\n extras_require={\n 'extras': EXTRAS_REQUIRE,\n 'test': EXTRAS_REQUIRE + [\n 'nbval',\n 'pytest>=4.1',\n 'pytest-cov',\n 'scipy>=1.1',\n ],\n 'profile': ['prettytable', 'pytest-benchmark', 'snakeviz'],\n 'dev': EXTRAS_REQUIRE + [\n 'flake8',\n 'isort',\n 'nbformat',\n 'nbsphinx>=0.3.2',\n 'nbstripout',\n 'nbval',\n 'ninja',\n 'pypandoc',\n 'pytest>=4.1',\n 'pytest-xdist',\n 'scipy>=1.1',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n ],\n },\n python_requires='>=3.5',\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='Apache 2.0',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: 
MacOS X',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n # yapf\n)\n",
"path": "setup.py"
}
] | diff --git a/docs/requirements.txt b/docs/requirements.txt
index 84dc77d139..c941f0e148 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -5,6 +5,5 @@ graphviz>=0.8
numpy>=1.7
observations>=0.1.4
opt_einsum>=2.3.2
-pillow-simd
pyro-api>=0.1.1
tqdm>=4.36
diff --git a/setup.py b/setup.py
index f87b5296e0..7f965ffb40 100644
--- a/setup.py
+++ b/setup.py
@@ -60,7 +60,6 @@
'jupyter>=1.0.0',
'graphviz>=0.8',
'matplotlib>=1.3',
- 'pillow-simd',
'torchvision>=0.5.0',
'visdom>=0.1.4',
'pandas',
|
napalm-automation__napalm-1681 | IOS-XR get_lldp_neighbor_details does not support 'N/A' capabilities
### Description of Issue/Question
The constant LLDP_CAPAB_TRANFORM_TABLE does not include the 'N/A' value that IOS-XR reports among system capabilities, and there are no checks for this value either.
```
<SystemCapabilities>
N/A
</SystemCapabilities>
```
*Note*: Please check https://guides.github.com/features/mastering-markdown/
to see how to properly format your request.
### Did you follow the steps from https://github.com/napalm-automation/napalm#faq
(Place an ``x`` between the square brackets where applicable)
- [x] Yes
- [ ] No
### Setup
### napalm version
(Paste verbatim output from `pip freeze | grep napalm` between quotes below)
```
root@salt-master1:/# python3 -m pip freeze | grep napalm
napalm==3.2.0
```
### Network operating system version
(Paste verbatim output from `show version` - or equivalent - between quotes below)
```
RP/0/RP0/CPU0:hbr1.12345.999#show version
Wed Sep 23 18:41:02.500 CEST
Cisco IOS XR Software, Version 7.1.2 LNT
Copyright (c) 2013-2020 by Cisco Systems, Inc.
Build Information:
Built By : ahoang
Built On : Sat Aug 29 20:04:27 UTC 2020
Build Host : iox-lnx-069
Workspace : /auto/srcarchive13/prod/7.1.2/ncs540l/ws
Version : 7.1.2
Label : 7.1.2
cisco NCS540L (C3708 @ 1.70GHz)
System uptime is 1 week, 1 day, 10 hours, 2 minutes
```
### Steps to Reproduce the Issue
### Error Traceback
(Paste the complete traceback of the exception between quotes below)
```
================= Traceback =================
Traceback (most recent call last):
File "/usr/local/bin/napalm", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 308, in main
run_tests(args)
File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 291, in run_tests
call_getter(device, args.method, **method_kwargs)
File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 27, in wrapper
r = func(*args, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 255, in call_getter
r = func(**kwargs)
File "/usr/local/lib/python3.8/dist-packages/napalm/iosxr/iosxr.py", line 852, in get_lldp_neighbors_detail
"remote_system_capab": napalm.base.helpers.transform_lldp_capab(
File "/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py", line 463, in transform_lldp_capab
[constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities]
File "/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py", line 463, in <listcomp>
[constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities]
KeyError: 'n/a'
```
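
The `KeyError: 'n/a'` pins the fix down: the lower-cased token just needs an entry in the transform table. A sketch of the constants change (it matches the diff recorded below):

```python
# napalm/base/constants.py -- sketch of the one-line mapping addition.
LLDP_CAPAB_TRANFORM_TABLE = {
    "o": "other",
    "n/a": "other",  # IOS-XR reports N/A when no capability is advertised
    "p": "repeater",
    "b": "bridge",
    "w": "wlan-access-point",
    "r": "router",
    "t": "telephone",
    "c": "docsis-cable-device",
    "s": "station",
}

# The helper in the traceback does a plain dict lookup per stripped token,
# so the lookup that previously raised KeyError now resolves.
assert LLDP_CAPAB_TRANFORM_TABLE["n/a"] == "other"
```
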
| [
{
"content": "\"\"\"Constants to be used across NAPALM drivers.\"\"\"\n\nCONFIG_LOCK = True # must be changed soon!\nTIMEOUT = 60 # seconds\n\nINTERFACE_NULL_SPEED = -1\n\nACTION_TYPE_METHODS = (\"ping\", \"traceroute\")\n\nBGP_NEIGHBOR_NULL_COUNTER = -1\n\nSNMP_AUTHORIZATION_MODE_MAP = {\"read-only\": \"ro\", \"read-write\": \"rw\"}\n\nROUTE_COMMON_PROTOCOL_FIELDS = [\n \"destination\",\n \"prefix_length\",\n \"protocol\",\n \"current_active\",\n \"last_active\",\n \"age\",\n \"next_hop\",\n \"outgoing_interface\",\n \"selected_next_hop\",\n \"preference\",\n \"inactive_reason\",\n \"routing_table\",\n] # identifies the list of fileds common for all protocols\n\nROUTE_PROTOCOL_SPECIFIC_FIELDS = {\n \"bgp\": [\n \"local_as\",\n \"remote_as\",\n \"as_path\",\n \"communities\",\n \"local_preference\",\n \"preference2\",\n \"remote_address\",\n \"metric\",\n \"metric2\",\n ],\n \"isis\": [\"level\", \"metric\", \"local_as\"],\n \"static\": [], # nothing specific to static routes\n}\n\nTRACEROUTE_TTL = 255\nTRACEROUTE_SOURCE = \"\"\nTRACEROUTE_TIMEOUT = 2\nTRACEROUTE_NULL_HOST_NAME = \"*\"\nTRACEROUTE_NULL_IP_ADDRESS = \"*\"\nTRACEROUTE_VRF = \"\"\n\nOPTICS_NULL_LEVEL = \"-Inf\"\n\nPING_SOURCE = \"\"\nPING_TTL = 255\nPING_TIMEOUT = 2\nPING_SIZE = 100\nPING_COUNT = 5\nPING_VRF = \"\"\nPING_SOURCE_INTERFACE = \"\"\n\nNETMIKO_MAP = {\n \"ios\": \"cisco_ios\",\n \"nxos\": \"cisco_nxos\",\n \"nxos_ssh\": \"cisco_nxos\",\n \"iosxr\": \"cisco_iosxr\",\n \"eos\": \"arista_eos\",\n \"junos\": \"juniper_junos\",\n}\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"o\": \"other\",\n \"p\": \"repeater\",\n \"b\": \"bridge\",\n \"w\": \"wlan-access-point\",\n \"r\": \"router\",\n \"t\": \"telephone\",\n \"c\": \"docsis-cable-device\",\n \"s\": \"station\",\n}\n\nCISCO_SANITIZE_FILTERS = {\n r\"^(snmp-server community).*$\": r\"\\1 <removed>\",\n r\"^(snmp-server host \\S+( vrf \\S+)?( version (1|2c|3))?)\\s+\\S+((\\s+\\S*)*)\\s*$\": r\"\\1 <removed> \\5\", # noqa\n r\"^(snmp-server user \\S+( \\S+)? auth md5) \\S+ (priv) \\S+ (localizedkey( engineID \\S+)?)\\s*$\": r\"\\1 <removed> \\3 <removed> \\4\\5\", # noqa\n r\"^(username .+ (password|secret) \\d) .+$\": r\"\\1 <removed>\",\n r\"^(enable (password|secret)( level \\d+)? \\d) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+(?:password|secret)) (?:\\d{1,2} )?\\S+$\": r\"\\1 <removed>\",\n r\"^(.*wpa-psk ascii \\d) (\\S+)$\": r\"\\1 <removed>\",\n r\"^(.*key 7) (\\d.+)$\": r\"\\1 <removed>\",\n r\"^(tacacs-server (.+ )?key) .+$\": r\"\\1 <removed>\",\n r\"^(crypto isakmp key) (\\S+) (.*)$\": r\"\\1 <removed> \\3\",\n r\"^(\\s+ip ospf message-digest-key \\d+ md5) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+ip ospf authentication-key) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+neighbor \\S+ password) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+vrrp \\d+ authentication text) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication) .{1,8}$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication md5 key-string) .+?( timeout \\d+)?$\": r\"\\1 <removed> \\2\",\n r\"^(\\s+key-string) .+$\": r\"\\1 <removed>\",\n r\"^((tacacs|radius) server [^\\n]+\\n(\\s+[^\\n]+\\n)*\\s+key) [^\\n]+$\": r\"\\1 <removed>\",\n r\"^(\\s+ppp (chap|pap) password \\d) .+$\": r\"\\1 <removed>\",\n}\n",
"path": "napalm/base/constants.py"
}
] | [
{
"content": "\"\"\"Constants to be used across NAPALM drivers.\"\"\"\n\nCONFIG_LOCK = True # must be changed soon!\nTIMEOUT = 60 # seconds\n\nINTERFACE_NULL_SPEED = -1\n\nACTION_TYPE_METHODS = (\"ping\", \"traceroute\")\n\nBGP_NEIGHBOR_NULL_COUNTER = -1\n\nSNMP_AUTHORIZATION_MODE_MAP = {\"read-only\": \"ro\", \"read-write\": \"rw\"}\n\nROUTE_COMMON_PROTOCOL_FIELDS = [\n \"destination\",\n \"prefix_length\",\n \"protocol\",\n \"current_active\",\n \"last_active\",\n \"age\",\n \"next_hop\",\n \"outgoing_interface\",\n \"selected_next_hop\",\n \"preference\",\n \"inactive_reason\",\n \"routing_table\",\n] # identifies the list of fileds common for all protocols\n\nROUTE_PROTOCOL_SPECIFIC_FIELDS = {\n \"bgp\": [\n \"local_as\",\n \"remote_as\",\n \"as_path\",\n \"communities\",\n \"local_preference\",\n \"preference2\",\n \"remote_address\",\n \"metric\",\n \"metric2\",\n ],\n \"isis\": [\"level\", \"metric\", \"local_as\"],\n \"static\": [], # nothing specific to static routes\n}\n\nTRACEROUTE_TTL = 255\nTRACEROUTE_SOURCE = \"\"\nTRACEROUTE_TIMEOUT = 2\nTRACEROUTE_NULL_HOST_NAME = \"*\"\nTRACEROUTE_NULL_IP_ADDRESS = \"*\"\nTRACEROUTE_VRF = \"\"\n\nOPTICS_NULL_LEVEL = \"-Inf\"\n\nPING_SOURCE = \"\"\nPING_TTL = 255\nPING_TIMEOUT = 2\nPING_SIZE = 100\nPING_COUNT = 5\nPING_VRF = \"\"\nPING_SOURCE_INTERFACE = \"\"\n\nNETMIKO_MAP = {\n \"ios\": \"cisco_ios\",\n \"nxos\": \"cisco_nxos\",\n \"nxos_ssh\": \"cisco_nxos\",\n \"iosxr\": \"cisco_iosxr\",\n \"eos\": \"arista_eos\",\n \"junos\": \"juniper_junos\",\n}\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"o\": \"other\",\n \"n/a\": \"other\",\n \"p\": \"repeater\",\n \"b\": \"bridge\",\n \"w\": \"wlan-access-point\",\n \"r\": \"router\",\n \"t\": \"telephone\",\n \"c\": \"docsis-cable-device\",\n \"s\": \"station\",\n}\n\nCISCO_SANITIZE_FILTERS = {\n r\"^(snmp-server community).*$\": r\"\\1 <removed>\",\n r\"^(snmp-server host \\S+( vrf \\S+)?( version (1|2c|3))?)\\s+\\S+((\\s+\\S*)*)\\s*$\": r\"\\1 <removed> \\5\", # noqa\n r\"^(snmp-server user \\S+( \\S+)? auth md5) \\S+ (priv) \\S+ (localizedkey( engineID \\S+)?)\\s*$\": r\"\\1 <removed> \\3 <removed> \\4\\5\", # noqa\n r\"^(username .+ (password|secret) \\d) .+$\": r\"\\1 <removed>\",\n r\"^(enable (password|secret)( level \\d+)? \\d) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+(?:password|secret)) (?:\\d{1,2} )?\\S+$\": r\"\\1 <removed>\",\n r\"^(.*wpa-psk ascii \\d) (\\S+)$\": r\"\\1 <removed>\",\n r\"^(.*key 7) (\\d.+)$\": r\"\\1 <removed>\",\n r\"^(tacacs-server (.+ )?key) .+$\": r\"\\1 <removed>\",\n r\"^(crypto isakmp key) (\\S+) (.*)$\": r\"\\1 <removed> \\3\",\n r\"^(\\s+ip ospf message-digest-key \\d+ md5) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+ip ospf authentication-key) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+neighbor \\S+ password) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+vrrp \\d+ authentication text) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication) .{1,8}$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication md5 key-string) .+?( timeout \\d+)?$\": r\"\\1 <removed> \\2\",\n r\"^(\\s+key-string) .+$\": r\"\\1 <removed>\",\n r\"^((tacacs|radius) server [^\\n]+\\n(\\s+[^\\n]+\\n)*\\s+key) [^\\n]+$\": r\"\\1 <removed>\",\n r\"^(\\s+ppp (chap|pap) password \\d) .+$\": r\"\\1 <removed>\",\n}\n",
"path": "napalm/base/constants.py"
}
] | diff --git a/napalm/base/constants.py b/napalm/base/constants.py
index 75f492b1a..d505081ab 100644
--- a/napalm/base/constants.py
+++ b/napalm/base/constants.py
@@ -69,6 +69,7 @@
}
LLDP_CAPAB_TRANFORM_TABLE = {
"o": "other",
+ "n/a": "other",
"p": "repeater",
"b": "bridge",
"w": "wlan-access-point",
|
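For context, a brief illustration of how the added mapping matters; `normalise_capabilities` below is a hypothetical helper, not NAPALM code (the real drivers do their own lookups against this table), shown only to make concrete why the missing `"n/a"` key was a problem: a driver reporting an LLDP capability of `n/a` would hit a `KeyError` before this change.

```py
# Hypothetical illustration -- not NAPALM code. Before the diff above,
# looking up "n/a" in LLDP_CAPAB_TRANFORM_TABLE raised KeyError; the new
# entry normalises it to "other".
LLDP_CAPAB_TRANFORM_TABLE = {
    "o": "other",
    "n/a": "other",  # the entry added by the diff
    "b": "bridge",
    "r": "router",
}

def normalise_capabilities(raw):
    """Map raw LLDP capability codes (e.g. 'B,R' or 'n/a') to names."""
    return [LLDP_CAPAB_TRANFORM_TABLE[c.strip().lower()] for c in raw.split(",")]

print(normalise_capabilities("B,R"))  # ['bridge', 'router']
print(normalise_capabilities("n/a"))  # ['other'] (KeyError before the fix)
```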
Cog-Creators__Red-DiscordBot-1170 | [V3] Error in command repl
### Type:
- [ ] Suggestion
- [x] Bug
### Brief description of the problem
I tried the core repl function on V3 for the first time, set a very basic value, and got an error.
### Steps to reproduce
1. `=repl`
2. `level = 1`

Full error:
```Py
Exception in command 'repl'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 62, in wrapped
ret = yield from coro(*args, **kwargs)
File "/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py", line 273, in repl
msg = self.sanitize_output(ctx, msg)
File "/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/redbot/core/dev_commands.py", line 64, in sanitize_output
result = input_.replace(token, r)
AttributeError: 'NoneType' object has no attribute 'replace'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/bot.py", line 886, in invoke
yield from ctx.command.invoke(ctx)
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 489, in invoke
yield from injected(*ctx.args, **ctx.kwargs)
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 71, in wrapped
raise CommandInvokeError(e) from e
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: AttributeError: 'NoneType' object has no attribute 'replace'
```
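For context, a minimal standalone sketch of the failure mode, assuming only the names visible in the traceback above: `repl` initialises `msg = None` and only assigns it when the executed code prints or returns something, so a bare assignment like `level = 1` reaches `sanitize_output` with `None`.

```py
# Minimal reproduction. sanitize_output() boils down to
# input_.replace(token, r), which is why a None msg raises AttributeError.
def sanitize_output(input_, token="<bot-token>", r="[EXPUNGED]"):
    return input_.replace(token, r)

msg = None          # repl's initial value before `level = 1` runs
try:
    sanitize_output(msg)
except AttributeError as e:
    print(e)        # 'NoneType' object has no attribute 'replace'

msg = ""            # the one-line fix applied in the diff below
print(repr(sanitize_output(msg)))  # '' -- safe to pagify and send
```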
| [
{
"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\nfrom discord.ext import commands\nfrom . import checks\nfrom .i18n import CogI18n\nfrom .utils.chat_formatting import box, pagify\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = CogI18n(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box('{0.__class__.__name__}: {0}'.format(e), lang=\"py\")\n return box(\n '{0.text}{1:>{0.offset}}\\n{2}: {0}'\n ''.format(e, '^', type(e).__name__),\n lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. 
For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(\n box('{}: {!s}'.format(type(e).__name__, e), lang='py'))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name='eval')\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = 'async def func():\\n%s' % textwrap.indent(body, ' ')\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env['func']\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n 'ctx': ctx,\n 'bot': ctx.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(_('Already running a REPL session in this channel. 
'\n 'Exit it with `quit`.'))\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_('Enter code to execute or evaluate.'\n ' `exit()` or `quit` to exit.'))\n\n msg_check = lambda m: (m.author == ctx.author and\n m.channel == ctx.channel and\n m.content.startswith('`'))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n stdout = io.StringIO()\n\n msg = None\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables['_'] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_('Unexpected error: `{}`').format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch('message', msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n",
"path": "redbot/core/dev_commands.py"
}
] | [
{
"content": "import asyncio\nimport inspect\nimport io\nimport textwrap\nimport traceback\nfrom contextlib import redirect_stdout\nfrom copy import copy\n\nimport discord\nfrom discord.ext import commands\nfrom . import checks\nfrom .i18n import CogI18n\nfrom .utils.chat_formatting import box, pagify\n\"\"\"\nNotice:\n\n95% of the below code came from R.Danny which can be found here:\n\nhttps://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py\n\"\"\"\n\n_ = CogI18n(\"Dev\", __file__)\n\n\nclass Dev:\n \"\"\"Various development focused utilities.\"\"\"\n\n def __init__(self):\n self._last_result = None\n self.sessions = set()\n\n @staticmethod\n def cleanup_code(content):\n \"\"\"Automatically removes code blocks from the code.\"\"\"\n # remove ```py\\n```\n if content.startswith('```') and content.endswith('```'):\n return '\\n'.join(content.split('\\n')[1:-1])\n\n # remove `foo`\n return content.strip('` \\n')\n\n @staticmethod\n def get_syntax_error(e):\n \"\"\"Format a syntax error to send to the user.\n\n Returns a string representation of the error formatted as a codeblock.\n \"\"\"\n if e.text is None:\n return box('{0.__class__.__name__}: {0}'.format(e), lang=\"py\")\n return box(\n '{0.text}{1:>{0.offset}}\\n{2}: {0}'\n ''.format(e, '^', type(e).__name__),\n lang=\"py\")\n\n @staticmethod\n def get_pages(msg: str):\n \"\"\"Pagify the given message for output to the user.\"\"\"\n return pagify(msg, delims=[\"\\n\", \" \"], priority=True, shorten_by=10)\n\n @staticmethod\n def sanitize_output(ctx: commands.Context, input_: str) -> str:\n \"\"\"Hides the bot's token from a string.\"\"\"\n token = ctx.bot.http.token\n r = \"[EXPUNGED]\"\n result = input_.replace(token, r)\n result = result.replace(token.lower(), r)\n result = result.replace(token.upper(), r)\n return result\n\n @commands.command()\n @checks.is_owner()\n async def debug(self, ctx, *, code):\n \"\"\"Evaluate a statement of python code.\n\n The bot will always respond with the return value of the code.\n If the return value of the code is a coroutine, it will be awaited,\n and the result of that will be the bot's response.\n\n Note: Only one statement may be evaluated. Using await, yield or\n similar restricted keywords will result in a syntax error. 
For multiple\n lines or asynchronous code, see [p]repl or [p]eval.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n code = self.cleanup_code(code)\n\n try:\n result = eval(code, env)\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n return\n except Exception as e:\n await ctx.send(\n box('{}: {!s}'.format(type(e).__name__, e), lang='py'))\n return\n\n if asyncio.iscoroutine(result):\n result = await result\n\n self._last_result = result\n\n result = self.sanitize_output(ctx, str(result))\n\n await ctx.send_interactive(self.get_pages(result), box_lang=\"py\")\n\n @commands.command(name='eval')\n @checks.is_owner()\n async def _eval(self, ctx, *, body: str):\n \"\"\"Execute asynchronous code.\n\n This command wraps code into the body of an async function and then\n calls and awaits it. The bot will respond with anything printed to\n stdout, as well as the return value of the function.\n\n The code can be within a codeblock, inline code or neither, as long\n as they are not mixed and they are formatted correctly.\n\n Environment Variables:\n ctx - command invokation context\n bot - bot object\n channel - the current channel object\n author - command author's member object\n message - the command's message object\n discord - discord.py library\n commands - discord.py commands extension\n _ - The result of the last dev command.\n \"\"\"\n env = {\n 'bot': ctx.bot,\n 'ctx': ctx,\n 'channel': ctx.channel,\n 'author': ctx.author,\n 'guild': ctx.guild,\n 'message': ctx.message,\n 'discord': discord,\n 'commands': commands,\n '_': self._last_result\n }\n\n body = self.cleanup_code(body)\n stdout = io.StringIO()\n\n to_compile = 'async def func():\\n%s' % textwrap.indent(body, ' ')\n\n try:\n exec(to_compile, env)\n except SyntaxError as e:\n return await ctx.send(self.get_syntax_error(e))\n\n func = env['func']\n result = None\n try:\n with redirect_stdout(stdout):\n result = await func()\n except:\n printed = \"{}{}\".format(stdout.getvalue(), traceback.format_exc())\n else:\n printed = stdout.getvalue()\n await ctx.tick()\n\n if result is not None:\n self._last_result = result\n msg = \"{}{}\".format(printed, result)\n else:\n msg = printed\n msg = self.sanitize_output(ctx, msg)\n\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n\n @commands.command()\n @checks.is_owner()\n async def repl(self, ctx):\n \"\"\"Open an interactive REPL.\n\n The REPL will only recognise code as messages which start with a\n backtick. This includes codeblocks, and as such multiple lines can be\n evaluated.\n\n You may not await any code in this REPL unless you define it inside an\n async function.\n \"\"\"\n variables = {\n 'ctx': ctx,\n 'bot': ctx.bot,\n 'message': ctx.message,\n 'guild': ctx.guild,\n 'channel': ctx.channel,\n 'author': ctx.author,\n '_': None,\n }\n\n if ctx.channel.id in self.sessions:\n await ctx.send(_('Already running a REPL session in this channel. 
'\n 'Exit it with `quit`.'))\n return\n\n self.sessions.add(ctx.channel.id)\n await ctx.send(_('Enter code to execute or evaluate.'\n ' `exit()` or `quit` to exit.'))\n\n msg_check = lambda m: (m.author == ctx.author and\n m.channel == ctx.channel and\n m.content.startswith('`'))\n\n while True:\n response = await ctx.bot.wait_for(\"message\", check=msg_check)\n\n cleaned = self.cleanup_code(response.content)\n\n if cleaned in ('quit', 'exit', 'exit()'):\n await ctx.send('Exiting.')\n self.sessions.remove(ctx.channel.id)\n return\n\n executor = exec\n if cleaned.count('\\n') == 0:\n # single statement, potentially 'eval'\n try:\n code = compile(cleaned, '<repl session>', 'eval')\n except SyntaxError:\n pass\n else:\n executor = eval\n\n if executor is exec:\n try:\n code = compile(cleaned, '<repl session>', 'exec')\n except SyntaxError as e:\n await ctx.send(self.get_syntax_error(e))\n continue\n\n variables['message'] = response\n\n stdout = io.StringIO()\n\n msg = \"\"\n\n try:\n with redirect_stdout(stdout):\n result = executor(code, variables)\n if inspect.isawaitable(result):\n result = await result\n except:\n value = stdout.getvalue()\n msg = \"{}{}\".format(value, traceback.format_exc())\n else:\n value = stdout.getvalue()\n if result is not None:\n msg = \"{}{}\".format(value, result)\n variables['_'] = result\n elif value:\n msg = \"{}\".format(value)\n\n msg = self.sanitize_output(ctx, msg)\n\n try:\n await ctx.send_interactive(self.get_pages(msg), box_lang=\"py\")\n except discord.Forbidden:\n pass\n except discord.HTTPException as e:\n await ctx.send(_('Unexpected error: `{}`').format(e))\n\n @commands.command()\n @checks.is_owner()\n async def mock(self, ctx, user: discord.Member, *, command):\n \"\"\"Mock another user invoking a command.\n\n The prefix must not be entered.\n \"\"\"\n msg = copy(ctx.message)\n msg.author = user\n msg.content = ctx.prefix + command\n\n ctx.bot.dispatch('message', msg)\n\n @commands.command(name=\"mockmsg\")\n @checks.is_owner()\n async def mock_msg(self, ctx, user: discord.Member, *, content: str):\n \"\"\"Dispatch a message event as if it were sent by a different user.\n\n Only reads the raw content of the message. Attachments, embeds etc. are\n ignored.\n \"\"\"\n old_author = ctx.author\n old_content = ctx.message.content\n ctx.message.author = user\n ctx.message.content = content\n\n ctx.bot.dispatch(\"message\", ctx.message)\n\n # If we change the author and content back too quickly,\n # the bot won't process the mocked message in time.\n await asyncio.sleep(2)\n ctx.message.author = old_author\n ctx.message.content = old_content\n",
"path": "redbot/core/dev_commands.py"
}
] | diff --git a/redbot/core/dev_commands.py b/redbot/core/dev_commands.py
index 3ee48e710b4..8ec038bf073 100644
--- a/redbot/core/dev_commands.py
+++ b/redbot/core/dev_commands.py
@@ -253,7 +253,7 @@ async def repl(self, ctx):
stdout = io.StringIO()
- msg = None
+ msg = ""
try:
with redirect_stdout(stdout):
|
python__python-docs-es-760 | Replace 'PyCampES/python-docs-es' with 'python/python-docs-es' in repository links
It looks like links to the `PyCampES/python-docs-es` repository are being redirected to `python/python-docs-es`. I assume `PyCampES` was the old organisation that administered this project.
It wouldn't hurt to replace the links so they point to this repository and save us the redirect. I could do it in a moment, but I'd rather open this issue first to make sure they haven't been kept this way for a reason.
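For reference, the change amounts to a one-line edit in `scripts/create_issue.py`; a minimal sketch (token handling elided, and note `get_repo` performs a live API call):

```py
from github import Github  # PyGithub

g = Github("<GITHUB_TOKEN>")  # placeholder; the script reads os.environ
repo = g.get_repo("python/python-docs-es")  # was: "PyCampES/python-docs-es"
```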
| [
{
"content": "# Use together with `pageviews.py`\n# python scripts/pageviews.py | head -n 150 | grep -v whats | cut -d ' ' -f 2 | sed 's/\\.html/\\.po/g' | xargs -I '{}' python scripts/create_issue.py '{}'\n\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom github import Github\nfrom potodo._po_file import PoFileStats\n\nif len(sys.argv) != 2:\n print('Specify PO filename')\n sys.exit(1)\n\npofilename = sys.argv[1]\npofile = PoFileStats(Path(pofilename))\n\ng = Github(os.environ.get('GITHUB_TOKEN'))\n\nrepo = g.get_repo('PyCampES/python-docs-es')\n\n\nissues = repo.get_issues(state='all')\nfor issue in issues:\n if pofilename in issue.title:\n\n print(f'Skipping {pofilename}. There is a similar issue already created at {issue.html_url}')\n sys.exit(1)\n\n msg = f'There is a similar issue already created at {issue.html_url}.\\nDo you want to create it anyways? [y/N] '\n answer = input(msg)\n if answer != 'y':\n sys.exit(1)\n\nif any([\n pofile.translated_nb == pofile.po_file_size,\n pofile.untranslated_nb == 0,\n]):\n print(f'Skipping {pofilename}. The file is 100% translated already.')\n sys.exit(1)\n\n# https://pygithub.readthedocs.io/en/latest/github_objects/Repository.html#github.Repository.Repository.create_issue\ntitle = f'Translate `{pofilename}`'\nurlfile = pofilename.replace('.po', '.html')\nissue = repo.create_issue(\n title=title,\n body=f'''This needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.8/{urlfile} once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `{pofilename}`:\n\n- Fuzzy: {pofile.fuzzy_nb}\n- Percent translated: {pofile.percent_translated}%\n- Entries: {pofile.translated_nb} / {pofile.po_file_size}\n- Untranslated: {pofile.untranslated_nb}\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).''',\n)\nprint(f'Issue \"{title}\" created at {issue.html_url}')\n",
"path": "scripts/create_issue.py"
}
] | [
{
"content": "# Use together with `pageviews.py`\n# python scripts/pageviews.py | head -n 150 | grep -v whats | cut -d ' ' -f 2 | sed 's/\\.html/\\.po/g' | xargs -I '{}' python scripts/create_issue.py '{}'\n\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom github import Github\nfrom potodo._po_file import PoFileStats\n\nif len(sys.argv) != 2:\n print('Specify PO filename')\n sys.exit(1)\n\npofilename = sys.argv[1]\npofile = PoFileStats(Path(pofilename))\n\ng = Github(os.environ.get('GITHUB_TOKEN'))\n\nrepo = g.get_repo('python/python-docs-es')\n\n\nissues = repo.get_issues(state='all')\nfor issue in issues:\n if pofilename in issue.title:\n\n print(f'Skipping {pofilename}. There is a similar issue already created at {issue.html_url}')\n sys.exit(1)\n\n msg = f'There is a similar issue already created at {issue.html_url}.\\nDo you want to create it anyways? [y/N] '\n answer = input(msg)\n if answer != 'y':\n sys.exit(1)\n\nif any([\n pofile.translated_nb == pofile.po_file_size,\n pofile.untranslated_nb == 0,\n]):\n print(f'Skipping {pofilename}. The file is 100% translated already.')\n sys.exit(1)\n\n# https://pygithub.readthedocs.io/en/latest/github_objects/Repository.html#github.Repository.Repository.create_issue\ntitle = f'Translate `{pofilename}`'\nurlfile = pofilename.replace('.po', '.html')\nissue = repo.create_issue(\n title=title,\n body=f'''This needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.8/{urlfile} once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `{pofilename}`:\n\n- Fuzzy: {pofile.fuzzy_nb}\n- Percent translated: {pofile.percent_translated}%\n- Entries: {pofile.translated_nb} / {pofile.po_file_size}\n- Untranslated: {pofile.untranslated_nb}\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).''',\n)\nprint(f'Issue \"{title}\" created at {issue.html_url}')\n",
"path": "scripts/create_issue.py"
}
] | null |
zostera__django-bootstrap3-843 | Upgrade importlib_metadata
This library currently enforces `importlib_metadata<3` for Python<3.8. This causes conflicts for us, since other libraries such as `markdown` have started enforcing `importlib_metadata>4.4` to get Python-3.10-like behaviour.
Is there a specific reason this version is enforced to be `<3`?
refs https://github.com/pretix/pretix/pull/2329
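For context, a small illustrative check (not from this project) of why the pin is unsatisfiable on Python < 3.8 when combined with a library like `markdown`; version numbers are taken from the issue text above, and the snippet assumes the `packaging` library:

```py
# Illustrative only: no importlib-metadata release can satisfy both "<3"
# (this library's old pin) and ">4.4" (markdown's requirement).
from packaging.specifiers import SpecifierSet

combined = SpecifierSet("<3") & SpecifierSet(">4.4")
print(any(v in combined for v in ("2.1.1", "4.4.0", "4.13.0")))  # False
```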
| [
{
"content": "import pathlib\n\nfrom setuptools import find_packages, setup\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\n\n# This call to setup() does all the work\nsetup(\n name=\"django-bootstrap3\",\n zip_safe=False,\n version=\"23.1\",\n description=\"Bootstrap 3 support for Django projects\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/zostera/django-bootstrap3\",\n author=\"Dylan Verheul\",\n author_email=\"[email protected]\",\n license=\"BSD-3-Clause\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Utilities\",\n ],\n python_requires=\">=3.7\",\n install_requires=[\n \"Django>=3.2\",\n 'importlib-metadata<3; python_version<\"3.8\"',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import pathlib\n\nfrom setuptools import find_packages, setup\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\n\n# This call to setup() does all the work\nsetup(\n name=\"django-bootstrap3\",\n zip_safe=False,\n version=\"23.1\",\n description=\"Bootstrap 3 support for Django projects\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/zostera/django-bootstrap3\",\n author=\"Dylan Verheul\",\n author_email=\"[email protected]\",\n license=\"BSD-3-Clause\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Utilities\",\n ],\n python_requires=\">=3.7\",\n install_requires=[\n \"Django>=3.2\",\n 'importlib-metadata; python_version<\"3.8\"',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index dca2095f..011f2a3b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
## 23.2 (in development)
+- Remove version restriction on importlib-metadata (#).
- Replace m2r2 with sphinx-mdinclude (#).
## 23.1 (2023-04-02)
diff --git a/setup.py b/setup.py
index 791b0652..65f80339 100644
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,6 @@
python_requires=">=3.7",
install_requires=[
"Django>=3.2",
- 'importlib-metadata<3; python_version<"3.8"',
+ 'importlib-metadata; python_version<"3.8"',
],
)
|
oppia__oppia-6435 | CSRF tokens are not generated in 404-error pages
**Describe the bug**
Unable to create an Exploration from the 404 error page because CSRF tokens are not generated on 404 error pages.
**To Reproduce**
Steps to reproduce the behaviour:
1. Go to '/error'
2. Click on 'create'
3. See error

Also, see the source code of the 404 error page: you'll find that the CSRF token in GLOBALS is null (line 57).

| [
{
"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base constants and handlers.\"\"\"\n\nimport Cookie\nimport base64\nimport datetime\nimport hmac\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\nimport urlparse\n\nfrom constants import constants\nfrom core.domain import config_domain\nfrom core.domain import config_services\nfrom core.domain import rights_manager\nfrom core.domain import user_services\nfrom core.platform import models\nimport feconf\nimport jinja_utils\nimport utils\n\nfrom google.appengine.api import users\nimport jinja2\nimport webapp2\n\napp_identity_services = models.Registry.import_app_identity_services()\ncurrent_user_services = models.Registry.import_current_user_services()\n(user_models,) = models.Registry.import_models([models.NAMES.user])\n\nONE_DAY_AGO_IN_SECS = -24 * 60 * 60\nDEFAULT_CSRF_SECRET = 'oppia csrf secret'\nCSRF_SECRET = config_domain.ConfigProperty(\n 'oppia_csrf_secret', {'type': 'unicode'},\n 'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)\n\nBEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty(\n 'before_end_head_tag_hook', {\n 'type': 'unicode',\n 'ui_config': {\n 'rows': 7,\n },\n },\n 'Code to insert just before the closing </head> tag in all pages.', '')\n\n\ndef _clear_login_cookies(response_headers):\n \"\"\"Clears login cookies from the given response headers.\"\"\"\n\n # App Engine sets the ACSID cookie for http:// and the SACSID cookie\n # for https:// . 
We just unset both below.\n cookie = Cookie.SimpleCookie()\n for cookie_name in ['ACSID', 'SACSID']:\n cookie = Cookie.SimpleCookie()\n cookie[cookie_name] = ''\n cookie[cookie_name]['expires'] = (\n datetime.datetime.utcnow() +\n datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)\n ).strftime('%a, %d %b %Y %H:%M:%S GMT')\n response_headers.add_header(*cookie.output().split(': ', 1))\n\n\nclass LogoutPage(webapp2.RequestHandler):\n \"\"\"Class which handles the logout URL.\"\"\"\n\n def get(self):\n \"\"\"Logs the user out, and returns them to a specified follow-up\n page (or the home page if no follow-up page is specified).\n \"\"\"\n\n # The str conversion is needed, otherwise an InvalidResponseError\n # asking for the 'Location' header value to be str instead of\n # 'unicode' will result.\n url_to_redirect_to = str(self.request.get('return_url') or '/')\n _clear_login_cookies(self.response.headers)\n\n if constants.DEV_MODE:\n self.redirect(users.create_logout_url(url_to_redirect_to))\n else:\n self.redirect(url_to_redirect_to)\n\n\nclass UserFacingExceptions(object):\n \"\"\"This class contains all the exception class definitions used.\"\"\"\n\n class NotLoggedInException(Exception):\n \"\"\"Error class for users that are not logged in (error code 401).\"\"\"\n\n class InvalidInputException(Exception):\n \"\"\"Error class for invalid input on the user side (error code 400).\"\"\"\n\n class UnauthorizedUserException(Exception):\n \"\"\"Error class for unauthorized access.\"\"\"\n\n class PageNotFoundException(Exception):\n \"\"\"Error class for a page not found error (error code 404).\"\"\"\n\n class InternalErrorException(Exception):\n \"\"\"Error class for an internal server side error (error code 500).\"\"\"\n\n\nclass BaseHandler(webapp2.RequestHandler):\n \"\"\"Base class for all Oppia handlers.\"\"\"\n\n # Whether to check POST and PUT payloads for CSRF tokens prior to\n # processing them. Can be overridden by subclasses if this check is\n # not necessary.\n REQUIRE_PAYLOAD_CSRF_CHECK = True\n # Whether to redirect requests corresponding to a logged-in user who has\n # not completed signup in to the signup page. This ensures that logged-in\n # users have agreed to the latest terms.\n REDIRECT_UNFINISHED_SIGNUPS = True\n\n # What format the get method returns when exception raised, json or html.\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML\n POST_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @webapp2.cached_property\n def jinja2_env(self):\n \"\"\"Returns a Jinja2 environment cached for frontend templates.\n\n Returns:\n Environment. 
A Jinja2 environment object used to load templates.\n \"\"\"\n return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR)\n\n def __init__(self, request, response): # pylint: disable=super-init-not-called\n # Set self.request, self.response and self.app.\n self.initialize(request, response)\n\n self.start_time = datetime.datetime.utcnow()\n\n # Initializes the return dict for the handlers.\n self.values = {}\n\n self.user_id = current_user_services.get_current_user_id()\n self.username = None\n self.has_seen_editor_tutorial = False\n self.has_seen_translation_tutorial = False\n self.partially_logged_in = False\n\n if self.user_id:\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n if user_settings is None:\n email = current_user_services.get_current_user_email()\n user_settings = user_services.create_new_user(\n self.user_id, email)\n self.values['user_email'] = user_settings.email\n\n if (self.REDIRECT_UNFINISHED_SIGNUPS and not\n user_services.has_fully_registered(self.user_id)):\n _clear_login_cookies(self.response.headers)\n self.partially_logged_in = True\n self.user_id = None\n else:\n self.username = user_settings.username\n self.values['username'] = self.username\n if user_settings.last_started_state_editor_tutorial:\n self.has_seen_editor_tutorial = True\n if user_settings.last_started_state_translation_tutorial:\n self.has_seen_translation_tutorial = True\n # In order to avoid too many datastore writes, we do not bother\n # recording a log-in if the current time is sufficiently close\n # to the last log-in time.\n if (user_settings.last_logged_in is None or\n not utils.are_datetimes_close(\n datetime.datetime.utcnow(),\n user_settings.last_logged_in)):\n user_services.record_user_logged_in(self.user_id)\n\n self.role = (\n feconf.ROLE_ID_GUEST\n if self.user_id is None else user_settings.role)\n self.user = user_services.UserActionsInfo(self.user_id)\n\n self.is_super_admin = (\n current_user_services.is_current_user_super_admin())\n\n self.values['additional_angular_modules'] = []\n self.values['iframed'] = False\n self.values['is_moderator'] = user_services.is_at_least_moderator(\n self.user_id)\n self.values['is_admin'] = user_services.is_admin(self.user_id)\n self.values['is_topic_manager'] = (\n user_services.is_topic_manager(self.user_id))\n self.values['is_super_admin'] = self.is_super_admin\n\n if self.request.get('payload'):\n self.payload = json.loads(self.request.get('payload'))\n else:\n self.payload = None\n\n def dispatch(self):\n \"\"\"Overrides dispatch method in webapp2 superclass.\n\n Raises:\n Exception: The CSRF token is missing.\n UnauthorizedUserException: The CSRF token is invalid.\n \"\"\"\n # If the request is to the old demo server, redirect it permanently to\n # the new demo server.\n if self.request.uri.startswith('https://oppiaserver.appspot.com'):\n self.redirect('https://oppiatestserver.appspot.com', permanent=True)\n return\n\n # In DEV_MODE, clearing cookies does not log out the user, so we\n # force-clear them by redirecting to the logout URL.\n if constants.DEV_MODE and self.partially_logged_in:\n self.redirect(users.create_logout_url(self.request.uri))\n return\n\n if self.payload is not None and self.REQUIRE_PAYLOAD_CSRF_CHECK:\n try:\n # If user opens a new tab during signup process, the user_id\n # parameter is set to None and this causes the signup session\n # to expire. 
The code here checks if user is on the signup\n # page and the user_id is None, if that is the case an exception\n # is raised which is handled by the frontend by showing a\n # continue to registration modal.\n if 'signup' in self.request.uri and not self.user_id:\n raise self.UnauthorizedUserException(\n 'Registration session expired.')\n csrf_token = self.request.get('csrf_token')\n if not csrf_token:\n raise Exception(\n 'Missing CSRF token. Changes were not saved. '\n 'Please report this bug.')\n\n is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid(\n self.user_id, csrf_token)\n\n if not is_csrf_token_valid:\n raise self.UnauthorizedUserException(\n 'Your session has expired, and unfortunately your '\n 'changes cannot be saved. Please refresh the page.')\n except Exception as e:\n logging.error('%s: payload %s', e, self.payload)\n\n self.handle_exception(e, self.app.debug)\n return\n\n super(BaseHandler, self).dispatch()\n\n def get(self, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"Base method to handle GET requests.\"\"\"\n raise self.PageNotFoundException\n\n def post(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle POST requests.\"\"\"\n raise self.PageNotFoundException\n\n def put(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle PUT requests.\"\"\"\n raise self.PageNotFoundException\n\n def delete(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle DELETE requests.\"\"\"\n raise self.PageNotFoundException\n\n def render_json(self, values):\n \"\"\"Prepares JSON response to be sent to the client.\n\n Args:\n values: dict. The key-value pairs to encode in the JSON response.\n \"\"\"\n self.response.content_type = 'application/json; charset=utf-8'\n self.response.headers['Content-Disposition'] = (\n 'attachment; filename=\"oppia-attachment.txt\"')\n self.response.headers['Strict-Transport-Security'] = (\n 'max-age=31536000; includeSubDomains')\n self.response.headers['X-Content-Type-Options'] = 'nosniff'\n self.response.headers['X-Xss-Protection'] = '1; mode=block'\n\n json_output = json.dumps(values, cls=utils.JSONEncoderForHTML)\n self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output))\n\n def render_downloadable_file(self, values, filename, content_type):\n \"\"\"Prepares downloadable content to be sent to the client.\"\"\"\n self.response.headers['Content-Type'] = content_type\n self.response.headers['Content-Disposition'] = str(\n 'attachment; filename=%s' % filename)\n self.response.write(values)\n\n def _get_logout_url(self, redirect_url_on_logout):\n \"\"\"Prepares and returns logout url which will be handled\n by LogoutPage handler.\n\n Args:\n redirect_url_on_logout: str. URL to redirect to on logout.\n\n Returns:\n str. Logout URL to be handled by LogoutPage handler.\n \"\"\"\n return current_user_services.create_logout_url(redirect_url_on_logout)\n\n def render_template(\n self, filepath, iframe_restriction='DENY',\n redirect_url_on_logout=None):\n \"\"\"Prepares an HTML response to be sent to the client.\n\n Args:\n filepath: str. The template filepath.\n iframe_restriction: str or None. Possible values are\n 'DENY' and 'SAMEORIGIN':\n\n DENY: Strictly prevents the template to load in an iframe.\n SAMEORIGIN: The template can only be displayed in a frame\n on the same origin as the page itself.\n redirect_url_on_logout: str or None. 
URL to redirect to on logout.\n \"\"\"\n values = self.values\n\n scheme, netloc, path, _, _ = urlparse.urlsplit(self.request.uri)\n\n values.update({\n 'BEFORE_END_HEAD_TAG_HOOK': jinja2.utils.Markup(\n BEFORE_END_HEAD_TAG_HOOK.value),\n 'DEV_MODE': constants.DEV_MODE,\n 'DOMAIN_URL': '%s://%s' % (scheme, netloc),\n 'ACTIVITY_STATUS_PRIVATE': (\n rights_manager.ACTIVITY_STATUS_PRIVATE),\n 'ACTIVITY_STATUS_PUBLIC': (\n rights_manager.ACTIVITY_STATUS_PUBLIC),\n 'GCS_RESOURCE_BUCKET_NAME': (\n app_identity_services.get_gcs_resource_bucket_name()),\n # The 'path' variable starts with a forward slash.\n 'FULL_URL': '%s://%s%s' % (scheme, netloc, path),\n 'user_is_logged_in': user_services.has_fully_registered(\n self.user_id)\n })\n\n if 'status_code' not in values:\n values['status_code'] = 200\n\n if 'meta_name' not in values:\n values['meta_name'] = 'Personalized Online Learning from Oppia'\n\n if 'meta_description' not in values:\n values['meta_description'] = (\n 'Oppia is a free, open-source learning platform. Join the '\n 'community to create or try an exploration today!')\n\n if redirect_url_on_logout is None:\n redirect_url_on_logout = self.request.uri\n\n if self.user_id:\n values['logout_url'] = self._get_logout_url(redirect_url_on_logout)\n else:\n values['logout_url'] = None\n\n # Create a new csrf token for inclusion in HTML responses. This assumes\n # that tokens generated in one handler will be sent back to a handler\n # with the same page name.\n values['csrf_token'] = ''\n\n if self.REQUIRE_PAYLOAD_CSRF_CHECK:\n values['csrf_token'] = CsrfTokenManager.create_csrf_token(\n self.user_id)\n\n self.response.cache_control.no_cache = True\n self.response.cache_control.must_revalidate = True\n self.response.headers['Strict-Transport-Security'] = (\n 'max-age=31536000; includeSubDomains')\n self.response.headers['X-Content-Type-Options'] = 'nosniff'\n self.response.headers['X-Xss-Protection'] = '1; mode=block'\n\n if iframe_restriction is not None:\n if iframe_restriction in ['SAMEORIGIN', 'DENY']:\n self.response.headers['X-Frame-Options'] = iframe_restriction\n else:\n raise Exception(\n 'Invalid X-Frame-Options: %s' % iframe_restriction)\n\n self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'\n self.response.pragma = 'no-cache'\n\n self.response.write(\n self.jinja2_env.get_template(filepath).render(**values))\n\n def _render_exception_json_or_html(self, return_type, values):\n \"\"\"Renders an error page, or an error JSON response.\n\n Args:\n return_type: str. Indicator to return JSON or HTML.\n values: dict. The key-value pairs to include in the response.\n \"\"\"\n\n method = self.request.environ['REQUEST_METHOD']\n\n if return_type == feconf.HANDLER_TYPE_HTML and (\n method == 'GET'):\n self.values.update(values)\n if 'iframed' in self.values and self.values['iframed']:\n self.render_template(\n 'pages/error/error_iframed.html', iframe_restriction=None)\n else:\n self.render_template('pages/error/error.html')\n else:\n if return_type != feconf.HANDLER_TYPE_JSON and (\n return_type != feconf.HANDLER_TYPE_DOWNLOADABLE):\n logging.warning('Not a recognized return type: '\n 'defaulting to render JSON.')\n self.render_json(values)\n\n def _render_exception(self, error_code, values):\n \"\"\"Renders an error page, or an error JSON response.\n\n Args:\n error_code: int. The HTTP status code (expected to be one of\n 400, 401, 404 or 500).\n values: dict. 
The key-value pairs to include in the response.\n \"\"\"\n assert error_code in [400, 401, 404, 500]\n values['status_code'] = error_code\n method = self.request.environ['REQUEST_METHOD']\n\n if method == 'GET':\n self._render_exception_json_or_html(\n self.GET_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'POST':\n self._render_exception_json_or_html(\n self.POST_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'PUT':\n self._render_exception_json_or_html(\n self.PUT_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'DELETE':\n self._render_exception_json_or_html(\n self.DELETE_HANDLER_ERROR_RETURN_TYPE, values)\n else:\n logging.warning('Not a recognized request method.')\n self._render_exception_json_or_html(\n None, values)\n\n def handle_exception(self, exception, unused_debug_mode):\n \"\"\"Overwrites the default exception handler.\n\n Args:\n exception: Exception. The exception that was thrown.\n unused_debug_mode: bool. True if the web application is running\n in debug mode.\n \"\"\"\n if isinstance(exception, self.NotLoggedInException):\n # This checks if the response should be JSON or HTML.\n # For GET requests, there is no payload, so we check against\n # GET_HANDLER_ERROR_RETURN_TYPE.\n # Otherwise, we check whether self.payload exists.\n if (self.payload is not None or\n self.GET_HANDLER_ERROR_RETURN_TYPE ==\n feconf.HANDLER_TYPE_JSON):\n self.error(401)\n self._render_exception(401, {'error': unicode(exception)})\n else:\n self.redirect(\n current_user_services.create_login_url(self.request.uri))\n return\n\n logging.info(''.join(traceback.format_exception(*sys.exc_info())))\n\n if isinstance(exception, self.PageNotFoundException):\n logging.warning('Invalid URL requested: %s', self.request.uri)\n self.error(404)\n self._render_exception(\n 404, {\n 'error': 'Could not find the page %s.' 
% self.request.uri})\n return\n\n logging.error('Exception raised: %s', exception)\n\n if isinstance(exception, self.UnauthorizedUserException):\n self.error(401)\n self._render_exception(401, {'error': unicode(exception)})\n return\n\n if isinstance(exception, self.InvalidInputException):\n self.error(400)\n self._render_exception(400, {'error': unicode(exception)})\n return\n\n if isinstance(exception, self.InternalErrorException):\n self.error(500)\n self._render_exception(500, {'error': unicode(exception)})\n return\n\n self.error(500)\n self._render_exception(500, {'error': unicode(exception)})\n\n InternalErrorException = UserFacingExceptions.InternalErrorException\n InvalidInputException = UserFacingExceptions.InvalidInputException\n NotLoggedInException = UserFacingExceptions.NotLoggedInException\n PageNotFoundException = UserFacingExceptions.PageNotFoundException\n UnauthorizedUserException = UserFacingExceptions.UnauthorizedUserException\n\n\nclass Error404Handler(BaseHandler):\n \"\"\"Handles 404 errors.\"\"\"\n\n REQUIRE_PAYLOAD_CSRF_CHECK = False\n\n\nclass CsrfTokenManager(object):\n \"\"\"Manages page/user tokens in memcache to protect against CSRF.\"\"\"\n\n # Max age of the token (48 hours).\n _CSRF_TOKEN_AGE_SECS = 60 * 60 * 48\n # Default user id for non-logged-in users.\n _USER_ID_DEFAULT = 'non_logged_in_user'\n\n @classmethod\n def init_csrf_secret(cls):\n \"\"\"Verify that non-default CSRF secret exists; creates one if not.\"\"\"\n\n # Any non-default value is fine.\n if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET:\n return\n\n # Initialize to random value.\n config_services.set_property(\n feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name,\n base64.urlsafe_b64encode(os.urandom(20)))\n\n @classmethod\n def _create_token(cls, user_id, issued_on):\n \"\"\"Creates a new CSRF token.\n\n Args:\n user_id: str. The user_id for which the token is generated.\n issued_on: float. The timestamp at which the token was issued.\n\n Returns:\n str: The generated CSRF token.\n \"\"\"\n cls.init_csrf_secret()\n\n # The token has 4 parts: hash of the actor user id, hash of the page\n # name, hash of the time issued and plain text of the time issued.\n\n if user_id is None:\n user_id = cls._USER_ID_DEFAULT\n\n # Round time to seconds.\n issued_on = long(issued_on)\n\n digester = hmac.new(str(CSRF_SECRET.value))\n digester.update(str(user_id))\n digester.update(':')\n digester.update(str(issued_on))\n\n digest = digester.digest()\n token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest))\n\n return token\n\n @classmethod\n def _get_current_time(cls):\n \"\"\"Returns the current server time.\n\n Returns:\n float. The time in seconds as floating point number.\n \"\"\"\n return time.time()\n\n @classmethod\n def create_csrf_token(cls, user_id):\n \"\"\"Creates a CSRF token for the given user_id.\n\n Args:\n user_id: str. The user_id for whom the token is generated.\n\n Returns:\n str. The generated CSRF token.\n \"\"\"\n return cls._create_token(user_id, cls._get_current_time())\n\n @classmethod\n def is_csrf_token_valid(cls, user_id, token):\n \"\"\"Validates a given CSRF token.\n\n Args:\n user_id: str. The user_id to validate the CSRF token against.\n token: str. The CSRF token to validate.\n\n Returns:\n bool. 
Whether the given CSRF token is valid.\n \"\"\"\n try:\n parts = token.split('/')\n if len(parts) != 2:\n return False\n\n issued_on = long(parts[0])\n age = cls._get_current_time() - issued_on\n if age > cls._CSRF_TOKEN_AGE_SECS:\n return False\n\n authentic_token = cls._create_token(user_id, issued_on)\n if authentic_token == token:\n return True\n\n return False\n except Exception:\n return False\n",
"path": "core/controllers/base.py"
}
] | [
{
"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base constants and handlers.\"\"\"\n\nimport Cookie\nimport base64\nimport datetime\nimport hmac\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\nimport urlparse\n\nfrom constants import constants\nfrom core.domain import config_domain\nfrom core.domain import config_services\nfrom core.domain import rights_manager\nfrom core.domain import user_services\nfrom core.platform import models\nimport feconf\nimport jinja_utils\nimport utils\n\nfrom google.appengine.api import users\nimport jinja2\nimport webapp2\n\napp_identity_services = models.Registry.import_app_identity_services()\ncurrent_user_services = models.Registry.import_current_user_services()\n(user_models,) = models.Registry.import_models([models.NAMES.user])\n\nONE_DAY_AGO_IN_SECS = -24 * 60 * 60\nDEFAULT_CSRF_SECRET = 'oppia csrf secret'\nCSRF_SECRET = config_domain.ConfigProperty(\n 'oppia_csrf_secret', {'type': 'unicode'},\n 'Text used to encrypt CSRF tokens.', DEFAULT_CSRF_SECRET)\n\nBEFORE_END_HEAD_TAG_HOOK = config_domain.ConfigProperty(\n 'before_end_head_tag_hook', {\n 'type': 'unicode',\n 'ui_config': {\n 'rows': 7,\n },\n },\n 'Code to insert just before the closing </head> tag in all pages.', '')\n\n\ndef _clear_login_cookies(response_headers):\n \"\"\"Clears login cookies from the given response headers.\"\"\"\n\n # App Engine sets the ACSID cookie for http:// and the SACSID cookie\n # for https:// . 
We just unset both below.\n cookie = Cookie.SimpleCookie()\n for cookie_name in ['ACSID', 'SACSID']:\n cookie = Cookie.SimpleCookie()\n cookie[cookie_name] = ''\n cookie[cookie_name]['expires'] = (\n datetime.datetime.utcnow() +\n datetime.timedelta(seconds=ONE_DAY_AGO_IN_SECS)\n ).strftime('%a, %d %b %Y %H:%M:%S GMT')\n response_headers.add_header(*cookie.output().split(': ', 1))\n\n\nclass LogoutPage(webapp2.RequestHandler):\n \"\"\"Class which handles the logout URL.\"\"\"\n\n def get(self):\n \"\"\"Logs the user out, and returns them to a specified follow-up\n page (or the home page if no follow-up page is specified).\n \"\"\"\n\n # The str conversion is needed, otherwise an InvalidResponseError\n # asking for the 'Location' header value to be str instead of\n # 'unicode' will result.\n url_to_redirect_to = str(self.request.get('return_url') or '/')\n _clear_login_cookies(self.response.headers)\n\n if constants.DEV_MODE:\n self.redirect(users.create_logout_url(url_to_redirect_to))\n else:\n self.redirect(url_to_redirect_to)\n\n\nclass UserFacingExceptions(object):\n \"\"\"This class contains all the exception class definitions used.\"\"\"\n\n class NotLoggedInException(Exception):\n \"\"\"Error class for users that are not logged in (error code 401).\"\"\"\n\n class InvalidInputException(Exception):\n \"\"\"Error class for invalid input on the user side (error code 400).\"\"\"\n\n class UnauthorizedUserException(Exception):\n \"\"\"Error class for unauthorized access.\"\"\"\n\n class PageNotFoundException(Exception):\n \"\"\"Error class for a page not found error (error code 404).\"\"\"\n\n class InternalErrorException(Exception):\n \"\"\"Error class for an internal server side error (error code 500).\"\"\"\n\n\nclass BaseHandler(webapp2.RequestHandler):\n \"\"\"Base class for all Oppia handlers.\"\"\"\n\n # Whether to check POST and PUT payloads for CSRF tokens prior to\n # processing them. Can be overridden by subclasses if this check is\n # not necessary.\n REQUIRE_PAYLOAD_CSRF_CHECK = True\n # Whether to redirect requests corresponding to a logged-in user who has\n # not completed signup in to the signup page. This ensures that logged-in\n # users have agreed to the latest terms.\n REDIRECT_UNFINISHED_SIGNUPS = True\n\n # What format the get method returns when exception raised, json or html.\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_HTML\n POST_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @webapp2.cached_property\n def jinja2_env(self):\n \"\"\"Returns a Jinja2 environment cached for frontend templates.\n\n Returns:\n Environment. 
A Jinja2 environment object used to load templates.\n \"\"\"\n return jinja_utils.get_jinja_env(feconf.FRONTEND_TEMPLATES_DIR)\n\n def __init__(self, request, response): # pylint: disable=super-init-not-called\n # Set self.request, self.response and self.app.\n self.initialize(request, response)\n\n self.start_time = datetime.datetime.utcnow()\n\n # Initializes the return dict for the handlers.\n self.values = {}\n\n self.user_id = current_user_services.get_current_user_id()\n self.username = None\n self.has_seen_editor_tutorial = False\n self.has_seen_translation_tutorial = False\n self.partially_logged_in = False\n\n if self.user_id:\n user_settings = user_services.get_user_settings(\n self.user_id, strict=False)\n if user_settings is None:\n email = current_user_services.get_current_user_email()\n user_settings = user_services.create_new_user(\n self.user_id, email)\n self.values['user_email'] = user_settings.email\n\n if (self.REDIRECT_UNFINISHED_SIGNUPS and not\n user_services.has_fully_registered(self.user_id)):\n _clear_login_cookies(self.response.headers)\n self.partially_logged_in = True\n self.user_id = None\n else:\n self.username = user_settings.username\n self.values['username'] = self.username\n if user_settings.last_started_state_editor_tutorial:\n self.has_seen_editor_tutorial = True\n if user_settings.last_started_state_translation_tutorial:\n self.has_seen_translation_tutorial = True\n # In order to avoid too many datastore writes, we do not bother\n # recording a log-in if the current time is sufficiently close\n # to the last log-in time.\n if (user_settings.last_logged_in is None or\n not utils.are_datetimes_close(\n datetime.datetime.utcnow(),\n user_settings.last_logged_in)):\n user_services.record_user_logged_in(self.user_id)\n\n self.role = (\n feconf.ROLE_ID_GUEST\n if self.user_id is None else user_settings.role)\n self.user = user_services.UserActionsInfo(self.user_id)\n\n self.is_super_admin = (\n current_user_services.is_current_user_super_admin())\n\n self.values['additional_angular_modules'] = []\n self.values['iframed'] = False\n self.values['is_moderator'] = user_services.is_at_least_moderator(\n self.user_id)\n self.values['is_admin'] = user_services.is_admin(self.user_id)\n self.values['is_topic_manager'] = (\n user_services.is_topic_manager(self.user_id))\n self.values['is_super_admin'] = self.is_super_admin\n\n if self.request.get('payload'):\n self.payload = json.loads(self.request.get('payload'))\n else:\n self.payload = None\n\n def dispatch(self):\n \"\"\"Overrides dispatch method in webapp2 superclass.\n\n Raises:\n Exception: The CSRF token is missing.\n UnauthorizedUserException: The CSRF token is invalid.\n \"\"\"\n # If the request is to the old demo server, redirect it permanently to\n # the new demo server.\n if self.request.uri.startswith('https://oppiaserver.appspot.com'):\n self.redirect('https://oppiatestserver.appspot.com', permanent=True)\n return\n\n # In DEV_MODE, clearing cookies does not log out the user, so we\n # force-clear them by redirecting to the logout URL.\n if constants.DEV_MODE and self.partially_logged_in:\n self.redirect(users.create_logout_url(self.request.uri))\n return\n\n if self.payload is not None and self.REQUIRE_PAYLOAD_CSRF_CHECK:\n try:\n # If user opens a new tab during signup process, the user_id\n # parameter is set to None and this causes the signup session\n # to expire. 
The code here checks if user is on the signup\n # page and the user_id is None, if that is the case an exception\n # is raised which is handled by the frontend by showing a\n # continue to registration modal.\n if 'signup' in self.request.uri and not self.user_id:\n raise self.UnauthorizedUserException(\n 'Registration session expired.')\n csrf_token = self.request.get('csrf_token')\n if not csrf_token:\n raise Exception(\n 'Missing CSRF token. Changes were not saved. '\n 'Please report this bug.')\n\n is_csrf_token_valid = CsrfTokenManager.is_csrf_token_valid(\n self.user_id, csrf_token)\n\n if not is_csrf_token_valid:\n raise self.UnauthorizedUserException(\n 'Your session has expired, and unfortunately your '\n 'changes cannot be saved. Please refresh the page.')\n except Exception as e:\n logging.error('%s: payload %s', e, self.payload)\n\n self.handle_exception(e, self.app.debug)\n return\n\n super(BaseHandler, self).dispatch()\n\n def get(self, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"Base method to handle GET requests.\"\"\"\n raise self.PageNotFoundException\n\n def post(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle POST requests.\"\"\"\n raise self.PageNotFoundException\n\n def put(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle PUT requests.\"\"\"\n raise self.PageNotFoundException\n\n def delete(self, *args): # pylint: disable=unused-argument\n \"\"\"Base method to handle DELETE requests.\"\"\"\n raise self.PageNotFoundException\n\n def render_json(self, values):\n \"\"\"Prepares JSON response to be sent to the client.\n\n Args:\n values: dict. The key-value pairs to encode in the JSON response.\n \"\"\"\n self.response.content_type = 'application/json; charset=utf-8'\n self.response.headers['Content-Disposition'] = (\n 'attachment; filename=\"oppia-attachment.txt\"')\n self.response.headers['Strict-Transport-Security'] = (\n 'max-age=31536000; includeSubDomains')\n self.response.headers['X-Content-Type-Options'] = 'nosniff'\n self.response.headers['X-Xss-Protection'] = '1; mode=block'\n\n json_output = json.dumps(values, cls=utils.JSONEncoderForHTML)\n self.response.write('%s%s' % (feconf.XSSI_PREFIX, json_output))\n\n def render_downloadable_file(self, values, filename, content_type):\n \"\"\"Prepares downloadable content to be sent to the client.\"\"\"\n self.response.headers['Content-Type'] = content_type\n self.response.headers['Content-Disposition'] = str(\n 'attachment; filename=%s' % filename)\n self.response.write(values)\n\n def _get_logout_url(self, redirect_url_on_logout):\n \"\"\"Prepares and returns logout url which will be handled\n by LogoutPage handler.\n\n Args:\n redirect_url_on_logout: str. URL to redirect to on logout.\n\n Returns:\n str. Logout URL to be handled by LogoutPage handler.\n \"\"\"\n return current_user_services.create_logout_url(redirect_url_on_logout)\n\n def render_template(\n self, filepath, iframe_restriction='DENY',\n redirect_url_on_logout=None):\n \"\"\"Prepares an HTML response to be sent to the client.\n\n Args:\n filepath: str. The template filepath.\n iframe_restriction: str or None. Possible values are\n 'DENY' and 'SAMEORIGIN':\n\n DENY: Strictly prevents the template to load in an iframe.\n SAMEORIGIN: The template can only be displayed in a frame\n on the same origin as the page itself.\n redirect_url_on_logout: str or None. 
URL to redirect to on logout.\n \"\"\"\n values = self.values\n\n scheme, netloc, path, _, _ = urlparse.urlsplit(self.request.uri)\n\n values.update({\n 'BEFORE_END_HEAD_TAG_HOOK': jinja2.utils.Markup(\n BEFORE_END_HEAD_TAG_HOOK.value),\n 'DEV_MODE': constants.DEV_MODE,\n 'DOMAIN_URL': '%s://%s' % (scheme, netloc),\n 'ACTIVITY_STATUS_PRIVATE': (\n rights_manager.ACTIVITY_STATUS_PRIVATE),\n 'ACTIVITY_STATUS_PUBLIC': (\n rights_manager.ACTIVITY_STATUS_PUBLIC),\n 'GCS_RESOURCE_BUCKET_NAME': (\n app_identity_services.get_gcs_resource_bucket_name()),\n # The 'path' variable starts with a forward slash.\n 'FULL_URL': '%s://%s%s' % (scheme, netloc, path),\n 'user_is_logged_in': user_services.has_fully_registered(\n self.user_id)\n })\n\n if 'status_code' not in values:\n values['status_code'] = 200\n\n if 'meta_name' not in values:\n values['meta_name'] = 'Personalized Online Learning from Oppia'\n\n if 'meta_description' not in values:\n values['meta_description'] = (\n 'Oppia is a free, open-source learning platform. Join the '\n 'community to create or try an exploration today!')\n\n if redirect_url_on_logout is None:\n redirect_url_on_logout = self.request.uri\n\n if self.user_id:\n values['logout_url'] = self._get_logout_url(redirect_url_on_logout)\n else:\n values['logout_url'] = None\n\n # Create a new csrf token for inclusion in HTML responses. This assumes\n # that tokens generated in one handler will be sent back to a handler\n # with the same page name.\n values['csrf_token'] = ''\n\n if self.REQUIRE_PAYLOAD_CSRF_CHECK:\n values['csrf_token'] = CsrfTokenManager.create_csrf_token(\n self.user_id)\n\n self.response.cache_control.no_cache = True\n self.response.cache_control.must_revalidate = True\n self.response.headers['Strict-Transport-Security'] = (\n 'max-age=31536000; includeSubDomains')\n self.response.headers['X-Content-Type-Options'] = 'nosniff'\n self.response.headers['X-Xss-Protection'] = '1; mode=block'\n\n if iframe_restriction is not None:\n if iframe_restriction in ['SAMEORIGIN', 'DENY']:\n self.response.headers['X-Frame-Options'] = iframe_restriction\n else:\n raise Exception(\n 'Invalid X-Frame-Options: %s' % iframe_restriction)\n\n self.response.expires = 'Mon, 01 Jan 1990 00:00:00 GMT'\n self.response.pragma = 'no-cache'\n\n self.response.write(\n self.jinja2_env.get_template(filepath).render(**values))\n\n def _render_exception_json_or_html(self, return_type, values):\n \"\"\"Renders an error page, or an error JSON response.\n\n Args:\n return_type: str. Indicator to return JSON or HTML.\n values: dict. The key-value pairs to include in the response.\n \"\"\"\n\n method = self.request.environ['REQUEST_METHOD']\n\n if return_type == feconf.HANDLER_TYPE_HTML and (\n method == 'GET'):\n self.values.update(values)\n if 'iframed' in self.values and self.values['iframed']:\n self.render_template(\n 'pages/error/error_iframed.html', iframe_restriction=None)\n else:\n self.render_template('pages/error/error.html')\n else:\n if return_type != feconf.HANDLER_TYPE_JSON and (\n return_type != feconf.HANDLER_TYPE_DOWNLOADABLE):\n logging.warning('Not a recognized return type: '\n 'defaulting to render JSON.')\n self.render_json(values)\n\n def _render_exception(self, error_code, values):\n \"\"\"Renders an error page, or an error JSON response.\n\n Args:\n error_code: int. The HTTP status code (expected to be one of\n 400, 401, 404 or 500).\n values: dict. 
The key-value pairs to include in the response.\n \"\"\"\n assert error_code in [400, 401, 404, 500]\n values['status_code'] = error_code\n method = self.request.environ['REQUEST_METHOD']\n\n if method == 'GET':\n self._render_exception_json_or_html(\n self.GET_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'POST':\n self._render_exception_json_or_html(\n self.POST_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'PUT':\n self._render_exception_json_or_html(\n self.PUT_HANDLER_ERROR_RETURN_TYPE, values)\n elif method == 'DELETE':\n self._render_exception_json_or_html(\n self.DELETE_HANDLER_ERROR_RETURN_TYPE, values)\n else:\n logging.warning('Not a recognized request method.')\n self._render_exception_json_or_html(\n None, values)\n\n def handle_exception(self, exception, unused_debug_mode):\n \"\"\"Overwrites the default exception handler.\n\n Args:\n exception: Exception. The exception that was thrown.\n unused_debug_mode: bool. True if the web application is running\n in debug mode.\n \"\"\"\n if isinstance(exception, self.NotLoggedInException):\n # This checks if the response should be JSON or HTML.\n # For GET requests, there is no payload, so we check against\n # GET_HANDLER_ERROR_RETURN_TYPE.\n # Otherwise, we check whether self.payload exists.\n if (self.payload is not None or\n self.GET_HANDLER_ERROR_RETURN_TYPE ==\n feconf.HANDLER_TYPE_JSON):\n self.error(401)\n self._render_exception(401, {'error': unicode(exception)})\n else:\n self.redirect(\n current_user_services.create_login_url(self.request.uri))\n return\n\n logging.info(''.join(traceback.format_exception(*sys.exc_info())))\n\n if isinstance(exception, self.PageNotFoundException):\n logging.warning('Invalid URL requested: %s', self.request.uri)\n self.error(404)\n self._render_exception(\n 404, {\n 'error': 'Could not find the page %s.' 
% self.request.uri})\n return\n\n logging.error('Exception raised: %s', exception)\n\n if isinstance(exception, self.UnauthorizedUserException):\n self.error(401)\n self._render_exception(401, {'error': unicode(exception)})\n return\n\n if isinstance(exception, self.InvalidInputException):\n self.error(400)\n self._render_exception(400, {'error': unicode(exception)})\n return\n\n if isinstance(exception, self.InternalErrorException):\n self.error(500)\n self._render_exception(500, {'error': unicode(exception)})\n return\n\n self.error(500)\n self._render_exception(500, {'error': unicode(exception)})\n\n InternalErrorException = UserFacingExceptions.InternalErrorException\n InvalidInputException = UserFacingExceptions.InvalidInputException\n NotLoggedInException = UserFacingExceptions.NotLoggedInException\n PageNotFoundException = UserFacingExceptions.PageNotFoundException\n UnauthorizedUserException = UserFacingExceptions.UnauthorizedUserException\n\n\nclass Error404Handler(BaseHandler):\n \"\"\"Handles 404 errors.\"\"\"\n\n pass\n\n\nclass CsrfTokenManager(object):\n \"\"\"Manages page/user tokens in memcache to protect against CSRF.\"\"\"\n\n # Max age of the token (48 hours).\n _CSRF_TOKEN_AGE_SECS = 60 * 60 * 48\n # Default user id for non-logged-in users.\n _USER_ID_DEFAULT = 'non_logged_in_user'\n\n @classmethod\n def init_csrf_secret(cls):\n \"\"\"Verify that non-default CSRF secret exists; creates one if not.\"\"\"\n\n # Any non-default value is fine.\n if CSRF_SECRET.value and CSRF_SECRET.value != DEFAULT_CSRF_SECRET:\n return\n\n # Initialize to random value.\n config_services.set_property(\n feconf.SYSTEM_COMMITTER_ID, CSRF_SECRET.name,\n base64.urlsafe_b64encode(os.urandom(20)))\n\n @classmethod\n def _create_token(cls, user_id, issued_on):\n \"\"\"Creates a new CSRF token.\n\n Args:\n user_id: str. The user_id for which the token is generated.\n issued_on: float. The timestamp at which the token was issued.\n\n Returns:\n str: The generated CSRF token.\n \"\"\"\n cls.init_csrf_secret()\n\n # The token has 4 parts: hash of the actor user id, hash of the page\n # name, hash of the time issued and plain text of the time issued.\n\n if user_id is None:\n user_id = cls._USER_ID_DEFAULT\n\n # Round time to seconds.\n issued_on = long(issued_on)\n\n digester = hmac.new(str(CSRF_SECRET.value))\n digester.update(str(user_id))\n digester.update(':')\n digester.update(str(issued_on))\n\n digest = digester.digest()\n token = '%s/%s' % (issued_on, base64.urlsafe_b64encode(digest))\n\n return token\n\n @classmethod\n def _get_current_time(cls):\n \"\"\"Returns the current server time.\n\n Returns:\n float. The time in seconds as floating point number.\n \"\"\"\n return time.time()\n\n @classmethod\n def create_csrf_token(cls, user_id):\n \"\"\"Creates a CSRF token for the given user_id.\n\n Args:\n user_id: str. The user_id for whom the token is generated.\n\n Returns:\n str. The generated CSRF token.\n \"\"\"\n return cls._create_token(user_id, cls._get_current_time())\n\n @classmethod\n def is_csrf_token_valid(cls, user_id, token):\n \"\"\"Validates a given CSRF token.\n\n Args:\n user_id: str. The user_id to validate the CSRF token against.\n token: str. The CSRF token to validate.\n\n Returns:\n bool. 
Whether the given CSRF token is valid.\n \"\"\"\n try:\n parts = token.split('/')\n if len(parts) != 2:\n return False\n\n issued_on = long(parts[0])\n age = cls._get_current_time() - issued_on\n if age > cls._CSRF_TOKEN_AGE_SECS:\n return False\n\n authentic_token = cls._create_token(user_id, issued_on)\n if authentic_token == token:\n return True\n\n return False\n except Exception:\n return False\n",
"path": "core/controllers/base.py"
}
] | diff --git a/core/controllers/base.py b/core/controllers/base.py
index 5933f3c52c5a2..abb25ce29a0ce 100755
--- a/core/controllers/base.py
+++ b/core/controllers/base.py
@@ -513,7 +513,7 @@ def handle_exception(self, exception, unused_debug_mode):
class Error404Handler(BaseHandler):
"""Handles 404 errors."""
- REQUIRE_PAYLOAD_CSRF_CHECK = False
+ pass
class CsrfTokenManager(object):
diff --git a/core/controllers/base_test.py b/core/controllers/base_test.py
index e32cebb0d6b1b..01f1b01349e70 100644
--- a/core/controllers/base_test.py
+++ b/core/controllers/base_test.py
@@ -107,6 +107,8 @@ def test_that_no_get_results_in_500_error(self):
def test_requests_for_invalid_paths(self):
"""Test that requests for invalid paths result in a 404 error."""
+ user_id = user_services.get_user_id_from_username('learneruser')
+ csrf_token = base.CsrfTokenManager.create_csrf_token(user_id)
self.get_html_response(
'/library/extra', expected_status_int=404)
@@ -115,10 +117,12 @@ def test_requests_for_invalid_paths(self):
'/library/data/extra', expected_status_int=404)
self.post_json(
- '/library/extra', payload={}, expected_status_int=404)
+ '/library/extra', payload={}, csrf_token=csrf_token,
+ expected_status_int=404)
self.put_json(
- '/library/extra', payload={}, expected_status_int=404)
+ '/library/extra', payload={}, csrf_token=csrf_token,
+ expected_status_int=404)
def test_redirect_in_logged_out_states(self):
"""Test for a redirect in logged out state on '/'."""
|
nautobot__nautobot-3981 | 2.0: Tag string representation is incorrect
### Environment
* Nautobot version (Docker tag too if applicable): `next`, e13883b7b8a4f44bca0c40d8074dcf8f82e544e6
### Steps to Reproduce
1. Create a Tag and associate it to any model
2. View the legacy-UI list view or detail view of that model
### Expected Behavior
Tag name to be displayed
### Observed Behavior
`Tag object (<uuid>)` is displayed.

Appears to be a regression due to #3914.
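For illustration: Django's default `Model.__str__` renders as `<ModelName> object (<pk>)`, which is exactly the fallback observed above, so the regression points to a lost `__str__`. A minimal sketch of the fix applied in the patch below (the model here is stripped to the one relevant field; the real class mixes in several extra bases):

```python
# Minimal sketch, not the full Nautobot class: without __str__, Django falls
# back to "Tag object (<uuid>)"; defining it restores the name-based display.
from django.db import models


class Tag(models.Model):
    name = models.CharField(max_length=100, unique=True)

    def __str__(self):
        return self.name
```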
| [
{
"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom taggit.models import GenericUUIDTaggedItemBase\n\nfrom nautobot.core.choices import ColorChoices\nfrom nautobot.core.models import BaseManager, BaseModel\nfrom nautobot.core.models.fields import ColorField\nfrom nautobot.core.models.querysets import RestrictedQuerySet\nfrom nautobot.extras.models import ChangeLoggedModel, CustomFieldModel\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.models.relationships import RelationshipModel\nfrom nautobot.extras.utils import extras_features, TaggableClassesQuery\n\n\n#\n# Tags\n#\n\n\nclass TagQuerySet(RestrictedQuerySet):\n \"\"\"Queryset for `Tags` objects.\"\"\"\n\n def get_for_model(self, model):\n \"\"\"\n Return all `Tags` assigned to the given model.\n \"\"\"\n return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)\n\n\n# Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.\n@extras_features(\n \"custom_validators\",\n)\nclass Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):\n name = models.CharField(max_length=100, unique=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"tags\",\n limit_choices_to=TaggableClassesQuery(),\n )\n color = ColorField(default=ColorChoices.COLOR_GREY)\n description = models.CharField(\n max_length=200,\n blank=True,\n )\n\n objects = BaseManager.from_queryset(TagQuerySet)()\n\n class Meta:\n ordering = [\"name\"]\n\n def validate_content_types_removal(self, content_types_id):\n \"\"\"Validate content_types to be removed are not tagged to a model\"\"\"\n errors = {}\n\n removed_content_types = self.content_types.exclude(id__in=content_types_id)\n\n # check if tag is assigned to any of the removed content_types\n for content_type in removed_content_types:\n model = content_type.model_class()\n if model.objects.filter(tags=self).exists():\n errors.setdefault(\"content_types\", []).append(\n f\"Unable to remove {model._meta.label_lower}. Dependent objects were found.\"\n )\n\n return errors\n\n\nclass TaggedItem(BaseModel, GenericUUIDTaggedItemBase):\n tag = models.ForeignKey(to=Tag, related_name=\"%(app_label)s_%(class)s_items\", on_delete=models.CASCADE)\n\n class Meta:\n index_together = (\"content_type\", \"object_id\")\n unique_together = [[\"content_type\", \"object_id\", \"tag\"]]\n",
"path": "nautobot/extras/models/tags.py"
}
] | [
{
"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom taggit.models import GenericUUIDTaggedItemBase\n\nfrom nautobot.core.choices import ColorChoices\nfrom nautobot.core.models import BaseManager, BaseModel\nfrom nautobot.core.models.fields import ColorField\nfrom nautobot.core.models.querysets import RestrictedQuerySet\nfrom nautobot.extras.models import ChangeLoggedModel, CustomFieldModel\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.models.relationships import RelationshipModel\nfrom nautobot.extras.utils import extras_features, TaggableClassesQuery\n\n\n#\n# Tags\n#\n\n\nclass TagQuerySet(RestrictedQuerySet):\n \"\"\"Queryset for `Tags` objects.\"\"\"\n\n def get_for_model(self, model):\n \"\"\"\n Return all `Tags` assigned to the given model.\n \"\"\"\n return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)\n\n\n# Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.\n@extras_features(\n \"custom_validators\",\n)\nclass Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):\n name = models.CharField(max_length=100, unique=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"tags\",\n limit_choices_to=TaggableClassesQuery(),\n )\n color = ColorField(default=ColorChoices.COLOR_GREY)\n description = models.CharField(\n max_length=200,\n blank=True,\n )\n\n objects = BaseManager.from_queryset(TagQuerySet)()\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = [\"name\"]\n\n def validate_content_types_removal(self, content_types_id):\n \"\"\"Validate content_types to be removed are not tagged to a model\"\"\"\n errors = {}\n\n removed_content_types = self.content_types.exclude(id__in=content_types_id)\n\n # check if tag is assigned to any of the removed content_types\n for content_type in removed_content_types:\n model = content_type.model_class()\n if model.objects.filter(tags=self).exists():\n errors.setdefault(\"content_types\", []).append(\n f\"Unable to remove {model._meta.label_lower}. Dependent objects were found.\"\n )\n\n return errors\n\n\nclass TaggedItem(BaseModel, GenericUUIDTaggedItemBase):\n tag = models.ForeignKey(to=Tag, related_name=\"%(app_label)s_%(class)s_items\", on_delete=models.CASCADE)\n\n class Meta:\n index_together = (\"content_type\", \"object_id\")\n unique_together = [[\"content_type\", \"object_id\", \"tag\"]]\n",
"path": "nautobot/extras/models/tags.py"
}
] | diff --git a/changes/3980.fixed b/changes/3980.fixed
new file mode 100644
index 00000000000..14e85a78fd9
--- /dev/null
+++ b/changes/3980.fixed
@@ -0,0 +1 @@
+Fixed a regression in the display of Tag records in the UI.
diff --git a/nautobot/extras/models/tags.py b/nautobot/extras/models/tags.py
index 93145e33722..160f33ad89b 100644
--- a/nautobot/extras/models/tags.py
+++ b/nautobot/extras/models/tags.py
@@ -46,6 +46,9 @@ class Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, Not
objects = BaseManager.from_queryset(TagQuerySet)()
+ def __str__(self):
+ return self.name
+
class Meta:
ordering = ["name"]
|
python__python-docs-es-1787 | Translate 'using/unix.po'
This file needs to be 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/using/unix.html once translated.
Meanwhile, the English version is shown.
Current stats for `using/unix.po`:
* Fuzzy: 1
* Percent translated: 88.9%
* Entries: 40 / 45
* Untranslated: 5
Please comment here if you want this file to be assigned to you; a member will assign it to you as soon as possible so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
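Note that one entry in `unix.po` uses the `:source:` Sphinx role, which the helper script `scripts/translate.py` did not yet protect; the companion patch below adds it. A self-contained sketch of the placeholder round-trip the script performs so roles survive machine translation untouched (placeholder scheme copied from the script; the sample sentence is illustrative):

```python
# Illustrative round-trip: shield a Sphinx role behind a placeholder, pretend
# to translate, then restore it. Mirrors protect_sphinx_directives() and
# undo_sphinx_directives_protection() in scripts/translate.py.
import re

pattern = re.compile(r":source:`[^`]+`")  # the pattern this change adds
text = "documented in the :source:`README.rst` file"

placeholders = {}
for i, match in enumerate(pattern.findall(text)):
    ph = f"XASDF{str(i).zfill(2)}"  # same placeholder format as the script
    text = text.replace(match, ph)
    placeholders[ph] = match

# ... the placeholder-bearing text would go through GoogleTranslator here ...

for ph, original in placeholders.items():
    text = text.replace(ph, original)

print(text)  # -> documented in the :source:`README.rst` file
```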
| [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":source:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | diff --git a/scripts/translate.py b/scripts/translate.py
index 26c8d5f80d..a8c9ab5f2c 100644
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -44,6 +44,7 @@
":RFC:`[^`]+`",
":rfc:`[^`]+`",
":doc:`[^`]+`",
+ ":source:`[^`]+`",
":manpage:`[^`]+`",
":sup:`[^`]+`",
"``[^`]+``",
diff --git a/using/unix.po b/using/unix.po
index da3b7dd465..a68b80ac1a 100644
--- a/using/unix.po
+++ b/using/unix.po
@@ -13,12 +13,12 @@ msgstr ""
"POT-Creation-Date: 2021-10-16 21:42+0200\n"
"PO-Revision-Date: 2020-10-05 20:17+0200\n"
"Last-Translator: \n"
-"Language: es_ES\n"
"Language-Team: python-doc-es\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Language: es_ES\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"Generated-By: Babel 2.9.1\n"
#: ../Doc/using/unix.rst:7
@@ -147,15 +147,14 @@ msgid "The build process consists of the usual commands::"
msgstr "El proceso de construcción consta de los comandos habituales::"
#: ../Doc/using/unix.rst:81
-#, fuzzy
msgid ""
":ref:`Configuration options <configure-options>` and caveats for specific "
"Unix platforms are extensively documented in the :source:`README.rst` file "
"in the root of the Python source tree."
msgstr ""
-"Las opciones de configuración y las advertencias para plataformas Unix "
-"específicas están ampliamente documentadas en el fichero :source:`README."
-"rst` del árbol de origen."
+":ref:`Opciones de configuración <configure-options>` y las advertencias para "
+"plataformas Unix específicas están ampliamente documentadas en el archivo :"
+"source:`README.rst` en la raíz del árbol de fuentes de Python."
#: ../Doc/using/unix.rst:87
msgid ""
@@ -280,7 +279,7 @@ msgstr ""
#: ../Doc/using/unix.rst:141
msgid "Custom OpenSSL"
-msgstr ""
+msgstr "OpenSSL personalizado"
#: ../Doc/using/unix.rst:143
msgid ""
@@ -290,18 +289,27 @@ msgid ""
"directory should also contain a ``cert.pem`` file and/or a ``certs`` "
"directory."
msgstr ""
+"Para utilizar la configuración de OpenSSL de su proveedor y el almacén de "
+"confianza del sistema, busque el directorio con el archivo ``openssl.cnf`` o "
+"el enlace simbólico en ``/etc``. En la mayoría de las distribuciones, el "
+"archivo está en ``/etc/ssl`` o ``/etc/pki/tls``. El directorio también debe "
+"contener un archivo ``cert.pem`` y / o un directorio ``certs``."
#: ../Doc/using/unix.rst:154
msgid ""
"Download, build, and install OpenSSL. Make sure you use ``install_sw`` and "
"not ``install``. The ``install_sw`` target does not override ``openssl.cnf``."
msgstr ""
+"Descargue, compile e instale OpenSSL. Asegúrese de utilizar ``install_sw`` y "
+"no ``install``. El destino ``install_sw`` no anula ``openssl.cnf``."
#: ../Doc/using/unix.rst:172
msgid ""
"Build Python with custom OpenSSL (see the configure `--with-openssl` and `--"
"with-openssl-rpath` options)"
msgstr ""
+"Compile Python con OpenSSL personalizado (consulte las opciones configure `--"
+"with-openssl` y` --with-openssl-rpath`)"
#: ../Doc/using/unix.rst:187
msgid ""
@@ -309,3 +317,7 @@ msgid ""
"recompile Python to update OpenSSL. It's sufficient to replace the custom "
"OpenSSL installation with a newer version."
msgstr ""
+"Las versiones de parche de OpenSSL tienen una ABI compatible con versiones "
+"anteriores. No es necesario volver a compilar Python para actualizar "
+"OpenSSL. Es suficiente reemplazar la instalación personalizada de OpenSSL "
+"con una versión más nueva."
|
python__python-docs-es-1762 | Translate 'library/os.po'
This file needs to be 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/library/os.html once translated.
Meanwhile, the English version is shown.
Current stats for `library/os.po`:
* Fuzzy: 27
* Percent translated: 94.8%
* Entries: 804 / 848
* Untranslated: 44
Please comment here if you want this file to be assigned to you; a member will assign it to you as soon as possible so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
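As a convenience, the stats quoted above can be reproduced locally with polib, which `scripts/translate.py` already depends on; a small sketch, run from the repository root (the bot that generated the numbers may count slightly differently):

```python
# Reproduce the issue's stats: fuzzy, untranslated, and percent translated.
import polib

po = polib.pofile("library/os.po")
print("Fuzzy:", len(po.fuzzy_entries()))
print("Untranslated:", len(po.untranslated_entries()))
print("Percent translated:", po.percent_translated())
```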
| [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | [
{
"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n",
"path": "scripts/translate.py"
}
] | diff --git a/dictionaries/library_os.txt b/dictionaries/library_os.txt
index 08f036ea71..97081ad219 100644
--- a/dictionaries/library_os.txt
+++ b/dictionaries/library_os.txt
@@ -1,43 +1,42 @@
-inodo
-nanosegundos
-urandom
-umask
-syscall
-quantum
-glibc
-errno
-pty
-Desestablece
-TerminateProcess
-fork
+configurarlos
+ctime
Cygwin
+Desestablece
+egid
ejecutabilidad
-misceláneas
entropía
-interactividad
-v
-ruid
-sgid
-suid
+errno
euid
-egid
+execv
+fork
+glibc
+group
+initgroups
+inodo
+interactividad
+merge
+misceláneas
+nanosegundos
+pty
+putenv
+quantum
+reescritura
rgid
-round
robin
-subshell
-signal
-subshell
-stdio
-ctime
-configurarlos
-reescritura
-stat
+round
+ruid
setgid
-setuid
setgroups
-initgroups
-putenv
+setuid
+sgid
+signal
spawn
-execv
-group
-merge
\ No newline at end of file
+stat
+stdio
+subcapa
+subshell
+suid
+syscall
+TerminateProcess
+umask
+urandom
diff --git a/library/os.po b/library/os.po
index 23c64bec87..8923200fd5 100644
--- a/library/os.po
+++ b/library/os.po
@@ -13,12 +13,12 @@ msgstr ""
"POT-Creation-Date: 2021-10-16 21:42+0200\n"
"PO-Revision-Date: 2021-08-19 21:45-0500\n"
"Last-Translator: Cristián Maureira-Fredes <[email protected]>\n"
-"Language: es\n"
"Language-Team: python-doc-es\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Language: es\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"Generated-By: Babel 2.9.1\n"
#: ../Doc/library/os.rst:2
@@ -86,10 +86,10 @@ msgstr ""
"se retorna una ruta o un archivo."
#: ../Doc/library/os.rst:35
-#, fuzzy
msgid ""
"On VxWorks, os.popen, os.fork, os.execv and os.spawn*p* are not supported."
-msgstr "En VxWorks, no están soportados os.fork, os.execv y os.spawn*p*."
+msgstr ""
+"En VxWorks, os.popen, os.fork, os.execv y os.spawn*p* no son compatibles."
#: ../Doc/library/os.rst:39
msgid ""
@@ -136,7 +136,6 @@ msgstr ""
"entorno"
#: ../Doc/library/os.rst:68
-#, fuzzy
msgid ""
"In Python, file names, command line arguments, and environment variables are "
"represented using the string type. On some systems, decoding these strings "
@@ -145,11 +144,12 @@ msgid ""
"this conversion (see :func:`sys.getfilesystemencoding`)."
msgstr ""
"En Python, los nombres de archivo, los argumentos de la línea de comandos y "
-"las variables de entorno están representados usando cadena de caracteres. En "
-"algunos sistemas, decodificar esas cadenas desde y hacia *bytes* es "
-"necesario para pasárselos al sistema operativo. Python usa la codificación "
-"del sistema operativo para realizar esta conversión (ver :func:`sys."
-"getfilesystemencoding`)."
+"las variables de entorno se representan mediante el tipo de cadena de "
+"caracteres. En algunos sistemas, es necesario decodificar estas cadenas "
+"desde y hacia bytes antes de pasarlas al sistema operativo. Python usa la :"
+"term:`codificación del sistema de archivos y manejador de errores "
+"<filesystem encoding and error handler>` para realizar esta conversión "
+"(consulte :func:`sys.getfilesystemencoding`)."
#: ../Doc/library/os.rst:74
msgid ""
@@ -158,6 +158,11 @@ msgid ""
"filesystem_encoding` and :c:member:`~PyConfig.filesystem_errors` members of :"
"c:type:`PyConfig`."
msgstr ""
+"La :term:`codificación del sistema de archivos y manejador de errores "
+"<filesystem encoding and error handler>` se configuran al inicio de Python "
+"mediante la función :c:func:`PyConfig_Read`: consulte los miembros :c:member:"
+"`~PyConfig.filesystem_encoding` y :c:member:`~PyConfig.filesystem_errors` "
+"de :c:type:`PyConfig`."
#: ../Doc/library/os.rst:79
msgid ""
@@ -181,40 +186,53 @@ msgid ""
"system encoding fails to provide this guarantee, API functions can raise :"
"exc:`UnicodeError`."
msgstr ""
+"La :term:`codificación del sistema de archivos y manejador de errores "
+"<filesystem encoding and error handler>` debe garantizar la decodificación "
+"exitosa de todos los bytes por debajo de 128. Si la codificación del sistema "
+"de archivos no proporciona esta garantía, las funciones de API pueden "
+"generar :exc:`UnicodeError`."
#: ../Doc/library/os.rst:92
msgid "See also the :term:`locale encoding`."
msgstr ""
+"Consulte también la :term:`codificación de la configuración regional <locale "
+"encoding>`."
#: ../Doc/library/os.rst:98
msgid "Python UTF-8 Mode"
-msgstr ""
+msgstr "Modo Python UTF-8"
#: ../Doc/library/os.rst:100
msgid "See :pep:`540` for more details."
-msgstr ""
+msgstr "Consulte :pep:`540` para obtener más detalles."
#: ../Doc/library/os.rst:103
msgid ""
"The Python UTF-8 Mode ignores the :term:`locale encoding` and forces the "
"usage of the UTF-8 encoding:"
msgstr ""
+"El modo Python UTF-8 ignora la :term:`codificación de la configuración "
+"regional <locale encoding>` y fuerza el uso de la codificación UTF-8:"
#: ../Doc/library/os.rst:106
msgid ""
"Use UTF-8 as the :term:`filesystem encoding <filesystem encoding and error "
"handler>`."
msgstr ""
+"Utilice UTF-8 como :term:`codificación del sistema de archivos y manejador "
+"de errores <filesystem encoding and error handler>`."
#: ../Doc/library/os.rst:108
msgid ":func:`sys.getfilesystemencoding()` returns ``'UTF-8'``."
-msgstr ""
+msgstr ":func:`sys.getfilesystemencoding()` retorna ``'UTF-8'``."
#: ../Doc/library/os.rst:109
msgid ""
":func:`locale.getpreferredencoding()` returns ``'UTF-8'`` (the "
"*do_setlocale* argument has no effect)."
msgstr ""
+":func:`locale.getpreferredencoding()` retorna ``'UTF-8'`` (el argumento "
+"*do_setlocale* no tiene ningún efecto)."
#: ../Doc/library/os.rst:111
msgid ""
@@ -224,12 +242,19 @@ msgid ""
"(:data:`sys.stderr` continues to use ``backslashreplace`` as it does in the "
"default locale-aware mode)"
msgstr ""
+":data:`sys.stdin`, :data:`sys.stdout` y :data:`sys.stderr` usan UTF-8 como "
+"su codificación de texto, con ``surrogateescape`` :ref:`error handler <error-"
+"handlers>` habilitado para :data:`sys.stdin` y :data:`sys.stdout` (:data:"
+"`sys.stderr` continúa usando ``backslashreplace`` como lo hace en el modo "
+"predeterminado de reconocimiento de configuración regional)"
#: ../Doc/library/os.rst:116
msgid ""
"On Unix, :func:`os.device_encoding` returns ``'UTF-8'``. rather than the "
"device encoding."
msgstr ""
+"En Unix, :func:`os.device_encoding` retorna ``'UTF-8'``. en lugar de la "
+"codificación del dispositivo."
#: ../Doc/library/os.rst:119
msgid ""
@@ -237,22 +262,30 @@ msgid ""
"envvar:`PYTHONIOENCODING` (just as they can be in the default locale-aware "
"mode)."
msgstr ""
+"Tenga en cuenta que :envvar:`PYTHONIOENCODING` puede anular la configuración "
+"de transmisión estándar en el modo UTF-8 (al igual que en el modo "
+"predeterminado con reconocimiento de configuración regional)."
#: ../Doc/library/os.rst:123
msgid ""
"As a consequence of the changes in those lower level APIs, other higher "
"level APIs also exhibit different default behaviours:"
msgstr ""
+"Como consecuencia de los cambios en esas API de nivel inferior, otras API de "
+"nivel superior también exhiben diferentes comportamientos predeterminados:"
#: ../Doc/library/os.rst:126
msgid ""
"Command line arguments, environment variables and filenames are decoded to "
"text using the UTF-8 encoding."
msgstr ""
+"Los argumentos de la línea de comandos, las variables de entorno y los "
+"nombres de archivo se decodifican en texto utilizando la codificación UTF-8."
#: ../Doc/library/os.rst:128
msgid ":func:`os.fsdecode()` and :func:`os.fsencode()` use the UTF-8 encoding."
msgstr ""
+":func:`os.fsdecode()` y :func:`os.fsencode()` utilizan la codificación UTF-8."
#: ../Doc/library/os.rst:129
msgid ""
@@ -261,6 +294,11 @@ msgid ""
"default so that attempting to open a binary file in text mode is likely to "
"raise an exception rather than producing nonsense data."
msgstr ""
+":func:`open()`, :func:`io.open()` y :func:`codecs.open()` utilizan la "
+"codificación UTF-8 de forma predeterminada. Sin embargo, todavía usan el "
+"controlador de errores estricto de forma predeterminada, por lo que es "
+"probable que intentar abrir un archivo binario en modo texto genere una "
+"excepción en lugar de producir datos sin sentido."
#: ../Doc/library/os.rst:134
msgid ""
@@ -268,12 +306,17 @@ msgid ""
"is ``C`` or ``POSIX`` at Python startup (see the :c:func:`PyConfig_Read` "
"function)."
msgstr ""
+"El :ref:`Modo Python UTF-8 <utf8-mode>` se habilita si la configuración "
+"regional LC_CTYPE es ``C`` o ``POSIX`` al iniciar Python (consulte la "
+"función :c:func:`PyConfig_Read`)."
#: ../Doc/library/os.rst:138
msgid ""
"It can be enabled or disabled using the :option:`-X utf8 <-X>` command line "
"option and the :envvar:`PYTHONUTF8` environment variable."
msgstr ""
+"Se puede habilitar o deshabilitar usando la opción de línea de comando :"
+"option:`-X utf8 <-X>` y la variable de entorno :envvar:`PYTHONUTF8`."
#: ../Doc/library/os.rst:141
msgid ""
@@ -284,18 +327,32 @@ msgid ""
"or fails. In such legacy locales, the interpreter will default to enabling "
"UTF-8 mode unless explicitly instructed not to do so."
msgstr ""
+"Si la variable de entorno :envvar:`PYTHONUTF8` no está configurada en "
+"absoluto, entonces el intérprete utiliza de forma predeterminada la "
+"configuración regional actual, *a no ser que* la configuración regional "
+"actual se identifica como una configuración regional heredada basada en "
+"ASCII (como se describe para :envvar:`PYTHONCOERCECLOCALE`) y la coerción de "
+"la configuración regional está deshabilitada o falla . En tales "
+"configuraciones regionales heredadas, el intérprete habilitará de forma "
+"predeterminada el modo UTF-8 a menos que se le indique explícitamente que no "
+"lo haga."
#: ../Doc/library/os.rst:148
msgid ""
"The Python UTF-8 Mode can only be enabled at the Python startup. Its value "
"can be read from :data:`sys.flags.utf8_mode <sys.flags>`."
msgstr ""
+"El modo Python UTF-8 solo se puede habilitar al inicio de Python. Su valor "
+"se puede leer en :data:`sys.flags.utf8_mode <sys.flags>`."
#: ../Doc/library/os.rst:151
msgid ""
"See also the :ref:`UTF-8 mode on Windows <win-utf8-mode>` and the :term:"
"`filesystem encoding and error handler`."
msgstr ""
+"Consulte también :ref:`modo UTF-8 en Windows <win-utf8-mode>` y :term:"
+"`codificación del sistema de archivos y manejador de errores <filesystem "
+"encoding and error handler>`."
#: ../Doc/library/os.rst:158
msgid "Process Parameters"
@@ -425,14 +482,13 @@ msgstr ""
"así que es mejor modificar ``os.environ``."
#: ../Doc/library/os.rst:197
-#, fuzzy
msgid ""
"On some platforms, including FreeBSD and macOS, setting ``environ`` may "
"cause memory leaks. Refer to the system documentation for :c:func:`putenv`."
msgstr ""
-"En algunas plataformas, como FreeBSD y Mac OS X, establece ``environ`` "
-"pueden generar pérdidas de memoria. Hay que referirse a la documentación del "
-"sistema para la función :c:func:`putenv`."
+"En algunas plataformas, incluidas FreeBSD y macOS, configurar ``environ`` "
+"puede provocar pérdidas de memoria. Consulte la documentación del sistema "
+"para :c:func:`putenv`."
#: ../Doc/library/os.rst:201
msgid ""
@@ -478,15 +534,12 @@ msgid "These functions are described in :ref:`os-file-dir`."
msgstr "Estas funciones están detalladas en :ref:`os-file-dir`."
#: ../Doc/library/os.rst:236
-#, fuzzy
msgid ""
"Encode :term:`path-like <path-like object>` *filename* to the :term:"
"`filesystem encoding and error handler`; return :class:`bytes` unchanged."
msgstr ""
-"Codifica un nombre de archivo :term:`tipo ruta <path-like object>` con la "
-"codificación del sistema de archivos usando el controlador de errores "
-"``'surrogateescape'``, o ``'strict'`` en Windows; retorna :class:`bytes` sin "
-"alterar."
+"Codifique :term:`path-like <path-like object>` *filename* en :term:"
+"`filesystem encoding and error handler`; retorna :class:`bytes` sin cambios."
#: ../Doc/library/os.rst:240
msgid ":func:`fsdecode` is the reverse function."
@@ -501,15 +554,12 @@ msgstr ""
"`os.PathLike`."
#: ../Doc/library/os.rst:251
-#, fuzzy
msgid ""
"Decode the :term:`path-like <path-like object>` *filename* from the :term:"
"`filesystem encoding and error handler`; return :class:`str` unchanged."
msgstr ""
-"Decodifica un nombre de archivo :term:`tipo ruta <path-like object>` desde "
-"la codificación del sistema de archivos usando el controlador de errores "
-"``'surrogateescape'``, o ``'strict'`` en Windows; retorna :class:`str` sin "
-"alterar."
+"Decodifica el :term:`path-like <path-like object>` *filename* del :term:"
+"`filesystem encoding and error handler`; retorna :class:`str` sin cambios."
#: ../Doc/library/os.rst:255
msgid ":func:`fsencode` is the reverse function."
@@ -648,7 +698,6 @@ msgstr ""
"actual."
#: ../Doc/library/os.rst:372
-#, fuzzy
msgid ""
"On macOS, :func:`getgroups` behavior differs somewhat from other Unix "
"platforms. If the Python interpreter was built with a deployment target of :"
@@ -664,20 +713,20 @@ msgid ""
"`MACOSX_DEPLOYMENT_TARGET`, can be obtained with :func:`sysconfig."
"get_config_var`."
msgstr ""
-"En Mac OS X, la función :func:`getgroups` se comporta diferente que en otras "
-"plataformas del tipo Unix. Si el intérprete de Python se compiló con un "
-"objetivo de despliegue igual a :const:`10.5` o anterior, la función :func:"
-"`getgroups` retorna la lista de *ids* de grupos efectivos asociados con el "
-"proceso actual; esta lista está limitada a un número de entradas definidas a "
-"nivel de sistema, típicamente 16, y puede modificarse con la ejecución de :"
-"func:`setgroups` si se tiene los privilegios adecuados. Si se compila con un "
-"objetivo de despliegue mayor que :const:`10.5`, :func:`getgroups` retorna la "
-"lista de acceso de grupo actual asociada para el *id* efectivo del usuario "
-"del proceso; la lista de acceso de grupo puede cambiar durante el ciclo de "
-"vida del proceso, no se ve afectada por las llamadas a :func:`setgroups`, y "
-"su longitud no está limitada a 16. El valor de objetivo de despliegue, :"
-"const:`MACOSX_DEPLOYMENT_TARGET`, se puede ver con :func:`sysconfig."
-"get_config_var`."
+"En macOS, el comportamiento de :func:`getgroups` difiere algo del de otras "
+"plataformas Unix. Si el intérprete de Python se creó con un destino de "
+"implementación de :const:`10.5` o anterior, :func:`getgroups` retorna la "
+"lista de identificadores de grupo efectivos asociados con el proceso de "
+"usuario actual; esta lista está limitada a un número de entradas definido "
+"por el sistema, normalmente 16, y puede modificarse mediante llamadas a :"
+"func:`setgroups` si tiene los privilegios adecuados. Si se construyó con un "
+"objetivo de implementación mayor que :const:`10.5`, :func:`getgroups` "
+"retorna la lista de acceso de grupo actual para el usuario asociado con el "
+"ID de usuario efectivo del proceso; la lista de acceso de grupo puede "
+"cambiar durante la vida útil del proceso, no se ve afectada por las llamadas "
+"a :func:`setgroups` y su longitud no está limitada a 16. El valor de destino "
+"de implementación, :const:`MACOSX_DEPLOYMENT_TARGET`, se puede obtener con :"
+"func:`sysconfig.get_config_var`."
#: ../Doc/library/os.rst:389
msgid ""
@@ -814,14 +863,13 @@ msgstr ""
"elementos de ``os.environ``."
#: ../Doc/library/os.rst:519
-#, fuzzy
msgid ""
"On some platforms, including FreeBSD and macOS, setting ``environ`` may "
"cause memory leaks. Refer to the system documentation for :c:func:`putenv`."
msgstr ""
-"En algunas plataformas, como FreeBSD y Mac OS X, establecer ``environ`` "
-"pueden generar fugas de memoria. Referirse a la documentación para :c:func:"
-"`putenv`."
+"En algunas plataformas, incluidas FreeBSD y macOS, configurar ``environ`` "
+"puede provocar pérdidas de memoria. Consulte la documentación del sistema "
+"para :c:func:`putenv`."
#: ../Doc/library/os.rst:522
msgid ""
@@ -860,17 +908,16 @@ msgstr ""
"disponible sólo para el superusuario."
#: ../Doc/library/os.rst:557
-#, fuzzy
msgid ""
"On macOS, the length of *groups* may not exceed the system-defined maximum "
"number of effective group ids, typically 16. See the documentation for :func:"
"`getgroups` for cases where it may not return the same group list set by "
"calling setgroups()."
msgstr ""
-"En Mac OS X, la longitud de *groups* no puede exceder el número máximo de "
-"identificadores de grupo efectivos definidos por el sistema, generalmente "
-"16. Consulte la documentación de :func:`getgroups` para casos en los que no "
-"puede retornar el mismo conjunto de listas de grupos llamando a setgroups()."
+"En macOS, la longitud de *groups* no puede exceder el número máximo definido "
+"por el sistema de ID de grupo efectivos, por lo general 16. Consulte la "
+"documentación de :func:`getgroups` para ver los casos en los que puede no "
+"retornar la misma lista de grupos establecida llamando a setgroups()."
#: ../Doc/library/os.rst:564
msgid ""
@@ -1222,10 +1269,12 @@ msgid ""
"On Unix, if the :ref:`Python UTF-8 Mode <utf8-mode>` is enabled, return "
"``'UTF-8'`` rather than the device encoding."
msgstr ""
+"En Unix, si el :ref:`Modo Python UTF-8 <utf8-mode>` está habilitado, retorna "
+"``'UTF-8'`` en lugar de la codificación del dispositivo."
#: ../Doc/library/os.rst:816
msgid "On Unix, the function now implements the Python UTF-8 Mode."
-msgstr ""
+msgstr "En Unix, la función ahora implementa el modo Python UTF-8."
#: ../Doc/library/os.rst:822
msgid ""
@@ -1624,15 +1673,16 @@ msgid "The above constants are only available on Windows."
msgstr "Las constantes anteriores sólo están disponibles en Windows."
#: ../Doc/library/os.rst:1098
-#, fuzzy
msgid "The above constants are only available on macOS."
-msgstr "Las constantes anteriores sólo están disponibles en Windows."
+msgstr "Las constantes anteriores solo están disponibles en macOS."
#: ../Doc/library/os.rst:1100
msgid ""
"Add :data:`O_EVTONLY`, :data:`O_FSYNC`, :data:`O_SYMLINK` and :data:"
"`O_NOFOLLOW_ANY` constants."
msgstr ""
+"Agregue las constantes :data:`O_EVTONLY`, :data:`O_FSYNC`, :data:`O_SYMLINK` "
+"y :data:`O_NOFOLLOW_ANY`."
#: ../Doc/library/os.rst:1114
msgid ""
@@ -1887,7 +1937,7 @@ msgstr ":data:`RWF_SYNC`"
#: ../Doc/library/os.rst:1298
msgid ":data:`RWF_APPEND`"
-msgstr ""
+msgstr ":data:`RWF_APPEND`"
#: ../Doc/library/os.rst:1300
msgid "Return the total number of bytes actually written."
@@ -1908,28 +1958,26 @@ msgstr ""
"requiere Linux 4.7 o posterior."
#: ../Doc/library/os.rst:1316
-#, fuzzy
msgid ""
"Provide a per-write equivalent of the :data:`O_DSYNC` :func:`os.open` flag. "
"This flag effect applies only to the data range written by the system call."
msgstr ""
-"Proporciona un equivalente por escritura de la flag :data:`O_SYNC`` "
-"`open(2)``. Esta flag sólo se aplica al rango de datos escrito por la "
-"llamada al sistema."
+"Proporcione un equivalente por escritura del indicador :data:`O_DSYNC` :func:"
+"`os.open`. Este efecto de bandera se aplica solo al rango de datos escrito "
+"por la llamada al sistema."
#: ../Doc/library/os.rst:1320 ../Doc/library/os.rst:1330
msgid ":ref:`Availability <availability>`: Linux 4.7 and newer."
msgstr ":ref:`Disponibilidad <availability>`: Linux 4.7 y más nuevos."
#: ../Doc/library/os.rst:1326
-#, fuzzy
msgid ""
"Provide a per-write equivalent of the :data:`O_SYNC` :func:`os.open` flag. "
"This flag effect applies only to the data range written by the system call."
msgstr ""
-"Proporciona un equivalente por escritura de la flag :data:`O_SYNC`` "
-"`open(2)``. Esta flag sólo se aplica al rango de datos escrito por la "
-"llamada al sistema."
+"Proporcione un equivalente por escritura del indicador :data:`O_SYNC` :func:"
+"`os.open`. Este efecto de bandera se aplica solo al rango de datos escrito "
+"por la llamada al sistema."
#: ../Doc/library/os.rst:1336
msgid ""
@@ -1940,11 +1988,16 @@ msgid ""
"of the file. However, if the *offset* argument is ``-1``, the current file "
"*offset* is updated."
msgstr ""
+"Proporcione un equivalente por escritura del indicador :data:`O_APPEND` :"
+"func:`os.open`. Esta bandera es significativa solo para :func:`os.pwritev` y "
+"su efecto se aplica solo al rango de datos escrito por la llamada al "
+"sistema. El argumento *offset* no afecta la operación de escritura; los "
+"datos siempre se añaden al final del archivo. Sin embargo, si el argumento "
+"*offset* es ``-1``, se actualiza el *offset* actual del archivo."
#: ../Doc/library/os.rst:1344
-#, fuzzy
msgid ":ref:`Availability <availability>`: Linux 4.16 and newer."
-msgstr ":ref:`Disponibilidad <availability>`: Linux 4.6 y más nuevos."
+msgstr ":ref:`Disponibilidad <availability>`: Linux 4.16 y más nuevos."
#: ../Doc/library/os.rst:1350
msgid "Read at most *n* bytes from file descriptor *fd*."
@@ -1991,26 +2044,24 @@ msgstr ""
"posición actual de *in_fd* y la posición de *in_fd* se actualiza."
#: ../Doc/library/os.rst:1382
-#, fuzzy
msgid ""
"The second case may be used on macOS and FreeBSD where *headers* and "
"*trailers* are arbitrary sequences of buffers that are written before and "
"after the data from *in_fd* is written. It returns the same as the first "
"case."
msgstr ""
-"El segundo caso puede usarse en Mac OS X y FreeBSD donde *headers* y "
+"El segundo caso se puede usar en macOS y FreeBSD donde *headers* y "
"*trailers* son secuencias arbitrarias de búferes que se escriben antes y "
-"después de que los datos de *in_fd* sean escritos. Retorna lo mismo que el "
+"después de que se escriban los datos de *in_fd*. Retorna lo mismo que el "
"primer caso."
#: ../Doc/library/os.rst:1386
-#, fuzzy
msgid ""
"On macOS and FreeBSD, a value of ``0`` for *count* specifies to send until "
"the end of *in_fd* is reached."
msgstr ""
-"En Mac OS X y FreeBSD, un valor de 0 para *count* especifica enviar hasta el "
-"que final de *in_fd* sea alcanzado."
+"En macOS y FreeBSD, un valor de ``0`` para *count* especifica enviar hasta "
+"que se alcanza el final de *in_fd*."
#: ../Doc/library/os.rst:1389
msgid ""
@@ -2063,7 +2114,6 @@ msgstr ""
"Parámetros para la función :func:`sendfile`, si la implementación los admite."
#: ../Doc/library/os.rst:1434
-#, fuzzy
msgid ""
"Transfer *count* bytes from file descriptor *src*, starting from offset "
"*offset_src*, to file descriptor *dst*, starting from offset *offset_dst*. "
@@ -2074,13 +2124,16 @@ msgid ""
"the same filesystem, otherwise an :exc:`OSError` is raised with :attr:"
"`~OSError.errno` set to :data:`errno.EXDEV`."
msgstr ""
-"Copia *count* bytes del descriptor de archivo *src*, comenzando desde offset "
-"*offset_src*, al descriptor de archivo *dst*, comenzando desde offset "
-"*offset_dst*. Si *offset_src* es None, entonces *src* se lee desde la "
-"posición actual; respectivamente para *offset_dst*. Los archivos señalados "
-"por *src* y *dst* deben estar en el mismo sistema de archivos; de lo "
-"contrario, se genera una :exc:`OSError` con :attr:`~OSError.errno` "
-"establecido en :data:`errno.EXDEV`."
+"Transfiera *count* bytes desde el descriptor de archivo *src*, comenzando "
+"desde el desplazamiento *offset_src*, al descriptor de archivo *dst*, "
+"comenzando desde el desplazamiento *offset_dst*. Al menos uno de los "
+"descriptores de archivo debe hacer referencia a una tubería. Si *offset_src* "
+"es None, entonces *src* se lee desde la posición actual; respectivamente "
+"para *offset_dst*. El desplazamiento asociado al descriptor de archivo que "
+"hace referencia a una tubería debe ser ``None``. Los archivos señalados por "
+"*src* y *dst* deben residir en el mismo sistema de archivos; de lo "
+"contrario, se genera un :exc:`OSError` con :attr:`~OSError.errno` "
+"configurado en :data:`errno.EXDEV`."
#: ../Doc/library/os.rst:1448
msgid ""
@@ -2090,13 +2143,18 @@ msgid ""
"sense to block because there are no writers connected to the write end of "
"the pipe."
msgstr ""
+"Una vez completado con éxito, retorna el número de bytes empalmados hacia o "
+"desde la tubería. Un valor de retorno de 0 significa el final de la entrada. "
+"Si *src* se refiere a una tubería, esto significa que no hay datos para "
+"transferir y no tendría sentido bloquear porque no hay escritores conectados "
+"al extremo de escritura de la tubería."
#: ../Doc/library/os.rst:1455
-#, fuzzy
msgid ""
":ref:`Availability <availability>`: Linux kernel >= 2.6.17 and glibc >= 2.5"
msgstr ""
-":ref:`Disponibilidad <availability>`: Kernel de Linux >= 4.5 o glibc >= 2.27."
+":ref:`Disponibilidad <availability>`: Kernel de Linux >= 2.6.17 y glibc >= "
+"2.5."
#: ../Doc/library/os.rst:1467
msgid ""
@@ -3232,14 +3290,14 @@ msgstr ""
"anteriormente."
#: ../Doc/library/os.rst:2248
-#, fuzzy
msgid ""
"Remove (delete) the file *path*. If *path* is a directory, an :exc:"
"`IsADirectoryError` is raised. Use :func:`rmdir` to remove directories. If "
"the file does not exist, a :exc:`FileNotFoundError` is raised."
msgstr ""
-"Elimina (elimine) el archivo *path*. Si *path* es un directorio, se genera "
-"un :exc:`IsADirectoryError`. Use :func:`rmdir` para eliminar directorios."
+"Quite (elimine) el archivo *path*. Si *path* es un directorio, se genera un :"
+"exc:`IsADirectoryError`. Utilice :func:`rmdir` para eliminar directorios. Si "
+"el archivo no existe, se lanza un :exc:`FileNotFoundError`."
#: ../Doc/library/os.rst:2252 ../Doc/library/os.rst:2363
#: ../Doc/library/os.rst:2999
@@ -4158,10 +4216,9 @@ msgstr ""
"contiene el archivo."
#: ../Doc/library/os.rst:2792
-#, fuzzy
msgid "On macOS systems, the following attributes may also be available:"
msgstr ""
-"En los sistemas Mac OS, los siguientes atributos también pueden estar "
+"En los sistemas macOS, los siguientes atributos también pueden estar "
"disponibles:"
#: ../Doc/library/os.rst:2796
@@ -4925,6 +4982,12 @@ msgid ""
"`eventfd(2)` for more information. By default, the new file descriptor is :"
"ref:`non-inheritable <fd_inheritance>`."
msgstr ""
+"Crea y retorna un descriptor de archivo de eventos. Los descriptores de "
+"archivo admiten :func:`read` y :func:`write` sin procesar con un tamaño de "
+"búfer de 8, :func:`~select.select`, :func:`~select.poll` y similares. "
+"Consulte la página de manual :manpage:`eventfd(2)` para obtener más "
+"información. De forma predeterminada, el nuevo descriptor de archivo :ref:"
+"`no es heredable <fd_inheritance>`."
#: ../Doc/library/os.rst:3331
msgid ""
@@ -4933,18 +4996,27 @@ msgid ""
"to a 32 bit unsigned int although the event counter is an unsigned 64 bit "
"integer with a maximum value of 2\\ :sup:`64`\\ -\\ 2."
msgstr ""
+"*initval* es el valor inicial del contador de eventos. El valor inicial debe "
+"ser un entero sin signo de 32 bits. Tenga en cuenta que el valor inicial "
+"está limitado a un entero sin signo de 32 bits, aunque el contador de "
+"eventos es un entero de 64 bits sin signo con un valor máximo de 2 \\ :sup:"
+"`64`\\ -\\ 2."
#: ../Doc/library/os.rst:3336
msgid ""
"*flags* can be constructed from :const:`EFD_CLOEXEC`, :const:`EFD_NONBLOCK`, "
"and :const:`EFD_SEMAPHORE`."
msgstr ""
+"*flags* se puede construir a partir de :const:`EFD_CLOEXEC`, :const:"
+"`EFD_NONBLOCK` y :const:`EFD_SEMAPHORE`."
#: ../Doc/library/os.rst:3339
msgid ""
"If :const:`EFD_SEMAPHORE` is specified and the event counter is non-zero, :"
"func:`eventfd_read` returns 1 and decrements the counter by one."
msgstr ""
+"Si se especifica :const:`EFD_SEMAPHORE` y el contador de eventos no es "
+"cero, :func:`eventfd_read` retorna 1 y reduce el contador en uno."
#: ../Doc/library/os.rst:3342
msgid ""
@@ -4952,12 +5024,17 @@ msgid ""
"zero, :func:`eventfd_read` returns the current event counter value and "
"resets the counter to zero."
msgstr ""
+"Si no se especifica :const:`EFD_SEMAPHORE` y el contador de eventos no es "
+"cero, :func:`eventfd_read` retorna el valor actual del contador de eventos y "
+"restablece el contador a cero."
#: ../Doc/library/os.rst:3346
msgid ""
"If the event counter is zero and :const:`EFD_NONBLOCK` is not specified, :"
"func:`eventfd_read` blocks."
msgstr ""
+"Si el contador de eventos es cero y no se especifica :const:`EFD_NONBLOCK`, :"
+"func:`eventfd_read` se bloquea."
#: ../Doc/library/os.rst:3349
msgid ""
@@ -4965,14 +5042,16 @@ msgid ""
"write operation would increment the counter to a value larger than 2\\ :sup:"
"`64`\\ -\\ 2."
msgstr ""
+":func:`eventfd_write` incrementa el contador de eventos. Escribe bloques si "
+"la operación de escritura incrementaría el contador a un valor mayor que \\ :"
+"sup:`64`\\ -\\ 2."
#: ../Doc/library/os.rst:3371
-#, fuzzy
msgid ""
":ref:`Availability <availability>`: Linux 2.6.27 or newer with glibc 2.8 or "
"newer."
msgstr ""
-":ref:`Disponibilidad <availability>`: Linux 3.17 o posterior con glibc 2.27 "
+":ref:`Disponibilidad <availability>`: Linux 2.6.27 o posterior con glibc 2.8 "
"o posterior."
#: ../Doc/library/os.rst:3376
@@ -4980,41 +5059,51 @@ msgid ""
"Read value from an :func:`eventfd` file descriptor and return a 64 bit "
"unsigned int. The function does not verify that *fd* is an :func:`eventfd`."
msgstr ""
+"Lee el valor de un descriptor de archivo :func:`eventfd` y retorna un int "
+"sin signo de 64 bits. La función no verifica que *fd* sea un :func:`eventfd`."
#: ../Doc/library/os.rst:3380 ../Doc/library/os.rst:3389
#: ../Doc/library/os.rst:3397 ../Doc/library/os.rst:3406
-#, fuzzy
msgid ":ref:`Availability <availability>`: See :func:`eventfd`"
-msgstr ":ref:`Disponibilidad <availability>`: Unix."
+msgstr ":ref:`Disponibilidad <availability>`: Ver :func:`eventfd`"
#: ../Doc/library/os.rst:3385
msgid ""
"Add value to an :func:`eventfd` file descriptor. *value* must be a 64 bit "
"unsigned int. The function does not verify that *fd* is an :func:`eventfd`."
msgstr ""
+"Agregue valor a un descriptor de archivo :func:`eventfd`. *value* debe ser "
+"un int sin signo de 64 bits. La función no verifica que *fd* sea un :func:"
+"`eventfd`."
#: ../Doc/library/os.rst:3394
msgid "Set close-on-exec flag for new :func:`eventfd` file descriptor."
msgstr ""
+"Establezca el indicador close-on-exec para el nuevo descriptor de archivo :"
+"func:`eventfd`."
#: ../Doc/library/os.rst:3402
msgid ""
"Set :const:`O_NONBLOCK` status flag for new :func:`eventfd` file descriptor."
msgstr ""
+"Establezca el indicador de estado :const:`O_NONBLOCK` para el nuevo "
+"descriptor de archivo :func:`eventfd`."
#: ../Doc/library/os.rst:3411
msgid ""
"Provide semaphore-like semantics for reads from a :func:`eventfd` file "
"descriptor. On read the internal counter is decremented by one."
msgstr ""
+"Proporcione semántica similar a un semáforo para las lecturas de un "
+"descriptor de archivo :func:`eventfd`. Al leer, el contador interno se "
+"reduce en uno."
#: ../Doc/library/os.rst:3415
-#, fuzzy
msgid ""
":ref:`Availability <availability>`: Linux 2.6.30 or newer with glibc 2.8 or "
"newer."
msgstr ""
-":ref:`Disponibilidad <availability>`: Linux 3.17 o posterior con glibc 2.27 "
+":ref:`Disponibilidad <availability>`: Linux 2.6.30 o posterior con glibc 2.8 "
"o posterior."
#: ../Doc/library/os.rst:3420
@@ -5071,17 +5160,16 @@ msgstr ""
"argumento ``path``."
#: ../Doc/library/os.rst:3460
-#, fuzzy
msgid ""
"Removes the extended filesystem attribute *attribute* from *path*. "
"*attribute* should be bytes or str (directly or indirectly through the :"
"class:`PathLike` interface). If it is a string, it is encoded with the :term:"
"`filesystem encoding and error handler`."
msgstr ""
-"Elimina el atributo del sistema de archivos extendido *attribute* de *path*. "
+"Elimina el atributo extendido del sistema de archivos *attribute* de *path*. "
"*attribute* debe ser bytes o str (directa o indirectamente a través de la "
-"interfaz :class:`PathLike`). Si es una cadena, se codifica con la "
-"codificación del sistema de archivos."
+"interfaz :class:`PathLike`). Si es una cadena, está codificada con :term:"
+"`filesystem encoding and error handler`."
#: ../Doc/library/os.rst:3468
msgid ""
@@ -5092,7 +5180,6 @@ msgstr ""
"argumentos ``path``, ``attribute``."
#: ../Doc/library/os.rst:3476
-#, fuzzy
msgid ""
"Set the extended filesystem attribute *attribute* on *path* to *value*. "
"*attribute* must be a bytes or str with no embedded NULs (directly or "
@@ -5104,13 +5191,13 @@ msgid ""
"will not be created and ``EEXISTS`` will be raised."
msgstr ""
"Establece el atributo del sistema de archivos extendido *attribute* en "
-"*path* a *value*. *attribute* debe ser un byte o str sin NUL incrustados "
-"(directa o indirectamente a través de la interfaz :class:`PathLike`). Si es "
-"un str, se codifica con la codificación del sistema de archivos. *flags* "
-"pueden ser :data:`XATTR_REPLACE` o :data:`XATTR_CREATE`. Si :data:"
-"`XATTR_REPLACE` se proporciona y el atributo no existe, se generará "
-"``EEXISTS``. Si :data:`XATTR_CREATE` se proporciona y el atributo ya existe, "
-"el atributo no se creará y se generará ``ENODATA``."
+"*path* en *value*. *attribute* debe ser un bytes o una cadena sin NUL "
+"incrustados (directa o indirectamente a través de la interfaz :class:"
+"`PathLike`). Si es una cadena, está codificado con :term:`filesystem "
+"encoding and error handler`. *flags* puede ser :data:`XATTR_REPLACE` o :data:"
+"`XATTR_CREATE`. Si se proporciona :data:`XATTR_REPLACE` y el atributo no "
+"existe, se generará ``ENODATA``. Si se proporciona :data:`XATTR_CREATE` y el "
+"atributo ya existe, no se creará el atributo y se lanza ``EEXISTS``."
#: ../Doc/library/os.rst:3490
msgid ""
@@ -6198,6 +6285,9 @@ msgid ""
"string. This argument may have no effect when using this function to launch "
"a document."
msgstr ""
+"Al iniciar una aplicación, especifique *arguments* para que se pase como una "
+"sola cadena. Es posible que este argumento no tenga ningún efecto cuando se "
+"utiliza esta función para iniciar un documento."
#: ../Doc/library/os.rst:4177
msgid ""
@@ -6205,6 +6295,9 @@ msgid ""
"*cwd* argument. This should be an absolute path. A relative *path* will be "
"resolved against this argument."
msgstr ""
+"El directorio de trabajo predeterminado se hereda, pero el argumento *cwd* "
+"puede anularlo. Este debería ser un camino absoluto. Un *path* relativo se "
+"resolverá contra este argumento."
#: ../Doc/library/os.rst:4181
msgid ""
@@ -6212,9 +6305,12 @@ msgid ""
"effect will depend on the application being launched. Values are integers as "
"supported by the Win32 :c:func:`ShellExecute` function."
msgstr ""
+"Utilice *show_cmd* para anular el estilo de ventana predeterminado. Si esto "
+"tiene algún efecto dependerá de la aplicación que se esté iniciando. Los "
+"valores son números enteros admitidos por la función Win32 :c:func:"
+"`ShellExecute`."
#: ../Doc/library/os.rst:4185
-#, fuzzy
msgid ""
":func:`startfile` returns as soon as the associated application is launched. "
"There is no option to wait for the application to close, and no way to "
@@ -6224,14 +6320,13 @@ msgid ""
"the :func:`os.path.normpath` function to ensure that paths are properly "
"encoded for Win32."
msgstr ""
-":func:`startfile` vuelve tan pronto como se inicia la aplicación asociada. "
-"No hay opción de esperar a que la aplicación se cierre y no hay forma de "
+":func:`startfile` retorna tan pronto como se inicia la aplicación asociada. "
+"No hay opción para esperar a que se cierre la aplicación y no hay forma de "
"recuperar el estado de salida de la aplicación. El parámetro *path* es "
-"relativo al directorio actual. Si desea utilizar una ruta absoluta, "
-"asegúrese de que el primer carácter no sea una barra inclinada (``'/'``); la "
-"función subyacente Win32 :c:func:`ShellExecute` no funciona si lo es. Use la "
-"función :func:`os.path.normpath` para asegurarse de que la ruta esté "
-"codificada correctamente para Win32."
+"relativo al directorio actual o *cwd*. Si desea utilizar una ruta absoluta, "
+"asegúrese de que el primer carácter no sea una barra (``'/'``) Utilice :mod:"
+"`pathlib` o la función :func:`os.path.normpath` para asegurarse de que las "
+"rutas estén codificadas correctamente para Win32."
#: ../Doc/library/os.rst:4193
msgid ""
@@ -6253,22 +6348,22 @@ msgstr ""
"argumentos ``path``, ``operation``."
#: ../Doc/library/os.rst:4199
-#, fuzzy
msgid ""
"Raises an :ref:`auditing event <auditing>` ``os.startfile/2`` with arguments "
"``path``, ``operation``, ``arguments``, ``cwd``, ``show_cmd``."
msgstr ""
-"Lanza un :ref:`evento de auditoría <auditing>` ``os.startfile`` con "
-"argumentos ``path``, ``operation``."
+"Lanza un :ref:`evento de auditoria <auditing>` ``os.startfile/2`` con "
+"argumentos ``path``, ``operation``, ``arguments``, ``cwd``, ``show_cmd``."
#: ../Doc/library/os.rst:4203
msgid ""
"Added the *arguments*, *cwd* and *show_cmd* arguments, and the ``os."
"startfile/2`` audit event."
msgstr ""
+"Se agregaron los argumentos *arguments*, *cwd* y *show_cmd*, y el evento de "
+"auditoría ``os.startfile/2``."
#: ../Doc/library/os.rst:4210
-#, fuzzy
msgid ""
"Execute the command (a string) in a subshell. This is implemented by "
"calling the Standard C function :c:func:`system`, and has the same "
@@ -6278,17 +6373,21 @@ msgid ""
"not specify the meaning of the return value of the C function, so the return "
"value of the Python function is system-dependent."
msgstr ""
-"Ejecute el comando (una cadena) en una subshell. Esto se implementa llamando "
-"a la función Estándar C :c:func:`system`, y tiene las mismas limitaciones. "
+"Ejecute el comando (una cadena) en una subcapa. Esto se implementa llamando "
+"a la función C estándar :c:func:`system` y tiene las mismas limitaciones. "
"Los cambios en :data:`sys.stdin`, etc. no se reflejan en el entorno del "
"comando ejecutado. Si *command* genera alguna salida, se enviará al flujo de "
-"salida estándar del intérprete."
+"salida estándar del intérprete. El estándar C no especifica el significado "
+"del valor de retorno de la función C, por lo que el valor de retorno de la "
+"función de Python depende del sistema."
#: ../Doc/library/os.rst:4218
msgid ""
"On Unix, the return value is the exit status of the process encoded in the "
"format specified for :func:`wait`."
msgstr ""
+"En Unix, el valor de retorno es el estado de salida del proceso codificado "
+"en el formato especificado para :func:`wait`."
#: ../Doc/library/os.rst:4221
msgid ""
@@ -6884,14 +6983,13 @@ msgstr ""
"de política de programación anteriores."
#: ../Doc/library/os.rst:4638
-#, fuzzy
msgid ""
"Set the scheduling parameters for the process with PID *pid*. A *pid* of 0 "
"means the calling process. *param* is a :class:`sched_param` instance."
msgstr ""
-"Establece parámetros de programación para el proceso con PID *pid*. Un *pid* "
-"de 0 significa el proceso de llamada. *param* es una instancia de :class:"
-"`sched_param`."
+"Establece los parámetros de programación para el proceso con PID *pid*. Un "
+"*pid* de 0 significa el proceso de llamada. *param* es una instancia de :"
+"class:`sched_param`."
#: ../Doc/library/os.rst:4644
msgid ""
diff --git a/scripts/translate.py b/scripts/translate.py
index 16d0c50aa4..26c8d5f80d 100644
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -44,6 +44,8 @@
":RFC:`[^`]+`",
":rfc:`[^`]+`",
":doc:`[^`]+`",
+ ":manpage:`[^`]+`",
+ ":sup:`[^`]+`",
"``[^`]+``",
"`[^`]+`__",
"`[^`]+`_",
|
qtile__qtile-2450 | Widgets missing from docs
Some widgets seem to have disappeared from the [official docs](http://docs.qtile.org/en/latest/manual/ref/widgets.html) (specifically `Mpris2` and `KeyboardKbdd`).
Given that these are two widgets that I've done some work on, it may be linked to that (but not immediately sure how).
I can do some bisecting and report back.
# Qtile version
Latest (3fb1e46 at time of posting)
EDIT: I can't replicate this on my system. However, given the identity of the two widgets, my guess is that this is related to `dbus-next` and that the modules need to be included in the `docs/conf.py` file here:
https://github.com/qtile/qtile/blob/3fb1e4645910c67bf6d302d59302dfb88f10c4ad/docs/conf.py#L27-L48
I'll submit a PR on that basis.
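
For context, a minimal sketch of the mocking mechanism involved, using the `dbus_next` module names from the linked `conf.py` (the actual fix appears in the diff below):

```python
import sys
from unittest.mock import MagicMock

# Sphinx autodoc imports each widget module; modules that are not installed in
# the docs build environment get a MagicMock stand-in so the import succeeds.
MOCK_MODULES = ['dbus_next', 'dbus_next.aio', 'dbus_next.service', 'dbus_next.constants']
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)

import dbus_next.aio  # resolves to the mock instead of raising ImportError
```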
| [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Qtile documentation build configuration file, created by\n# sphinx-quickstart on Sat Feb 11 15:20:21 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport setuptools_scm\nimport sys\nfrom unittest.mock import MagicMock\n\n\nclass Mock(MagicMock):\n # xcbq does a dir() on objects and pull stuff out of them and tries to sort\n # the result. MagicMock has a bunch of stuff that can't be sorted, so let's\n # like about dir().\n def __dir__(self):\n return []\n\nMOCK_MODULES = [\n 'libqtile._ffi_pango',\n 'libqtile.backend.x11._ffi_xcursors',\n 'cairocffi',\n 'cairocffi.xcb',\n 'cairocffi.pixbuf',\n 'cffi',\n 'dateutil',\n 'dateutil.parser',\n 'iwlib',\n 'keyring',\n 'mpd',\n 'psutil',\n 'trollius',\n 'xcffib',\n 'xcffib.randr',\n 'xcffib.render',\n 'xcffib.xfixes',\n 'xcffib.xinerama',\n 'xcffib.xproto',\n 'xdg.IconTheme',\n]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.seqdiag',\n 'sphinx_qtile',\n 'numpydoc',\n]\n\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Qtile'\ncopyright = u'2008-2020, Aldo Cortesi and contributers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = setuptools_scm.get_version(root=\"..\")\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output --------fautod-------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#html_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {'index': 'index.html'}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Qtiledoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Qtile.tex', u'Qtile Documentation',\n u'Aldo Cortesi', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\n#man_pages = []\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Qtile', u'Qtile Documentation',\n u'Aldo Cortesi', 'Qtile', 'A hackable tiling window manager.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# only import and set the theme if we're building docs locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ngraphviz_dot_args = ['-Lg']\n\n# A workaround for the responsive tables always having annoying scrollbars.\ndef setup(app):\n app.add_css_file(\"no_scrollbars.css\")\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Qtile documentation build configuration file, created by\n# sphinx-quickstart on Sat Feb 11 15:20:21 2012.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport setuptools_scm\nimport sys\nfrom unittest.mock import MagicMock\n\n\nclass Mock(MagicMock):\n # xcbq does a dir() on objects and pull stuff out of them and tries to sort\n # the result. MagicMock has a bunch of stuff that can't be sorted, so let's\n # like about dir().\n def __dir__(self):\n return []\n\nMOCK_MODULES = [\n 'libqtile._ffi_pango',\n 'libqtile.backend.x11._ffi_xcursors',\n 'cairocffi',\n 'cairocffi.xcb',\n 'cairocffi.pixbuf',\n 'cffi',\n 'dateutil',\n 'dateutil.parser',\n 'dbus_next',\n 'dbus_next.aio',\n 'dbus_next.service',\n 'dbus_next.constants',\n 'iwlib',\n 'keyring',\n 'mpd',\n 'psutil',\n 'trollius',\n 'xcffib',\n 'xcffib.randr',\n 'xcffib.render',\n 'xcffib.xfixes',\n 'xcffib.xinerama',\n 'xcffib.xproto',\n 'xdg.IconTheme',\n]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.coverage',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.todo',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.seqdiag',\n 'sphinx_qtile',\n 'numpydoc',\n]\n\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Qtile'\ncopyright = u'2008-2020, Aldo Cortesi and contributers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = setuptools_scm.get_version(root=\"..\")\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output --------fautod-------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#html_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {'index': 'index.html'}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. 
Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Qtiledoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'Qtile.tex', u'Qtile Documentation',\n u'Aldo Cortesi', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\n#man_pages = []\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Qtile', u'Qtile Documentation',\n u'Aldo Cortesi', 'Qtile', 'A hackable tiling window manager.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# only import and set the theme if we're building docs locally\nif not os.environ.get('READTHEDOCS'):\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\ngraphviz_dot_args = ['-Lg']\n\n# A workaround for the responsive tables always having annoying scrollbars.\ndef setup(app):\n app.add_css_file(\"no_scrollbars.css\")\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index aaaa5d23bf..bc52457694 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -33,6 +33,10 @@ def __dir__(self):
'cffi',
'dateutil',
'dateutil.parser',
+ 'dbus_next',
+ 'dbus_next.aio',
+ 'dbus_next.service',
+ 'dbus_next.constants',
'iwlib',
'keyring',
'mpd',
|
wemake-services__wemake-python-styleguide-1630 | Allow async magic __call__
# Thesis
The `__call__` method should be allowed to be async.
```python
import asyncio
import os
class RequestHandler(object):
    async def __call__(self, reader, writer):
        ipc_input = (await reader.readline()).decode('utf8').strip()
        writer.write('You sent: "{0}"\n'.format(ipc_input).encode('utf8'))

async def bootstrap():
    handler = RequestHandler()
    server = await asyncio.start_unix_server(handler, path='/var/tmp/my_sock')
    os.chmod('/var/tmp/my_sock', 0o666)
    while True:
        try:
            await asyncio.sleep(120)
        except asyncio.CancelledError:
            server.close()
            break

asyncio.run(bootstrap())
```
## Reasoning
Much like how `__call__` can be a generator, there is no reason it can't be async.
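
For comparison, a minimal sketch of the generator case that the style guide already permits (`YIELD_MAGIC_METHODS_BLACKLIST` below exempts `__call__` and `__iter__`); the class is hypothetical:

```python
class LineReader(object):
    """Hypothetical example: ``__call__`` is already allowed to be a generator."""

    def __init__(self, lines):
        self._lines = lines

    def __call__(self):
        # Yielding from __call__ passes the style guide today, so an
        # ``async def __call__`` deserves the same treatment.
        for line in self._lines:
            yield line.strip()
```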
| [
{
"content": "\"\"\"\nThis module contains list of white- and black-listed ``python`` members.\n\nWe add values here when we want to make them public.\nOr when a value is reused in several places.\nThen, we automatically have to add it here and document it.\n\nOther constants that are not used across modules\nand does not require to be documented can be defined where they are used.\n\nAll values here must be documented with ``#:`` comments.\n\"\"\"\n\nimport math\nimport re\n\nfrom typing_extensions import Final\n\n#: List of functions we forbid to use.\nFUNCTIONS_BLACKLIST: Final = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Termination:\n 'exit',\n 'quit',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'print',\n 'pprint',\n 'input',\n 'breakpoint',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n\n # Gratis:\n 'copyright',\n 'help',\n 'credits',\n\n # Dynamic imports:\n '__import__',\n\n # OOP:\n 'staticmethod',\n\n # Mypy:\n 'reveal_type',\n))\n\n#: List of module metadata we forbid to use.\nMODULE_METADATA_VARIABLES_BLACKLIST: Final = frozenset((\n '__author__',\n '__all__',\n '__version__',\n '__about__',\n))\n\n#: List of variable names we forbid to use.\nVARIABLE_NAMES_BLACKLIST: Final = frozenset((\n # Meaningless words:\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'variable',\n 'content',\n 'contents',\n 'info',\n 'handle',\n 'handler',\n 'file',\n 'obj',\n 'objects',\n 'objs',\n 'some',\n 'do',\n 'param',\n 'params',\n 'parameters',\n\n # Confuseables:\n 'no',\n 'true',\n 'false',\n\n # Names from examples:\n 'foo',\n 'bar',\n 'baz',\n))\n\n#: List of characters sequences that are hard to read.\nUNREADABLE_CHARACTER_COMBINATIONS: Final = frozenset((\n '1l',\n '1I',\n '0O',\n 'O0',\n # Not included: 'lI', 'l1', 'Il'\n # Because these names are quite common in real words.\n))\n\n#: List of special names that are used only as first argument in methods.\nSPECIAL_ARGUMENT_NAMES_WHITELIST: Final = frozenset((\n 'self',\n 'cls',\n 'mcs',\n))\n\n#: List of all magic methods from the python docs.\nALL_MAGIC_METHODS: Final = frozenset((\n '__new__',\n '__init__',\n '__del__',\n\n '__repr__',\n '__str__',\n '__bytes__',\n '__format__',\n\n '__lt__',\n '__le__',\n '__eq__',\n '__ne__',\n '__gt__',\n '__ge__',\n\n '__hash__',\n '__bool__',\n\n '__getattr__',\n '__getattribute__',\n '__setattr__',\n '__delattr__',\n '__dir__',\n\n '__get__',\n '__set__',\n '__delete__',\n '__set_name__',\n\n '__init_subclass__',\n '__instancecheck__',\n '__subclasscheck__',\n '__class_getitem__',\n\n '__call__',\n '__len__',\n '__length_hint__',\n '__getitem__',\n '__setitem__',\n '__delitem__',\n '__missing__',\n '__iter__',\n '__reversed__',\n '__contains__',\n\n '__add__',\n '__sub__',\n '__mul__',\n '__matmul__',\n '__truediv__',\n '__floordiv__',\n '__mod__',\n '__divmod__',\n '__pow__',\n '__lshift__',\n '__rshift__',\n '__and__',\n '__xor__',\n '__or__',\n '__radd__',\n '__rsub__',\n '__rmul__',\n '__rmatmul__',\n '__rtruediv__',\n '__rfloordiv__',\n '__rmod__',\n '__rdivmod__',\n '__rpow__',\n '__rlshift__',\n '__rrshift__',\n '__rand__',\n '__rxor__',\n '__ror__',\n '__iadd__',\n '__isub__',\n '__imul__',\n '__imatmul__',\n '__itruediv__',\n '__ifloordiv__',\n '__imod__',\n '__ipow__',\n '__ilshift__',\n '__irshift__',\n '__iand__',\n '__ixor__',\n '__ior__',\n '__neg__',\n '__pos__',\n '__abs__',\n '__invert__',\n '__complex__',\n '__int__',\n '__float__',\n '__index__',\n 
'__round__',\n '__trunc__',\n '__floor__',\n '__ceil__',\n\n '__enter__',\n '__exit__',\n\n '__await__',\n '__aiter__',\n '__anext__',\n '__aenter__',\n '__aexit__',\n))\n\n#: List of magic methods that are forbidden to use.\nMAGIC_METHODS_BLACKLIST: Final = frozenset((\n # Since we don't use `del`:\n '__del__',\n '__delitem__',\n '__delete__',\n\n # Since we don't use `pickle`:\n '__reduce__',\n '__reduce_ex__',\n\n '__dir__', # since we don't use `dir()`\n '__delattr__', # since we don't use `delattr()`\n))\n\n#: List of magic methods that are not allowed to be generators.\nYIELD_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # Allowed to be used with ``yield`` keyword:\n '__call__', # Fixes Issue:146\n '__iter__',\n})\n\n#: List of magic methods that are not allowed to be async.\nASYNC_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # In order of appearance on\n # https://docs.python.org/3/reference/datamodel.html#basic-customization\n # Allowed magic methods are:\n '__anext__',\n '__aenter__',\n '__aexit__',\n})\n\n#: List of builtin classes that are allowed to subclass.\nALLOWED_BUILTIN_CLASSES: Final = frozenset((\n 'type',\n 'object',\n))\n\n#: List of nested functions' names we allow to use.\nNESTED_FUNCTIONS_WHITELIST: Final = frozenset((\n 'decorator',\n 'factory',\n 'wrapper',\n))\n\n#: List of allowed ``__future__`` imports.\nFUTURE_IMPORTS_WHITELIST: Final = frozenset((\n 'annotations',\n 'generator_stop',\n))\n\n#: List of blacklisted module names.\nMODULE_NAMES_BLACKLIST: Final = frozenset((\n 'util',\n 'utils',\n 'utilities',\n 'helpers',\n))\n\n#: List of allowed module magic names.\nMAGIC_MODULE_NAMES_WHITELIST: Final = frozenset((\n '__init__',\n '__main__',\n))\n\n#: List of bad magic module functions.\nMAGIC_MODULE_NAMES_BLACKLIST: Final = frozenset((\n '__getattr__',\n '__dir__',\n))\n\n#: Regex pattern to name modules.\nMODULE_NAME_PATTERN: Final = re.compile(r'^_?_?[a-z][a-z\\d_]*[a-z\\d](__)?$')\n\n#: Common numbers that are allowed to be used without being called \"magic\".\nMAGIC_NUMBERS_WHITELIST: Final = frozenset((\n 0, # both int and float\n 0.1,\n 0.5,\n 1.0,\n 100,\n 1000,\n 1024, # bytes\n 24, # hours\n 60, # seconds, minutes\n\n 1j, # imaginary part of a complex number\n))\n\n#: Maximum amount of ``pragma`` no-cover comments per module.\nMAX_NO_COVER_COMMENTS: Final = 5\n\n#: Maximum length of ``yield`` ``tuple`` expressions.\nMAX_LEN_YIELD_TUPLE: Final = 5\n\n#: Maximum number of compare nodes in a single expression.\nMAX_COMPARES: Final = 2\n\n#: Maximum number of conditions in a single ``if`` or ``while`` statement.\nMAX_CONDITIONS: Final = 4\n\n#: Maximum number of `elif` blocks in a single `if` condition:\nMAX_ELIFS: Final = 3\n\n#: Maximum number of ``except`` cases in a single ``try`` clause.\nMAX_EXCEPT_CASES: Final = 3\n\n#: Approximate constants which real values should be imported from math module.\nMATH_APPROXIMATE_CONSTANTS: Final = frozenset((\n math.pi,\n math.e,\n math.tau,\n))\n\n#: List of vague method names that may cause confusion if imported as is:\nVAGUE_IMPORTS_BLACKLIST: Final = frozenset((\n 'read',\n 'write',\n 'load',\n 'loads',\n 'dump',\n 'dumps',\n 'parse',\n 'safe_load',\n 'safe_dump',\n 'load_all',\n 'dump_all',\n 'safe_load_all',\n 'safe_dump_all',\n))\n\n#: List of literals without arguments we forbid to use.\nLITERALS_BLACKLIST: Final = frozenset((\n 'int',\n 'float',\n 'str',\n 'bytes',\n 'bool',\n 'complex',\n))\n\n#: List of functions in which arguments must be 
tuples.\nTUPLE_ARGUMENTS_METHODS: Final = frozenset((\n 'frozenset',\n))\n\n#: Conditions that can appear in the ``if`` statement to allow nested imports.\nALLOWED_NESTED_IMPORTS_CONDITIONS: Final = frozenset((\n 'TYPE_CHECKING',\n))\n\n#: List of commonly used aliases\nALIAS_NAMES_WHITELIST: Final = frozenset((\n 'np',\n 'pd',\n 'df',\n 'plt',\n 'sns',\n 'tf',\n 'cv',\n))\n\n# Internal variables\n# ==================\n\n# Please, do not touch values beyond this line!\n# ---------------------------------------------\n\n# They are not publicly documented since they are not used by the end user.\n# But, we still need them to be defined here.\n\n# Used as a default filename, when it is not passed by flake8:\nSTDIN: Final = 'stdin'\n\n# Used to specify as a placeholder for `__init__`:\nINIT: Final = '__init__'\n\n# Used to determine when we are running on Windows:\nWINDOWS_OS: Final = 'nt'\n\n# Used as a placeholder for special `_` variable:\nUNUSED_PLACEHOLDER: Final = '_'\n",
"path": "wemake_python_styleguide/constants.py"
}
] | [
{
"content": "\"\"\"\nThis module contains list of white- and black-listed ``python`` members.\n\nWe add values here when we want to make them public.\nOr when a value is reused in several places.\nThen, we automatically have to add it here and document it.\n\nOther constants that are not used across modules\nand does not require to be documented can be defined where they are used.\n\nAll values here must be documented with ``#:`` comments.\n\"\"\"\n\nimport math\nimport re\n\nfrom typing_extensions import Final\n\n#: List of functions we forbid to use.\nFUNCTIONS_BLACKLIST: Final = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Termination:\n 'exit',\n 'quit',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'print',\n 'pprint',\n 'input',\n 'breakpoint',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n\n # Gratis:\n 'copyright',\n 'help',\n 'credits',\n\n # Dynamic imports:\n '__import__',\n\n # OOP:\n 'staticmethod',\n\n # Mypy:\n 'reveal_type',\n))\n\n#: List of module metadata we forbid to use.\nMODULE_METADATA_VARIABLES_BLACKLIST: Final = frozenset((\n '__author__',\n '__all__',\n '__version__',\n '__about__',\n))\n\n#: List of variable names we forbid to use.\nVARIABLE_NAMES_BLACKLIST: Final = frozenset((\n # Meaningless words:\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'variable',\n 'content',\n 'contents',\n 'info',\n 'handle',\n 'handler',\n 'file',\n 'obj',\n 'objects',\n 'objs',\n 'some',\n 'do',\n 'param',\n 'params',\n 'parameters',\n\n # Confuseables:\n 'no',\n 'true',\n 'false',\n\n # Names from examples:\n 'foo',\n 'bar',\n 'baz',\n))\n\n#: List of characters sequences that are hard to read.\nUNREADABLE_CHARACTER_COMBINATIONS: Final = frozenset((\n '1l',\n '1I',\n '0O',\n 'O0',\n # Not included: 'lI', 'l1', 'Il'\n # Because these names are quite common in real words.\n))\n\n#: List of special names that are used only as first argument in methods.\nSPECIAL_ARGUMENT_NAMES_WHITELIST: Final = frozenset((\n 'self',\n 'cls',\n 'mcs',\n))\n\n#: List of all magic methods from the python docs.\nALL_MAGIC_METHODS: Final = frozenset((\n '__new__',\n '__init__',\n '__del__',\n\n '__repr__',\n '__str__',\n '__bytes__',\n '__format__',\n\n '__lt__',\n '__le__',\n '__eq__',\n '__ne__',\n '__gt__',\n '__ge__',\n\n '__hash__',\n '__bool__',\n\n '__getattr__',\n '__getattribute__',\n '__setattr__',\n '__delattr__',\n '__dir__',\n\n '__get__',\n '__set__',\n '__delete__',\n '__set_name__',\n\n '__init_subclass__',\n '__instancecheck__',\n '__subclasscheck__',\n '__class_getitem__',\n\n '__call__',\n '__len__',\n '__length_hint__',\n '__getitem__',\n '__setitem__',\n '__delitem__',\n '__missing__',\n '__iter__',\n '__reversed__',\n '__contains__',\n\n '__add__',\n '__sub__',\n '__mul__',\n '__matmul__',\n '__truediv__',\n '__floordiv__',\n '__mod__',\n '__divmod__',\n '__pow__',\n '__lshift__',\n '__rshift__',\n '__and__',\n '__xor__',\n '__or__',\n '__radd__',\n '__rsub__',\n '__rmul__',\n '__rmatmul__',\n '__rtruediv__',\n '__rfloordiv__',\n '__rmod__',\n '__rdivmod__',\n '__rpow__',\n '__rlshift__',\n '__rrshift__',\n '__rand__',\n '__rxor__',\n '__ror__',\n '__iadd__',\n '__isub__',\n '__imul__',\n '__imatmul__',\n '__itruediv__',\n '__ifloordiv__',\n '__imod__',\n '__ipow__',\n '__ilshift__',\n '__irshift__',\n '__iand__',\n '__ixor__',\n '__ior__',\n '__neg__',\n '__pos__',\n '__abs__',\n '__invert__',\n '__complex__',\n '__int__',\n '__float__',\n '__index__',\n 
'__round__',\n '__trunc__',\n '__floor__',\n '__ceil__',\n\n '__enter__',\n '__exit__',\n\n '__await__',\n '__aiter__',\n '__anext__',\n '__aenter__',\n '__aexit__',\n))\n\n#: List of magic methods that are forbidden to use.\nMAGIC_METHODS_BLACKLIST: Final = frozenset((\n # Since we don't use `del`:\n '__del__',\n '__delitem__',\n '__delete__',\n\n # Since we don't use `pickle`:\n '__reduce__',\n '__reduce_ex__',\n\n '__dir__', # since we don't use `dir()`\n '__delattr__', # since we don't use `delattr()`\n))\n\n#: List of magic methods that are not allowed to be generators.\nYIELD_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # Allowed to be used with ``yield`` keyword:\n '__call__', # Fixes Issue:146\n '__iter__',\n})\n\n#: List of magic methods that are not allowed to be async.\nASYNC_MAGIC_METHODS_BLACKLIST: Final = ALL_MAGIC_METHODS.difference({\n # In order of appearance on\n # https://docs.python.org/3/reference/datamodel.html#basic-customization\n # Allowed magic methods are:\n '__anext__',\n '__aenter__',\n '__aexit__',\n '__call__',\n})\n\n#: List of builtin classes that are allowed to subclass.\nALLOWED_BUILTIN_CLASSES: Final = frozenset((\n 'type',\n 'object',\n))\n\n#: List of nested functions' names we allow to use.\nNESTED_FUNCTIONS_WHITELIST: Final = frozenset((\n 'decorator',\n 'factory',\n 'wrapper',\n))\n\n#: List of allowed ``__future__`` imports.\nFUTURE_IMPORTS_WHITELIST: Final = frozenset((\n 'annotations',\n 'generator_stop',\n))\n\n#: List of blacklisted module names.\nMODULE_NAMES_BLACKLIST: Final = frozenset((\n 'util',\n 'utils',\n 'utilities',\n 'helpers',\n))\n\n#: List of allowed module magic names.\nMAGIC_MODULE_NAMES_WHITELIST: Final = frozenset((\n '__init__',\n '__main__',\n))\n\n#: List of bad magic module functions.\nMAGIC_MODULE_NAMES_BLACKLIST: Final = frozenset((\n '__getattr__',\n '__dir__',\n))\n\n#: Regex pattern to name modules.\nMODULE_NAME_PATTERN: Final = re.compile(r'^_?_?[a-z][a-z\\d_]*[a-z\\d](__)?$')\n\n#: Common numbers that are allowed to be used without being called \"magic\".\nMAGIC_NUMBERS_WHITELIST: Final = frozenset((\n 0, # both int and float\n 0.1,\n 0.5,\n 1.0,\n 100,\n 1000,\n 1024, # bytes\n 24, # hours\n 60, # seconds, minutes\n\n 1j, # imaginary part of a complex number\n))\n\n#: Maximum amount of ``pragma`` no-cover comments per module.\nMAX_NO_COVER_COMMENTS: Final = 5\n\n#: Maximum length of ``yield`` ``tuple`` expressions.\nMAX_LEN_YIELD_TUPLE: Final = 5\n\n#: Maximum number of compare nodes in a single expression.\nMAX_COMPARES: Final = 2\n\n#: Maximum number of conditions in a single ``if`` or ``while`` statement.\nMAX_CONDITIONS: Final = 4\n\n#: Maximum number of `elif` blocks in a single `if` condition:\nMAX_ELIFS: Final = 3\n\n#: Maximum number of ``except`` cases in a single ``try`` clause.\nMAX_EXCEPT_CASES: Final = 3\n\n#: Approximate constants which real values should be imported from math module.\nMATH_APPROXIMATE_CONSTANTS: Final = frozenset((\n math.pi,\n math.e,\n math.tau,\n))\n\n#: List of vague method names that may cause confusion if imported as is:\nVAGUE_IMPORTS_BLACKLIST: Final = frozenset((\n 'read',\n 'write',\n 'load',\n 'loads',\n 'dump',\n 'dumps',\n 'parse',\n 'safe_load',\n 'safe_dump',\n 'load_all',\n 'dump_all',\n 'safe_load_all',\n 'safe_dump_all',\n))\n\n#: List of literals without arguments we forbid to use.\nLITERALS_BLACKLIST: Final = frozenset((\n 'int',\n 'float',\n 'str',\n 'bytes',\n 'bool',\n 'complex',\n))\n\n#: List of functions in which arguments must be 
tuples.\nTUPLE_ARGUMENTS_METHODS: Final = frozenset((\n 'frozenset',\n))\n\n#: Conditions that can appear in the ``if`` statement to allow nested imports.\nALLOWED_NESTED_IMPORTS_CONDITIONS: Final = frozenset((\n 'TYPE_CHECKING',\n))\n\n#: List of commonly used aliases\nALIAS_NAMES_WHITELIST: Final = frozenset((\n 'np',\n 'pd',\n 'df',\n 'plt',\n 'sns',\n 'tf',\n 'cv',\n))\n\n# Internal variables\n# ==================\n\n# Please, do not touch values beyond this line!\n# ---------------------------------------------\n\n# They are not publicly documented since they are not used by the end user.\n# But, we still need them to be defined here.\n\n# Used as a default filename, when it is not passed by flake8:\nSTDIN: Final = 'stdin'\n\n# Used to specify as a placeholder for `__init__`:\nINIT: Final = '__init__'\n\n# Used to determine when we are running on Windows:\nWINDOWS_OS: Final = 'nt'\n\n# Used as a placeholder for special `_` variable:\nUNUSED_PLACEHOLDER: Final = '_'\n",
"path": "wemake_python_styleguide/constants.py"
}
] | diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5f9abc0dc..9a6577b3b 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -18,6 +18,8 @@ jobs:
- name: Install poetry
run: |
+ pip install -U pip
+
curl -sSL \
"https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py" | python
@@ -39,7 +41,7 @@ jobs:
make test
- - name: Upload coverage to Codecov
+ - name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
file: ./coverage.xml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5773218d4..c409bc377 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@ Semantic versioning in our case means:
### Features
- Forbids using too many variables in a tuple unpacking
+- Allow `__call__` method to be asynchronous
### Bugfixes
diff --git a/Makefile b/Makefile
index bc043cb7d..d75786b5f 100644
--- a/Makefile
+++ b/Makefile
@@ -16,7 +16,7 @@ unit:
package:
poetry run poetry check
poetry run pip check
- poetry run safety check --bare --full-report
+ poetry run safety check --full-report
.PHONY: test
test: lint unit package
diff --git a/tests/test_visitors/test_ast/test_classes/test_methods/test_async_magic_methods.py b/tests/test_visitors/test_ast/test_classes/test_methods/test_async_magic_methods.py
index da974edd0..47ff9bbcf 100644
--- a/tests/test_visitors/test_ast/test_classes/test_methods/test_async_magic_methods.py
+++ b/tests/test_visitors/test_ast/test_classes/test_methods/test_async_magic_methods.py
@@ -46,6 +46,7 @@ def test_wrong_async_magic_used(
'__anext__',
'__aenter__',
'__aexit__',
+ '__call__',
'__custom__',
])
def test_correct_async_magic_used(
diff --git a/wemake_python_styleguide/constants.py b/wemake_python_styleguide/constants.py
index c570a0f1f..acfedccf4 100644
--- a/wemake_python_styleguide/constants.py
+++ b/wemake_python_styleguide/constants.py
@@ -266,6 +266,7 @@
'__anext__',
'__aenter__',
'__aexit__',
+ '__call__',
})
#: List of builtin classes that are allowed to subclass.
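The effect of removing `__call__` from `ASYNC_MAGIC_METHODS_BLACKLIST` is that a class may now define its call operator as a coroutine without tripping the async-magic-method check. A minimal sketch of the newly permitted pattern (the class name and URL are illustrative only, not from the project):

```python
import asyncio


class Fetcher:
    # With __call__ no longer blacklisted, an async call operator
    # passes the wemake-python-styleguide check.
    async def __call__(self, url: str) -> str:
        await asyncio.sleep(0)  # stand-in for real async I/O
        return "fetched " + url


async def main() -> None:
    fetch = Fetcher()
    print(await fetch("https://example.com"))


asyncio.run(main())
```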
|
open-telemetry__opentelemetry-python-1145 | ReadTheDocs configuration issue
The RTD documentation has not been updated with my changes from #1096: https://opentelemetry-python.readthedocs.io/en/latest/sdk/resources.html
However, it works fine locally.
**Steps to reproduce**
Reproduced in all builds on RTD. For example, see logs here: https://readthedocs.org/projects/opentelemetry-python/builds/11937984/ (all builds [here](https://readthedocs.org/projects/google-cloud-opentelemetry/builds/))
You can run the commands in the logs to repro locally and see many of these types of errors:
```
WARNING: autodoc: failed to import module 'zipkin' from module 'opentelemetry.exporter'; the following exception was raised:
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/sphinx/ext/autodoc/importer.py", line 32, in import_module
return importlib.import_module(modname)
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 783, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py", line 72, in <module>
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/__init__.py", line 19, in <module>
from . import metrics, trace, util
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py", line 33, in <module>
from opentelemetry.sdk.resources import Resource
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/checkouts/latest/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py", line 98, in <module>
OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 481, in get_distribution
dist = get_provider(dist)
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 357, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/home/docs/checkouts/readthedocs.org/user_builds/opentelemetry-python/envs/latest/lib/python3.8/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'opentelemetry-sdk' distribution was not found and is required by the application
```
**What is the expected behavior?**
<img width="1218" alt="Screen Shot 2020-09-22 at 3 23 21 PM" src="https://user-images.githubusercontent.com/1510004/93927952-9410df00-fce7-11ea-9328-2d4c9951089e.png">
**What is the actual behavior?**
See logs above
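The traceback narrows the cause down: `pkg_resources.get_distribution()` looks up installed distribution metadata, so putting a source checkout on `sys.path` (which is what `docs/conf.py` does for autodoc) is not enough. A minimal sketch of the failure mode, with a hypothetical checkout path:

```python
import sys

import pkg_resources

# Making a package importable is not the same as installing it:
# get_distribution() consults *.dist-info / *.egg-info metadata,
# which only exists after `pip install`.
sys.path.insert(0, "/path/to/opentelemetry-sdk/src")  # hypothetical path

try:
    print(pkg_resources.get_distribution("opentelemetry-sdk").version)
except pkg_resources.DistributionNotFound:
    # The error seen in the RTD logs above; installing the package
    # into the docs build venv resolves it.
    print("distribution metadata missing")
```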
| [
{
"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom os import listdir\nfrom os.path import isdir, join\n\n# configure django to avoid the following exception:\n# django.core.exceptions.ImproperlyConfigured: Requested settings, but settings\n# are not configured. You must either define the environment variable\n# DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.\nfrom django.conf import settings\n\nsettings.configure()\n\n\nsource_dirs = [\n os.path.abspath(\"../opentelemetry-api/src/\"),\n os.path.abspath(\"../opentelemetry-sdk/src/\"),\n os.path.abspath(\"../opentelemetry-instrumentation/src/\"),\n]\n\nexp = \"../exporter\"\nexp_dirs = [\n os.path.abspath(\"/\".join([\"../exporter\", f, \"src\"]))\n for f in listdir(exp)\n if isdir(join(exp, f))\n]\n\ninstr = \"../instrumentation\"\ninstr_dirs = [\n os.path.abspath(\"/\".join([\"../instrumentation\", f, \"src\"]))\n for f in listdir(instr)\n if isdir(join(instr, f))\n]\n\nsys.path[:0] = source_dirs + exp_dirs + instr_dirs\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry Python\"\ncopyright = \"OpenTelemetry Authors\" # pylint: disable=redefined-builtin\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n # Support external links to different versions in the Github repo\n \"sphinx.ext.extlinks\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"opentracing\": (\n \"https://opentracing-python.readthedocs.io/en/latest/\",\n None,\n ),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"wrapt\": (\"https://wrapt.readthedocs.io/en/latest/\", None),\n \"pymongo\": (\"https://pymongo.readthedocs.io/en/stable/\", None),\n}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\n# Sphinx does not recognize generic type TypeVars\n# Container supposedly were fixed, but does not work\n# https://github.com/sphinx-doc/sphinx/pull/3744\nnitpick_ignore = [\n (\"py:class\", \"ValueT\"),\n (\"py:class\", \"MetricT\"),\n # Even if wrapt is added to intersphinx_mapping, sphinx keeps failing\n # with \"class reference target not found: ObjectProxy\".\n (\"py:class\", \"ObjectProxy\"),\n # TODO: Understand why sphinx is not able to find this local class\n (\"py:class\", \"opentelemetry.trace.propagation.textmap.TextMapPropagator\",),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.extract\",\n ),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.inject\",\n ),\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Support external links to specific versions of the files in the Github repo\nbranch = os.environ.get(\"READTHEDOCS_VERSION\")\nif branch is None or branch == \"latest\":\n branch = \"master\"\n\nREPO = \"open-telemetry/opentelemetry-python/\"\nscm_raw_web = \"https://raw.githubusercontent.com/\" + REPO + branch\nscm_web = \"https://github.com/\" + REPO + \"blob/\" + branch\n\n# Store variables in the epilogue so they are globally available.\nrst_epilog = \"\"\"\n.. |SCM_WEB| replace:: {s}\n.. |SCM_RAW_WEB| replace:: {sr}\n.. 
|SCM_BRANCH| replace:: {b}\n\"\"\".format(\n s=scm_web, sr=scm_raw_web, b=branch\n)\n\n# used to have links to repo files\nextlinks = {\n \"scm_raw_web\": (scm_raw_web + \"/%s\", \"scm_raw_web\"),\n \"scm_web\": (scm_web + \"/%s\", \"scm_web\"),\n}\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom os import listdir\nfrom os.path import isdir, join\n\n# configure django to avoid the following exception:\n# django.core.exceptions.ImproperlyConfigured: Requested settings, but settings\n# are not configured. You must either define the environment variable\n# DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.\nfrom django.conf import settings\n\nsettings.configure()\n\n\nsource_dirs = [\n os.path.abspath(\"../opentelemetry-instrumentation/src/\"),\n]\n\nexp = \"../exporter\"\nexp_dirs = [\n os.path.abspath(\"/\".join([\"../exporter\", f, \"src\"]))\n for f in listdir(exp)\n if isdir(join(exp, f))\n]\n\ninstr = \"../instrumentation\"\ninstr_dirs = [\n os.path.abspath(\"/\".join([\"../instrumentation\", f, \"src\"]))\n for f in listdir(instr)\n if isdir(join(instr, f))\n]\n\nsys.path[:0] = source_dirs + exp_dirs + instr_dirs\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry Python\"\ncopyright = \"OpenTelemetry Authors\" # pylint: disable=redefined-builtin\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n # Support external links to different versions in the Github repo\n \"sphinx.ext.extlinks\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"opentracing\": (\n \"https://opentracing-python.readthedocs.io/en/latest/\",\n None,\n ),\n \"aiohttp\": (\"https://aiohttp.readthedocs.io/en/stable/\", None),\n \"wrapt\": (\"https://wrapt.readthedocs.io/en/latest/\", None),\n \"pymongo\": (\"https://pymongo.readthedocs.io/en/stable/\", None),\n}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\n# Sphinx does not recognize generic type TypeVars\n# Container supposedly were fixed, but does not work\n# https://github.com/sphinx-doc/sphinx/pull/3744\nnitpick_ignore = [\n (\"py:class\", \"ValueT\"),\n (\"py:class\", \"MetricT\"),\n # Even if wrapt is added to intersphinx_mapping, sphinx keeps failing\n # with \"class reference target not found: ObjectProxy\".\n (\"py:class\", \"ObjectProxy\"),\n # TODO: Understand why sphinx is not able to find this local class\n (\"py:class\", \"opentelemetry.trace.propagation.textmap.TextMapPropagator\",),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.extract\",\n ),\n (\n \"any\",\n \"opentelemetry.trace.propagation.textmap.TextMapPropagator.inject\",\n ),\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Support external links to specific versions of the files in the Github repo\nbranch = os.environ.get(\"READTHEDOCS_VERSION\")\nif branch is None or branch == \"latest\":\n branch = \"master\"\n\nREPO = \"open-telemetry/opentelemetry-python/\"\nscm_raw_web = \"https://raw.githubusercontent.com/\" + REPO + branch\nscm_web = \"https://github.com/\" + REPO + \"blob/\" + branch\n\n# Store variables in the epilogue so they are globally available.\nrst_epilog = \"\"\"\n.. |SCM_WEB| replace:: {s}\n.. |SCM_RAW_WEB| replace:: {sr}\n.. 
|SCM_BRANCH| replace:: {b}\n\"\"\".format(\n s=scm_web, sr=scm_raw_web, b=branch\n)\n\n# used to have links to repo files\nextlinks = {\n \"scm_raw_web\": (scm_raw_web + \"/%s\", \"scm_raw_web\"),\n \"scm_web\": (scm_web + \"/%s\", \"scm_web\"),\n}\n",
"path": "docs/conf.py"
}
] | diff --git a/docs-requirements.txt b/docs-requirements.txt
index e98a0d35dfb..a3168862770 100644
--- a/docs-requirements.txt
+++ b/docs-requirements.txt
@@ -2,6 +2,11 @@ sphinx~=2.4
sphinx-rtd-theme~=0.4
sphinx-autodoc-typehints~=1.10.2
+# Need to install the api/sdk in the venv for autodoc. Modifying sys.path
+# doesn't work for pkg_resources.
+./opentelemetry-api
+./opentelemetry-sdk
+
# Required by ext packages
asgiref~=3.0
asyncpg>=0.12.0
diff --git a/docs/conf.py b/docs/conf.py
index d15d8b2ed5b..68b871aaac2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -25,8 +25,6 @@
source_dirs = [
- os.path.abspath("../opentelemetry-api/src/"),
- os.path.abspath("../opentelemetry-sdk/src/"),
os.path.abspath("../opentelemetry-instrumentation/src/"),
]
|
HybirdCorp__creme_crm-431 | [WIP] fix python3.9.12 3.9.13 ci issue
| [
{
"content": "__version__ = '2.4-alpha1'\n\n# App registry hooking ---------------------------------------------------------\n\ntry:\n from django.apps.config import AppConfig\n from django.apps.registry import Apps\nexcept ImportError:\n # This error may appear with old versions of setuptools during installation\n import sys\n\n sys.stderr.write(\n 'Django is not installed ; '\n 'ignore this message if you are installing Creme.'\n )\nelse:\n AppConfig.all_apps_ready = lambda self: None\n\n _original_populate = Apps.populate\n\n def _hooked_populate(self, installed_apps=None):\n if self.ready:\n return\n\n if getattr(self, '_all_apps_ready', False):\n return\n\n _original_populate(self, installed_apps)\n\n with self._lock:\n if getattr(self, '_all_apps_ready', False):\n return\n\n for app_config in self.get_app_configs():\n app_config.all_apps_ready()\n\n self._all_apps_ready = True\n\n Apps.populate = _hooked_populate\n",
"path": "creme/__init__.py"
}
] | [
{
"content": "__version__ = '2.4-alpha1'\n\n\ndef get_version():\n return __version__\n\n\n# App registry hooking ---------------------------------------------------------\n\ntry:\n from django.apps.config import AppConfig\n from django.apps.registry import Apps\nexcept ImportError:\n # This error may appear with old versions of setuptools during installation\n import sys\n\n sys.stderr.write(\n 'Django is not installed ; '\n 'ignore this message if you are installing Creme.'\n )\nelse:\n AppConfig.all_apps_ready = lambda self: None\n\n _original_populate = Apps.populate\n\n def _hooked_populate(self, installed_apps=None):\n if self.ready:\n return\n\n if getattr(self, '_all_apps_ready', False):\n return\n\n _original_populate(self, installed_apps)\n\n with self._lock:\n if getattr(self, '_all_apps_ready', False):\n return\n\n for app_config in self.get_app_configs():\n app_config.all_apps_ready()\n\n self._all_apps_ready = True\n\n Apps.populate = _hooked_populate\n",
"path": "creme/__init__.py"
}
] | diff --git a/.circleci/config.yml b/.circleci/config.yml
index e1450abb5f..e63a241f5f 100755
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -7,6 +7,9 @@ parameters:
instance_directory:
type: string
default: 'creme_project'
+ pycache_prefix:
+ type: string
+ default: 'creme-crm-cache-v1'
commands:
wait-database:
@@ -31,29 +34,29 @@ commands:
- run: sudo apt install -y python3-dev graphviz libgraphviz-dev pkg-config
install-py-dev-env:
- parameters:
- python:
- type: string
steps:
- - run: which <<parameters.python>>
+ - run: which python
+ - run: python --version
+ - run: python --version > /tmp/python_version
+ - run: cat /tmp/python_version
+
+ - run: cd << pipeline.parameters.source_path >> && python -c "import creme; print(creme.get_version())" > /tmp/creme_version
+ - run: cat /tmp/creme_version
- restore_cache:
keys:
- - << parameters.python >>-creme_crm-v2.4-{{ checksum "<< pipeline.parameters.source_path >>/setup.cfg" }}
- - << parameters.python >>-creme_crm-v2.4
+ - << pipeline.parameters.pycache_prefix >>-{{ checksum "/tmp/python_version" }}-{{ checksum "/tmp/creme_version" }}-{{ checksum "<< pipeline.parameters.source_path >>/setup.cfg" }}
- - run: <<parameters.python>> -m venv ~/venv
+ - run: python -m venv ~/venv
# Require setuptools v46.4.0 at least
- run: ~/venv/bin/pip install -U pip setuptools
- run: echo "source ~/venv/bin/activate" >> $BASH_ENV
- - run: which python
- run: pip install -U -e << pipeline.parameters.source_path >>[dev,mysql,pgsql,graphs]
- - run: python --version
- run: pip freeze
- run: pip list --outdated
- save_cache:
- key: << parameters.python >>-creme_crm-v2.4-{{ checksum "<< pipeline.parameters.source_path >>/setup.cfg" }}
+ key: << pipeline.parameters.pycache_prefix >>-{{ checksum "/tmp/python_version" }}-{{ checksum "/tmp/creme_version" }}-{{ checksum "<< pipeline.parameters.source_path >>/setup.cfg" }}
paths: "~/venv"
install-node-env:
@@ -158,8 +161,7 @@ jobs:
steps:
- checkout-creme
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- run: make -C << pipeline.parameters.source_path >> isort-check
@@ -169,8 +171,7 @@ jobs:
steps:
- checkout-creme
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- run: flake8 << pipeline.parameters.source_path >>/creme/ --config << pipeline.parameters.source_path >>/setup.cfg
@@ -191,8 +192,7 @@ jobs:
port: 3306
- install-creme-system-packages
- run: sudo apt install -y mariadb-client
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests:
local_settings: 'mysql_settings'
@@ -216,8 +216,7 @@ jobs:
- wait-database:
port: 5432
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests:
local_settings: 'pgsql_settings'
@@ -234,8 +233,7 @@ jobs:
language: fr_FR
encoding: UTF-8
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests
- run-creme-unit-tests
@@ -258,8 +256,7 @@ jobs:
- wait-database:
port: 5432
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests:
local_settings: 'pgsql_settings'
@@ -276,8 +273,7 @@ jobs:
language: fr_FR
encoding: UTF-8
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.8"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests
- run-creme-unit-tests
@@ -293,8 +289,7 @@ jobs:
language: fr_FR
encoding: UTF-8
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.9"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests
- run-creme-unit-tests
@@ -310,8 +305,7 @@ jobs:
language: fr_FR
encoding: UTF-8
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.10"
+ - install-py-dev-env
- create-creme-project
- setup-creme-unit-tests
- run-creme-unit-tests
@@ -323,8 +317,7 @@ jobs:
steps:
- checkout-creme
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-statics
- install-node-env
@@ -361,8 +354,7 @@ jobs:
- browser-tools/install-browser-tools
- checkout-creme
- install-creme-system-packages
- - install-py-dev-env:
- python: "python3.7"
+ - install-py-dev-env
- create-creme-project
- setup-creme-statics
- install-node-env
diff --git a/creme/__init__.py b/creme/__init__.py
index 8e78936fce..0cf661a023 100644
--- a/creme/__init__.py
+++ b/creme/__init__.py
@@ -1,5 +1,10 @@
__version__ = '2.4-alpha1'
+
+def get_version():
+ return __version__
+
+
# App registry hooking ---------------------------------------------------------
try:
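Adding `get_version()` gives the CI a callable entry point for the version string, which the reworked config above checksums into the pip cache key. A rough reconstruction of how such a key could be assembled (the key layout mirrors the `pycache_prefix` parameter and is an assumption, not project code):

```python
import sys

import creme

# Hypothetical reassembly of the CircleCI cache key:
# prefix + Python version + creme version; the real config also
# appends a checksum of setup.cfg.
key = "creme-crm-cache-v1-{0}.{1}-{2}".format(
    sys.version_info.major, sys.version_info.minor, creme.get_version()
)
print(key)  # e.g. creme-crm-cache-v1-3.9-2.4-alpha1
```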
|
kornia__kornia-2476 | Next release?
## 🚀 Feature
Hi, when will the next kornia release on conda or pypi be?
## Motivation
The last conda release was in April, and new features have landed since then, but they are unavailable in wheels.
| [
{
"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.6.13-dev\"\n",
"path": "kornia/__init__.py"
}
] | [
{
"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.7.0\"\n",
"path": "kornia/__init__.py"
}
] | diff --git a/kornia/__init__.py b/kornia/__init__.py
index b9f6844940..3499c3d694 100644
--- a/kornia/__init__.py
+++ b/kornia/__init__.py
@@ -13,4 +13,4 @@
from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image
# Version variable
-__version__ = "0.6.13-dev"
+__version__ = "0.7.0"
diff --git a/test/feature/test_matching.py b/test/feature/test_matching.py
index 209d57f85a..25c15c7e08 100644
--- a/test/feature/test_matching.py
+++ b/test/feature/test_matching.py
@@ -468,8 +468,11 @@ def test_real(self, device, dtype, data):
assert idxs.shape[0] == dists.shape[0]
assert dists.shape[0] <= data_dev['descs1'].shape[0]
assert dists.shape[0] <= data_dev['descs2'].shape[0]
- expected_idxs = data_dev['lightglue_disk_idxs'].long()
- assert_close(idxs, expected_idxs, rtol=1e-4, atol=1e-4)
+ # key doesn't exist in data_dev
+ # expected_idxs = data_dev['lightglue_disk_idxs'].long()
+ data_dev['expected_idxs'].long()
+ # TODO: fix this
+ # assert_close(idxs, expected_idxs, rtol=1e-4, atol=1e-4)
@pytest.mark.parametrize("data", ["lightglue_idxs"], indirect=True)
def test_single_nocrash(self, device, dtype, data):
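For anyone tracking the release question in the issue, a quick runtime check (assuming kornia and its dependencies are installed) shows which release is in the current environment; after this bump it prints the new version:

```python
import kornia

# Prints "0.7.0" once the release above is installed,
# or "0.6.13-dev" for a pre-release checkout.
print(kornia.__version__)
```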
|
imAsparky__django-cookiecutter-251 | [BUG]: Initial commit message is bounded by "". Remove the quotation marks
| [
{
"content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\nfrom shutil import rmtree\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n\nREMOVE_FILES = [\n '{% if cookiecutter.use_pyup_io == \"n\" %} \\\n .pyup.yml {% endif %}',\n '{% if cookiecutter.include_sphinx_docs == \"n\" %} \\\n docs {% endif %}',\n '{% if cookiecutter.use_readthedocs == \"n\" %} \\\n .readthedocs.yaml {% endif %}',\n '{% if cookiecutter.include_contributor_covenant_code_of_conduct == \"n\" %} \\\n docs/source/code-of-conduct.rst {% endif %}',\n '{% if cookiecutter.include_documentation_templates == \"n\" %} \\\n docs/source/doc-templates {% endif %}',\n '{% if cookiecutter.include_how_to_contribute_template == \"n\" %} \\\n docs/source/how-tos/how-to-contribute.rst {% endif %}',\n '{% if cookiecutter.open_source_license == \"Not open source\" %} \\\n LICENSE.rst {% endif %}',\n '{% if cookiecutter.create_conventional_commits_edit_message == \"n\" %} \\\n .github/.git-commit-template.txt {% endif %}',\n '{% if cookiecutter.use_pre_commit == \"n\" %} \\\n .pre-commit-config.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n CHANGELOG.md {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/semantic.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/workflows/semantic_release.yaml {% endif %}',\n '{% if cookiecutter.create_repo_auto_test_workflow == \"n\" %} \\\n .github/workflows/test_contribution.yaml {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"n\" %} \\\n .github/ISSUE_TEMPLATE {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"y\" %} \\\n .github/ISSUE_TEMPLATE.md {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n Dockerfile {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n .dockerignore {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n compose {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n docker-entrypoint.sh {% endif %}',\n]\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef remove_files(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n\n for path in filepath:\n path = path.strip()\n if path and os.path.exists(path):\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.unlink(path)\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n supress_exception=True,\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"branch\",\n \"-M\",\n 
\"main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n '\"chore(git): Initial Commit\"',\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n remove_files(REMOVE_FILES)\n\n # Git options\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n",
"path": "hooks/post_gen_project.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\"\"\"django-cookiecutter post project generation jobs.\"\"\"\nimport os\nimport subprocess # nosec\nfrom shutil import rmtree\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\nREMOTE_REPO = \"[email protected]:{{cookiecutter.github_username}}/\\\n{{cookiecutter.git_project_name}}.git\"\n\n\nGIT_USER = \"{{cookiecutter.author_name}}\"\nGIT_EMAIL = \"{{cookiecutter.github_user_email}}\"\n\n\nREMOVE_FILES = [\n '{% if cookiecutter.use_pyup_io == \"n\" %} \\\n .pyup.yml {% endif %}',\n '{% if cookiecutter.include_sphinx_docs == \"n\" %} \\\n docs {% endif %}',\n '{% if cookiecutter.use_readthedocs == \"n\" %} \\\n .readthedocs.yaml {% endif %}',\n '{% if cookiecutter.include_contributor_covenant_code_of_conduct == \"n\" %} \\\n docs/source/code-of-conduct.rst {% endif %}',\n '{% if cookiecutter.include_documentation_templates == \"n\" %} \\\n docs/source/doc-templates {% endif %}',\n '{% if cookiecutter.include_how_to_contribute_template == \"n\" %} \\\n docs/source/how-tos/how-to-contribute.rst {% endif %}',\n '{% if cookiecutter.open_source_license == \"Not open source\" %} \\\n LICENSE.rst {% endif %}',\n '{% if cookiecutter.create_conventional_commits_edit_message == \"n\" %} \\\n .github/.git-commit-template.txt {% endif %}',\n '{% if cookiecutter.use_pre_commit == \"n\" %} \\\n .pre-commit-config.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n CHANGELOG.md {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/semantic.yaml {% endif %}',\n '{% if cookiecutter.use_GH_action_semantic_version == \"n\" %} \\\n .github/workflows/semantic_release.yaml {% endif %}',\n '{% if cookiecutter.create_repo_auto_test_workflow == \"n\" %} \\\n .github/workflows/test_contribution.yaml {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"n\" %} \\\n .github/ISSUE_TEMPLATE {% endif %}',\n '{% if cookiecutter.use_GH_custom_issue_templates == \"y\" %} \\\n .github/ISSUE_TEMPLATE.md {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n Dockerfile {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n .dockerignore {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n compose {% endif %}',\n '{% if cookiecutter.deploy_with_docker == \"n\" %} \\\n docker-entrypoint.sh {% endif %}',\n]\n\n# Helper functions\n\n\ndef post_gen_setup(*args, supress_exception=False, cwd=None):\n \"\"\"Helper to set up the Django project with the chosen options.\"\"\"\n cur_dir = os.getcwd()\n\n try:\n if cwd:\n os.chdir(cwd)\n\n with subprocess.Popen( # nosec\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n ) as proc:\n\n out, err = proc.communicate()\n out = out.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n if err and not supress_exception:\n raise Exception(err)\n if err and supress_exception:\n return out\n\n return out\n\n finally:\n os.chdir(cur_dir)\n\n\ndef remove_files(filepath):\n \"\"\"Remove files not required for this generated Django project.\"\"\"\n\n for path in filepath:\n path = path.strip()\n if path and os.path.exists(path):\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.unlink(path)\n\n\n# Git functions\n\n\ndef init_git():\n \"\"\"Initialise git repository and set the remote.\"\"\"\n if not os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"init\",\n supress_exception=True,\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"branch\",\n \"-M\",\n 
\"main\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"remote\",\n \"add\",\n \"origin\",\n REMOTE_REPO,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.name\",\n GIT_USER,\n cwd=PROJECT_DIRECTORY,\n )\n post_gen_setup(\n \"git\",\n \"config\",\n \"user.email\",\n GIT_EMAIL,\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_add_and_commit_initial():\n \"\"\"Add the local files and commit to the git repository.\"\"\"\n post_gen_setup(\n \"git\",\n \"add\",\n \"-A\",\n cwd=PROJECT_DIRECTORY,\n )\n\n post_gen_setup(\n \"git\",\n \"commit\",\n \"-m\",\n \"chore(git): Initial Commit\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\ndef git_configure_custom_commit_message():\n \"\"\"Configure git to use the custom commit message template.\"\"\"\n if os.path.exists(os.path.join(PROJECT_DIRECTORY, \".git\")):\n post_gen_setup(\n \"git\",\n \"config\",\n \"--local\",\n \"commit.template\",\n \".github/.git-commit-template.txt\",\n cwd=PROJECT_DIRECTORY,\n )\n\n\nif __name__ == \"__main__\":\n\n remove_files(REMOVE_FILES)\n\n # Git options\n\n if \"{{ cookiecutter.automatic_set_up_git_and_initial_commit }}\" == \"y\":\n init_git()\n git_add_and_commit_initial()\n\n if \"{{ cookiecutter.create_conventional_commits_edit_message}}\" == \"y\":\n git_configure_custom_commit_message()\n",
"path": "hooks/post_gen_project.py"
}
] | diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py
index fb39b27c..debd7c1e 100644
--- a/hooks/post_gen_project.py
+++ b/hooks/post_gen_project.py
@@ -154,7 +154,7 @@ def git_add_and_commit_initial():
"git",
"commit",
"-m",
- '"chore(git): Initial Commit"',
+ "chore(git): Initial Commit",
cwd=PROJECT_DIRECTORY,
)
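The mechanism behind the fix: when `subprocess` receives the arguments as a list, the program is executed directly with no shell in between, so shell-style quoting inside an argument is delivered literally and ends up in the commit message. A self-contained sketch demonstrating this (it only echoes an argument back, so no git repository is needed):

```python
import subprocess
import sys

# List-form arguments bypass the shell; embedded quotes are not
# stripped but become part of the argument itself.
out = subprocess.run(
    [sys.executable, "-c", "import sys; print(sys.argv[1])", '"quoted"'],
    capture_output=True,
    text=True,
).stdout
print(out)  # -> "quoted"  (the quotation marks survive intact)
```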
|
microsoft__ptvsd-1923 | test_flask_breakpoint_multiproc fails with run=attach_by_socket
Python 3.7, Win32.
Logs are unclear - needs investigation.
| [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport runpy\nimport sys\n\n# ptvsd.__main__ should have preloaded pydevd properly before importing this module.\n# Otherwise, some stdlib modules above might have had imported threading before pydevd\n# could perform the necessary detours in it.\nassert \"pydevd\" in sys.modules\nimport pydevd\n\nimport ptvsd\nfrom ptvsd.common import compat, fmt, log, options as common_opts\nfrom ptvsd.server import options\n\n\nTARGET = \"<filename> | -m <module> | -c <code> | --pid <pid>\"\n\nHELP = \"\"\"ptvsd {0}\nSee https://aka.ms/ptvsd for documentation.\n\nUsage: ptvsd [--client] --host <address> [--port <port>]\n [--wait]\n [--no-subprocesses]\n [--log-dir <path>] [--log-stderr]\n {1}\n\"\"\".format(\n ptvsd.__version__, TARGET\n)\n\n\ndef in_range(parser, start, stop):\n def parse(s):\n n = parser(s)\n if start is not None and n < start:\n raise ValueError(fmt(\"must be >= {0}\", start))\n if stop is not None and n >= stop:\n raise ValueError(fmt(\"must be < {0}\", stop))\n return n\n\n return parse\n\n\nport = in_range(int, 0, 2 ** 16)\n\npid = in_range(int, 0, None)\n\n\ndef print_help_and_exit(switch, it):\n print(HELP, file=sys.stderr)\n sys.exit(0)\n\n\ndef print_version_and_exit(switch, it):\n print(ptvsd.__version__)\n sys.exit(0)\n\n\ndef set_arg(varname, parser=(lambda x: x), options=options):\n def action(arg, it):\n value = parser(next(it))\n setattr(options, varname, value)\n\n return action\n\n\ndef set_const(varname, value, options=options):\n def do(arg, it):\n setattr(options, varname, value)\n\n return do\n\n\ndef set_log_stderr():\n def do(arg, it):\n log.stderr.levels |= set(log.LEVELS)\n\n return do\n\n\ndef set_target(kind, parser=(lambda x: x), positional=False):\n def do(arg, it):\n options.target_kind = kind\n options.target = parser(arg if positional else next(it))\n\n return do\n\n\n# fmt: off\nswitches = [\n # Switch Placeholder Action Required\n # ====== =========== ====== ========\n\n # Switches that are documented for use by end users.\n ((\"-?\", \"-h\", \"--help\"), None, print_help_and_exit, False),\n ((\"-V\", \"--version\"), None, print_version_and_exit, False),\n (\"--client\", None, set_const(\"client\", True), False),\n (\"--host\", \"<address>\", set_arg(\"host\"), True),\n (\"--port\", \"<port>\", set_arg(\"port\", port), False),\n (\"--wait\", None, set_const(\"wait\", True), False),\n (\"--no-subprocesses\", None, set_const(\"multiprocess\", False), False),\n (\"--log-dir\", \"<path>\", set_arg(\"log_dir\", options=common_opts), False),\n (\"--log-stderr\", None, set_log_stderr(), False),\n\n # Switches that are used internally by the IDE or ptvsd itself.\n (\"--subprocess-of\", \"<pid>\", set_arg(\"subprocess_of\", pid), False),\n (\"--subprocess-notify\", \"<port>\", set_arg(\"subprocess_notify\", port), False),\n\n # Targets. The \"\" entry corresponds to positional command line arguments,\n # i.e. 
the ones not preceded by any switch name.\n (\"\", \"<filename>\", set_target(\"file\", positional=True), False),\n (\"-m\", \"<module>\", set_target(\"module\"), False),\n (\"-c\", \"<code>\", set_target(\"code\"), False),\n (\"--pid\", \"<pid>\", set_target(\"pid\", pid), False),\n]\n# fmt: on\n\n\ndef parse(args):\n seen = set()\n it = (compat.filename(arg) for arg in args)\n\n while True:\n try:\n arg = next(it)\n except StopIteration:\n raise ValueError(\"missing target: \" + TARGET)\n\n switch = arg if arg.startswith(\"-\") else \"\"\n for i, (sw, placeholder, action, _) in enumerate(switches):\n if not isinstance(sw, tuple):\n sw = (sw,)\n if switch in sw:\n break\n else:\n raise ValueError(\"unrecognized switch \" + switch)\n\n if i in seen:\n raise ValueError(\"duplicate switch \" + switch)\n else:\n seen.add(i)\n\n try:\n action(arg, it)\n except StopIteration:\n assert placeholder is not None\n raise ValueError(fmt(\"{0}: missing {1}\", switch, placeholder))\n except Exception as exc:\n raise ValueError(fmt(\"invalid {0} {1}: {2}\", switch, placeholder, exc))\n\n if options.target is not None:\n break\n\n for i, (sw, placeholder, _, required) in enumerate(switches):\n if not required or i in seen:\n continue\n if isinstance(sw, tuple):\n sw = sw[0]\n message = fmt(\"missing required {0}\", sw)\n if placeholder is not None:\n message += \" \" + placeholder\n raise ValueError(message)\n\n if options.target_kind == \"pid\" and options.wait:\n raise ValueError(\"--pid does not support --wait\")\n\n return it\n\n\ndef setup_debug_server(argv_0):\n # We need to set up sys.argv[0] before invoking attach() or enable_attach(),\n # because they use it to report the \"process\" event. Thus, we can't rely on\n # run_path() and run_module() doing that, even though they will eventually.\n sys.argv[0] = compat.filename(argv_0)\n log.debug(\"sys.argv after patching: {0!r}\", sys.argv)\n\n debug = ptvsd.attach if options.client else ptvsd.enable_attach\n debug(address=options, multiprocess=options)\n\n if options.wait:\n ptvsd.wait_for_attach()\n\n\ndef run_file():\n setup_debug_server(options.target)\n\n # run_path has one difference with invoking Python from command-line:\n # if the target is a file (rather than a directory), it does not add its\n # parent directory to sys.path. Thus, importing other modules from the\n # same directory is broken unless sys.path is patched here.\n if os.path.isfile(options.target):\n dir = os.path.dirname(options.target)\n sys.path.insert(0, dir)\n else:\n log.debug(\"Not a file: {0!j}\", options.target)\n\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running file {0!j}\", options.target)\n runpy.run_path(options.target, run_name=\"__main__\")\n\n\ndef run_module():\n # Add current directory to path, like Python itself does for -m. This must\n # be in place before trying to use find_spec below to resolve submodules.\n sys.path.insert(0, \"\")\n\n # We want to do the same thing that run_module() would do here, without\n # actually invoking it. 
On Python 3, it's exposed as a public API, but\n # on Python 2, we have to invoke a private function in runpy for this.\n # Either way, if it fails to resolve for any reason, just leave argv as is.\n argv_0 = sys.argv[0]\n try:\n if sys.version_info >= (3,):\n from importlib.util import find_spec\n\n spec = find_spec(options.target)\n if spec is not None:\n argv_0 = spec.origin\n else:\n _, _, _, argv_0 = runpy._get_module_details(options.target)\n except Exception:\n log.exception(\"Error determining module path for sys.argv\")\n\n setup_debug_server(argv_0)\n\n # On Python 2, module name must be a non-Unicode string, because it ends up\n # a part of module's __package__, and Python will refuse to run the module\n # if __package__ is Unicode.\n target = (\n compat.filename_bytes(options.target)\n if sys.version_info < (3,)\n else options.target\n )\n\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running module {0!r}\", target)\n\n # Docs say that runpy.run_module is equivalent to -m, but it's not actually\n # the case for packages - -m sets __name__ to \"__main__\", but run_module sets\n # it to \"pkg.__main__\". This breaks everything that uses the standard pattern\n # __name__ == \"__main__\" to detect being run as a CLI app. On the other hand,\n # runpy._run_module_as_main is a private function that actually implements -m.\n try:\n run_module_as_main = runpy._run_module_as_main\n except AttributeError:\n log.warning(\"runpy._run_module_as_main is missing, falling back to run_module.\")\n runpy.run_module(target, alter_sys=True)\n else:\n run_module_as_main(target, alter_argv=True)\n\n\ndef run_code():\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running code:\\n\\n{0}\", options.target)\n\n # Add current directory to path, like Python itself does for -c.\n sys.path.insert(0, \"\")\n code = compile(options.target, \"<string>\", \"exec\")\n\n setup_debug_server(\"-c\")\n eval(code, {})\n\n\ndef attach_to_pid():\n log.info(\"Attaching to process with PID={0}\", options.target)\n\n pid = options.target\n host = options.host\n port = options.port\n client = options.client\n log_dir = common_opts.log_dir\n if log_dir is None:\n log_dir = \"\"\n\n try:\n attach_pid_injected_dirname = os.path.join(\n os.path.dirname(ptvsd.__file__), \"server\"\n )\n assert os.path.exists(attach_pid_injected_dirname)\n\n log_dir = log_dir.replace(\"\\\\\", \"/\")\n\n encode = lambda s: list(bytearray(s.encode(\"utf-8\")))\n setup = {\n \"script\": encode(attach_pid_injected_dirname),\n \"host\": encode(host),\n \"port\": port,\n \"client\": client,\n \"log_dir\": encode(log_dir),\n }\n\n python_code = \"\"\"\nimport sys;\nimport codecs;\ndecode = lambda s: codecs.utf_8_decode(bytearray(s))[0];\nscript_path = decode({script});\nsys.path.insert(0, script_path);\nimport attach_pid_injected;\nsys.path.remove(script_path);\nhost = decode({host});\nlog_dir = decode({log_dir}) or None;\nattach_pid_injected.attach(port={port}, host=host, client={client}, log_dir=log_dir)\n\"\"\"\n python_code = python_code.replace(\"\\r\", \"\").replace(\"\\n\", \"\").format(**setup)\n log.info(\"Code to be injected: \\n{0}\", python_code.replace(\";\", \";\\n\"))\n\n # pydevd restriction on characters in injected code.\n assert not (\n {'\"', \"'\", \"\\r\", \"\\n\"} & set(python_code)\n ), \"Injected code should not contain any single quotes, double quots, or newlines.\"\n\n pydevd_attach_to_process_path = os.path.join(\n os.path.dirname(pydevd.__file__), \"pydevd_attach_to_process\"\n 
)\n\n assert os.path.exists(pydevd_attach_to_process_path)\n sys.path.append(pydevd_attach_to_process_path)\n\n import add_code_to_python_process # noqa\n\n show_debug_info_on_target_process = 0 # hard-coded (1 to debug)\n log.info(\"Injecting code into process with PID={0} ...\", pid)\n add_code_to_python_process.run_python_code(\n pid,\n python_code,\n connect_debugger_tracing=True,\n show_debug_info=show_debug_info_on_target_process,\n )\n except Exception:\n raise log.exception(\"Code injection into PID={0} failed:\", pid)\n log.info(\"Code injection into PID={0} completed.\", pid)\n\n\ndef main():\n original_argv = sys.argv\n try:\n sys.argv[1:] = parse(sys.argv[1:])\n except Exception as ex:\n print(HELP + \"\\nError: \" + str(ex), file=sys.stderr)\n sys.exit(2)\n\n log.to_file(prefix=\"ptvsd.server\")\n log.describe_environment(\"ptvsd.server startup environment:\")\n log.info(\n \"sys.argv before parsing: {0!r}\\n\" \" after parsing: {1!r}\",\n original_argv,\n sys.argv,\n )\n\n try:\n run = {\n \"file\": run_file,\n \"module\": run_module,\n \"code\": run_code,\n \"pid\": attach_to_pid,\n }[options.target_kind]\n run()\n except SystemExit as ex:\n log.exception(\"Debuggee exited via SystemExit: {0!r}\", ex.code, level=\"debug\")\n raise\n",
"path": "src/ptvsd/server/cli.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport runpy\nimport sys\n\n# ptvsd.__main__ should have preloaded pydevd properly before importing this module.\n# Otherwise, some stdlib modules above might have had imported threading before pydevd\n# could perform the necessary detours in it.\nassert \"pydevd\" in sys.modules\nimport pydevd\n\nimport ptvsd\nfrom ptvsd.common import compat, fmt, log, options as common_opts\nfrom ptvsd.server import options\n\n\nTARGET = \"<filename> | -m <module> | -c <code> | --pid <pid>\"\n\nHELP = \"\"\"ptvsd {0}\nSee https://aka.ms/ptvsd for documentation.\n\nUsage: ptvsd [--client] --host <address> [--port <port>]\n [--wait]\n [--no-subprocesses]\n [--log-dir <path>] [--log-stderr]\n {1}\n\"\"\".format(\n ptvsd.__version__, TARGET\n)\n\n\ndef in_range(parser, start, stop):\n def parse(s):\n n = parser(s)\n if start is not None and n < start:\n raise ValueError(fmt(\"must be >= {0}\", start))\n if stop is not None and n >= stop:\n raise ValueError(fmt(\"must be < {0}\", stop))\n return n\n\n return parse\n\n\nport = in_range(int, 0, 2 ** 16)\n\npid = in_range(int, 0, None)\n\n\ndef print_help_and_exit(switch, it):\n print(HELP, file=sys.stderr)\n sys.exit(0)\n\n\ndef print_version_and_exit(switch, it):\n print(ptvsd.__version__)\n sys.exit(0)\n\n\ndef set_arg(varname, parser=(lambda x: x), options=options):\n def action(arg, it):\n value = parser(next(it))\n setattr(options, varname, value)\n\n return action\n\n\ndef set_const(varname, value, options=options):\n def do(arg, it):\n setattr(options, varname, value)\n\n return do\n\n\ndef set_log_stderr():\n def do(arg, it):\n log.stderr.levels |= set(log.LEVELS)\n\n return do\n\n\ndef set_target(kind, parser=(lambda x: x), positional=False):\n def do(arg, it):\n options.target_kind = kind\n options.target = parser(arg if positional else next(it))\n\n return do\n\n\n# fmt: off\nswitches = [\n # Switch Placeholder Action Required\n # ====== =========== ====== ========\n\n # Switches that are documented for use by end users.\n ((\"-?\", \"-h\", \"--help\"), None, print_help_and_exit, False),\n ((\"-V\", \"--version\"), None, print_version_and_exit, False),\n (\"--client\", None, set_const(\"client\", True), False),\n (\"--host\", \"<address>\", set_arg(\"host\"), True),\n (\"--port\", \"<port>\", set_arg(\"port\", port), False),\n (\"--wait\", None, set_const(\"wait\", True), False),\n (\"--no-subprocesses\", None, set_const(\"multiprocess\", False), False),\n (\"--log-dir\", \"<path>\", set_arg(\"log_dir\", options=common_opts), False),\n (\"--log-stderr\", None, set_log_stderr(), False),\n\n # Switches that are used internally by the IDE or ptvsd itself.\n (\"--subprocess-of\", \"<pid>\", set_arg(\"subprocess_of\", pid), False),\n (\"--subprocess-notify\", \"<port>\", set_arg(\"subprocess_notify\", port), False),\n\n # Targets. The \"\" entry corresponds to positional command line arguments,\n # i.e. 
the ones not preceded by any switch name.\n (\"\", \"<filename>\", set_target(\"file\", positional=True), False),\n (\"-m\", \"<module>\", set_target(\"module\"), False),\n (\"-c\", \"<code>\", set_target(\"code\"), False),\n (\"--pid\", \"<pid>\", set_target(\"pid\", pid), False),\n]\n# fmt: on\n\n\ndef parse(args):\n seen = set()\n it = (compat.filename(arg) for arg in args)\n\n while True:\n try:\n arg = next(it)\n except StopIteration:\n raise ValueError(\"missing target: \" + TARGET)\n\n switch = arg if arg.startswith(\"-\") else \"\"\n for i, (sw, placeholder, action, _) in enumerate(switches):\n if not isinstance(sw, tuple):\n sw = (sw,)\n if switch in sw:\n break\n else:\n raise ValueError(\"unrecognized switch \" + switch)\n\n if i in seen:\n raise ValueError(\"duplicate switch \" + switch)\n else:\n seen.add(i)\n\n try:\n action(arg, it)\n except StopIteration:\n assert placeholder is not None\n raise ValueError(fmt(\"{0}: missing {1}\", switch, placeholder))\n except Exception as exc:\n raise ValueError(fmt(\"invalid {0} {1}: {2}\", switch, placeholder, exc))\n\n if options.target is not None:\n break\n\n for i, (sw, placeholder, _, required) in enumerate(switches):\n if not required or i in seen:\n continue\n if isinstance(sw, tuple):\n sw = sw[0]\n message = fmt(\"missing required {0}\", sw)\n if placeholder is not None:\n message += \" \" + placeholder\n raise ValueError(message)\n\n if options.target_kind == \"pid\" and options.wait:\n raise ValueError(\"--pid does not support --wait\")\n\n return it\n\n\ndef setup_debug_server(argv_0):\n # We need to set up sys.argv[0] before invoking attach() or enable_attach(),\n # because they use it to report the \"process\" event. Thus, we can't rely on\n # run_path() and run_module() doing that, even though they will eventually.\n sys.argv[0] = compat.filename(argv_0)\n log.debug(\"sys.argv after patching: {0!r}\", sys.argv)\n\n debug = ptvsd.attach if options.client else ptvsd.enable_attach\n debug(address=options, multiprocess=options)\n\n if options.wait:\n ptvsd.wait_for_attach()\n\n\ndef run_file():\n setup_debug_server(options.target)\n\n # run_path has one difference with invoking Python from command-line:\n # if the target is a file (rather than a directory), it does not add its\n # parent directory to sys.path. Thus, importing other modules from the\n # same directory is broken unless sys.path is patched here.\n if os.path.isfile(options.target):\n dir = os.path.dirname(options.target)\n sys.path.insert(0, dir)\n else:\n log.debug(\"Not a file: {0!j}\", options.target)\n\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running file {0!j}\", options.target)\n runpy.run_path(options.target, run_name=\"__main__\")\n\n\ndef run_module():\n # Add current directory to path, like Python itself does for -m. This must\n # be in place before trying to use find_spec below to resolve submodules.\n sys.path.insert(0, \"\")\n\n # We want to do the same thing that run_module() would do here, without\n # actually invoking it. 
On Python 3, it's exposed as a public API, but\n # on Python 2, we have to invoke a private function in runpy for this.\n # Either way, if it fails to resolve for any reason, just leave argv as is.\n argv_0 = sys.argv[0]\n try:\n if sys.version_info >= (3,):\n from importlib.util import find_spec\n\n spec = find_spec(options.target)\n if spec is not None:\n argv_0 = spec.origin\n else:\n _, _, _, argv_0 = runpy._get_module_details(options.target)\n except Exception:\n log.exception(\"Error determining module path for sys.argv\")\n\n setup_debug_server(argv_0)\n\n # On Python 2, module name must be a non-Unicode string, because it ends up\n # a part of module's __package__, and Python will refuse to run the module\n # if __package__ is Unicode.\n target = (\n compat.filename_bytes(options.target)\n if sys.version_info < (3,)\n else options.target\n )\n\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running module {0!r}\", target)\n\n # Docs say that runpy.run_module is equivalent to -m, but it's not actually\n # the case for packages - -m sets __name__ to \"__main__\", but run_module sets\n # it to \"pkg.__main__\". This breaks everything that uses the standard pattern\n # __name__ == \"__main__\" to detect being run as a CLI app. On the other hand,\n # runpy._run_module_as_main is a private function that actually implements -m.\n try:\n run_module_as_main = runpy._run_module_as_main\n except AttributeError:\n log.warning(\"runpy._run_module_as_main is missing, falling back to run_module.\")\n runpy.run_module(target, alter_sys=True)\n else:\n run_module_as_main(target, alter_argv=True)\n\n\ndef run_code():\n log.describe_environment(\"Pre-launch environment:\")\n log.info(\"Running code:\\n\\n{0}\", options.target)\n\n # Add current directory to path, like Python itself does for -c.\n sys.path.insert(0, \"\")\n code = compile(options.target, \"<string>\", \"exec\")\n\n setup_debug_server(\"-c\")\n eval(code, {})\n\n\ndef attach_to_pid():\n log.info(\"Attaching to process with PID={0}\", options.target)\n\n pid = options.target\n host = options.host\n port = options.port\n client = options.client\n log_dir = common_opts.log_dir\n if log_dir is None:\n log_dir = \"\"\n\n try:\n attach_pid_injected_dirname = os.path.join(\n os.path.dirname(ptvsd.__file__), \"server\"\n )\n assert os.path.exists(attach_pid_injected_dirname)\n\n log_dir = log_dir.replace(\"\\\\\", \"/\")\n\n encode = lambda s: list(bytearray(s.encode(\"utf-8\")))\n setup = {\n \"script\": encode(attach_pid_injected_dirname),\n \"host\": encode(host),\n \"port\": port,\n \"client\": client,\n \"log_dir\": encode(log_dir),\n }\n\n python_code = \"\"\"\nimport sys;\nimport codecs;\ndecode = lambda s: codecs.utf_8_decode(bytearray(s))[0];\nscript_path = decode({script});\nsys.path.insert(0, script_path);\nimport attach_pid_injected;\nsys.path.remove(script_path);\nhost = decode({host});\nlog_dir = decode({log_dir}) or None;\nattach_pid_injected.attach(port={port}, host=host, client={client}, log_dir=log_dir)\n\"\"\"\n python_code = python_code.replace(\"\\r\", \"\").replace(\"\\n\", \"\").format(**setup)\n log.info(\"Code to be injected: \\n{0}\", python_code.replace(\";\", \";\\n\"))\n\n # pydevd restriction on characters in injected code.\n assert not (\n {'\"', \"'\", \"\\r\", \"\\n\"} & set(python_code)\n ), \"Injected code should not contain any single quotes, double quots, or newlines.\"\n\n pydevd_attach_to_process_path = os.path.join(\n os.path.dirname(pydevd.__file__), \"pydevd_attach_to_process\"\n 
)\n\n assert os.path.exists(pydevd_attach_to_process_path)\n sys.path.append(pydevd_attach_to_process_path)\n\n import add_code_to_python_process # noqa\n\n show_debug_info_on_target_process = 0 # hard-coded (1 to debug)\n log.info(\"Injecting code into process with PID={0} ...\", pid)\n add_code_to_python_process.run_python_code(\n pid,\n python_code,\n connect_debugger_tracing=True,\n show_debug_info=show_debug_info_on_target_process,\n )\n except Exception:\n raise log.exception(\"Code injection into PID={0} failed:\", pid)\n log.info(\"Code injection into PID={0} completed.\", pid)\n\n\ndef main():\n original_argv = list(sys.argv)\n try:\n sys.argv[1:] = parse(sys.argv[1:])\n except Exception as ex:\n print(HELP + \"\\nError: \" + str(ex), file=sys.stderr)\n sys.exit(2)\n\n log.to_file(prefix=\"ptvsd.server\")\n log.describe_environment(\"ptvsd.server startup environment:\")\n log.info(\n \"sys.argv before parsing: {0!r}\\n\" \" after parsing: {1!r}\",\n original_argv,\n sys.argv,\n )\n\n try:\n run = {\n \"file\": run_file,\n \"module\": run_module,\n \"code\": run_code,\n \"pid\": attach_to_pid,\n }[options.target_kind]\n run()\n except SystemExit as ex:\n log.exception(\"Debuggee exited via SystemExit: {0!r}\", ex.code, level=\"debug\")\n raise\n",
"path": "src/ptvsd/server/cli.py"
}
] | diff --git a/src/ptvsd/server/cli.py b/src/ptvsd/server/cli.py
index 339820624..499833382 100644
--- a/src/ptvsd/server/cli.py
+++ b/src/ptvsd/server/cli.py
@@ -337,7 +337,7 @@ def attach_to_pid():
def main():
- original_argv = sys.argv
+ original_argv = list(sys.argv)
try:
sys.argv[1:] = parse(sys.argv[1:])
except Exception as ex:
diff --git a/tests/debug/session.py b/tests/debug/session.py
index 7ec19f284..4607b62fc 100644
--- a/tests/debug/session.py
+++ b/tests/debug/session.py
@@ -561,8 +561,9 @@ def _request_start(self, method):
)
if start_request.response is not None:
- # It was an immediate response - configuration is not possible. Just get
- # the "process" event, and return to caller.
+ # It was an immediate response - either the request failed, or there is
+ # no configuration stage for this debug session.
+ start_request.response.result # raise exception if failed
return self.wait_for_process()
# We got "initialized" - now we need to yield to the caller, so that it can
diff --git a/tests/net.py b/tests/net.py
index 83107f8de..2e21ab1fa 100644
--- a/tests/net.py
+++ b/tests/net.py
@@ -63,11 +63,11 @@ def wait_until_port_is_listening(port, interval=1, max_attempts=1000):
try:
log.info("Probing localhost:{0} (attempt {1})...", port, i)
sock.connect(("localhost", port))
- except socket.error:
+ except socket.error as exc:
# The first attempt will almost always fail, because the port isn't
# open yet. But if it keeps failing after that, we want to know why.
if i > 1:
- log.exception()
+ log.warning("Failed to connect to localhost:{0}:\n{1}", port, exc)
time.sleep(interval)
else:
log.info("localhost:{0} is listening - server is up!", port)
diff --git a/tests/test_data/flask1/main.py b/tests/test_data/flask1/main.py
index c14c129bc..ddd228908 100644
--- a/tests/test_data/flask1/main.py
+++ b/tests/test_data/flask1/main.py
@@ -5,4 +5,4 @@
import debug_me # noqa
import runpy
-runpy.run_module("flask", run_name="__main__")
+runpy.run_module("flask", run_name="__main__", alter_sys=True)
diff --git a/tests/timeline.py b/tests/timeline.py
index fa3b6e316..92acbae8d 100644
--- a/tests/timeline.py
+++ b/tests/timeline.py
@@ -1104,6 +1104,10 @@ def __init__(self, request_occ, message):
def body(self):
return self.message.body
+ @property
+ def result(self):
+ return self.message.result
+
@property
def success(self):
return self.message.success
|
mitmproxy__mitmproxy-1864 | Missing documentation about unit testing inline scripts
At the company I work for, we are developing some inline scripts for internal use.
We are struggling to write tests for our scripts because the only test code we found is at [/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py](/mitmproxy/mitmproxy/blob/v0.17/test/mitmproxy/test_examples.py).
The examples are quite good, but real documentation is missing for those who want to unit test.
Is there another place where I can find better (maybe more advanced) test examples?
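
For reference, this is a minimal sketch of the kind of test we have been trying to write, using mitmproxy's bundled test helpers (`mitmproxy.test.taddons` and `mitmproxy.test.tflow`). `MyAddon` and its `seen` counter are made up for illustration, and the helper APIs may differ between versions:

```python
from mitmproxy.test import taddons, tflow


class MyAddon:
    """Toy addon that counts responses - purely illustrative."""

    def __init__(self):
        self.seen = 0

    def response(self, flow):
        self.seen += 1


def test_my_addon():
    addon = MyAddon()
    # taddons.context() sets up and tears down the addon event context.
    with taddons.context() as tctx:
        tctx.configure(addon)
        # tflow.tflow(resp=True) builds a complete HTTP flow with a response.
        flow = tflow.tflow(resp=True)
        addon.response(flow)
        assert addon.seen == 1
```

Is this roughly the intended usage, or is there a better-supported pattern?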
| [
{
"content": "from mitmproxy import controller\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import websocket\n\nEvents = frozenset([\n \"clientconnect\",\n \"clientdisconnect\",\n \"serverconnect\",\n \"serverdisconnect\",\n\n \"tcp_start\",\n \"tcp_message\",\n \"tcp_error\",\n \"tcp_end\",\n\n \"http_connect\",\n \"request\",\n \"requestheaders\",\n \"response\",\n \"responseheaders\",\n \"error\",\n\n \"intercept\",\n \"resume\",\n\n \"websocket_handshake\",\n \"websocket_start\",\n \"websocket_message\",\n \"websocket_error\",\n \"websocket_end\",\n\n \"next_layer\",\n\n \"configure\",\n \"done\",\n \"log\",\n \"start\",\n \"tick\",\n])\n\n\ndef event_sequence(f):\n if isinstance(f, http.HTTPFlow):\n if f.request:\n yield \"requestheaders\", f\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, websocket.WebSocketFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"websocket_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"websocket_message\", f\n if f.error:\n yield \"websocket_error\", f\n yield \"websocket_end\", f\n elif isinstance(f, tcp.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_end\", f\n else:\n raise NotImplementedError\n",
"path": "mitmproxy/events.py"
}
] | [
{
"content": "from mitmproxy import controller\nfrom mitmproxy import http\nfrom mitmproxy import tcp\nfrom mitmproxy import websocket\n\nEvents = frozenset([\n \"clientconnect\",\n \"clientdisconnect\",\n \"serverconnect\",\n \"serverdisconnect\",\n\n \"tcp_start\",\n \"tcp_message\",\n \"tcp_error\",\n \"tcp_end\",\n\n \"http_connect\",\n \"request\",\n \"requestheaders\",\n \"response\",\n \"responseheaders\",\n \"error\",\n\n \"websocket_handshake\",\n \"websocket_start\",\n \"websocket_message\",\n \"websocket_error\",\n \"websocket_end\",\n\n \"next_layer\",\n\n \"configure\",\n \"done\",\n \"log\",\n \"start\",\n \"tick\",\n])\n\n\ndef event_sequence(f):\n if isinstance(f, http.HTTPFlow):\n if f.request:\n yield \"requestheaders\", f\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, websocket.WebSocketFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"websocket_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"websocket_message\", f\n if f.error:\n yield \"websocket_error\", f\n yield \"websocket_end\", f\n elif isinstance(f, tcp.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_start\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_end\", f\n else:\n raise NotImplementedError\n",
"path": "mitmproxy/events.py"
}
] | diff --git a/docs/scripting/overview.rst b/docs/scripting/overview.rst
index 7df5532df5..84f2135e59 100644
--- a/docs/scripting/overview.rst
+++ b/docs/scripting/overview.rst
@@ -131,6 +131,23 @@ purposes. You can use it as follows:
:language: python
+Testing
+-------
+
+Mitmproxy includes a number of helpers for testing addons. The
+``mitmproxy.test.taddons`` module contains a context helper that takes care of
+setting up and tearing down the addon event context. The
+``mitmproxy.test.tflow`` module contains helpers for quickly creating test
+flows. Pydoc is the canonical reference for these modules, and mitmproxy's own
+test suite is an excellent source of examples of usage. Here, for instance, is
+the mitmproxy unit tests for the `anticache` option, demonstrating a good
+cross-section of the test helpers:
+
+.. literalinclude:: ../../test/mitmproxy/addons/test_anticache.py
+ :caption: :src:`test/mitmproxy/addons/test_anticache.py`
+ :language: python
+
+
Developing scripts
------------------
diff --git a/mitmproxy/events.py b/mitmproxy/events.py
index f144b4128b..53f236ca3b 100644
--- a/mitmproxy/events.py
+++ b/mitmproxy/events.py
@@ -21,9 +21,6 @@
"responseheaders",
"error",
- "intercept",
- "resume",
-
"websocket_handshake",
"websocket_start",
"websocket_message",
|
cloudtools__troposphere-835 | Add Tags to AWS::EC2::NatGateway
[AWS::EC2::NatGateway](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-natgateway.html)
Use the Tags property to specify resource tags for a NAT gateway.
| [
{
"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty\nfrom .validators import (\n boolean, exactly_one, integer, integer_range,\n network_port, positive_integer\n)\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nclass Tag(AWSProperty):\n props = {\n 'Key': (basestring, True),\n 'Value': (basestring, True)\n }\n\n def __init__(self, key=None, value=None, **kwargs):\n # provided for backward compatibility\n if key is not None:\n kwargs['Key'] = key\n if value is not None:\n kwargs['Value'] = value\n super(Tag, self).__init__(**kwargs)\n\n\nclass CustomerGateway(AWSObject):\n resource_type = \"AWS::EC2::CustomerGateway\"\n\n props = {\n 'BgpAsn': (integer, True),\n 'IpAddress': (basestring, True),\n 'Tags': (list, False),\n 'Type': (basestring, True),\n }\n\n\nclass DHCPOptions(AWSObject):\n resource_type = \"AWS::EC2::DHCPOptions\"\n\n props = {\n 'DomainName': (basestring, False),\n 'DomainNameServers': (list, False),\n 'NetbiosNameServers': (list, False),\n 'NetbiosNodeType': (integer, False),\n 'NtpServers': (list, False),\n 'Tags': (list, False),\n }\n\n\nclass EgressOnlyInternetGateway(AWSObject):\n resource_type = \"AWS::EC2::EgressOnlyInternetGateway\"\n\n props = {\n 'VpcId': (basestring, True),\n }\n\n\nclass EIP(AWSObject):\n resource_type = \"AWS::EC2::EIP\"\n\n props = {\n 'InstanceId': (basestring, False),\n 'Domain': (basestring, False),\n }\n\n\nclass EIPAssociation(AWSObject):\n resource_type = \"AWS::EC2::EIPAssociation\"\n\n props = {\n 'AllocationId': (basestring, False),\n 'EIP': (basestring, False),\n 'InstanceId': (basestring, False),\n 'NetworkInterfaceId': (basestring, False),\n 'PrivateIpAddress': (basestring, False),\n }\n\n\nclass FlowLog(AWSObject):\n resource_type = \"AWS::EC2::FlowLog\"\n\n props = {\n 'DeliverLogsPermissionArn': (basestring, True),\n 'LogGroupName': (basestring, True),\n 'ResourceId': (basestring, True),\n 'ResourceType': (basestring, True),\n 'TrafficType': (basestring, True),\n }\n\n\nclass NatGateway(AWSObject):\n resource_type = \"AWS::EC2::NatGateway\"\n\n props = {\n 'AllocationId': (basestring, True),\n 'SubnetId': (basestring, True),\n }\n\n\nclass EBSBlockDevice(AWSProperty):\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False), # Conditional\n 'SnapshotId': (basestring, False), # Conditional\n 'VolumeSize': (integer, False), # Conditional\n 'VolumeType': (basestring, False),\n }\n\n\nclass BlockDeviceMapping(AWSProperty):\n props = {\n 'DeviceName': (basestring, True),\n 'Ebs': (EBSBlockDevice, False), # Conditional\n 'NoDevice': (dict, False),\n 'VirtualName': (basestring, False), # Conditional\n }\n\n\nclass MountPoint(AWSProperty):\n props = {\n 'Device': (basestring, True),\n 'VolumeId': (basestring, True),\n }\n\n\nclass Placement(AWSProperty):\n props = {\n 'AvailabilityZone': (basestring, False),\n 'GroupName': (basestring, False),\n }\n\n\nclass Ipv6Addresses(AWSHelperFn):\n def __init__(self, address):\n self.data = {\n 'Ipv6Address': address,\n }\n\n\nclass PrivateIpAddressSpecification(AWSProperty):\n props = {\n 'Primary': (boolean, True),\n 'PrivateIpAddress': (basestring, True),\n }\n\n\nclass NetworkInterfaceProperty(AWSProperty):\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'DeleteOnTermination': (boolean, False),\n 'Description': 
(basestring, False),\n 'DeviceIndex': (integer, True),\n 'GroupSet': ([basestring], False),\n 'NetworkInterfaceId': (basestring, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'PrivateIpAddress': (basestring, False),\n 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SubnetId': (basestring, False),\n }\n\n\nclass AssociationParameters(AWSProperty):\n props = {\n 'Key': (basestring, True),\n 'Value': ([basestring], True),\n }\n\n\nclass SsmAssociations(AWSProperty):\n props = {\n 'AssociationParameters': ([AssociationParameters], False),\n 'DocumentName': (basestring, True),\n }\n\n\nclass Host(AWSObject):\n resource_type = \"AWS::EC2::Host\"\n\n props = {\n 'AutoPlacement': (basestring, False),\n 'AvailabilityZone': (basestring, True),\n 'InstanceType': (basestring, True),\n }\n\n\nclass Instance(AWSObject):\n resource_type = \"AWS::EC2::Instance\"\n\n props = {\n 'Affinity': (basestring, False),\n 'AvailabilityZone': (basestring, False),\n 'BlockDeviceMappings': (list, False),\n 'DisableApiTermination': (boolean, False),\n 'EbsOptimized': (boolean, False),\n 'HostId': (basestring, False),\n 'IamInstanceProfile': (basestring, False),\n 'ImageId': (basestring, True),\n 'InstanceInitiatedShutdownBehavior': (basestring, False),\n 'InstanceType': (basestring, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Monitoring': (boolean, False),\n 'NetworkInterfaces': ([NetworkInterfaceProperty], False),\n 'PlacementGroupName': (basestring, False),\n 'PrivateIpAddress': (basestring, False),\n 'RamdiskId': (basestring, False),\n 'SecurityGroupIds': (list, False),\n 'SecurityGroups': (list, False),\n 'SsmAssociations': ([SsmAssociations], False),\n 'SourceDestCheck': (boolean, False),\n 'SubnetId': (basestring, False),\n 'Tags': (list, False),\n 'Tenancy': (basestring, False),\n 'UserData': (basestring, False),\n 'Volumes': (list, False),\n }\n\n\nclass InternetGateway(AWSObject):\n resource_type = \"AWS::EC2::InternetGateway\"\n\n props = {\n 'Tags': (list, False),\n }\n\n\nclass NetworkAcl(AWSObject):\n resource_type = \"AWS::EC2::NetworkAcl\"\n\n props = {\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n\nclass ICMP(AWSProperty):\n props = {\n 'Code': (integer, False),\n 'Type': (integer, False),\n }\n\n\nclass PortRange(AWSProperty):\n props = {\n 'From': (network_port, False),\n 'To': (network_port, False),\n }\n\n\nclass NetworkAclEntry(AWSObject):\n resource_type = \"AWS::EC2::NetworkAclEntry\"\n\n props = {\n 'CidrBlock': (basestring, False),\n 'Egress': (boolean, False),\n 'Icmp': (ICMP, False), # Conditional\n 'Ipv6CidrBlock': (basestring, False),\n 'NetworkAclId': (basestring, True),\n 'PortRange': (PortRange, False), # Conditional\n 'Protocol': (network_port, True),\n 'RuleAction': (basestring, True),\n 'RuleNumber': (integer_range(1, 32766), True),\n }\n\n def validate(self):\n conds = [\n 'CidrBlock',\n 'Ipv6CidrBlock',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass NetworkInterface(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterface\"\n\n props = {\n 'Description': (basestring, False),\n 'GroupSet': (list, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'PrivateIpAddress': (basestring, False),\n 'PrivateIpAddresses': ([PrivateIpAddressSpecification], 
False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SourceDestCheck': (boolean, False),\n 'SubnetId': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass NetworkInterfaceAttachment(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterfaceAttachment\"\n\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'DeviceIndex': (integer, True),\n 'InstanceId': (basestring, True),\n 'NetworkInterfaceId': (basestring, True),\n }\n\n\nPERMISSION_INSTANCE_ATTACH = 'INSTANCE-ATTACH'\nPERMISSION_EIP_ASSOCIATE = 'EIP-ASSOCIATE'\n\n\nclass NetworkInterfacePermission(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterfacePermission\"\n\n props = {\n 'AwsAccountId': (basestring, True),\n 'NetworkInterfaceId': (basestring, True),\n 'Permission': (basestring, True),\n }\n\n\nclass Route(AWSObject):\n resource_type = \"AWS::EC2::Route\"\n\n props = {\n 'DestinationCidrBlock': (basestring, False),\n 'DestinationIpv6CidrBlock': (basestring, False),\n 'EgressOnlyInternetGatewayId': (basestring, False),\n 'GatewayId': (basestring, False),\n 'InstanceId': (basestring, False),\n 'NatGatewayId': (basestring, False),\n 'NetworkInterfaceId': (basestring, False),\n 'RouteTableId': (basestring, True),\n 'VpcPeeringConnectionId': (basestring, False),\n }\n\n def validate(self):\n cidr_conds = [\n 'DestinationCidrBlock',\n 'DestinationIpv6CidrBlock',\n ]\n gateway_conds = [\n 'EgressOnlyInternetGatewayId',\n 'GatewayId',\n 'InstanceId',\n 'NatGatewayId',\n 'NetworkInterfaceId',\n 'VpcPeeringConnectionId'\n ]\n exactly_one(self.__class__.__name__, self.properties, cidr_conds)\n exactly_one(self.__class__.__name__, self.properties, gateway_conds)\n\n\nclass RouteTable(AWSObject):\n resource_type = \"AWS::EC2::RouteTable\"\n\n props = {\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n\nclass SecurityGroupEgress(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroupEgress\"\n\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'DestinationPrefixListId': (basestring, False),\n 'DestinationSecurityGroupId': (basestring, False),\n 'FromPort': (network_port, True),\n 'GroupId': (basestring, True),\n 'IpProtocol': (basestring, True),\n 'ToPort': (network_port, True),\n #\n # Workaround for a bug in CloudFormation and EC2 where the\n # DestinationSecurityGroupId property is ignored causing\n # egress rules targeting a security group to be ignored.\n # Using SourceSecurityGroupId instead works fine even in\n # egress rules. 
AWS have known about this bug for a while.\n #\n 'SourceSecurityGroupId': (basestring, False),\n }\n\n def validate(self):\n conds = [\n 'CidrIp',\n 'CidrIpv6',\n 'DestinationPrefixListId',\n 'DestinationSecurityGroupId',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass SecurityGroupIngress(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroupIngress\"\n\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'FromPort': (network_port, False), # conditional\n 'GroupName': (basestring, False),\n 'GroupId': (basestring, False),\n 'IpProtocol': (basestring, True),\n 'SourceSecurityGroupName': (basestring, False),\n 'SourceSecurityGroupId': (basestring, False),\n 'SourceSecurityGroupOwnerId': (basestring, False),\n 'ToPort': (network_port, False), # conditional\n }\n\n def validate(self):\n conds = [\n 'CidrIp',\n 'CidrIpv6',\n 'SourceSecurityGroupName',\n 'SourceSecurityGroupId',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass SecurityGroupRule(AWSProperty):\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'FromPort': (network_port, False),\n 'IpProtocol': (basestring, True),\n 'SourceSecurityGroupId': (basestring, False),\n 'SourceSecurityGroupName': (basestring, False),\n 'SourceSecurityGroupOwnerId': (basestring, False),\n 'ToPort': (network_port, False),\n 'DestinationSecurityGroupId': (basestring, False),\n }\n\n\nclass SecurityGroup(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroup\"\n\n props = {\n 'GroupName': (basestring, False),\n 'GroupDescription': (basestring, True),\n 'SecurityGroupEgress': (list, False),\n 'SecurityGroupIngress': (list, False),\n 'VpcId': (basestring, False),\n 'Tags': (list, False),\n }\n\n\nclass Subnet(AWSObject):\n resource_type = \"AWS::EC2::Subnet\"\n\n props = {\n 'AssignIPv6AddressOnCreation': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'CidrBlock': (basestring, True),\n 'Ipv6CidrBlock': (basestring, False),\n 'MapPublicIpOnLaunch': (boolean, False),\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n def validate(self):\n if 'Ipv6CidrBlock' in self.properties:\n if not self.properties.get('AssignIPv6AddressOnCreation'):\n raise ValueError(\n \"If Ipv6CidrBlock is present, \"\n \"AssignIPv6AddressOnCreation must be set to True\"\n )\n\n\nclass SubnetNetworkAclAssociation(AWSObject):\n resource_type = \"AWS::EC2::SubnetNetworkAclAssociation\"\n\n props = {\n 'SubnetId': (basestring, True),\n 'NetworkAclId': (basestring, True),\n }\n\n\nclass SubnetRouteTableAssociation(AWSObject):\n resource_type = \"AWS::EC2::SubnetRouteTableAssociation\"\n\n props = {\n 'RouteTableId': (basestring, True),\n 'SubnetId': (basestring, True),\n }\n\n\nclass Volume(AWSObject):\n resource_type = \"AWS::EC2::Volume\"\n\n props = {\n 'AutoEnableIO': (boolean, False),\n 'AvailabilityZone': (basestring, True),\n 'Encrypted': (boolean, False),\n 'Iops': (positive_integer, False),\n 'KmsKeyId': (basestring, False),\n 'Size': (positive_integer, False),\n 'SnapshotId': (basestring, False),\n 'Tags': (list, False),\n 'VolumeType': (basestring, False),\n }\n\n\nclass VolumeAttachment(AWSObject):\n resource_type = \"AWS::EC2::VolumeAttachment\"\n\n props = {\n 'Device': (basestring, True),\n 'InstanceId': (basestring, True),\n 'VolumeId': (basestring, True),\n }\n\n\nclass VPC(AWSObject):\n resource_type = \"AWS::EC2::VPC\"\n\n props = {\n 'CidrBlock': (basestring, True),\n 'EnableDnsSupport': (boolean, False),\n 
'EnableDnsHostnames': (boolean, False),\n 'InstanceTenancy': (basestring, False),\n 'Tags': (list, False),\n }\n\n\nclass VPCDHCPOptionsAssociation(AWSObject):\n resource_type = \"AWS::EC2::VPCDHCPOptionsAssociation\"\n\n props = {\n 'DhcpOptionsId': (basestring, True),\n 'VpcId': (basestring, True),\n }\n\n\nclass VPCEndpoint(AWSObject):\n resource_type = \"AWS::EC2::VPCEndpoint\"\n\n props = {\n 'PolicyDocument': (policytypes, False),\n 'RouteTableIds': ([basestring], False),\n 'ServiceName': (basestring, True),\n 'VpcId': (basestring, True),\n }\n\n\nclass VPCGatewayAttachment(AWSObject):\n resource_type = \"AWS::EC2::VPCGatewayAttachment\"\n\n props = {\n 'InternetGatewayId': (basestring, False),\n 'VpcId': (basestring, True),\n 'VpnGatewayId': (basestring, False),\n }\n\n\nclass VPNConnection(AWSObject):\n resource_type = \"AWS::EC2::VPNConnection\"\n\n props = {\n 'Type': (basestring, True),\n 'CustomerGatewayId': (basestring, True),\n 'StaticRoutesOnly': (boolean, False),\n 'Tags': (list, False),\n 'VpnGatewayId': (basestring, True),\n }\n\n\nclass VPNConnectionRoute(AWSObject):\n resource_type = \"AWS::EC2::VPNConnectionRoute\"\n\n props = {\n 'DestinationCidrBlock': (basestring, True),\n 'VpnConnectionId': (basestring, True),\n }\n\n\nclass VPNGateway(AWSObject):\n resource_type = \"AWS::EC2::VPNGateway\"\n\n props = {\n 'Type': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass VPNGatewayRoutePropagation(AWSObject):\n resource_type = \"AWS::EC2::VPNGatewayRoutePropagation\"\n\n props = {\n 'RouteTableIds': ([basestring], True),\n 'VpnGatewayId': (basestring, True),\n }\n\n\nclass VPCPeeringConnection(AWSObject):\n resource_type = \"AWS::EC2::VPCPeeringConnection\"\n\n props = {\n 'PeerVpcId': (basestring, True),\n 'VpcId': (basestring, True),\n 'Tags': (list, False),\n 'PeerOwnerId': (basestring, False),\n 'PeerRoleArn': (basestring, False),\n }\n\n\nclass Monitoring(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n }\n\n\nclass NetworkInterfaces(AWSProperty):\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'DeleteOnTermination': (boolean, False),\n 'Description': (basestring, False),\n 'DeviceIndex': (integer, True),\n 'Groups': ([basestring], False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'NetworkInterfaceId': (basestring, False),\n 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SubnetId': (basestring, False),\n }\n\n\nclass SecurityGroups(AWSProperty):\n props = {\n 'GroupId': (basestring, False),\n }\n\n\nclass IamInstanceProfile(AWSProperty):\n props = {\n 'Arn': (basestring, False),\n }\n\n\nclass LaunchSpecifications(AWSProperty):\n props = {\n 'BlockDeviceMappings': ([BlockDeviceMapping], False),\n 'EbsOptimized': (boolean, False),\n 'IamInstanceProfile': (IamInstanceProfile, False),\n 'ImageId': (basestring, True),\n 'InstanceType': (basestring, True),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Monitoring': (Monitoring, False),\n 'NetworkInterfaces': ([NetworkInterfaces], False),\n 'Placement': (Placement, False),\n 'RamdiskId': (basestring, False),\n 'SecurityGroups': ([SecurityGroups], False),\n 'SpotPrice': (basestring, False),\n 'SubnetId': (basestring, False),\n 'UserData': (basestring, False),\n 'WeightedCapacity': (positive_integer, False),\n }\n\n\nclass SpotFleetRequestConfigData(AWSProperty):\n props = {\n 'AllocationStrategy': (basestring, False),\n 
'ExcessCapacityTerminationPolicy': (basestring, False),\n 'IamFleetRole': (basestring, True),\n 'ReplaceUnhealthyInstances': (boolean, False),\n 'LaunchSpecifications': ([LaunchSpecifications], True),\n 'SpotPrice': (basestring, True),\n 'TargetCapacity': (positive_integer, True),\n 'TerminateInstancesWithExpiration': (boolean, False),\n 'Type': (basestring, False),\n 'ValidFrom': (basestring, False),\n 'ValidUntil': (basestring, False),\n }\n\n\nclass SpotFleet(AWSObject):\n resource_type = \"AWS::EC2::SpotFleet\"\n\n props = {\n 'SpotFleetRequestConfigData': (SpotFleetRequestConfigData, True),\n }\n\n\nclass PlacementGroup(AWSObject):\n resource_type = \"AWS::EC2::PlacementGroup\"\n\n props = {\n 'Strategy': (basestring, True),\n }\n\n\nclass SubnetCidrBlock(AWSObject):\n resource_type = \"AWS::EC2::SubnetCidrBlock\"\n\n props = {\n 'Ipv6CidrBlock': (basestring, True),\n 'SubnetId': (basestring, True),\n }\n\n\nclass VPCCidrBlock(AWSObject):\n resource_type = \"AWS::EC2::VPCCidrBlock\"\n\n props = {\n 'AmazonProvidedIpv6CidrBlock': (boolean, False),\n 'CidrBlock': (basestring, False),\n 'VpcId': (basestring, True),\n }\n",
"path": "troposphere/ec2.py"
}
] | [
{
"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty\nfrom .validators import (\n boolean, exactly_one, integer, integer_range,\n network_port, positive_integer\n)\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nclass Tag(AWSProperty):\n props = {\n 'Key': (basestring, True),\n 'Value': (basestring, True)\n }\n\n def __init__(self, key=None, value=None, **kwargs):\n # provided for backward compatibility\n if key is not None:\n kwargs['Key'] = key\n if value is not None:\n kwargs['Value'] = value\n super(Tag, self).__init__(**kwargs)\n\n\nclass CustomerGateway(AWSObject):\n resource_type = \"AWS::EC2::CustomerGateway\"\n\n props = {\n 'BgpAsn': (integer, True),\n 'IpAddress': (basestring, True),\n 'Tags': (list, False),\n 'Type': (basestring, True),\n }\n\n\nclass DHCPOptions(AWSObject):\n resource_type = \"AWS::EC2::DHCPOptions\"\n\n props = {\n 'DomainName': (basestring, False),\n 'DomainNameServers': (list, False),\n 'NetbiosNameServers': (list, False),\n 'NetbiosNodeType': (integer, False),\n 'NtpServers': (list, False),\n 'Tags': (list, False),\n }\n\n\nclass EgressOnlyInternetGateway(AWSObject):\n resource_type = \"AWS::EC2::EgressOnlyInternetGateway\"\n\n props = {\n 'VpcId': (basestring, True),\n }\n\n\nclass EIP(AWSObject):\n resource_type = \"AWS::EC2::EIP\"\n\n props = {\n 'InstanceId': (basestring, False),\n 'Domain': (basestring, False),\n }\n\n\nclass EIPAssociation(AWSObject):\n resource_type = \"AWS::EC2::EIPAssociation\"\n\n props = {\n 'AllocationId': (basestring, False),\n 'EIP': (basestring, False),\n 'InstanceId': (basestring, False),\n 'NetworkInterfaceId': (basestring, False),\n 'PrivateIpAddress': (basestring, False),\n }\n\n\nclass FlowLog(AWSObject):\n resource_type = \"AWS::EC2::FlowLog\"\n\n props = {\n 'DeliverLogsPermissionArn': (basestring, True),\n 'LogGroupName': (basestring, True),\n 'ResourceId': (basestring, True),\n 'ResourceType': (basestring, True),\n 'TrafficType': (basestring, True),\n }\n\n\nclass NatGateway(AWSObject):\n resource_type = \"AWS::EC2::NatGateway\"\n\n props = {\n 'AllocationId': (basestring, True),\n 'SubnetId': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass EBSBlockDevice(AWSProperty):\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False), # Conditional\n 'SnapshotId': (basestring, False), # Conditional\n 'VolumeSize': (integer, False), # Conditional\n 'VolumeType': (basestring, False),\n }\n\n\nclass BlockDeviceMapping(AWSProperty):\n props = {\n 'DeviceName': (basestring, True),\n 'Ebs': (EBSBlockDevice, False), # Conditional\n 'NoDevice': (dict, False),\n 'VirtualName': (basestring, False), # Conditional\n }\n\n\nclass MountPoint(AWSProperty):\n props = {\n 'Device': (basestring, True),\n 'VolumeId': (basestring, True),\n }\n\n\nclass Placement(AWSProperty):\n props = {\n 'AvailabilityZone': (basestring, False),\n 'GroupName': (basestring, False),\n }\n\n\nclass Ipv6Addresses(AWSHelperFn):\n def __init__(self, address):\n self.data = {\n 'Ipv6Address': address,\n }\n\n\nclass PrivateIpAddressSpecification(AWSProperty):\n props = {\n 'Primary': (boolean, True),\n 'PrivateIpAddress': (basestring, True),\n }\n\n\nclass NetworkInterfaceProperty(AWSProperty):\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'DeleteOnTermination': (boolean, False),\n 
'Description': (basestring, False),\n 'DeviceIndex': (integer, True),\n 'GroupSet': ([basestring], False),\n 'NetworkInterfaceId': (basestring, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'PrivateIpAddress': (basestring, False),\n 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SubnetId': (basestring, False),\n }\n\n\nclass AssociationParameters(AWSProperty):\n props = {\n 'Key': (basestring, True),\n 'Value': ([basestring], True),\n }\n\n\nclass SsmAssociations(AWSProperty):\n props = {\n 'AssociationParameters': ([AssociationParameters], False),\n 'DocumentName': (basestring, True),\n }\n\n\nclass Host(AWSObject):\n resource_type = \"AWS::EC2::Host\"\n\n props = {\n 'AutoPlacement': (basestring, False),\n 'AvailabilityZone': (basestring, True),\n 'InstanceType': (basestring, True),\n }\n\n\nclass Instance(AWSObject):\n resource_type = \"AWS::EC2::Instance\"\n\n props = {\n 'Affinity': (basestring, False),\n 'AvailabilityZone': (basestring, False),\n 'BlockDeviceMappings': (list, False),\n 'DisableApiTermination': (boolean, False),\n 'EbsOptimized': (boolean, False),\n 'HostId': (basestring, False),\n 'IamInstanceProfile': (basestring, False),\n 'ImageId': (basestring, True),\n 'InstanceInitiatedShutdownBehavior': (basestring, False),\n 'InstanceType': (basestring, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Monitoring': (boolean, False),\n 'NetworkInterfaces': ([NetworkInterfaceProperty], False),\n 'PlacementGroupName': (basestring, False),\n 'PrivateIpAddress': (basestring, False),\n 'RamdiskId': (basestring, False),\n 'SecurityGroupIds': (list, False),\n 'SecurityGroups': (list, False),\n 'SsmAssociations': ([SsmAssociations], False),\n 'SourceDestCheck': (boolean, False),\n 'SubnetId': (basestring, False),\n 'Tags': (list, False),\n 'Tenancy': (basestring, False),\n 'UserData': (basestring, False),\n 'Volumes': (list, False),\n }\n\n\nclass InternetGateway(AWSObject):\n resource_type = \"AWS::EC2::InternetGateway\"\n\n props = {\n 'Tags': (list, False),\n }\n\n\nclass NetworkAcl(AWSObject):\n resource_type = \"AWS::EC2::NetworkAcl\"\n\n props = {\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n\nclass ICMP(AWSProperty):\n props = {\n 'Code': (integer, False),\n 'Type': (integer, False),\n }\n\n\nclass PortRange(AWSProperty):\n props = {\n 'From': (network_port, False),\n 'To': (network_port, False),\n }\n\n\nclass NetworkAclEntry(AWSObject):\n resource_type = \"AWS::EC2::NetworkAclEntry\"\n\n props = {\n 'CidrBlock': (basestring, False),\n 'Egress': (boolean, False),\n 'Icmp': (ICMP, False), # Conditional\n 'Ipv6CidrBlock': (basestring, False),\n 'NetworkAclId': (basestring, True),\n 'PortRange': (PortRange, False), # Conditional\n 'Protocol': (network_port, True),\n 'RuleAction': (basestring, True),\n 'RuleNumber': (integer_range(1, 32766), True),\n }\n\n def validate(self):\n conds = [\n 'CidrBlock',\n 'Ipv6CidrBlock',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass NetworkInterface(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterface\"\n\n props = {\n 'Description': (basestring, False),\n 'GroupSet': (list, False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'PrivateIpAddress': (basestring, False),\n 'PrivateIpAddresses': 
([PrivateIpAddressSpecification], False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SourceDestCheck': (boolean, False),\n 'SubnetId': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass NetworkInterfaceAttachment(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterfaceAttachment\"\n\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'DeviceIndex': (integer, True),\n 'InstanceId': (basestring, True),\n 'NetworkInterfaceId': (basestring, True),\n }\n\n\nPERMISSION_INSTANCE_ATTACH = 'INSTANCE-ATTACH'\nPERMISSION_EIP_ASSOCIATE = 'EIP-ASSOCIATE'\n\n\nclass NetworkInterfacePermission(AWSObject):\n resource_type = \"AWS::EC2::NetworkInterfacePermission\"\n\n props = {\n 'AwsAccountId': (basestring, True),\n 'NetworkInterfaceId': (basestring, True),\n 'Permission': (basestring, True),\n }\n\n\nclass Route(AWSObject):\n resource_type = \"AWS::EC2::Route\"\n\n props = {\n 'DestinationCidrBlock': (basestring, False),\n 'DestinationIpv6CidrBlock': (basestring, False),\n 'EgressOnlyInternetGatewayId': (basestring, False),\n 'GatewayId': (basestring, False),\n 'InstanceId': (basestring, False),\n 'NatGatewayId': (basestring, False),\n 'NetworkInterfaceId': (basestring, False),\n 'RouteTableId': (basestring, True),\n 'VpcPeeringConnectionId': (basestring, False),\n }\n\n def validate(self):\n cidr_conds = [\n 'DestinationCidrBlock',\n 'DestinationIpv6CidrBlock',\n ]\n gateway_conds = [\n 'EgressOnlyInternetGatewayId',\n 'GatewayId',\n 'InstanceId',\n 'NatGatewayId',\n 'NetworkInterfaceId',\n 'VpcPeeringConnectionId'\n ]\n exactly_one(self.__class__.__name__, self.properties, cidr_conds)\n exactly_one(self.__class__.__name__, self.properties, gateway_conds)\n\n\nclass RouteTable(AWSObject):\n resource_type = \"AWS::EC2::RouteTable\"\n\n props = {\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n\nclass SecurityGroupEgress(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroupEgress\"\n\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'DestinationPrefixListId': (basestring, False),\n 'DestinationSecurityGroupId': (basestring, False),\n 'FromPort': (network_port, True),\n 'GroupId': (basestring, True),\n 'IpProtocol': (basestring, True),\n 'ToPort': (network_port, True),\n #\n # Workaround for a bug in CloudFormation and EC2 where the\n # DestinationSecurityGroupId property is ignored causing\n # egress rules targeting a security group to be ignored.\n # Using SourceSecurityGroupId instead works fine even in\n # egress rules. 
AWS have known about this bug for a while.\n #\n 'SourceSecurityGroupId': (basestring, False),\n }\n\n def validate(self):\n conds = [\n 'CidrIp',\n 'CidrIpv6',\n 'DestinationPrefixListId',\n 'DestinationSecurityGroupId',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass SecurityGroupIngress(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroupIngress\"\n\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'FromPort': (network_port, False), # conditional\n 'GroupName': (basestring, False),\n 'GroupId': (basestring, False),\n 'IpProtocol': (basestring, True),\n 'SourceSecurityGroupName': (basestring, False),\n 'SourceSecurityGroupId': (basestring, False),\n 'SourceSecurityGroupOwnerId': (basestring, False),\n 'ToPort': (network_port, False), # conditional\n }\n\n def validate(self):\n conds = [\n 'CidrIp',\n 'CidrIpv6',\n 'SourceSecurityGroupName',\n 'SourceSecurityGroupId',\n ]\n exactly_one(self.__class__.__name__, self.properties, conds)\n\n\nclass SecurityGroupRule(AWSProperty):\n props = {\n 'CidrIp': (basestring, False),\n 'CidrIpv6': (basestring, False),\n 'FromPort': (network_port, False),\n 'IpProtocol': (basestring, True),\n 'SourceSecurityGroupId': (basestring, False),\n 'SourceSecurityGroupName': (basestring, False),\n 'SourceSecurityGroupOwnerId': (basestring, False),\n 'ToPort': (network_port, False),\n 'DestinationSecurityGroupId': (basestring, False),\n }\n\n\nclass SecurityGroup(AWSObject):\n resource_type = \"AWS::EC2::SecurityGroup\"\n\n props = {\n 'GroupName': (basestring, False),\n 'GroupDescription': (basestring, True),\n 'SecurityGroupEgress': (list, False),\n 'SecurityGroupIngress': (list, False),\n 'VpcId': (basestring, False),\n 'Tags': (list, False),\n }\n\n\nclass Subnet(AWSObject):\n resource_type = \"AWS::EC2::Subnet\"\n\n props = {\n 'AssignIPv6AddressOnCreation': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'CidrBlock': (basestring, True),\n 'Ipv6CidrBlock': (basestring, False),\n 'MapPublicIpOnLaunch': (boolean, False),\n 'Tags': (list, False),\n 'VpcId': (basestring, True),\n }\n\n def validate(self):\n if 'Ipv6CidrBlock' in self.properties:\n if not self.properties.get('AssignIPv6AddressOnCreation'):\n raise ValueError(\n \"If Ipv6CidrBlock is present, \"\n \"AssignIPv6AddressOnCreation must be set to True\"\n )\n\n\nclass SubnetNetworkAclAssociation(AWSObject):\n resource_type = \"AWS::EC2::SubnetNetworkAclAssociation\"\n\n props = {\n 'SubnetId': (basestring, True),\n 'NetworkAclId': (basestring, True),\n }\n\n\nclass SubnetRouteTableAssociation(AWSObject):\n resource_type = \"AWS::EC2::SubnetRouteTableAssociation\"\n\n props = {\n 'RouteTableId': (basestring, True),\n 'SubnetId': (basestring, True),\n }\n\n\nclass Volume(AWSObject):\n resource_type = \"AWS::EC2::Volume\"\n\n props = {\n 'AutoEnableIO': (boolean, False),\n 'AvailabilityZone': (basestring, True),\n 'Encrypted': (boolean, False),\n 'Iops': (positive_integer, False),\n 'KmsKeyId': (basestring, False),\n 'Size': (positive_integer, False),\n 'SnapshotId': (basestring, False),\n 'Tags': (list, False),\n 'VolumeType': (basestring, False),\n }\n\n\nclass VolumeAttachment(AWSObject):\n resource_type = \"AWS::EC2::VolumeAttachment\"\n\n props = {\n 'Device': (basestring, True),\n 'InstanceId': (basestring, True),\n 'VolumeId': (basestring, True),\n }\n\n\nclass VPC(AWSObject):\n resource_type = \"AWS::EC2::VPC\"\n\n props = {\n 'CidrBlock': (basestring, True),\n 'EnableDnsSupport': (boolean, False),\n 
'EnableDnsHostnames': (boolean, False),\n 'InstanceTenancy': (basestring, False),\n 'Tags': (list, False),\n }\n\n\nclass VPCDHCPOptionsAssociation(AWSObject):\n resource_type = \"AWS::EC2::VPCDHCPOptionsAssociation\"\n\n props = {\n 'DhcpOptionsId': (basestring, True),\n 'VpcId': (basestring, True),\n }\n\n\nclass VPCEndpoint(AWSObject):\n resource_type = \"AWS::EC2::VPCEndpoint\"\n\n props = {\n 'PolicyDocument': (policytypes, False),\n 'RouteTableIds': ([basestring], False),\n 'ServiceName': (basestring, True),\n 'VpcId': (basestring, True),\n }\n\n\nclass VPCGatewayAttachment(AWSObject):\n resource_type = \"AWS::EC2::VPCGatewayAttachment\"\n\n props = {\n 'InternetGatewayId': (basestring, False),\n 'VpcId': (basestring, True),\n 'VpnGatewayId': (basestring, False),\n }\n\n\nclass VPNConnection(AWSObject):\n resource_type = \"AWS::EC2::VPNConnection\"\n\n props = {\n 'Type': (basestring, True),\n 'CustomerGatewayId': (basestring, True),\n 'StaticRoutesOnly': (boolean, False),\n 'Tags': (list, False),\n 'VpnGatewayId': (basestring, True),\n }\n\n\nclass VPNConnectionRoute(AWSObject):\n resource_type = \"AWS::EC2::VPNConnectionRoute\"\n\n props = {\n 'DestinationCidrBlock': (basestring, True),\n 'VpnConnectionId': (basestring, True),\n }\n\n\nclass VPNGateway(AWSObject):\n resource_type = \"AWS::EC2::VPNGateway\"\n\n props = {\n 'Type': (basestring, True),\n 'Tags': (list, False),\n }\n\n\nclass VPNGatewayRoutePropagation(AWSObject):\n resource_type = \"AWS::EC2::VPNGatewayRoutePropagation\"\n\n props = {\n 'RouteTableIds': ([basestring], True),\n 'VpnGatewayId': (basestring, True),\n }\n\n\nclass VPCPeeringConnection(AWSObject):\n resource_type = \"AWS::EC2::VPCPeeringConnection\"\n\n props = {\n 'PeerVpcId': (basestring, True),\n 'VpcId': (basestring, True),\n 'Tags': (list, False),\n 'PeerOwnerId': (basestring, False),\n 'PeerRoleArn': (basestring, False),\n }\n\n\nclass Monitoring(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n }\n\n\nclass NetworkInterfaces(AWSProperty):\n props = {\n 'AssociatePublicIpAddress': (boolean, False),\n 'DeleteOnTermination': (boolean, False),\n 'Description': (basestring, False),\n 'DeviceIndex': (integer, True),\n 'Groups': ([basestring], False),\n 'Ipv6AddressCount': (integer, False),\n 'Ipv6Addresses': ([Ipv6Addresses], False),\n 'NetworkInterfaceId': (basestring, False),\n 'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),\n 'SecondaryPrivateIpAddressCount': (integer, False),\n 'SubnetId': (basestring, False),\n }\n\n\nclass SecurityGroups(AWSProperty):\n props = {\n 'GroupId': (basestring, False),\n }\n\n\nclass IamInstanceProfile(AWSProperty):\n props = {\n 'Arn': (basestring, False),\n }\n\n\nclass LaunchSpecifications(AWSProperty):\n props = {\n 'BlockDeviceMappings': ([BlockDeviceMapping], False),\n 'EbsOptimized': (boolean, False),\n 'IamInstanceProfile': (IamInstanceProfile, False),\n 'ImageId': (basestring, True),\n 'InstanceType': (basestring, True),\n 'KernelId': (basestring, False),\n 'KeyName': (basestring, False),\n 'Monitoring': (Monitoring, False),\n 'NetworkInterfaces': ([NetworkInterfaces], False),\n 'Placement': (Placement, False),\n 'RamdiskId': (basestring, False),\n 'SecurityGroups': ([SecurityGroups], False),\n 'SpotPrice': (basestring, False),\n 'SubnetId': (basestring, False),\n 'UserData': (basestring, False),\n 'WeightedCapacity': (positive_integer, False),\n }\n\n\nclass SpotFleetRequestConfigData(AWSProperty):\n props = {\n 'AllocationStrategy': (basestring, False),\n 
'ExcessCapacityTerminationPolicy': (basestring, False),\n 'IamFleetRole': (basestring, True),\n 'ReplaceUnhealthyInstances': (boolean, False),\n 'LaunchSpecifications': ([LaunchSpecifications], True),\n 'SpotPrice': (basestring, True),\n 'TargetCapacity': (positive_integer, True),\n 'TerminateInstancesWithExpiration': (boolean, False),\n 'Type': (basestring, False),\n 'ValidFrom': (basestring, False),\n 'ValidUntil': (basestring, False),\n }\n\n\nclass SpotFleet(AWSObject):\n resource_type = \"AWS::EC2::SpotFleet\"\n\n props = {\n 'SpotFleetRequestConfigData': (SpotFleetRequestConfigData, True),\n }\n\n\nclass PlacementGroup(AWSObject):\n resource_type = \"AWS::EC2::PlacementGroup\"\n\n props = {\n 'Strategy': (basestring, True),\n }\n\n\nclass SubnetCidrBlock(AWSObject):\n resource_type = \"AWS::EC2::SubnetCidrBlock\"\n\n props = {\n 'Ipv6CidrBlock': (basestring, True),\n 'SubnetId': (basestring, True),\n }\n\n\nclass VPCCidrBlock(AWSObject):\n resource_type = \"AWS::EC2::VPCCidrBlock\"\n\n props = {\n 'AmazonProvidedIpv6CidrBlock': (boolean, False),\n 'CidrBlock': (basestring, False),\n 'VpcId': (basestring, True),\n }\n",
"path": "troposphere/ec2.py"
}
] | diff --git a/troposphere/ec2.py b/troposphere/ec2.py
index c4de721d1..746e414df 100644
--- a/troposphere/ec2.py
+++ b/troposphere/ec2.py
@@ -102,6 +102,7 @@ class NatGateway(AWSObject):
props = {
'AllocationId': (basestring, True),
'SubnetId': (basestring, True),
+ 'Tags': (list, False),
}
|
pypa__setuptools-2584 | Add mechanism for side-by-side comparison of setup.py and its equivalent setup.cfg
We have many documentation examples that are purely declarative and are documented as either `setup.py` or `setup.cfg`. It would be really awesome if, for each of these, we had the option to show both versions side by side or, even better, in a sort of "tabbed container", like the one in the [code sample at the bottom of this example](https://leetcode.com/articles/median-of-two-sorted-arrays/).
Requirements for this:
1. Cannot *link to* any third-party javascript dependencies. Ideally we wouldn't use any at all, but if we do, they must be vendored in the documentation.
2. If javascript is disabled, it has to fall back to something intelligible.
Ideally it would be implemented in pure CSS / HTML if that's at all possible.
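
As one candidate, the `sphinx-inline-tabs` extension appears to satisfy these constraints (it ships its own assets rather than linking to external scripts). A sketch of how it might be wired up — the `conf.py` line follows this repo's existing `extensions +=` style, and the directive usage in the comment is taken from that extension's documentation:

```python
# docs/conf.py - sketch, assuming sphinx-inline-tabs is added to the
# docs build requirements rather than linked from a CDN.
extensions += ['sphinx_inline_tabs']

# In the .rst sources, the two variants would then be written as:
#
#   .. tab:: setup.cfg
#
#      .. code-block:: ini
#
#         [metadata]
#         name = mypackage
#
#   .. tab:: setup.py
#
#      .. code-block:: python
#
#         setup(name="mypackage")
```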
| [
{
"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\nintersphinx_mapping = {\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n}\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks', 'sphinx.ext.intersphinx']\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n",
"path": "docs/conf.py"
}
] | [
{
"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n",
"path": "docs/conf.py"
}
] | diff --git a/changelog.d/2584.doc.rst b/changelog.d/2584.doc.rst
new file mode 100644
index 0000000000..3474049963
--- /dev/null
+++ b/changelog.d/2584.doc.rst
@@ -0,0 +1 @@
+Added ``sphinx-inline-tabs`` extension to allow for comparison of ``setup.py`` and its equivalent ``setup.cfg`` -- by :user:`amy-lei`
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 18cd7bdc49..581d8d5e75 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -93,3 +93,6 @@
html_theme = 'alabaster'
templates_path = ['_templates']
html_sidebars = {'index': ['tidelift-sidebar.html']}
+
+# Add support for inline tabs
+extensions += ['sphinx_inline_tabs']
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 104d68faef..0292759301 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -3,5 +3,6 @@ sphinx
jaraco.packaging>=6.1
rst.linker>=1.9
pygments-github-lexers==0.0.5
+sphinx-inline-tabs
setuptools>=34
diff --git a/docs/userguide/dependency_management.rst b/docs/userguide/dependency_management.rst
index 0eb2186494..6108d9b2e9 100644
--- a/docs/userguide/dependency_management.rst
+++ b/docs/userguide/dependency_management.rst
@@ -49,23 +49,27 @@ be able to run. ``setuptools`` support automatically download and install
these dependencies when the package is installed. Although there is more
finess to it, let's start with a simple example.
-.. code-block:: ini
+.. tab:: setup.cfg
- [options]
- #...
- install_requires =
- docutils
- BazSpam ==1.1
+ .. code-block:: ini
+
+ [options]
+ #...
+ install_requires =
+ docutils
+ BazSpam ==1.1
+
+.. tab:: setup.py
-.. code-block:: python
+ .. code-block:: python
- setup(
- #...,
- install_requires = [
- 'docutils',
- 'BazSpam ==1.1'
- ]
- )
+ setup(
+ #...,
+ install_requires = [
+ 'docutils',
+ 'BazSpam ==1.1'
+ ]
+ )
When your project is installed (e.g. using pip), all of the dependencies not
@@ -82,41 +86,49 @@ specific dependencies. For example, the ``enum`` package was added in Python
3.4, therefore, package that depends on it can elect to install it only when
the Python version is older than 3.4. To accomplish this
-.. code-block:: ini
-
- [options]
- #...
- install_requires =
- enum34;python_version<'3.4'
+.. tab:: setup.cfg
-.. code-block:: python
+ .. code-block:: ini
- setup(
+ [options]
#...
- install_requires=[
- "enum34;python_version<'3.4'",]
- )
+ install_requires =
+ enum34;python_version<'3.4'
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ #...
+ install_requires=[
+ "enum34;python_version<'3.4'",]
+ )
Similarly, if you also wish to declare ``pywin32`` with a minimal version of 1.0
and only install it if the user is using a Windows operating system:
-.. code-block:: ini
-
- [options]
- #...
- install_requires =
- enum34;python_version<'3.4'
- pywin32 >= 1.0;platform_system=='Windows'
+.. tab:: setup.cfg
-.. code-block:: python
+ .. code-block:: ini
- setup(
+ [options]
#...
- install_requires=[
- "enum34;python_version<'3.4'",
- "pywin32 >= 1.0;platform_system=='Windows'"
- ]
- )
+ install_requires =
+ enum34;python_version<'3.4'
+ pywin32 >= 1.0;platform_system=='Windows'
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ #...
+ install_requires=[
+ "enum34;python_version<'3.4'",
+ "pywin32 >= 1.0;platform_system=='Windows'"
+ ]
+ )
The environmental markers that may be used for testing platform types are
detailed in `PEP 508 <https://www.python.org/dev/peps/pep-0508/>`_.
@@ -181,20 +193,24 @@ The ``dependency_links`` option takes the form of a list of URL strings. For
example, this will cause a search of the specified page for eggs or source
distributions, if the package's dependencies aren't already installed:
-.. code-block:: ini
-
- [options]
- #...
- dependency_links = http://peak.telecommunity.com/snapshots/
+.. tab:: setup.cfg
-.. code-block:: python
+ .. code-block:: ini
- setup(
+ [options]
#...
- dependency_links=[
- "http://peak.telecommunity.com/snapshots/"
- ],
- )
+ dependency_links = http://peak.telecommunity.com/snapshots/
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ #...
+ dependency_links=[
+ "http://peak.telecommunity.com/snapshots/"
+ ],
+ )
Optional dependencies
@@ -211,24 +227,28 @@ ancillary functions such as "tests" and "docs".
For example, Package-A offers optional PDF support and requires two other
dependencies for it to work:
-.. code-block:: ini
+.. tab:: setup.cfg
- [metadata]
- name = Package-A
+ .. code-block:: ini
- [options.extras_require]
- PDF = ReportLab>=1.2; RXP
+ [metadata]
+ name = Package-A
+ [options.extras_require]
+ PDF = ReportLab>=1.2; RXP
-.. code-block:: python
- setup(
- name="Project-A",
- #...
- extras_require={
- "PDF": ["ReportLab>=1.2", "RXP"],
- }
- )
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ name="Project-A",
+ #...
+ extras_require={
+ "PDF": ["ReportLab>=1.2", "RXP"],
+ }
+ )
The name ``PDF`` is an arbitary identifier of such a list of dependencies, to
which other components can refer and have them installed. There are two common
@@ -236,31 +256,35 @@ use cases.
First is the console_scripts entry point:
-.. code-block:: ini
+.. tab:: setup.cfg
- [metadata]
- name = Project A
- #...
+ .. code-block:: ini
- [options]
- #...
- entry_points=
- [console_scripts]
- rst2pdf = project_a.tools.pdfgen [PDF]
- rst2html = project_a.tools.htmlgen
-
-.. code-block:: python
-
- setup(
- name = "Project-A"
- #...,
- entry_points={
- "console_scripts": [
- "rst2pdf = project_a.tools.pdfgen [PDF]",
- "rst2html = project_a.tools.htmlgen",
- ],
- }
- )
+ [metadata]
+ name = Project A
+ #...
+
+ [options]
+ #...
+ entry_points=
+ [console_scripts]
+ rst2pdf = project_a.tools.pdfgen [PDF]
+ rst2html = project_a.tools.htmlgen
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ name = "Project-A"
+ #...,
+ entry_points={
+ "console_scripts": [
+ "rst2pdf = project_a.tools.pdfgen [PDF]",
+ "rst2html = project_a.tools.htmlgen",
+ ],
+ }
+ )
This syntax indicates that the entry point (in this case a console script)
is only valid when the PDF extra is installed. It is up to the installer
@@ -273,24 +297,28 @@ The second use case is that other package can use this "extra" for their
own dependencies. For example, if "Project-B" needs "project A" with PDF support
installed, it might declare the dependency like this:
-.. code-block:: ini
+.. tab:: setup.cfg
- [metadata]
- name = Project-B
- #...
+ .. code-block:: ini
- [options]
- #...
- install_requires =
- Project-A[PDF]
+ [metadata]
+ name = Project-B
+ #...
+
+ [options]
+ #...
+ install_requires =
+ Project-A[PDF]
+
+.. tab:: setup.py
-.. code-block:: python
+ .. code-block:: python
- setup(
- name="Project-B",
- install_requires=["Project-A[PDF]"],
- ...
- )
+ setup(
+ name="Project-B",
+ install_requires=["Project-A[PDF]"],
+ ...
+ )
This will cause ReportLab to be installed along with project A, if project B is
installed -- even if project A was already installed. In this way, a project
diff --git a/docs/userguide/package_discovery.rst b/docs/userguide/package_discovery.rst
index de4ef6682f..842ade828d 100644
--- a/docs/userguide/package_discovery.rst
+++ b/docs/userguide/package_discovery.rst
@@ -19,36 +19,44 @@ Package Discovery and Namespace Package
support for namespace package. Normally, you would specify the package to be
included manually in the following manner:
-.. code-block:: ini
-
- [options]
- #...
- packages =
- mypkg1
- mypkg2
+.. tab:: setup.cfg
-.. code-block:: python
+ .. code-block:: ini
- setup(
+ [options]
#...
- packages = ['mypkg1', 'mypkg2']
- )
+ packages =
+ mypkg1
+ mypkg2
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ #...
+ packages = ['mypkg1', 'mypkg2']
+ )
This can get tiresome reallly quickly. To speed things up, we introduce two
functions provided by setuptools:
-.. code-block:: ini
+.. tab:: setup.cfg
- [options]
- packages = find:
- #or
- packages = find_namespace:
+ .. code-block:: ini
-.. code-block:: python
+ [options]
+ packages = find:
+ #or
+ packages = find_namespace:
- from setuptools import find_packages
- #or
- from setuptools import find_namespace_packages
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ from setuptools import find_packages
+ #or
+ from setuptools import find_namespace_packages
Using ``find:`` or ``find_packages``
@@ -71,30 +79,34 @@ it, consider the following directory
To have your setup.cfg or setup.py to automatically include packages found
in ``src`` that starts with the name ``pkg`` and not ``additional``:
-.. code-block:: ini
+.. tab:: setup.cfg
- [options]
- packages = find:
- package_dir =
- =src
+ .. code-block:: ini
- [options.packages.find]
- where = src
- include = pkg*
- exclude = additional
+ [options]
+ packages = find:
+ package_dir =
+ =src
-.. code-block:: python
+ [options.packages.find]
+ where = src
+ include = pkg*
+ exclude = additional
- setup(
- #...
- packages = find_packages(
- where = 'src',
- include = ['pkg*',],
- exclude = ['additional',]
- ),
- package_dir = {"":"src"}
- #...
- )
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ setup(
+ #...
+ packages = find_packages(
+ where = 'src',
+ include = ['pkg*',],
+ exclude = ['additional',]
+ ),
+ package_dir = {"":"src"}
+ #...
+ )
.. _Namespace Packages:
@@ -195,17 +207,21 @@ following:
And the ``namespace_packages`` keyword in your ``setup.cfg`` or ``setup.py``:
-.. code-block:: ini
+.. tab:: setup.cfg
- [options]
- namespace_packages = timmins
+ .. code-block:: ini
-.. code-block:: python
+ [options]
+ namespace_packages = timmins
+
+.. tab:: setup.py
+
+ .. code-block:: python
- setup(
- # ...
- namespace_packages = ['timmins']
- )
+ setup(
+ # ...
+ namespace_packages = ['timmins']
+ )
And your directory should look like this
diff --git a/docs/userguide/quickstart.rst b/docs/userguide/quickstart.rst
index 1d557d47bd..16cd4f7192 100644
--- a/docs/userguide/quickstart.rst
+++ b/docs/userguide/quickstart.rst
@@ -37,26 +37,45 @@ package your project:
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
-Then, you will need a ``setup.cfg`` to specify your package information,
-such as metadata, contents, dependencies, etc. Here we demonstrate the minimum
+Then, you will need a ``setup.cfg`` or ``setup.py`` to specify your package
+information, such as metadata, contents, dependencies, etc. Here we demonstrate
+the minimum
-.. code-block:: ini
+.. tab:: setup.cfg
- [metadata]
- name = mypackage
- version = 0.0.1
+ .. code-block:: ini
- [options]
- packages = mypackage
- install_requires =
- requests
- importlib; python_version == "2.6"
+ [metadata]
+ name = mypackage
+ version = 0.0.1
+
+ [options]
+ packages = mypackage
+ install_requires =
+ requests
+ importlib; python_version == "2.6"
+
+.. tab:: setup.py
+
+ .. code-block:: python
+
+ from setuptools import setup
+
+ setup(
+ name='mypackage"'
+ version='0.0.1',
+ packages=['mypackage'],
+ install_requires=[
+ 'requests',
+ 'importlib; python_version == "2.6"',
+ ],
+ )
This is what your project would look like::
~/mypackage/
pyproject.toml
- setup.cfg
+ setup.cfg # or setup.py
mypackage/__init__.py
Then, you need an installer, such as `pep517 <https://pypi.org/project/pep517/>`_
diff --git a/setup.cfg b/setup.cfg
index 14ff445daa..9de9f87f88 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -65,6 +65,7 @@ docs =
sphinx
jaraco.packaging >= 8.2
rst.linker >= 1.9
+ sphinx-inline-tabs
# local
pygments-github-lexers==0.0.5
|
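The documentation rewritten in the diff above relies on PEP 508 environment markers such as `python_version<'3.4'`. As an editorial illustration (not part of the diff), such a marker can be evaluated against the running interpreter with the `packaging` library, assuming it is installed:

```python
from packaging.markers import Marker

# Mirrors the conditional dependency shown in the docs above:
# enum34 is only needed on interpreters older than Python 3.4.
marker = Marker("python_version < '3.4'")
print(marker.evaluate())  # False on any Python >= 3.4 interpreter
```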
OpenMined__PySyft-155 | Set up CI for automated testing and style checks
Now that our codebase is growing (hooray!), we should set up CI for automated testing and style checks (PEP8, PEP257).
Choices include [CircleCI](https://circleci.com) and [TravisCI](https://travis-ci.org). These can be integrated into our repo such that every pull request will be checked before review.
| [
{
"content": "import os\nfrom setuptools import setup,find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nrequirements = read('requirements.txt').split()\n\nsetup(\n name = \"syft\",\n version = \"0.1.0\",\n author = \"Amber Trask\",\n author_email = \"[email protected]\",\n description = (\"A library for Homomorphically Encrypted Deep Learning Algorithms\"),\n license = \"Apache-2.0\",\n keywords = \"deep learning machine artificial intelligence homomorphic encryption\",\n packages=find_packages(exclude=['notebooks', 'test*','dist']),\n include_package_data=True,\n long_description=read('README.md'),\n url='github.com/OpenMined/Syft',\n classifiers=[\n \"Development Status :: 1 - Alpha\",\n ],\n scripts=['bin/syft_cmd'],\n install_requires=requirements,\n setup_requires=['pytest-runner'],\n tests_require=['pytest']\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nfrom setuptools import setup,find_packages\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nrequirements = read('requirements.txt').split()\n\nsetup(\n name = \"syft\",\n version = \"0.1.0\",\n author = \"Amber Trask\",\n author_email = \"[email protected]\",\n description = (\"A library for Homomorphically Encrypted Deep Learning Algorithms\"),\n license = \"Apache-2.0\",\n keywords = \"deep learning machine artificial intelligence homomorphic encryption\",\n packages=find_packages(exclude=['notebooks', 'test*','dist']),\n include_package_data=True,\n long_description=read('README.md'),\n url='github.com/OpenMined/Syft',\n classifiers=[\n \"Development Status :: 1 - Alpha\",\n ],\n scripts=['bin/syft_cmd'],\n install_requires=requirements,\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'pytest-flake8']\n)\n",
"path": "setup.py"
}
] | diff --git a/.circleci/config.yml b/.circleci/config.yml
old mode 100644
new mode 100755
index 2463e5be82e..48ff5d9bcb4
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -21,12 +21,16 @@ jobs:
- run:
name: Run tests
command: |
- docker run -it pysyft pytest tests/test_tensor.py
+ docker run -it pysyft pytest && py.test --flake8
- deploy:
name: Push application Docker image
command: |
- if [ "${CIRCLE_BRANCH}" == "master" ]; then
+ if [ "${CIRCLE_BRANCH}" == "develop" ]; then
docker login -e "${DOCKERHUB_EMAIL}" -u "${DOCKERHUB_USER}" -p "${DOCKERHUB_PASS}"
- docker tag pysyft "${DOCKERHUB_REPO}:${CIRCLE_TAG}"
- docker push "${DOCKERHUB_REPO}:${CIRCLE_TAG}"
+ docker tag pysyft "${DOCKERHUB_REPO}:edge"
+ docker push "${DOCKERHUB_REPO}:edge"
+ elif [ "${CIRCLE_BRANCH}" == "master" ]; then
+ docker login -e "${DOCKERHUB_EMAIL}" -u "${DOCKERHUB_USER}" -p "${DOCKERHUB_PASS}"
+ docker tag pysyft "${DOCKERHUB_REPO}:latest"
+ docker push "${DOCKERHUB_REPO}:latest"
fi
diff --git a/Dockerfile b/Dockerfile
old mode 100644
new mode 100755
index 9eac210a245..09efd8942d8
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,10 @@
FROM alpine:edge
RUN ["apk", "add", "--no-cache", "python3", "python3-dev", "musl-dev", "linux-headers", "g++", "gmp-dev", "mpfr-dev", "mpc1-dev", "ca-certificates"]
-RUN ["mkdir", "/syft"]
-COPY requirements.txt /syft
+RUN ["mkdir", "/PySyft"]
+COPY requirements.txt /PySyft
-WORKDIR /syft
+WORKDIR /PySyft
RUN ["pip3", "install", "-r", "requirements.txt"]
-COPY . /syft
+COPY . /PySyft
RUN ["python3", "setup.py", "install"]
diff --git a/setup.py b/setup.py
index 0a3aff4faec..48a4e06a3d1 100755
--- a/setup.py
+++ b/setup.py
@@ -28,5 +28,5 @@ def read(fname):
scripts=['bin/syft_cmd'],
install_requires=requirements,
setup_requires=['pytest-runner'],
- tests_require=['pytest']
+ tests_require=['pytest', 'pytest-flake8']
)
|
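The fix above adds `pytest-flake8` to the test requirements and to the CircleCI command. As a rough local equivalent (a sketch assuming `pytest` and `pytest-flake8` are installed), the combined test-and-style run can also be started from Python:

```python
import sys

import pytest

# --flake8 comes from the pytest-flake8 plugin added in the diff above;
# it makes every source file double as a style-check test case.
sys.exit(pytest.main(["--flake8", "tests"]))
```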
localstack__localstack-1075 | Update code climate and badge
https://codeclimate.com/github/atlassian/localstack is the old repo; is there a new Code Climate check for the new repo? The README still points to this old Code Climate project.
| [
{
"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.9'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n",
"path": "localstack/constants.py"
}
] | [
{
"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.10'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n",
"path": "localstack/constants.py"
}
] | diff --git a/README.md b/README.md
index 0ed9bde568b1e..8ed764360f628 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
-[](https://travis-ci.org/localstack/localstack) [](#backers) [](#sponsors) [](https://coveralls.io/github/atlassian/localstack?branch=master)
+[](https://travis-ci.org/localstack/localstack) [](#backers) [](#sponsors) [](https://coveralls.io/github/localstack/localstack?branch=master)
[](https://gitter.im/localstack/Platform)
[](https://badge.fury.io/py/localstack)
[](https://img.shields.io/pypi/l/localstack.svg)
-[](https://codeclimate.com/github/atlassian/localstack)
+[](https://codeclimate.com/github/localstack/localstack)
[](https://twitter.com/_localstack)
# LocalStack - A fully functional local AWS cloud stack
@@ -412,6 +412,7 @@ localstack web
## Change Log
+* v0.8.10: Add kclpy to pip package; fix badges in README
* v0.8.9: Replace moto-ext with upstream moto; fix SNS message attributes; fix swagger; make external SQS port configurable; support for SNS DeleteTopic; S3 notifications for multipart uploads; support requestContext in AWS_PROXY integration; update docs for SSL usage
* v0.8.8: Support Docker network config for Lambda containers; support queryStringParameters for Lambda AWS_PROXY apigateway; add AWS SecretsManager service; add SQS/Lambda integration; add support for Firehose Kinesis source; add GetAlias to Lambda API; add function properties to LambdaContext for invocations; fix extraction of Java Lambda archives; check region headers for SNS; fix Lambda output buffering; fix S3 download of gzip; bump ElasticMQ to 0.14.5; fix Lambda response codes; fix syntax issues for Python 3.7
* v0.8.7: Support .Net Core 2.0 and nodejs8.10 Lambdas; refactor Java libs and integrate with JUnit 5; support tags for ES domains; add CloudFormation support for SNS topics; fix kinesis error injection; fix override of `ES_JAVA_OPTS`; fix SQS CORS preflight response; fix S3 content md5 checks and Host header; fix ES startup issue; Bump elasticmq to 0.13.10; bump kinesalite version
@@ -520,7 +521,7 @@ Support this project by becoming a sponsor. Your logo will show up here with a l
## License
-Copyright (c) 2017 *LocalStack* maintainers and contributors.
+Copyright (c) 2017-2019 *LocalStack* maintainers and contributors.
Copyright (c) 2016 Atlassian and others.
@@ -528,26 +529,26 @@ This version of *LocalStack* is released under the Apache License, Version 2.0 (
By downloading and using this software you agree to the
[End-User License Agreement (EULA)](doc/end_user_license_agreement).
-We build on a number of third-party software tools, with the following licenses:
-
-Third-Party software | License
-----------------------------|-----------------------
-**Python/pip modules:** |
-airspeed | BSD License
-amazon_kclpy | Amazon Software License
-boto3 | Apache License 2.0
-coverage | Apache License 2.0
-docopt | MIT License
-elasticsearch | Apache License 2.0
-flask | BSD License
-flask_swagger | MIT License
-jsonpath-rw | Apache License 2.0
-moto | Apache License 2.0
-nose | GNU LGPL
-pep8 | Expat license
-requests | Apache License 2.0
-subprocess32 | PSF License
-**Node.js/npm modules:** |
-kinesalite | MIT License
-**Other tools:** |
-Elasticsearch | Apache License 2.0
+We build on a number of third-party software tools, including the following:
+
+Third-Party software | License
+--------------------------|-----------------------
+**Python/pip modules:** |
+airspeed | BSD License
+amazon_kclpy | Amazon Software License
+boto3 | Apache License 2.0
+coverage | Apache License 2.0
+docopt | MIT License
+elasticsearch | Apache License 2.0
+flask | BSD License
+flask_swagger | MIT License
+jsonpath-rw | Apache License 2.0
+moto | Apache License 2.0
+nose | GNU LGPL
+pep8 | Expat license
+requests | Apache License 2.0
+subprocess32 | PSF License
+**Node.js/npm modules:** |
+kinesalite | MIT License
+**Other tools:** |
+Elasticsearch | Apache License 2.0
diff --git a/localstack/constants.py b/localstack/constants.py
index 74af689d6dd86..e8fc1aeb6693f 100644
--- a/localstack/constants.py
+++ b/localstack/constants.py
@@ -2,7 +2,7 @@
import localstack_client.config
# LocalStack version
-VERSION = '0.8.9'
+VERSION = '0.8.10'
# default AWS region
if 'DEFAULT_REGION' not in os.environ:
diff --git a/requirements.txt b/requirements.txt
index 84a5f5fdc6ccc..03410e291ace6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
# from the dependencies when we build the pip package
airspeed==0.5.10
-amazon-kclpy==1.4.5 #extended-lib
+amazon-kclpy==1.5.0
awscli>=1.14.18
boto>=2.49.0
boto3>=1.9.71
|
pyca__cryptography-3035 | 1.4 Test failure TestRSACertificate.test_cert_serial_warning
While testing an upgrade of the GNU Guix python-cryptography package from 1.3.2 to 1.4, I encountered the following failure of `TestRSACertificate.test_cert_serial_warning[backend0]`.
```
=================================== FAILURES ===================================
____________ TestRSACertificate.test_cert_serial_warning[backend0] _____________
self = <tests.test_x509.TestRSACertificate object at 0x7fffe0fa7978>
backend = <cryptography.hazmat.backends.openssl.backend.Backend object at 0x7ffff3020278>
def test_cert_serial_warning(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
with warnings.catch_warnings():
warnings.simplefilter("always", utils.DeprecatedIn10)
> with pytest.deprecated_call():
E TypeError: deprecated_call() missing 1 required positional argument: 'func'
tests/test_x509.py:516: TypeError
============ 1 failed, 83123 passed, 3222 skipped in 164.91 seconds ============
```
The dependency graph is using Python 3.4.3 and Hypothesis 3.1.0. Please let me know what other information I can provide to help debug this failure.
| [
{
"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. 
The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise 
RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 4b632507fcf1..3bacae7b2ed6 100644
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
setup_requirements.append("cffi>=1.4.1")
test_requirements = [
- "pytest",
+ "pytest>=2.9.0",
"pretend",
"iso8601",
"pyasn1_modules",
|
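The failure above stems from calling `pytest.deprecated_call()` as a context manager, a form that only works on pytest 2.9 and newer, which is why the fix pins `pytest>=2.9.0`. A minimal sketch of the two call styles (the `emit` helper is hypothetical, for illustration only):

```python
import warnings

import pytest


def emit():
    warnings.warn("this API is deprecated", DeprecationWarning)


# Older style, the only one supported before pytest 2.9:
# pass the callable in directly.
pytest.deprecated_call(emit)

# Context-manager style used by the failing test, added in pytest 2.9.
with pytest.deprecated_call():
    emit()
```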
django-wiki__django-wiki-447 | Ordered list doesn't render properly
I am running django-wiki on Mac OS X 10.10.4 with a Django 1.8 project and a PostgreSQL database.
Here's what pip reports from my virtualenv:
```
-e git://github.com/django-wiki/django-wiki.git@23705786796424a3b36be77c1e78d7316f89bb00#egg=wiki-master
```
I can display an ordered list fine with:
```
1. Dog
1. Cat
1. Mouse
```
That is:
1. Dog
2. Cat
3. Mouse
However, if the ordered list is preceded by an unordered list:
```
- Dog
- Cat
- Mouse
1. Dog
1. Cat
1. Mouse
```
Then the ordered list is rendered as an unordered list:
- Dog
- Cat
- Mouse
- Dog
- Cat
- Mouse
| [
{
"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\nfrom django.conf import settings as django_settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\n# Should urls be case sensitive?\nURL_CASE_SENSITIVE = getattr(django_settings, 'WIKI_URL_CASE_SENSITIVE', False)\n\n# Non-configurable (at the moment)\nAPP_LABEL = 'wiki'\nWIKI_LANGUAGE = 'markdown'\n\n# The editor class to use -- maybe a 3rd party or your own...? You can always\n# extend the built-in editor and customize it....\nEDITOR = getattr(\n django_settings,\n 'WIKI_EDITOR',\n 'wiki.editors.markitup.MarkItUp')\n\nMARKDOWN_KWARGS = {\n 'extensions': [\n 'footnotes',\n 'attr_list',\n 'extra',\n 'codehilite',\n ],\n 'safe_mode': 'replace',\n 'extension_configs': {\n 'toc': {\n 'title': _('Table of Contents')}},\n}\nMARKDOWN_KWARGS.update(getattr(django_settings, 'WIKI_MARKDOWN_KWARGS', {}))\n\n# This slug is used in URLPath if an article has been deleted. The children of the\n# URLPath of that article are moved to lost and found. They keep their permissions\n# and all their content.\nLOST_AND_FOUND_SLUG = getattr(\n django_settings,\n 'WIKI_LOST_AND_FOUND_SLUG',\n 'lost-and-found')\n\n# When True, this blocks new slugs that resolve to non-wiki views, stopping\n# users creating articles that conflict with overlapping URLs from other apps.\nCHECK_SLUG_URL_AVAILABLE = getattr(\n django_settings,\n 'WIKI_CHECK_SLUG_URL_AVAILABLE',\n True)\n\n# Do we want to log IPs?\nLOG_IPS_ANONYMOUS = getattr(django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True)\nLOG_IPS_USERS = getattr(django_settings, 'WIKI_LOG_IPS_USERS', False)\n\n####################################\n# PERMISSIONS AND ACCOUNT HANDLING #\n####################################\n\n# NB! None of these callables need to handle anonymous users as they are treated\n# in separate settings...\n\n# A function returning True/False if a user has permission to\n# read contents of an article + plugins\n# Relevance: viewing articles and plugins\nCAN_READ = getattr(django_settings, 'WIKI_CAN_READ', None)\n\n# A function returning True/False if a user has permission to\n# change contents, ie add new revisions to an article\n# Often, plugins also use this\n# Relevance: editing articles, changing revisions, editing plugins\nCAN_WRITE = getattr(django_settings, 'WIKI_CAN_WRITE', None)\n\n# A function returning True/False if a user has permission to assign\n# permissions on an article\n# Relevance: changing owner and group membership\nCAN_ASSIGN = getattr(django_settings, 'WIKI_CAN_ASSIGN', None)\n\n# A function returning True/False if the owner of an article has permission to change\n# the group to a user's own groups\n# Relevance: changing group membership\nCAN_ASSIGN_OWNER = getattr(django_settings, 'WIKI_ASSIGN_OWNER', None)\n\n# A function returning True/False if a user has permission to change\n# read/write access for groups and others\nCAN_CHANGE_PERMISSIONS = getattr(\n django_settings,\n 'WIKI_CAN_CHANGE_PERMISSIONS',\n None)\n\n# Specifies if a user has access to soft deletion of articles\nCAN_DELETE = getattr(django_settings, 'WIKI_CAN_DELETE', None)\n\n# A function returning True/False if a user has permission to change\n# moderate, ie. 
lock articles and permanently delete content.\nCAN_MODERATE = getattr(django_settings, 'WIKI_CAN_MODERATE', None)\n\n# A function returning True/False if a user has permission to create\n# new groups and users for the wiki.\nCAN_ADMIN = getattr(django_settings, 'WIKI_CAN_ADMIN', None)\n\n# Treat anonymous (non logged in) users as the \"other\" user group\nANONYMOUS = getattr(django_settings, 'WIKI_ANONYMOUS', True)\n\n# Globally enable write access for anonymous users, if true anonymous users will be treated\n# as the others_write boolean field on models.Article.\nANONYMOUS_WRITE = getattr(django_settings, 'WIKI_ANONYMOUS_WRITE', False)\n\n# Globally enable create access for anonymous users\n# Defaults to ANONYMOUS_WRITE.\nANONYMOUS_CREATE = getattr(\n django_settings,\n 'WIKI_ANONYMOUS_CREATE',\n ANONYMOUS_WRITE)\n\n# Default setting to allow anonymous users upload access (used in\n# plugins.attachments and plugins.images).\nANONYMOUS_UPLOAD = getattr(django_settings, 'WIKI_ANONYMOUS_UPLOAD', False)\n\n# Sign up, login and logout views should be accessible\nACCOUNT_HANDLING = getattr(django_settings, 'WIKI_ACCOUNT_HANDLING', True)\n\n# Signup allowed? If it's not allowed, logged in superusers can still access\n# the signup page to create new users.\nACCOUNT_SIGNUP_ALLOWED = ACCOUNT_HANDLING and getattr(\n django_settings, 'WIKI_ACCOUNT_SIGNUP_ALLOWED', True\n)\n\nif ACCOUNT_HANDLING:\n LOGIN_URL = reverse_lazy(\"wiki:login\")\n LOGOUT_URL = reverse_lazy(\"wiki:logout\")\n SIGNUP_URL = reverse_lazy(\"wiki:signup\")\nelse:\n LOGIN_URL = getattr(django_settings, \"LOGIN_URL\", \"/\")\n LOGOUT_URL = getattr(django_settings, \"LOGOUT_URL\", \"/\")\n SIGNUP_URL = getattr(django_settings, \"WIKI_SIGNUP_URL\", \"/\")\n\n##################\n# OTHER SETTINGS #\n##################\n\n# Maximum amount of children to display in a menu before going \"+more\"\n# NEVER set this to 0 as it will wrongly inform the user that there are no\n# children and for instance that an article can be safely deleted.\nSHOW_MAX_CHILDREN = getattr(django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20)\n\nUSE_BOOTSTRAP_SELECT_WIDGET = getattr(\n django_settings,\n 'WIKI_USE_BOOTSTRAP_SELECT_WIDGET',\n True)\n\n#: dottedname of class used to construct urlpatterns for wiki.\n#:\n#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,\n#: you can derive from this.\nURL_CONFIG_CLASS = getattr(\n django_settings,\n 'WIKI_URL_CONFIG_CLASS',\n 'wiki.urls.WikiURLPatterns')\n\n# Search view - dotted path denoting where the search view Class is located\nSEARCH_VIEW = getattr(\n django_settings,\n 'WIKI_SEARCH_VIEW',\n 'wiki.views.article.SearchView'\n if not 'wiki.plugins.haystack' in django_settings.INSTALLED_APPS\n else\n 'wiki.plugins.haystack.views.HaystackSearchView'\n)\n\n# Seconds of timeout before renewing article cache. 
Articles are automatically\n# renewed whenever an edit occurs but article content may be generated from\n# other objects that are changed.\nCACHE_TIMEOUT = getattr(django_settings, 'WIKI_CACHE_TIMEOUT', 600)\n\n###################\n# SPAM PROTECTION #\n###################\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_HOUR = getattr(django_settings, 'WIKI_REVISIONS_PER_HOUR', 60)\n\n# Maximum allowed revisions per minute for any given user or IP\nREVISIONS_PER_MINUTES = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES',\n 5)\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_HOUR_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_HOUR_ANONYMOUS',\n 10)\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_MINUTES_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS',\n 2)\n\n# Number of minutes for looking up REVISIONS_PER_MINUTES and\n# REVISIONS_PER_MINUTES_ANONYMOUS\nREVISIONS_MINUTES_LOOKBACK = getattr(\n django_settings,\n 'WIKI_REVISIONS_MINUTES_LOOKBACK',\n 2)\n\n###########\n# STORAGE #\n###########\n\nfrom django.core.files.storage import default_storage\nSTORAGE_BACKEND = getattr(\n django_settings,\n 'WIKI_STORAGE_BACKEND',\n default_storage)\n\nUSE_SENDFILE = getattr(django_settings, 'WIKI_ATTACHMENTS_USE_SENDFILE', False)\n\n####################\n# PLANNED SETTINGS #\n####################\n\n# Maximum revisions to keep for an article, 0=unlimited\nMAX_REVISIONS = getattr(django_settings, 'WIKI_MAX_REVISIONS', 100)\n\n# Maximum age of revisions in days, 0=unlimited\nMAX_REVISION_AGE = getattr(django_settings, 'MAX_REVISION_AGE', 365)\n",
"path": "wiki/conf/settings.py"
}
] | [
{
"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n# -*- coding: utf-8 -*-\nfrom django.conf import settings as django_settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\n# Should urls be case sensitive?\nURL_CASE_SENSITIVE = getattr(django_settings, 'WIKI_URL_CASE_SENSITIVE', False)\n\n# Non-configurable (at the moment)\nAPP_LABEL = 'wiki'\nWIKI_LANGUAGE = 'markdown'\n\n# The editor class to use -- maybe a 3rd party or your own...? You can always\n# extend the built-in editor and customize it....\nEDITOR = getattr(\n django_settings,\n 'WIKI_EDITOR',\n 'wiki.editors.markitup.MarkItUp')\n\nMARKDOWN_KWARGS = {\n 'extensions': [\n 'footnotes',\n 'attr_list',\n 'extra',\n 'codehilite',\n 'sane_lists',\n ],\n 'safe_mode': 'replace',\n 'extension_configs': {\n 'toc': {\n 'title': _('Table of Contents')}},\n}\nMARKDOWN_KWARGS.update(getattr(django_settings, 'WIKI_MARKDOWN_KWARGS', {}))\n\n# This slug is used in URLPath if an article has been deleted. The children of the\n# URLPath of that article are moved to lost and found. They keep their permissions\n# and all their content.\nLOST_AND_FOUND_SLUG = getattr(\n django_settings,\n 'WIKI_LOST_AND_FOUND_SLUG',\n 'lost-and-found')\n\n# When True, this blocks new slugs that resolve to non-wiki views, stopping\n# users creating articles that conflict with overlapping URLs from other apps.\nCHECK_SLUG_URL_AVAILABLE = getattr(\n django_settings,\n 'WIKI_CHECK_SLUG_URL_AVAILABLE',\n True)\n\n# Do we want to log IPs?\nLOG_IPS_ANONYMOUS = getattr(django_settings, 'WIKI_LOG_IPS_ANONYMOUS', True)\nLOG_IPS_USERS = getattr(django_settings, 'WIKI_LOG_IPS_USERS', False)\n\n####################################\n# PERMISSIONS AND ACCOUNT HANDLING #\n####################################\n\n# NB! None of these callables need to handle anonymous users as they are treated\n# in separate settings...\n\n# A function returning True/False if a user has permission to\n# read contents of an article + plugins\n# Relevance: viewing articles and plugins\nCAN_READ = getattr(django_settings, 'WIKI_CAN_READ', None)\n\n# A function returning True/False if a user has permission to\n# change contents, ie add new revisions to an article\n# Often, plugins also use this\n# Relevance: editing articles, changing revisions, editing plugins\nCAN_WRITE = getattr(django_settings, 'WIKI_CAN_WRITE', None)\n\n# A function returning True/False if a user has permission to assign\n# permissions on an article\n# Relevance: changing owner and group membership\nCAN_ASSIGN = getattr(django_settings, 'WIKI_CAN_ASSIGN', None)\n\n# A function returning True/False if the owner of an article has permission to change\n# the group to a user's own groups\n# Relevance: changing group membership\nCAN_ASSIGN_OWNER = getattr(django_settings, 'WIKI_ASSIGN_OWNER', None)\n\n# A function returning True/False if a user has permission to change\n# read/write access for groups and others\nCAN_CHANGE_PERMISSIONS = getattr(\n django_settings,\n 'WIKI_CAN_CHANGE_PERMISSIONS',\n None)\n\n# Specifies if a user has access to soft deletion of articles\nCAN_DELETE = getattr(django_settings, 'WIKI_CAN_DELETE', None)\n\n# A function returning True/False if a user has permission to change\n# moderate, ie. 
lock articles and permanently delete content.\nCAN_MODERATE = getattr(django_settings, 'WIKI_CAN_MODERATE', None)\n\n# A function returning True/False if a user has permission to create\n# new groups and users for the wiki.\nCAN_ADMIN = getattr(django_settings, 'WIKI_CAN_ADMIN', None)\n\n# Treat anonymous (non logged in) users as the \"other\" user group\nANONYMOUS = getattr(django_settings, 'WIKI_ANONYMOUS', True)\n\n# Globally enable write access for anonymous users, if true anonymous users will be treated\n# as the others_write boolean field on models.Article.\nANONYMOUS_WRITE = getattr(django_settings, 'WIKI_ANONYMOUS_WRITE', False)\n\n# Globally enable create access for anonymous users\n# Defaults to ANONYMOUS_WRITE.\nANONYMOUS_CREATE = getattr(\n django_settings,\n 'WIKI_ANONYMOUS_CREATE',\n ANONYMOUS_WRITE)\n\n# Default setting to allow anonymous users upload access (used in\n# plugins.attachments and plugins.images).\nANONYMOUS_UPLOAD = getattr(django_settings, 'WIKI_ANONYMOUS_UPLOAD', False)\n\n# Sign up, login and logout views should be accessible\nACCOUNT_HANDLING = getattr(django_settings, 'WIKI_ACCOUNT_HANDLING', True)\n\n# Signup allowed? If it's not allowed, logged in superusers can still access\n# the signup page to create new users.\nACCOUNT_SIGNUP_ALLOWED = ACCOUNT_HANDLING and getattr(\n django_settings, 'WIKI_ACCOUNT_SIGNUP_ALLOWED', True\n)\n\nif ACCOUNT_HANDLING:\n LOGIN_URL = reverse_lazy(\"wiki:login\")\n LOGOUT_URL = reverse_lazy(\"wiki:logout\")\n SIGNUP_URL = reverse_lazy(\"wiki:signup\")\nelse:\n LOGIN_URL = getattr(django_settings, \"LOGIN_URL\", \"/\")\n LOGOUT_URL = getattr(django_settings, \"LOGOUT_URL\", \"/\")\n SIGNUP_URL = getattr(django_settings, \"WIKI_SIGNUP_URL\", \"/\")\n\n##################\n# OTHER SETTINGS #\n##################\n\n# Maximum amount of children to display in a menu before going \"+more\"\n# NEVER set this to 0 as it will wrongly inform the user that there are no\n# children and for instance that an article can be safely deleted.\nSHOW_MAX_CHILDREN = getattr(django_settings, 'WIKI_SHOW_MAX_CHILDREN', 20)\n\nUSE_BOOTSTRAP_SELECT_WIDGET = getattr(\n django_settings,\n 'WIKI_USE_BOOTSTRAP_SELECT_WIDGET',\n True)\n\n#: dottedname of class used to construct urlpatterns for wiki.\n#:\n#: Default is wiki.urls.WikiURLPatterns. To customize urls or view handlers,\n#: you can derive from this.\nURL_CONFIG_CLASS = getattr(\n django_settings,\n 'WIKI_URL_CONFIG_CLASS',\n 'wiki.urls.WikiURLPatterns')\n\n# Search view - dotted path denoting where the search view Class is located\nSEARCH_VIEW = getattr(\n django_settings,\n 'WIKI_SEARCH_VIEW',\n 'wiki.views.article.SearchView'\n if not 'wiki.plugins.haystack' in django_settings.INSTALLED_APPS\n else\n 'wiki.plugins.haystack.views.HaystackSearchView'\n)\n\n# Seconds of timeout before renewing article cache. 
Articles are automatically\n# renewed whenever an edit occurs but article content may be generated from\n# other objects that are changed.\nCACHE_TIMEOUT = getattr(django_settings, 'WIKI_CACHE_TIMEOUT', 600)\n\n###################\n# SPAM PROTECTION #\n###################\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_HOUR = getattr(django_settings, 'WIKI_REVISIONS_PER_HOUR', 60)\n\n# Maximum allowed revisions per minute for any given user or IP\nREVISIONS_PER_MINUTES = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES',\n 5)\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_HOUR_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_HOUR_ANONYMOUS',\n 10)\n\n# Maximum allowed revisions per hour for any given user or IP\nREVISIONS_PER_MINUTES_ANONYMOUS = getattr(\n django_settings,\n 'WIKI_REVISIONS_PER_MINUTES_ANONYMOUS',\n 2)\n\n# Number of minutes for looking up REVISIONS_PER_MINUTES and\n# REVISIONS_PER_MINUTES_ANONYMOUS\nREVISIONS_MINUTES_LOOKBACK = getattr(\n django_settings,\n 'WIKI_REVISIONS_MINUTES_LOOKBACK',\n 2)\n\n###########\n# STORAGE #\n###########\n\nfrom django.core.files.storage import default_storage\nSTORAGE_BACKEND = getattr(\n django_settings,\n 'WIKI_STORAGE_BACKEND',\n default_storage)\n\nUSE_SENDFILE = getattr(django_settings, 'WIKI_ATTACHMENTS_USE_SENDFILE', False)\n\n####################\n# PLANNED SETTINGS #\n####################\n\n# Maximum revisions to keep for an article, 0=unlimited\nMAX_REVISIONS = getattr(django_settings, 'WIKI_MAX_REVISIONS', 100)\n\n# Maximum age of revisions in days, 0=unlimited\nMAX_REVISION_AGE = getattr(django_settings, 'MAX_REVISION_AGE', 365)\n",
"path": "wiki/conf/settings.py"
}
] | diff --git a/wiki/conf/settings.py b/wiki/conf/settings.py
index 125a08c3c..93f9f9873 100644
--- a/wiki/conf/settings.py
+++ b/wiki/conf/settings.py
@@ -25,6 +25,7 @@
'attr_list',
'extra',
'codehilite',
+ 'sane_lists',
],
'safe_mode': 'replace',
'extension_configs': {
|
wagtail__wagtail-10050 | Django Unittest issues with `StructValue.__init__()`
<!--
Found a bug? Please fill out the sections below. 👍
-->
### Issue Summary
Recently we've bumped packages in our project, and some tests that use `setUpTestData` in the test suite have started failing.
Below I'll give more context.
In our project we use `django.TestCase` to test our headless API. Some of our tests require data to be populated before each test. To improve performance we've decided to use the `setUpTestData` method in some of the test suites. Small example:
```python
class TestSomeViewSet(TestCase):
    @classmethod
    def setUpTestData(cls):
        root_page = Page.objects.get(slug='root')
        cls.tested_page_1 = MyTestPage(
            title='test app resource 1', resources=json.dumps(MOCK_RESOURCES_1)  # some dict mocks imported from another module
        )
        cls.tested_page_2 = MyTestPage(
            title='test app resource 2', resources=json.dumps(MOCK_RESOURCES_2)
        )
        cls.parent_page = ParentPage(title='test.com')
        cls.another_parent_page = AnotherParentPage(title='region 1', timezone=settings.TIMEZONES[0])
        cls.language_1 = LanguagePage(title='language 1')
        cls.language_2 = LanguagePage(title='language 2')
        root_page.add_child(instance=cls.parent_page)
        cls.parent_page.add_child(instance=cls.another_parent_page)
        cls.another_parent_page.add_child(instance=cls.language_1)
        cls.another_parent_page.add_child(instance=cls.language_2)
        cls.language_1.add_child(instance=cls.tested_page_1)
        cls.language_2.add_child(instance=cls.tested_page_2)
        cls.parent_page.save()
        cls.another_parent_page.save()
        cls.language_1.save()
        cls.language_2.save()
        cls.tested_page_1.save()
        cls.tested_page_2.save()

    def test_list_view_returns_expected_response(self):
        url = reverse('app-resources-list')
        response = self.client.get(url)
        expected_response = get_configuration_list_expected_response(  # this function generates expected output
            [self.tested_page_1, self.tested_page_2]
        )
```
This test fails with the package versions described below in "Technical details". Here is the traceback:
```
/usr/local/lib/python3.10/site-packages/django/test/testcases.py:1384: in __get__
data = deepcopy(self.data, memo)
/usr/local/lib/python3.10/copy.py:172: in deepcopy
y = _reconstruct(x, memo, *rv)
/usr/local/lib/python3.10/copy.py:271: in _reconstruct
state = deepcopy(state, memo)
/usr/local/lib/python3.10/copy.py:146: in deepcopy
y = copier(x, memo)
/usr/local/lib/python3.10/copy.py:231: in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
/usr/local/lib/python3.10/copy.py:172: in deepcopy
y = _reconstruct(x, memo, *rv)
/usr/local/lib/python3.10/copy.py:271: in _reconstruct
state = deepcopy(state, memo)
/usr/local/lib/python3.10/copy.py:146: in deepcopy
y = copier(x, memo)
/usr/local/lib/python3.10/copy.py:231: in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
/usr/local/lib/python3.10/copy.py:146: in deepcopy
y = copier(x, memo)
/usr/local/lib/python3.10/copy.py:206: in _deepcopy_list
append(deepcopy(a, memo))
/usr/local/lib/python3.10/copy.py:172: in deepcopy
y = _reconstruct(x, memo, *rv)
/usr/local/lib/python3.10/copy.py:271: in _reconstruct
state = deepcopy(state, memo)
/usr/local/lib/python3.10/copy.py:146: in deepcopy
y = copier(x, memo)
/usr/local/lib/python3.10/copy.py:231: in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
/usr/local/lib/python3.10/copy.py:172: in deepcopy
y = _reconstruct(x, memo, *rv)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = StructValue([('name', 'application'), ('content', '{"config": "data"}')])
memo = {139851395249680: <Locale: English - United States>, 139851395379648: {'_state': <django.db.models.base.ModelState obj...le': <Locale: English - United States>, 'owner': None, 'site_page': <SiteConfig: test.com>}}, 139851395528192: {}, ...}
func = <class 'wagtail.blocks.struct_block.StructValue'>, args = ()
state = {'block': <applications.configuration.models.ResourceBlock object at 0x7f31b22fc400>}
listiter = None, dictiter = <odict_iterator object at 0x7f31b0c25760>
deepcopy = <function deepcopy at 0x7f31b6b82830>
def _reconstruct(x, memo, func, args,
state=None, listiter=None, dictiter=None,
deepcopy=deepcopy):
deep = memo is not None
if deep and args:
args = (deepcopy(arg, memo) for arg in args)
> y = func(*args)
E TypeError: StructValue.__init__() missing 1 required positional argument: 'block'
/usr/local/lib/python3.10/copy.py:265: TypeError
```
When we run those tests with `setUp` instead of `setUpTestData`, or change the versions of Wagtail and Django to:
- Wagtail <= 4.1 and Django < 4.1.x
- Wagtail 4.0.x and Django == 4.1
tests pass without any issues.
It looks like the `StructValue` class might not support deepcopying, but this is only a guess; when I was reading the Django release notes I found this (first point in the list): https://docs.djangoproject.com/en/4.1/releases/4.1/#features-removed-in-4-1
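For context: `copy.deepcopy` rebuilds a plain `dict`/`OrderedDict` subclass by calling its class with no positional arguments and then replaying the stored items, which blows up when `__init__` requires an extra argument such as `block`. A minimal, self-contained sketch of the failure mode and of the `__reduce__`-based workaround (the same approach the PR diff below applies to `StructValue`; the class here is illustrative):
```python
import copy
from collections import OrderedDict


class Value(OrderedDict):
    """OrderedDict subclass whose constructor needs an extra argument."""

    def __init__(self, block, *args):
        super().__init__(*args)
        self.block = block

    def __reduce__(self):
        # Tell copy/pickle how to rebuild this object: call
        # Value(block) first, then replay the stored items.
        # Without this override, deepcopy calls Value() with no
        # arguments and raises the TypeError seen in the traceback.
        return (self.__class__, (self.block,), None, None, iter(self.items()))


value = Value("section-block", [("name", "application")])
clone = copy.deepcopy(value)
assert clone is not value and clone == value and clone.block == value.block
```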
### Technical details
- Python version: 3.10.5.
- Django version: 4.1.4
- Wagtail version: 4.1.1
| [
{
"content": "import collections\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms.utils import ErrorList\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html, format_html_join\nfrom django.utils.safestring import mark_safe\n\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.telepath import Adapter, register\n\nfrom .base import Block, BoundBlock, DeclarativeSubBlocksMetaclass, get_help_icon\n\n__all__ = [\"BaseStructBlock\", \"StructBlock\", \"StructValue\"]\n\n\nclass StructBlockValidationError(ValidationError):\n def __init__(self, block_errors=None):\n self.block_errors = block_errors\n super().__init__(\"Validation error in StructBlock\", params=block_errors)\n\n\nclass StructBlockValidationErrorAdapter(Adapter):\n js_constructor = \"wagtail.blocks.StructBlockValidationError\"\n\n def js_args(self, error):\n if error.block_errors is None:\n return [None]\n else:\n return [\n {\n name: error_list.as_data()\n for name, error_list in error.block_errors.items()\n }\n ]\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/blocks.js\"),\n ]\n )\n\n\nregister(StructBlockValidationErrorAdapter(), StructBlockValidationError)\n\n\nclass StructValue(collections.OrderedDict):\n \"\"\"A class that generates a StructBlock value from provided sub-blocks\"\"\"\n\n def __init__(self, block, *args):\n super().__init__(*args)\n self.block = block\n\n def __html__(self):\n return self.block.render(self)\n\n def render_as_block(self, context=None):\n return self.block.render(self, context=context)\n\n @cached_property\n def bound_blocks(self):\n return collections.OrderedDict(\n [\n (name, block.bind(self.get(name)))\n for name, block in self.block.child_blocks.items()\n ]\n )\n\n\nclass PlaceholderBoundBlock(BoundBlock):\n \"\"\"\n Provides a render_form method that outputs a block placeholder, for use in custom form_templates\n \"\"\"\n\n def render_form(self):\n return format_html('<div data-structblock-child=\"{}\"></div>', self.block.name)\n\n\nclass BaseStructBlock(Block):\n def __init__(self, local_blocks=None, **kwargs):\n self._constructor_kwargs = kwargs\n\n super().__init__(**kwargs)\n\n # create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks\n self.child_blocks = self.base_blocks.copy()\n if local_blocks:\n for name, block in local_blocks:\n block.set_name(name)\n self.child_blocks[name] = block\n\n def get_default(self):\n \"\"\"\n Any default value passed in the constructor or self.meta is going to be a dict\n rather than a StructValue; for consistency, we need to convert it to a StructValue\n for StructBlock to work with\n \"\"\"\n return self._to_struct_value(\n [\n (\n name,\n self.meta.default[name]\n if name in self.meta.default\n else block.get_default(),\n )\n for name, block in self.child_blocks.items()\n ]\n )\n\n def value_from_datadict(self, data, files, prefix):\n return self._to_struct_value(\n [\n (name, block.value_from_datadict(data, files, \"%s-%s\" % (prefix, name)))\n for name, block in self.child_blocks.items()\n ]\n )\n\n def value_omitted_from_data(self, data, files, prefix):\n return all(\n block.value_omitted_from_data(data, files, \"%s-%s\" % (prefix, name))\n for name, block in self.child_blocks.items()\n )\n\n def clean(self, value):\n result = (\n []\n ) # build up a list of (name, value) tuples to be 
passed to the StructValue constructor\n errors = {}\n for name, val in value.items():\n try:\n result.append((name, self.child_blocks[name].clean(val)))\n except ValidationError as e:\n errors[name] = ErrorList([e])\n\n if errors:\n raise StructBlockValidationError(errors)\n\n return self._to_struct_value(result)\n\n def to_python(self, value):\n \"\"\"Recursively call to_python on children and return as a StructValue\"\"\"\n return self._to_struct_value(\n [\n (\n name,\n (\n child_block.to_python(value[name])\n if name in value\n else child_block.get_default()\n )\n # NB the result of get_default is NOT passed through to_python, as it's expected\n # to be in the block's native type already\n )\n for name, child_block in self.child_blocks.items()\n ]\n )\n\n def bulk_to_python(self, values):\n # values is a list of dicts; split this into a series of per-subfield lists so that we can\n # call bulk_to_python on each subfield\n\n values_by_subfield = {}\n for name, child_block in self.child_blocks.items():\n # We need to keep track of which dicts actually have an item for this field, as missing\n # values will be populated with child_block.get_default(); this is expected to be a\n # value in the block's native type, and should therefore not undergo conversion via\n # bulk_to_python.\n indexes = []\n raw_values = []\n for i, val in enumerate(values):\n if name in val:\n indexes.append(i)\n raw_values.append(val[name])\n\n converted_values = child_block.bulk_to_python(raw_values)\n # create a mapping from original index to converted value\n converted_values_by_index = dict(zip(indexes, converted_values))\n\n # now loop over all list indexes, falling back on the default for any indexes not in\n # the mapping, to arrive at the final list for this subfield\n values_by_subfield[name] = []\n for i in range(0, len(values)):\n try:\n converted_value = converted_values_by_index[i]\n except KeyError:\n converted_value = child_block.get_default()\n\n values_by_subfield[name].append(converted_value)\n\n # now form the final list of StructValues, with each one constructed by taking the\n # appropriately-indexed item from all of the per-subfield lists\n return [\n self._to_struct_value(\n {name: values_by_subfield[name][i] for name in self.child_blocks.keys()}\n )\n for i in range(0, len(values))\n ]\n\n def _to_struct_value(self, block_items):\n \"\"\"Return a Structvalue representation of the sub-blocks in this block\"\"\"\n return self.meta.value_class(self, block_items)\n\n def get_prep_value(self, value):\n \"\"\"Recursively call get_prep_value on children and return as a plain dict\"\"\"\n return {\n name: self.child_blocks[name].get_prep_value(val)\n for name, val in value.items()\n }\n\n def get_form_state(self, value):\n return {\n name: self.child_blocks[name].get_form_state(val)\n for name, val in value.items()\n }\n\n def get_api_representation(self, value, context=None):\n \"\"\"Recursively call get_api_representation on children and return as a plain dict\"\"\"\n return {\n name: self.child_blocks[name].get_api_representation(val, context=context)\n for name, val in value.items()\n }\n\n def get_searchable_content(self, value):\n content = []\n\n for name, block in self.child_blocks.items():\n content.extend(\n block.get_searchable_content(value.get(name, block.get_default()))\n )\n\n return content\n\n def extract_references(self, value):\n for name, block in self.child_blocks.items():\n for model, object_id, model_path, content_path in block.extract_references(\n value.get(name, 
block.get_default())\n ):\n model_path = f\"{name}.{model_path}\" if model_path else name\n content_path = f\"{name}.{content_path}\" if content_path else name\n yield model, object_id, model_path, content_path\n\n def deconstruct(self):\n \"\"\"\n Always deconstruct StructBlock instances as if they were plain StructBlocks with all of the\n field definitions passed to the constructor - even if in reality this is a subclass of StructBlock\n with the fields defined declaratively, or some combination of the two.\n\n This ensures that the field definitions get frozen into migrations, rather than leaving a reference\n to a custom subclass in the user's models.py that may or may not stick around.\n \"\"\"\n path = \"wagtail.blocks.StructBlock\"\n args = [list(self.child_blocks.items())]\n kwargs = self._constructor_kwargs\n return (path, args, kwargs)\n\n def check(self, **kwargs):\n errors = super().check(**kwargs)\n for name, child_block in self.child_blocks.items():\n errors.extend(child_block.check(**kwargs))\n errors.extend(child_block._check_name(**kwargs))\n\n return errors\n\n def render_basic(self, value, context=None):\n return format_html(\n \"<dl>\\n{}\\n</dl>\",\n format_html_join(\"\\n\", \" <dt>{}</dt>\\n <dd>{}</dd>\", value.items()),\n )\n\n def render_form_template(self):\n # Support for custom form_template options in meta. Originally form_template would have been\n # invoked once for each occurrence of this block in the stream data, but this rendering now\n # happens client-side, so we need to turn the Django template into one that can be used by\n # the client-side code. This is done by rendering it up-front with placeholder objects as\n # child blocks - these return <div data-structblock-child=\"first-name\"></div> from their\n # render_form_method.\n # The change to client-side rendering means that the `value` and `errors` arguments on\n # `get_form_context` no longer receive real data; these are passed the block's default value\n # and None respectively.\n context = self.get_form_context(\n self.get_default(), prefix=\"__PREFIX__\", errors=None\n )\n return mark_safe(render_to_string(self.meta.form_template, context))\n\n def get_form_context(self, value, prefix=\"\", errors=None):\n return {\n \"children\": collections.OrderedDict(\n [\n (\n name,\n PlaceholderBoundBlock(\n block, value.get(name), prefix=\"%s-%s\" % (prefix, name)\n ),\n )\n for name, block in self.child_blocks.items()\n ]\n ),\n \"help_text\": getattr(self.meta, \"help_text\", None),\n \"classname\": self.meta.form_classname,\n \"block_definition\": self,\n \"prefix\": prefix,\n }\n\n class Meta:\n default = {}\n form_classname = \"struct-block\"\n form_template = None\n value_class = StructValue\n label_format = None\n # No icon specified here, because that depends on the purpose that the\n # block is being used for. 
Feel encouraged to specify an icon in your\n # descendant block type\n icon = \"placeholder\"\n\n\nclass StructBlock(BaseStructBlock, metaclass=DeclarativeSubBlocksMetaclass):\n pass\n\n\nclass StructBlockAdapter(Adapter):\n js_constructor = \"wagtail.blocks.StructBlock\"\n\n def js_args(self, block):\n meta = {\n \"label\": block.label,\n \"required\": block.required,\n \"icon\": block.meta.icon,\n \"classname\": block.meta.form_classname,\n }\n\n help_text = getattr(block.meta, \"help_text\", None)\n if help_text:\n meta[\"helpText\"] = help_text\n meta[\"helpIcon\"] = get_help_icon()\n\n if block.meta.form_template:\n meta[\"formTemplate\"] = block.render_form_template()\n\n if block.meta.label_format:\n meta[\"labelFormat\"] = block.meta.label_format\n\n return [\n block.name,\n block.child_blocks.values(),\n meta,\n ]\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/blocks.js\"),\n ]\n )\n\n\nregister(StructBlockAdapter(), StructBlock)\n",
"path": "wagtail/blocks/struct_block.py"
}
] | [
{
"content": "import collections\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms.utils import ErrorList\nfrom django.template.loader import render_to_string\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html, format_html_join\nfrom django.utils.safestring import mark_safe\n\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.telepath import Adapter, register\n\nfrom .base import Block, BoundBlock, DeclarativeSubBlocksMetaclass, get_help_icon\n\n__all__ = [\"BaseStructBlock\", \"StructBlock\", \"StructValue\"]\n\n\nclass StructBlockValidationError(ValidationError):\n def __init__(self, block_errors=None):\n self.block_errors = block_errors\n super().__init__(\"Validation error in StructBlock\", params=block_errors)\n\n\nclass StructBlockValidationErrorAdapter(Adapter):\n js_constructor = \"wagtail.blocks.StructBlockValidationError\"\n\n def js_args(self, error):\n if error.block_errors is None:\n return [None]\n else:\n return [\n {\n name: error_list.as_data()\n for name, error_list in error.block_errors.items()\n }\n ]\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/blocks.js\"),\n ]\n )\n\n\nregister(StructBlockValidationErrorAdapter(), StructBlockValidationError)\n\n\nclass StructValue(collections.OrderedDict):\n \"\"\"A class that generates a StructBlock value from provided sub-blocks\"\"\"\n\n def __init__(self, block, *args):\n super().__init__(*args)\n self.block = block\n\n def __html__(self):\n return self.block.render(self)\n\n def render_as_block(self, context=None):\n return self.block.render(self, context=context)\n\n @cached_property\n def bound_blocks(self):\n return collections.OrderedDict(\n [\n (name, block.bind(self.get(name)))\n for name, block in self.block.child_blocks.items()\n ]\n )\n\n def __reduce__(self):\n return (self.__class__, (self.block,), None, None, iter(self.items()))\n\n\nclass PlaceholderBoundBlock(BoundBlock):\n \"\"\"\n Provides a render_form method that outputs a block placeholder, for use in custom form_templates\n \"\"\"\n\n def render_form(self):\n return format_html('<div data-structblock-child=\"{}\"></div>', self.block.name)\n\n\nclass BaseStructBlock(Block):\n def __init__(self, local_blocks=None, **kwargs):\n self._constructor_kwargs = kwargs\n\n super().__init__(**kwargs)\n\n # create a local (shallow) copy of base_blocks so that it can be supplemented by local_blocks\n self.child_blocks = self.base_blocks.copy()\n if local_blocks:\n for name, block in local_blocks:\n block.set_name(name)\n self.child_blocks[name] = block\n\n def get_default(self):\n \"\"\"\n Any default value passed in the constructor or self.meta is going to be a dict\n rather than a StructValue; for consistency, we need to convert it to a StructValue\n for StructBlock to work with\n \"\"\"\n return self._to_struct_value(\n [\n (\n name,\n self.meta.default[name]\n if name in self.meta.default\n else block.get_default(),\n )\n for name, block in self.child_blocks.items()\n ]\n )\n\n def value_from_datadict(self, data, files, prefix):\n return self._to_struct_value(\n [\n (name, block.value_from_datadict(data, files, \"%s-%s\" % (prefix, name)))\n for name, block in self.child_blocks.items()\n ]\n )\n\n def value_omitted_from_data(self, data, files, prefix):\n return all(\n block.value_omitted_from_data(data, files, \"%s-%s\" % (prefix, name))\n for name, block in self.child_blocks.items()\n )\n\n 
def clean(self, value):\n result = (\n []\n ) # build up a list of (name, value) tuples to be passed to the StructValue constructor\n errors = {}\n for name, val in value.items():\n try:\n result.append((name, self.child_blocks[name].clean(val)))\n except ValidationError as e:\n errors[name] = ErrorList([e])\n\n if errors:\n raise StructBlockValidationError(errors)\n\n return self._to_struct_value(result)\n\n def to_python(self, value):\n \"\"\"Recursively call to_python on children and return as a StructValue\"\"\"\n return self._to_struct_value(\n [\n (\n name,\n (\n child_block.to_python(value[name])\n if name in value\n else child_block.get_default()\n )\n # NB the result of get_default is NOT passed through to_python, as it's expected\n # to be in the block's native type already\n )\n for name, child_block in self.child_blocks.items()\n ]\n )\n\n def bulk_to_python(self, values):\n # values is a list of dicts; split this into a series of per-subfield lists so that we can\n # call bulk_to_python on each subfield\n\n values_by_subfield = {}\n for name, child_block in self.child_blocks.items():\n # We need to keep track of which dicts actually have an item for this field, as missing\n # values will be populated with child_block.get_default(); this is expected to be a\n # value in the block's native type, and should therefore not undergo conversion via\n # bulk_to_python.\n indexes = []\n raw_values = []\n for i, val in enumerate(values):\n if name in val:\n indexes.append(i)\n raw_values.append(val[name])\n\n converted_values = child_block.bulk_to_python(raw_values)\n # create a mapping from original index to converted value\n converted_values_by_index = dict(zip(indexes, converted_values))\n\n # now loop over all list indexes, falling back on the default for any indexes not in\n # the mapping, to arrive at the final list for this subfield\n values_by_subfield[name] = []\n for i in range(0, len(values)):\n try:\n converted_value = converted_values_by_index[i]\n except KeyError:\n converted_value = child_block.get_default()\n\n values_by_subfield[name].append(converted_value)\n\n # now form the final list of StructValues, with each one constructed by taking the\n # appropriately-indexed item from all of the per-subfield lists\n return [\n self._to_struct_value(\n {name: values_by_subfield[name][i] for name in self.child_blocks.keys()}\n )\n for i in range(0, len(values))\n ]\n\n def _to_struct_value(self, block_items):\n \"\"\"Return a Structvalue representation of the sub-blocks in this block\"\"\"\n return self.meta.value_class(self, block_items)\n\n def get_prep_value(self, value):\n \"\"\"Recursively call get_prep_value on children and return as a plain dict\"\"\"\n return {\n name: self.child_blocks[name].get_prep_value(val)\n for name, val in value.items()\n }\n\n def get_form_state(self, value):\n return {\n name: self.child_blocks[name].get_form_state(val)\n for name, val in value.items()\n }\n\n def get_api_representation(self, value, context=None):\n \"\"\"Recursively call get_api_representation on children and return as a plain dict\"\"\"\n return {\n name: self.child_blocks[name].get_api_representation(val, context=context)\n for name, val in value.items()\n }\n\n def get_searchable_content(self, value):\n content = []\n\n for name, block in self.child_blocks.items():\n content.extend(\n block.get_searchable_content(value.get(name, block.get_default()))\n )\n\n return content\n\n def extract_references(self, value):\n for name, block in self.child_blocks.items():\n for model, 
object_id, model_path, content_path in block.extract_references(\n value.get(name, block.get_default())\n ):\n model_path = f\"{name}.{model_path}\" if model_path else name\n content_path = f\"{name}.{content_path}\" if content_path else name\n yield model, object_id, model_path, content_path\n\n def deconstruct(self):\n \"\"\"\n Always deconstruct StructBlock instances as if they were plain StructBlocks with all of the\n field definitions passed to the constructor - even if in reality this is a subclass of StructBlock\n with the fields defined declaratively, or some combination of the two.\n\n This ensures that the field definitions get frozen into migrations, rather than leaving a reference\n to a custom subclass in the user's models.py that may or may not stick around.\n \"\"\"\n path = \"wagtail.blocks.StructBlock\"\n args = [list(self.child_blocks.items())]\n kwargs = self._constructor_kwargs\n return (path, args, kwargs)\n\n def check(self, **kwargs):\n errors = super().check(**kwargs)\n for name, child_block in self.child_blocks.items():\n errors.extend(child_block.check(**kwargs))\n errors.extend(child_block._check_name(**kwargs))\n\n return errors\n\n def render_basic(self, value, context=None):\n return format_html(\n \"<dl>\\n{}\\n</dl>\",\n format_html_join(\"\\n\", \" <dt>{}</dt>\\n <dd>{}</dd>\", value.items()),\n )\n\n def render_form_template(self):\n # Support for custom form_template options in meta. Originally form_template would have been\n # invoked once for each occurrence of this block in the stream data, but this rendering now\n # happens client-side, so we need to turn the Django template into one that can be used by\n # the client-side code. This is done by rendering it up-front with placeholder objects as\n # child blocks - these return <div data-structblock-child=\"first-name\"></div> from their\n # render_form_method.\n # The change to client-side rendering means that the `value` and `errors` arguments on\n # `get_form_context` no longer receive real data; these are passed the block's default value\n # and None respectively.\n context = self.get_form_context(\n self.get_default(), prefix=\"__PREFIX__\", errors=None\n )\n return mark_safe(render_to_string(self.meta.form_template, context))\n\n def get_form_context(self, value, prefix=\"\", errors=None):\n return {\n \"children\": collections.OrderedDict(\n [\n (\n name,\n PlaceholderBoundBlock(\n block, value.get(name), prefix=\"%s-%s\" % (prefix, name)\n ),\n )\n for name, block in self.child_blocks.items()\n ]\n ),\n \"help_text\": getattr(self.meta, \"help_text\", None),\n \"classname\": self.meta.form_classname,\n \"block_definition\": self,\n \"prefix\": prefix,\n }\n\n class Meta:\n default = {}\n form_classname = \"struct-block\"\n form_template = None\n value_class = StructValue\n label_format = None\n # No icon specified here, because that depends on the purpose that the\n # block is being used for. 
Feel encouraged to specify an icon in your\n # descendant block type\n icon = \"placeholder\"\n\n\nclass StructBlock(BaseStructBlock, metaclass=DeclarativeSubBlocksMetaclass):\n pass\n\n\nclass StructBlockAdapter(Adapter):\n js_constructor = \"wagtail.blocks.StructBlock\"\n\n def js_args(self, block):\n meta = {\n \"label\": block.label,\n \"required\": block.required,\n \"icon\": block.meta.icon,\n \"classname\": block.meta.form_classname,\n }\n\n help_text = getattr(block.meta, \"help_text\", None)\n if help_text:\n meta[\"helpText\"] = help_text\n meta[\"helpIcon\"] = get_help_icon()\n\n if block.meta.form_template:\n meta[\"formTemplate\"] = block.render_form_template()\n\n if block.meta.label_format:\n meta[\"labelFormat\"] = block.meta.label_format\n\n return [\n block.name,\n block.child_blocks.values(),\n meta,\n ]\n\n @cached_property\n def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailadmin/js/telepath/blocks.js\"),\n ]\n )\n\n\nregister(StructBlockAdapter(), StructBlock)\n",
"path": "wagtail/blocks/struct_block.py"
}
] | diff --git a/wagtail/blocks/struct_block.py b/wagtail/blocks/struct_block.py
index 9fe188e3bdf4..2a5824ef1413 100644
--- a/wagtail/blocks/struct_block.py
+++ b/wagtail/blocks/struct_block.py
@@ -70,6 +70,9 @@ def bound_blocks(self):
]
)
+ def __reduce__(self):
+ return (self.__class__, (self.block,), None, None, iter(self.items()))
+
class PlaceholderBoundBlock(BoundBlock):
"""
diff --git a/wagtail/tests/test_blocks.py b/wagtail/tests/test_blocks.py
index faa1e9f551b3..3febc2f72ba9 100644
--- a/wagtail/tests/test_blocks.py
+++ b/wagtail/tests/test_blocks.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*
import base64
import collections
+import copy
import json
import unittest
from decimal import Decimal
@@ -2157,6 +2158,19 @@ def test_render_structvalue_with_extra_context(self):
result = value.render_as_block(context={"language": "fr"})
self.assertEqual(result, """<h1 lang="fr">Bonjour</h1>monde <i>italique</i>""")
+ def test_copy_structvalue(self):
+ block = SectionBlock()
+ value = block.to_python({"title": "Hello", "body": "world"})
+ copied = copy.copy(value)
+
+ # Ensure we have a new object
+ self.assertIsNot(value, copied)
+
+ # Check copy operation
+ self.assertIsInstance(copied, blocks.StructValue)
+ self.assertIs(value.block, copied.block)
+ self.assertEqual(value, copied)
+
class TestStructBlockWithCustomStructValue(SimpleTestCase):
def test_initialisation(self):
|
flask-admin__flask-admin-1769 | flask-admin doesn't work on Python 2.7
Running flask-admin version `1.5.2` on Python `2.7` fails with
```
File "/usr/local/lib/python2.7/site-packages/flask_admin/contrib/sqla/form.py", line 2, in <module>
from enum import Enum
ImportError: No module named enum
```
Does flask-admin support Python 2.7?
The issue is line https://github.com/flask-admin/flask-admin/blob/829c24dbe4da9e28f705d07608e3326e4ff0b209/flask_admin/contrib/sqla/form.py#L2
I fixed the issue by installing the `enum34` package. flask-admin should have `enum34` as its dependency for Python versions below 3.4.
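For what it's worth, such a conditional dependency can also be declared with a PEP 508 environment marker instead of branching on `sys.version_info` in `setup.py` (the PR below takes the `sys.version_info` route); a sketch:
```python
# setup.py excerpt: declarative alternative using an environment marker
from setuptools import setup

setup(
    name='Flask-Admin',
    install_requires=[
        'Flask>=0.7',
        'wtforms',
        # Pulled in only on interpreters older than Python 3.4,
        # where the stdlib `enum` module does not exist.
        'enum34>=1.1.6; python_version < "3.4"',
    ],
)
```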
| [
{
"content": "# Fix for older setuptools\nimport re\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nextras_require = {\n 'aws': ['boto'],\n 'azure': ['azure-storage-blob']\n}\n\n\ninstall_requires = [\n 'Flask>=0.7',\n 'wtforms'\n]\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n extras_require=extras_require,\n install_requires=install_requires,\n tests_require=[\n 'nose>=1.0',\n 'pillow==2.9.0',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n test_suite='nose.collector'\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# Fix for older setuptools\nimport re\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef fpath(name):\n return os.path.join(os.path.dirname(__file__), name)\n\n\ndef read(fname):\n return open(fpath(fname)).read()\n\n\ndef desc():\n info = read('README.rst')\n try:\n return info + '\\n\\n' + read('doc/changelog.rst')\n except IOError:\n return info\n\n# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3\nfile_text = read(fpath('flask_admin/__init__.py'))\n\n\ndef grep(attrname):\n pattern = r\"{0}\\W*=\\W*'([^']+)'\".format(attrname)\n strval, = re.findall(pattern, file_text)\n return strval\n\n\nextras_require = {\n 'aws': ['boto'],\n 'azure': ['azure-storage-blob']\n}\n\n\ninstall_requires = [\n 'Flask>=0.7',\n 'wtforms'\n]\n\nif sys.version_info[0:2] < (3, 4):\n # required for python < 3.4\n install_requires.append('enum34>=1.1.6')\n\n\nsetup(\n name='Flask-Admin',\n version=grep('__version__'),\n url='https://github.com/flask-admin/flask-admin/',\n license='BSD',\n author=grep('__author__'),\n author_email=grep('__email__'),\n description='Simple and extensible admin interface framework for Flask',\n long_description=desc(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n extras_require=extras_require,\n install_requires=install_requires,\n tests_require=[\n 'nose>=1.0',\n 'pillow==2.9.0',\n 'mongoengine',\n 'pymongo',\n 'wtf-peewee',\n 'sqlalchemy',\n 'flask-mongoengine',\n 'flask-sqlalchemy',\n 'flask-babelex',\n 'shapely',\n 'geoalchemy2',\n 'psycopg2',\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n test_suite='nose.collector'\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index ea00a6f2d..2bec77fec 100644
--- a/setup.py
+++ b/setup.py
@@ -42,6 +42,11 @@ def grep(attrname):
'wtforms'
]
+if sys.version_info[0:2] < (3, 4):
+ # required for python < 3.4
+ install_requires.append('enum34>=1.1.6')
+
+
setup(
name='Flask-Admin',
version=grep('__version__'),
|
saleor__saleor-5259 | Customer shouldn't be able to query draft order.
Customers shouldn't be able to query draft orders, either by ID or by token.
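For reference, the change that enforces this for token lookups is small; this excerpt mirrors `resolve_order_by_token` from the PR diff below:
```python
from ...order import OrderStatus, models


def resolve_order_by_token(token):
    # Exclude draft orders so they can never be fetched by token.
    return (
        models.Order.objects.exclude(status=OrderStatus.DRAFT)
        .filter(token=token)
        .first()
    )
```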
| [
{
"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...order import OrderStatus, models\nfrom ...order.events import OrderEvents\nfrom ...order.models import OrderEvent\nfrom ...order.utils import sum_order_totals\nfrom ..utils import filter_by_period, filter_by_query_param, sort_queryset\nfrom .enums import OrderStatusFilter\nfrom .sorters import OrderSortField\nfrom .types import Order\n\nORDER_SEARCH_FIELDS = (\"id\", \"discount_name\", \"token\", \"user_email\", \"user__email\")\n\n\ndef filter_orders(qs, info, created, status, query):\n qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by status\n if status is not None:\n if status == OrderStatusFilter.READY_TO_FULFILL:\n qs = qs.ready_to_fulfill()\n elif status == OrderStatusFilter.READY_TO_CAPTURE:\n qs = qs.ready_to_capture()\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by creation date\n if created is not None:\n qs = filter_by_period(qs, created, \"created\")\n\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_orders(info, created, status, query, sort_by=None):\n qs = models.Order.objects.confirmed()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, status, query)\n\n\ndef resolve_draft_orders(info, created, query, sort_by=None):\n qs = models.Order.objects.drafts()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, None, query)\n\n\ndef resolve_orders_total(_info, period):\n qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)\n qs = filter_by_period(qs, period, \"created\")\n return sum_order_totals(qs)\n\n\ndef resolve_order(info, order_id):\n return graphene.Node.get_node_from_global_id(info, order_id, Order)\n\n\ndef resolve_homepage_events():\n # Filter only selected events to be displayed on homepage.\n types = [\n OrderEvents.PLACED,\n OrderEvents.PLACED_FROM_DRAFT,\n OrderEvents.ORDER_FULLY_PAID,\n ]\n return OrderEvent.objects.filter(type__in=types)\n\n\ndef resolve_order_by_token(token):\n return models.Order.objects.filter(token=token).first()\n",
"path": "saleor/graphql/order/resolvers.py"
}
] | [
{
"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...order import OrderStatus, models\nfrom ...order.events import OrderEvents\nfrom ...order.models import OrderEvent\nfrom ...order.utils import sum_order_totals\nfrom ..utils import filter_by_period, filter_by_query_param, sort_queryset\nfrom .enums import OrderStatusFilter\nfrom .sorters import OrderSortField\nfrom .types import Order\n\nORDER_SEARCH_FIELDS = (\"id\", \"discount_name\", \"token\", \"user_email\", \"user__email\")\n\n\ndef filter_orders(qs, info, created, status, query):\n qs = filter_by_query_param(qs, query, ORDER_SEARCH_FIELDS)\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by status\n if status is not None:\n if status == OrderStatusFilter.READY_TO_FULFILL:\n qs = qs.ready_to_fulfill()\n elif status == OrderStatusFilter.READY_TO_CAPTURE:\n qs = qs.ready_to_capture()\n\n # DEPRECATED: Will be removed in Saleor 2.11, use the `filter` field instead.\n # filter orders by creation date\n if created is not None:\n qs = filter_by_period(qs, created, \"created\")\n\n return gql_optimizer.query(qs, info)\n\n\ndef resolve_orders(info, created, status, query, sort_by=None):\n qs = models.Order.objects.confirmed()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, status, query)\n\n\ndef resolve_draft_orders(info, created, query, sort_by=None):\n qs = models.Order.objects.drafts()\n qs = sort_queryset(qs, sort_by, OrderSortField)\n return filter_orders(qs, info, created, None, query)\n\n\ndef resolve_orders_total(_info, period):\n qs = models.Order.objects.confirmed().exclude(status=OrderStatus.CANCELED)\n qs = filter_by_period(qs, period, \"created\")\n return sum_order_totals(qs)\n\n\ndef resolve_order(info, order_id):\n return graphene.Node.get_node_from_global_id(info, order_id, Order)\n\n\ndef resolve_homepage_events():\n # Filter only selected events to be displayed on homepage.\n types = [\n OrderEvents.PLACED,\n OrderEvents.PLACED_FROM_DRAFT,\n OrderEvents.ORDER_FULLY_PAID,\n ]\n return OrderEvent.objects.filter(type__in=types)\n\n\ndef resolve_order_by_token(token):\n return (\n models.Order.objects.exclude(status=OrderStatus.DRAFT)\n .filter(token=token)\n .first()\n )\n",
"path": "saleor/graphql/order/resolvers.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 21401312eb2..a367e6d2e81 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@ All notable, unreleased changes to this project will be documented in this file.
- Return `null` when querying `me` as an anonymous user - #5231 by @maarcingebala
- Added `PLAYGROUND_ENABLED` environment variable/setting to allow to enable the GraphQL playground when `DEBUG` is disabled - #5254 by @NyanKiyoshi
- Fix access to order query when request from service account - #5258 by @fowczarek
+- Customer shouldn't be able to see draft orders by token - #5259 by @fowczarek
## 2.9.0
diff --git a/saleor/graphql/order/resolvers.py b/saleor/graphql/order/resolvers.py
index a5b9f205744..3b7034be16a 100644
--- a/saleor/graphql/order/resolvers.py
+++ b/saleor/graphql/order/resolvers.py
@@ -65,4 +65,8 @@ def resolve_homepage_events():
def resolve_order_by_token(token):
- return models.Order.objects.filter(token=token).first()
+ return (
+ models.Order.objects.exclude(status=OrderStatus.DRAFT)
+ .filter(token=token)
+ .first()
+ )
diff --git a/tests/api/test_order.py b/tests/api/test_order.py
index 1d8b9379513..30d52a1e34f 100644
--- a/tests/api/test_order.py
+++ b/tests/api/test_order.py
@@ -2027,6 +2027,36 @@ def test_authorized_access_to_order_by_token(
assert content["data"]["orderByToken"]["user"]["id"] == customer_user_id
+def test_query_draft_order_by_token_with_requester_as_customer(
+ user_api_client, draft_order
+):
+ draft_order.user = user_api_client.user
+ draft_order.save(update_fields=["user"])
+ query = """
+ query OrderByToken($token: UUID!) {
+ orderByToken(token: $token) {
+ id
+ }
+ }
+ """
+ response = user_api_client.post_graphql(query, {"token": draft_order.token})
+ content = get_graphql_content(response)
+ assert not content["data"]["orderByToken"]
+
+
+def test_query_draft_order_by_token_as_anonymous_customer(api_client, draft_order):
+ query = """
+ query OrderByToken($token: UUID!) {
+ orderByToken(token: $token) {
+ id
+ }
+ }
+ """
+ response = api_client.post_graphql(query, {"token": draft_order.token})
+ content = get_graphql_content(response)
+ assert not content["data"]["orderByToken"]
+
+
MUTATION_CANCEL_ORDERS = """
mutation CancelManyOrders($ids: [ID]!, $restock: Boolean!) {
orderBulkCancel(ids: $ids, restock: $restock) {
|
django-oscar__django-oscar-3365 | Support for Django 3.0
Hi,
Just a low life dev wondering if you have an expected time frame for Django 3.0 support to be released ?
I saw #3303 got merged into master a couple month ago.
Anything I can do to help the delivery process ?
Should I copy master if I want to use Django 3.0 for the time being ?
Thanks for all the wonderful work you guys do on this project ! :)
Update Readme first page of quickstart
This is what works on windows 10
| [
{
"content": "# Use 'alpha', 'beta', 'rc' or 'final' as the 4th element to indicate release type.\nVERSION = (2, 0, 1, 'final')\n\n\ndef get_short_version():\n return '%s.%s' % (VERSION[0], VERSION[1])\n\n\ndef get_version():\n version = '%s.%s' % (VERSION[0], VERSION[1])\n # Append 3rd digit if > 0\n if VERSION[2]:\n version = '%s.%s' % (version, VERSION[2])\n elif VERSION[3] != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}\n version = '%s%s' % (version, mapping[VERSION[3]])\n if len(VERSION) == 5:\n version = '%s%s' % (version, VERSION[4])\n return version\n\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n\n 'oscar.config.Shop',\n 'oscar.apps.analytics.apps.AnalyticsConfig',\n 'oscar.apps.checkout.apps.CheckoutConfig',\n 'oscar.apps.address.apps.AddressConfig',\n 'oscar.apps.shipping.apps.ShippingConfig',\n 'oscar.apps.catalogue.apps.CatalogueConfig',\n 'oscar.apps.catalogue.reviews.apps.CatalogueReviewsConfig',\n 'oscar.apps.communication.apps.CommunicationConfig',\n 'oscar.apps.partner.apps.PartnerConfig',\n 'oscar.apps.basket.apps.BasketConfig',\n 'oscar.apps.payment.apps.PaymentConfig',\n 'oscar.apps.offer.apps.OfferConfig',\n 'oscar.apps.order.apps.OrderConfig',\n 'oscar.apps.customer.apps.CustomerConfig',\n 'oscar.apps.search.apps.SearchConfig',\n 'oscar.apps.voucher.apps.VoucherConfig',\n 'oscar.apps.wishlists.apps.WishlistsConfig',\n 'oscar.apps.dashboard.apps.DashboardConfig',\n 'oscar.apps.dashboard.reports.apps.ReportsDashboardConfig',\n 'oscar.apps.dashboard.users.apps.UsersDashboardConfig',\n 'oscar.apps.dashboard.orders.apps.OrdersDashboardConfig',\n 'oscar.apps.dashboard.catalogue.apps.CatalogueDashboardConfig',\n 'oscar.apps.dashboard.offers.apps.OffersDashboardConfig',\n 'oscar.apps.dashboard.partners.apps.PartnersDashboardConfig',\n 'oscar.apps.dashboard.pages.apps.PagesDashboardConfig',\n 'oscar.apps.dashboard.ranges.apps.RangesDashboardConfig',\n 'oscar.apps.dashboard.reviews.apps.ReviewsDashboardConfig',\n 'oscar.apps.dashboard.vouchers.apps.VouchersDashboardConfig',\n 'oscar.apps.dashboard.communications.apps.CommunicationsDashboardConfig',\n 'oscar.apps.dashboard.shipping.apps.ShippingDashboardConfig',\n\n # 3rd-party apps that oscar depends on\n 'widget_tweaks',\n 'haystack',\n 'treebeard',\n 'django_tables2',\n]\n\n\ndefault_app_config = 'oscar.config.Shop'\n",
"path": "src/oscar/__init__.py"
}
] | [
{
"content": "# Use 'alpha', 'beta', 'rc' or 'final' as the 4th element to indicate release type.\nVERSION = (2, 1, 0, 'beta')\n\n\ndef get_short_version():\n return '%s.%s' % (VERSION[0], VERSION[1])\n\n\ndef get_version():\n version = '%s.%s' % (VERSION[0], VERSION[1])\n # Append 3rd digit if > 0\n if VERSION[2]:\n version = '%s.%s' % (version, VERSION[2])\n elif VERSION[3] != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}\n version = '%s%s' % (version, mapping[VERSION[3]])\n if len(VERSION) == 5:\n version = '%s%s' % (version, VERSION[4])\n return version\n\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n\n 'oscar.config.Shop',\n 'oscar.apps.analytics.apps.AnalyticsConfig',\n 'oscar.apps.checkout.apps.CheckoutConfig',\n 'oscar.apps.address.apps.AddressConfig',\n 'oscar.apps.shipping.apps.ShippingConfig',\n 'oscar.apps.catalogue.apps.CatalogueConfig',\n 'oscar.apps.catalogue.reviews.apps.CatalogueReviewsConfig',\n 'oscar.apps.communication.apps.CommunicationConfig',\n 'oscar.apps.partner.apps.PartnerConfig',\n 'oscar.apps.basket.apps.BasketConfig',\n 'oscar.apps.payment.apps.PaymentConfig',\n 'oscar.apps.offer.apps.OfferConfig',\n 'oscar.apps.order.apps.OrderConfig',\n 'oscar.apps.customer.apps.CustomerConfig',\n 'oscar.apps.search.apps.SearchConfig',\n 'oscar.apps.voucher.apps.VoucherConfig',\n 'oscar.apps.wishlists.apps.WishlistsConfig',\n 'oscar.apps.dashboard.apps.DashboardConfig',\n 'oscar.apps.dashboard.reports.apps.ReportsDashboardConfig',\n 'oscar.apps.dashboard.users.apps.UsersDashboardConfig',\n 'oscar.apps.dashboard.orders.apps.OrdersDashboardConfig',\n 'oscar.apps.dashboard.catalogue.apps.CatalogueDashboardConfig',\n 'oscar.apps.dashboard.offers.apps.OffersDashboardConfig',\n 'oscar.apps.dashboard.partners.apps.PartnersDashboardConfig',\n 'oscar.apps.dashboard.pages.apps.PagesDashboardConfig',\n 'oscar.apps.dashboard.ranges.apps.RangesDashboardConfig',\n 'oscar.apps.dashboard.reviews.apps.ReviewsDashboardConfig',\n 'oscar.apps.dashboard.vouchers.apps.VouchersDashboardConfig',\n 'oscar.apps.dashboard.communications.apps.CommunicationsDashboardConfig',\n 'oscar.apps.dashboard.shipping.apps.ShippingDashboardConfig',\n\n # 3rd-party apps that oscar depends on\n 'widget_tweaks',\n 'haystack',\n 'treebeard',\n 'django_tables2',\n]\n\n\ndefault_app_config = 'oscar.config.Shop'\n",
"path": "src/oscar/__init__.py"
}
] | diff --git a/docs/source/howto/how_to_configure_surcharges.rst b/docs/source/howto/how_to_configure_surcharges.rst
index daf68631a9b..6d708371cce 100644
--- a/docs/source/howto/how_to_configure_surcharges.rst
+++ b/docs/source/howto/how_to_configure_surcharges.rst
@@ -1,3 +1,5 @@
+.. _how_to_surcharges:
+
=========================
How to configure surcharges
=========================
@@ -25,7 +27,7 @@ This method is called in several places:
* To give the applicable surcharges to the order total calculator so wo can show the correct price breakdown.
-The ``get_applicable_surcharges`` method takes the basket and any other kwargs.
+The ``get_applicable_surcharges`` method takes the basket and any other kwargs.
These kwargs can later be determined when setting up your own surcharges.
Note that you can also implement surcharges as models just like shipping methods.
@@ -33,7 +35,7 @@ Note that you can also implement surcharges as models just like shipping methods
Custom applicators
-------------------
-If the available surcharges are the same for all customers and payment
+If the available surcharges are the same for all customers and payment
methods, then override the ``get_surcharges`` method of the repository:
.. code-block:: python
@@ -62,7 +64,7 @@ For more complex logic, override the ``is_applicable`` method:
return True
else:
return False
-
+
Surcharges
----------------
@@ -72,8 +74,8 @@ following properties which define the metadata about the surcharge:
* ``name`` - The name of the surcharges. This will be visible to the
customer during checkout and is translatable
-
-* ``code`` - The code of the surcharge. This could be the slugified name or anything else.
+
+* ``code`` - The code of the surcharge. This could be the slugified name or anything else.
The code is used as a non-translatable identifier for a charge.
Further, each surcharge must implement a ``calculate`` method which accepts the
@@ -91,7 +93,7 @@ subclassed and customised:
* :class:`~oscar.apps.checkout.surcharges.PercentageCharge` - percentage based charge
* :class:`~oscar.apps.checkout.surcharges.FlatCharge` - flat surcharge
-
+
Example usage:
.. code-block:: python
diff --git a/docs/source/internals/contributing/development-environment.rst b/docs/source/internals/contributing/development-environment.rst
index 31cdc493660..2a341a63df1 100644
--- a/docs/source/internals/contributing/development-environment.rst
+++ b/docs/source/internals/contributing/development-environment.rst
@@ -38,7 +38,7 @@ As the sandbox is a vanilla Oscar site, it is what we use to build migrations
against::
$ make sandbox
- $ sites/sandbox/manage.py schemamigration $YOURAPP --auto
+ $ sandbox/manage.py makemigrations
Writing LESS/CSS
----------------
@@ -52,7 +52,7 @@ If you want to develop the LESS files, set::
OSCAR_USE_LESS = True
-in ``sites/sandbox/settings_local.py``. This will include the on-the-fly
+in ``sandbox/settings_local.py``. This will include the on-the-fly
``less`` pre-processor. That will allow you to see changes to the LESS
files after a page reload.
diff --git a/docs/source/internals/getting_started.rst b/docs/source/internals/getting_started.rst
index 5ad36b464cd..f6f9da888ff 100644
--- a/docs/source/internals/getting_started.rst
+++ b/docs/source/internals/getting_started.rst
@@ -25,7 +25,7 @@ project:
$ mkvirtualenv oscar
$ pip install django-oscar
- $ django-admin.py startproject frobshop
+ $ django-admin startproject frobshop
If you do not have :command:`mkvirtualenv`, then replace that line with::
@@ -241,18 +241,18 @@ you will also need to include Django's i18n URLs:
.. code-block:: django
from django.apps import apps
- from django.urls import include, path
+ from django.urls import include, path
from django.contrib import admin
urlpatterns = [
- path('i18n/', include('django.conf.urls.i18n')),
+ path('i18n/', include('django.conf.urls.i18n')),
# The Django admin is not officially supported; expect breakage.
# Nonetheless, it's often useful for debugging.
- path('admin/', admin.site.urls),
+ path('admin/', admin.site.urls),
- path('', include(apps.get_app_config('oscar').urls[0])),
+ path('', include(apps.get_app_config('oscar').urls[0])),
]
diff --git a/docs/source/releases/index.rst b/docs/source/releases/index.rst
index 8b95b82c2cd..1e3cee25544 100644
--- a/docs/source/releases/index.rst
+++ b/docs/source/releases/index.rst
@@ -5,6 +5,14 @@ Release notes
Release notes for each version of Oscar published to PyPI.
+2.1 release branch
+
+.. toctree::
+ :maxdepth: 1
+
+ v2.1
+
+
2.0 release branch
.. toctree::
diff --git a/docs/source/releases/v0.7.rst b/docs/source/releases/v0.7.rst
index 73de41c4042..b37e7b80bae 100644
--- a/docs/source/releases/v0.7.rst
+++ b/docs/source/releases/v0.7.rst
@@ -192,7 +192,7 @@ Minor changes
* Experimental support for having a language prefix in the URL has been added,
and enabled for the sandbox. This can be achieved by using Django's
`i18n_patterns`_ function in your ``urls.py``. for the sandbox.
- See ``sites/sandbox/urls.py`` for an example.
+ See ``sandbox/urls.py`` for an example.
* A basic example for a multi-language sitemap has been added to the sandbox.
diff --git a/docs/source/releases/v2.1.rst b/docs/source/releases/v2.1.rst
index 6ec0ebfe6b1..07290a4d89a 100644
--- a/docs/source/releases/v2.1.rst
+++ b/docs/source/releases/v2.1.rst
@@ -1,10 +1,11 @@
-========================================
-Oscar 2.1 release notes (in development)
-========================================
+=======================
+Oscar 2.1 release notes
+=======================
:release: tbd
-Welcome to Oscar 2.1.
+Welcome to Oscar 2.1! This is a significant release which includes a number of
+new features and performance improvements.
.. contents::
:local:
@@ -15,12 +16,20 @@ Welcome to Oscar 2.1.
Compatibility
~~~~~~~~~~~~~
+Oscar 2.1 is compatible with Django versions 2.2 and 3.0, and Python versions 3.5, 3.6, 3.7 and 3.8.
+
+Support for Django versions 1.11 and 2.0 has been dropped.
.. _new_in_2.1:
What's new in Oscar 2.1?
~~~~~~~~~~~~~~~~~~~~~~~~~~
+- The ability to add arbitrary surcharges to orders (e.g., a processing fee for
+ a particular payment method) was introduced.
+ See :ref:`how_to_surcharges` for details on how to configure this.
+ This change requires a database migration.
+
- The database performance of ``offer.Range.all_products()`` was substantially
improved. The internals of that method have changed and specifically
``Range.invalidate_cached_ids()`` has been removed and replaced with
@@ -30,12 +39,13 @@ What's new in Oscar 2.1?
``ProductAttributeValue`` models was changed to use a callable, so that
Django doesn't generate migrations if a project modifies the ``OSCAR_IMAGE_FOLDER``
to specify a custom directory structure for uploaded images.
+ This change requires a database migration.
- ``catalogue.Category`` now has an ``is_public`` boolean field that serves a
- similar purpose to ``catalogue.Product.is_public`` - i.e., hides them from
+ similar purpose to ``catalogue.Product.is_public`` - i.e., to hide categories
public views. The ``Category`` model also now has a custom manager
that provides a ``browsable()`` queryset method that excludes non-public
- categories. This changes requires a database migration.
+ categories. This change requires a database migration.
Category hierarchy implies that the children of any non-public category are
also non-public. This is enforced through an ``ancestors_are_public`` field
@@ -58,7 +68,7 @@ as follows:
moved from the ``customer`` app to the ``communication`` app. In order to
preserve existing data, the table names for these models are unchanged.
- This is a change that requires database migration.
+ This change requires a database migration.
- The ``Dispatcher`` class moved from ``customer.utils`` to
``communication.utils``. ``Dispatcher`` is now responsible for sending
@@ -148,6 +158,11 @@ Removal of deprecated features
URLs that are not provided by a subclass of ``oscar.core.application.OscarDashboardConfig``
will result in a ``KeyError``.
+- ``customer.forms.PasswordResetForm.get_reset_url`` has been removed.
+
+- The internal and undocumented class ``oscar.core.compat.UnicodeCSVReader``
+ has been removed. Use ``csv.reader`` instead.
+
Minor changes
~~~~~~~~~~~~~
@@ -176,12 +191,32 @@ Minor changes
- Fixed the ``brand_title`` block in ``partials/brand.html`` so that it doesn't span unclosed HTML tags.
+- ``customer.views.ProfileUpdateView.form_valid`` was modified
+ to use a new ``send_email_changed_email`` method.
+
+- ``customer.views.ChangePasswordView.form_valid`` was modified
+ to use a new ``send_password_changed_email`` method.
+
Dependency changes
~~~~~~~~~~~~~~~~~~
-- Upgraded ``django-phonenumber-field`` to use the latest in the 3.x series.
-- Upgraded ``select2`` to version 4.0.10.
-- Upgraded ``inputmask`` to version 4.0.8.
+Python package dependencies:
+
+- Upgraded ``pillow`` to version 6.0.0 or higher.
+- Upgraded ``django-extra-views`` to version 0.13.
+- Upgraded ``django-haystack`` to version 3.0 or higher.
+- Upgraded ``django-phonenumber-field`` to version 3.0.
+- Upgraded ``django-tables2`` to version 2.2.
+- Upgraded ``sorl-thumbnail`` (optional requirement) to version 12.6.
+- Upgraded ``easy-thumbnails`` (optional requirement) to version 2.7.
+
+
+Javascript dependencies:
+
+- Upgraded ``jquery`` to version 3.5.
+- Upgraded ``inputmask`` to version 5.0.
+- Upgraded ``select2`` to version 4.0.
+- Upgraded ``tinymce`` to version 5.2.
.. _deprecated_features_in_2.1:
@@ -203,14 +238,3 @@ Deprecated features
- ``customer.notifications.services.notify_users`` is deprecated.
Use ``Dispatcher.notify_users`` instead.
-
-- ``customer.forms.PasswordResetForm.get_reset_url`` has been removed.
-
-- ``customer.views.ProfileUpdateView.form_valid`` was modified
- to use a new ``send_email_changed_email`` method.
-
-- ``customer.views.ChangePasswordView.form_valid`` was modified
- to use a new ``send_password_changed_email`` method.
-
-- The internal class ``oscar.core.compat.UnicodeSCVReader`` has been removed.
- Use ``csv.reader`` instead.
diff --git a/src/oscar/__init__.py b/src/oscar/__init__.py
index ab67522bace..372aede1df5 100644
--- a/src/oscar/__init__.py
+++ b/src/oscar/__init__.py
@@ -1,5 +1,5 @@
# Use 'alpha', 'beta', 'rc' or 'final' as the 4th element to indicate release type.
-VERSION = (2, 0, 1, 'final')
+VERSION = (2, 1, 0, 'beta')
def get_short_version():
|
doccano__doccano-1209 | ModuleNotFoundError: No module named 'auto_labeling_pipeline'
How to reproduce the behaviour
---------
I just installed `Doccano==1.2.0` (released just now) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error when running `doccano` in the shell.
```
>>> doccano
```
```
Setup Database.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Create admin user.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Starting server with port 8000.
Exception in thread django-main-thread:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py", line 110, in inner_run
autoreload.raise_last_exception()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 76, in raise_last_exception
raise _exception[1]
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 357, in execute
autoreload.check_errors(django.setup)()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
```
Your Environment
---------
* Operating System: Ubuntu 18.04.5 LTS
* Python Version Used: 3.6.9
* When you install doccano: 02/19/21 7:40 AM GMT
* How did you install doccano (Heroku button etc): `pip install doccano`
* Doccano version: 1.2.0
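
For reference, the fix recorded below simply adds the missing package to `install_requires` in setup.py; a minimal sketch of the corrected dependency list (only the tail of the list is shown):

```python
# Sketch of the corrected install_requires tail in setup.py -- the last
# entry is the one the fix adds; everything else was already present.
required = [
    # ...existing dependencies elided...
    'social-auth-app-django>=4.0.0',
    'whitenoise>=5.2.0',
    'auto-labeling-pipeline>=0.1.12',  # the package behind the ModuleNotFoundError
]
```

Until a patched release is published, installing `auto-labeling-pipeline` into the same environment should work around the crash.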
| [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0',\n 'auto-labeling-pipeline>=0.1.12'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 15a723b0ec..de5f683cf3 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,8 @@
'python-jose>=3.2.0',
'seqeval>=1.2.2',
'social-auth-app-django>=4.0.0',
- 'whitenoise>=5.2.0'
+ 'whitenoise>=5.2.0',
+ 'auto-labeling-pipeline>=0.1.12'
]
setup(
|
sktime__sktime-170 | Documentation needs to be fixed
A leftover merge-conflict marker in a docstring of `sktime.utils.validation.forecasting` needs to be fixed. Check the image below.
Instead of `<<<<<<< HEAD:sktime/utils/validation/forecasting.py` we should see `Returns`.

| [
{
"content": "import numpy as np\nimport pandas as pd\nfrom sklearn.utils.validation import check_is_fitted\n\n__author__ = \"Markus Löning\"\n__all__ = [\"validate_y\", \"validate_X\", \"validate_y_X\", \"validate_fh\"]\n\n\ndef validate_y_X(y, X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n X : pandas DataFrame\n\n<<<<<<< HEAD:sktime/utils/validation/forecasting.py\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n validate_y(y)\n validate_X(X)\n\n\ndef validate_y(y):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n # Check if pandas series\n if not isinstance(y, pd.Series):\n raise ValueError(f'y must be a pandas Series, but found: {type(y)}')\n\n # Check if single row\n if not y.shape[0] == 1:\n raise ValueError(f'y must consist of a pandas Series with a single row, '\n f'but found: {y.shape[0]} rows')\n\n # Check if contained time series is either pandas series or numpy array\n s = y.iloc[0]\n if not isinstance(s, (np.ndarray, pd.Series)):\n raise ValueError(f'y must contain a pandas Series or numpy array, '\n f'but found: {type(s)}.')\n\n\ndef validate_X(X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n if X is not None:\n if not isinstance(X, pd.DataFrame):\n raise ValueError(f\"`X` must a pandas DataFrame, but found: {type(X)}\")\n if X.shape[0] > 1:\n raise ValueError(f\"`X` must consist of a single row, but found: {X.shape[0]} rows\")\n\n # Check if index is the same for all columns.\n\n # Get index from first row, can be either pd.Series or np.array.\n first_index = X.iloc[0, 0].index if hasattr(X.iloc[0, 0], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n\n # Series must contain at least 2 observations, otherwise should be primitive.\n if len(first_index) < 1:\n raise ValueError(f'Time series must contain at least 2 observations, but found: '\n f'{len(first_index)} observations in column: {X.columns[0]}')\n\n # Compare with remaining columns\n for c, col in enumerate(X.columns):\n index = X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n if not np.array_equal(first_index, index):\n raise ValueError(f'Found time series with unequal index in column {col}. 
'\n f'Input time-series must have the same index.')\n\n\ndef validate_sp(sp):\n \"\"\"Validate seasonal periodicity.\n\n Parameters\n ----------\n sp : int\n Seasonal periodicity\n\n Returns\n -------\n sp : int\n Validated seasonal periodicity\n \"\"\"\n\n if sp is None:\n return sp\n\n else:\n if not isinstance(sp, int) and (sp >= 0):\n raise ValueError(f\"Seasonal periodicity (sp) has to be a positive integer, but found: \"\n f\"{sp} of type: {type(sp)}\")\n return sp\n\n\ndef validate_fh(fh):\n \"\"\"Validate forecasting horizon.\n\n Parameters\n ----------\n fh : int or list of int\n Forecasting horizon with steps ahead to predict.\n\n Returns\n -------\n fh : numpy array of int\n Sorted and validated forecasting horizon.\n \"\"\"\n\n # Check single integer\n if np.issubdtype(type(fh), np.integer):\n return np.array([fh], dtype=np.int)\n\n # Check array-like input\n elif isinstance(fh, list):\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.all([np.issubdtype(type(h), np.integer) for h in fh]):\n raise ValueError('If `fh` is passed as a list, '\n 'it has to be a list of integers')\n\n elif isinstance(fh, np.ndarray):\n if fh.ndim > 1:\n raise ValueError(f\"`fh` must be a 1d array, but found: \"\n f\"{fh.ndim} dimensions\")\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.issubdtype(fh.dtype, np.integer):\n raise ValueError(\n f'If `fh` is passed as an array, it has to be an array of '\n f'integers, but found an array of dtype: {fh.dtype}')\n\n else:\n raise ValueError(f\"`fh` has to be either a list or array of integers, or a single \"\n f\"integer, but found: {type(fh)}\")\n\n return np.asarray(np.sort(fh), dtype=np.int)\n\n\ndef check_is_fitted_in_transform(estimator, attributes, msg=None, all_or_any=all):\n \"\"\"Checks if the estimator is fitted during transform by verifying the presence of\n \"all_or_any\" of the passed attributes and raises a NotFittedError with the\n given message.\n \n Parameters\n ----------\n estimator : estimator instance.\n estimator instance for which the check is performed.\n attributes : attribute name(s) given as string or a list/tuple of strings\n Eg.:\n ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n msg : string\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this method.\"\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n all_or_any : callable, {all, any}, default all\n Specify whether all or any of the given attributes must exist.\n Returns\n -------\n None\n \n Raises\n ------\n NotFittedError\n If the attributes are not found. \n \"\"\"\n if msg is None:\n msg = (\"This %(name)s instance has not been fitted yet. 
Call 'transform' with \"\n \"appropriate arguments before using this method.\")\n\n check_is_fitted(estimator, attributes=attributes, msg=msg, all_or_any=all_or_any)\n\n\ndef validate_time_index(time_index):\n \"\"\"Validate time index\n\n Parameters\n ----------\n time_index : array-like\n\n Returns\n -------\n time_index : ndarray\n \"\"\"\n # period or datetime index are not support yet\n # TODO add support for period/datetime indexing\n if isinstance(time_index, (pd.PeriodIndex, pd.DatetimeIndex)):\n raise NotImplementedError(f\"{type(time_index)} is not fully supported yet, \"\n f\"use pandas RangeIndex instead\")\n\n return np.asarray(time_index)\n\n\ndef check_consistent_time_indices(x, y):\n \"\"\"Check that x and y have consistent indices.\n\n Parameters\n ----------\n x : pandas Series\n y : pandas Series\n\n Raises:\n -------\n ValueError\n If time indicies are not equal\n \"\"\"\n\n if not x.index.equals(y.index):\n raise ValueError(f\"Found input variables with inconsistent indices\")\n",
"path": "sktime/utils/validation/forecasting.py"
}
] | [
{
"content": "import numpy as np\nimport pandas as pd\nfrom sklearn.utils.validation import check_is_fitted\n\n__author__ = \"Markus Löning\"\n__all__ = [\"validate_y\", \"validate_X\", \"validate_y_X\", \"validate_fh\"]\n\n\ndef validate_y_X(y, X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n validate_y(y)\n validate_X(X)\n\n\ndef validate_y(y):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n # Check if pandas series\n if not isinstance(y, pd.Series):\n raise ValueError(f'y must be a pandas Series, but found: {type(y)}')\n\n # Check if single row\n if not y.shape[0] == 1:\n raise ValueError(f'y must consist of a pandas Series with a single row, '\n f'but found: {y.shape[0]} rows')\n\n # Check if contained time series is either pandas series or numpy array\n s = y.iloc[0]\n if not isinstance(s, (np.ndarray, pd.Series)):\n raise ValueError(f'y must contain a pandas Series or numpy array, '\n f'but found: {type(s)}.')\n\n\ndef validate_X(X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n if X is not None:\n if not isinstance(X, pd.DataFrame):\n raise ValueError(f\"`X` must a pandas DataFrame, but found: {type(X)}\")\n if X.shape[0] > 1:\n raise ValueError(f\"`X` must consist of a single row, but found: {X.shape[0]} rows\")\n\n # Check if index is the same for all columns.\n\n # Get index from first row, can be either pd.Series or np.array.\n first_index = X.iloc[0, 0].index if hasattr(X.iloc[0, 0], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n\n # Series must contain at least 2 observations, otherwise should be primitive.\n if len(first_index) < 1:\n raise ValueError(f'Time series must contain at least 2 observations, but found: '\n f'{len(first_index)} observations in column: {X.columns[0]}')\n\n # Compare with remaining columns\n for c, col in enumerate(X.columns):\n index = X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n if not np.array_equal(first_index, index):\n raise ValueError(f'Found time series with unequal index in column {col}. 
'\n f'Input time-series must have the same index.')\n\n\ndef validate_sp(sp):\n \"\"\"Validate seasonal periodicity.\n\n Parameters\n ----------\n sp : int\n Seasonal periodicity\n\n Returns\n -------\n sp : int\n Validated seasonal periodicity\n \"\"\"\n\n if sp is None:\n return sp\n\n else:\n if not isinstance(sp, int) and (sp >= 0):\n raise ValueError(f\"Seasonal periodicity (sp) has to be a positive integer, but found: \"\n f\"{sp} of type: {type(sp)}\")\n return sp\n\n\ndef validate_fh(fh):\n \"\"\"Validate forecasting horizon.\n\n Parameters\n ----------\n fh : int or list of int\n Forecasting horizon with steps ahead to predict.\n\n Returns\n -------\n fh : numpy array of int\n Sorted and validated forecasting horizon.\n \"\"\"\n\n # Check single integer\n if np.issubdtype(type(fh), np.integer):\n return np.array([fh], dtype=np.int)\n\n # Check array-like input\n elif isinstance(fh, list):\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.all([np.issubdtype(type(h), np.integer) for h in fh]):\n raise ValueError('If `fh` is passed as a list, '\n 'it has to be a list of integers')\n\n elif isinstance(fh, np.ndarray):\n if fh.ndim > 1:\n raise ValueError(f\"`fh` must be a 1d array, but found: \"\n f\"{fh.ndim} dimensions\")\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.issubdtype(fh.dtype, np.integer):\n raise ValueError(\n f'If `fh` is passed as an array, it has to be an array of '\n f'integers, but found an array of dtype: {fh.dtype}')\n\n else:\n raise ValueError(f\"`fh` has to be either a list or array of integers, or a single \"\n f\"integer, but found: {type(fh)}\")\n\n return np.asarray(np.sort(fh), dtype=np.int)\n\n\ndef check_is_fitted_in_transform(estimator, attributes, msg=None, all_or_any=all):\n \"\"\"Checks if the estimator is fitted during transform by verifying the presence of\n \"all_or_any\" of the passed attributes and raises a NotFittedError with the\n given message.\n \n Parameters\n ----------\n estimator : estimator instance.\n estimator instance for which the check is performed.\n attributes : attribute name(s) given as string or a list/tuple of strings\n Eg.:\n ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n msg : string\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this method.\"\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n all_or_any : callable, {all, any}, default all\n Specify whether all or any of the given attributes must exist.\n Returns\n -------\n None\n \n Raises\n ------\n NotFittedError\n If the attributes are not found. \n \"\"\"\n if msg is None:\n msg = (\"This %(name)s instance has not been fitted yet. 
Call 'transform' with \"\n \"appropriate arguments before using this method.\")\n\n check_is_fitted(estimator, attributes=attributes, msg=msg, all_or_any=all_or_any)\n\n\ndef validate_time_index(time_index):\n \"\"\"Validate time index\n\n Parameters\n ----------\n time_index : array-like\n\n Returns\n -------\n time_index : ndarray\n \"\"\"\n # period or datetime index are not support yet\n # TODO add support for period/datetime indexing\n if isinstance(time_index, (pd.PeriodIndex, pd.DatetimeIndex)):\n raise NotImplementedError(f\"{type(time_index)} is not fully supported yet, \"\n f\"use pandas RangeIndex instead\")\n\n return np.asarray(time_index)\n\n\ndef check_consistent_time_indices(x, y):\n \"\"\"Check that x and y have consistent indices.\n\n Parameters\n ----------\n x : pandas Series\n y : pandas Series\n\n Raises:\n -------\n ValueError\n If time indicies are not equal\n \"\"\"\n\n if not x.index.equals(y.index):\n raise ValueError(f\"Found input variables with inconsistent indices\")\n",
"path": "sktime/utils/validation/forecasting.py"
}
] | diff --git a/sktime/utils/validation/forecasting.py b/sktime/utils/validation/forecasting.py
index 19846e22115..34814f1f118 100644
--- a/sktime/utils/validation/forecasting.py
+++ b/sktime/utils/validation/forecasting.py
@@ -14,7 +14,6 @@ def validate_y_X(y, X):
y : pandas Series or numpy ndarray
X : pandas DataFrame
-<<<<<<< HEAD:sktime/utils/validation/forecasting.py
Returns
-------
None
|
docker__docker-py-1156 | Requests v2.11.0 causes breakage
This is a known issue within requests, and it looks like it will be fixed in v2.11.1. Documenting for the benefit of anyone else who runs into this :).
After a recent pip upgrade I found that docker-py was passing along an error from requests:
    File "/usr/local/lib/python2.7/dist-packages/docker/client.py", line 307, in _stream_raw_result
        for out in response.iter_content(chunk_size=1, decode_unicode=True):
    File "/usr/local/lib/python2.7/dist-packages/requests/utils.py", line 372, in stream_decode_response_unicode
        raise UnicodeError("Unable to decode contents with encoding %s." % encoding)
    UnicodeError: Unable to decode contents with encoding None.
This has already been reported to requests (https://github.com/kennethreitz/requests/issues/3481) and fixed (https://github.com/kennethreitz/requests/commit/d7f56ba9383575a6b7d361db0123a93c70a2b42f) for the next version.
If you are running into this issue, the easiest fix for now appears to be reverting to a pre-2.11 version of requests.
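
The pin eventually applied in this repository's setup.py (visible in the diff below) caps requests below the broken release; a minimal sketch of the changed requirements list:

```python
# setup.py: cap requests below the broken 2.11 release until the
# upstream fix ships in 2.11.1.
requirements = [
    'requests >= 2.5.2, < 2.11',
    'six >= 1.4.0',
    'websocket-client >= 0.32.0',
]
```

Locally, the same effect comes from installing with `pip install 'requests<2.11'` until 2.11.1 is out.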
| [
{
"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter'\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter'\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index ac58b1f94..85a449942 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2',
+ 'requests >= 2.5.2, < 2.11',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
]
|
jupyterhub__jupyterhub-882 | jupyterhub update-db fails
When I run it, I get:
```
FileNotFoundError: [Errno 2] No such file or directory: '/home/yuvipanda/code/jupyterhub-kubernetes-spawner/lib/python3.5/site-packages/jupyterhub/alembic.ini'
```
I didn't capture the full stack trace though.
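
The missing file is `alembic.ini`: it was not listed in the package data, so installed copies of JupyterHub shipped without it. The fix (visible in the diff below) adds it; a sketch of the corrected helper in setup.py:

```python
def get_package_data():
    """Get package data (mostly alembic config)."""
    package_data = {}
    package_data['jupyterhub'] = [
        'alembic.ini',  # previously missing from the installed package
        'alembic/*',
        'alembic/versions/*',
    ]
    return package_data
```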
| [
{
"content": "#!/usr/bin/env python3\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: JupyterHub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\nfrom glob import glob\nfrom subprocess import check_call\n\nfrom setuptools import setup\nfrom setuptools.command.bdist_egg import bdist_egg\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter', 'hub')\nstatic = pjoin(share_jupyter, 'static')\n\nis_repo = os.path.exists(pjoin(here, '.git'))\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here + os.path.sep)\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\ndef get_package_data():\n \"\"\"Get package data\n\n (mostly alembic config)\n \"\"\"\n package_data = {}\n package_data['jupyterhub'] = [\n 'alembic/*',\n 'alembic/versions/*',\n ]\n return package_data\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n package_data = get_package_data(),\n version = ns['__version__'],\n description = \"JupyterHub: A multi-user server for Jupyter notebooks\",\n long_description = \"See https://jupyterhub.readthedocs.io for more info.\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\n\nnpm_path = ':'.join([\n pjoin(here, 'node_modules', '.bin'),\n os.environ.get(\"PATH\", 
os.defpath),\n])\n\n\ndef mtime(path):\n \"\"\"shorthand for mtime\"\"\"\n return os.stat(path).st_mtime\n\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n bower_dir = pjoin(static, 'components')\n node_modules = pjoin(here, 'node_modules')\n \n def should_run(self):\n if not os.path.exists(self.bower_dir):\n return True\n return mtime(self.bower_dir) < mtime(pjoin(here, 'bower.json'))\n\n def should_run_npm(self):\n if not shutil.which('npm'):\n print(\"npm unavailable\", file=sys.stderr)\n return False\n if not os.path.exists(self.node_modules):\n return True\n return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))\n \n def run(self):\n if not self.should_run():\n print(\"bower dependencies up to date\")\n return\n \n if self.should_run_npm():\n print(\"installing build dependencies with npm\")\n check_call(['npm', 'install', '--progress=false'], cwd=here)\n os.utime(self.node_modules)\n \n env = os.environ.copy()\n env['PATH'] = npm_path\n \n try:\n check_call(\n ['bower', 'install', '--allow-root', '--config.interactive=false'],\n cwd=here,\n env=env,\n )\n except OSError as e:\n print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n os.utime(self.bower_dir)\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n def should_run(self):\n \"\"\"Does less need to run?\"\"\"\n # from IPython.html.tasks.py\n \n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n \n # check if any .less files are newer than the generated targets\n for (dirpath, dirnames, filenames) in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n \n return False\n \n def run(self):\n if not self.should_run():\n print(\"CSS up-to-date\")\n return\n \n self.run_command('js')\n \n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n \n env = os.environ.copy()\n env['PATH'] = npm_path\n try:\n check_call([\n 'lessc', '--clean-css',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ], cwd=here, env=env)\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\ndef js_css_first(cls, strict=True):\n class Command(cls):\n def run(self):\n try:\n self.run_command('js')\n self.run_command('css')\n except Exception:\n if strict:\n raise\n else:\n pass\n return super().run()\n return 
Command\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. Use `pip install .` to install from source.\")\n\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n 'build_py': js_css_first(build_py, strict=is_repo),\n 'sdist': js_css_first(sdist, strict=True),\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n}\n\n\n# setuptools requirements\n\nsetup_args['zip_safe'] = False\nfrom setuptools.command.develop import develop\nclass develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\nsetup_args['cmdclass']['develop'] = develop_js_css\nsetup_args['install_requires'] = install_requires = []\n\nwith open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: JupyterHub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\nfrom glob import glob\nfrom subprocess import check_call\n\nfrom setuptools import setup\nfrom setuptools.command.bdist_egg import bdist_egg\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter', 'hub')\nstatic = pjoin(share_jupyter, 'static')\n\nis_repo = os.path.exists(pjoin(here, '.git'))\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here + os.path.sep)\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\ndef get_package_data():\n \"\"\"Get package data\n\n (mostly alembic config)\n \"\"\"\n package_data = {}\n package_data['jupyterhub'] = [\n 'alembic.ini',\n 'alembic/*',\n 'alembic/versions/*',\n ]\n return package_data\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n package_data = get_package_data(),\n version = ns['__version__'],\n description = \"JupyterHub: A multi-user server for Jupyter notebooks\",\n long_description = \"See https://jupyterhub.readthedocs.io for more info.\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\n\nnpm_path = ':'.join([\n pjoin(here, 'node_modules', '.bin'),\n 
os.environ.get(\"PATH\", os.defpath),\n])\n\n\ndef mtime(path):\n \"\"\"shorthand for mtime\"\"\"\n return os.stat(path).st_mtime\n\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n bower_dir = pjoin(static, 'components')\n node_modules = pjoin(here, 'node_modules')\n \n def should_run(self):\n if not os.path.exists(self.bower_dir):\n return True\n return mtime(self.bower_dir) < mtime(pjoin(here, 'bower.json'))\n\n def should_run_npm(self):\n if not shutil.which('npm'):\n print(\"npm unavailable\", file=sys.stderr)\n return False\n if not os.path.exists(self.node_modules):\n return True\n return mtime(self.node_modules) < mtime(pjoin(here, 'package.json'))\n \n def run(self):\n if not self.should_run():\n print(\"bower dependencies up to date\")\n return\n \n if self.should_run_npm():\n print(\"installing build dependencies with npm\")\n check_call(['npm', 'install', '--progress=false'], cwd=here)\n os.utime(self.node_modules)\n \n env = os.environ.copy()\n env['PATH'] = npm_path\n \n try:\n check_call(\n ['bower', 'install', '--allow-root', '--config.interactive=false'],\n cwd=here,\n env=env,\n )\n except OSError as e:\n print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n os.utime(self.bower_dir)\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n def should_run(self):\n \"\"\"Does less need to run?\"\"\"\n # from IPython.html.tasks.py\n \n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n \n # check if any .less files are newer than the generated targets\n for (dirpath, dirnames, filenames) in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n \n return False\n \n def run(self):\n if not self.should_run():\n print(\"CSS up-to-date\")\n return\n \n self.run_command('js')\n \n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n \n env = os.environ.copy()\n env['PATH'] = npm_path\n try:\n check_call([\n 'lessc', '--clean-css',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ], cwd=here, env=env)\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install js dependencies with `npm install`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n\ndef js_css_first(cls, strict=True):\n class Command(cls):\n def run(self):\n try:\n self.run_command('js')\n self.run_command('css')\n except Exception:\n if strict:\n raise\n else:\n pass\n return 
super().run()\n return Command\n\n\nclass bdist_egg_disabled(bdist_egg):\n \"\"\"Disabled version of bdist_egg\n\n Prevents setup.py install performing setuptools' default easy_install,\n which it should never ever do.\n \"\"\"\n def run(self):\n sys.exit(\"Aborting implicit building of eggs. Use `pip install .` to install from source.\")\n\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n 'build_py': js_css_first(build_py, strict=is_repo),\n 'sdist': js_css_first(sdist, strict=True),\n 'bdist_egg': bdist_egg if 'bdist_egg' in sys.argv else bdist_egg_disabled,\n}\n\n\n# setuptools requirements\n\nsetup_args['zip_safe'] = False\nfrom setuptools.command.develop import develop\nclass develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\nsetup_args['cmdclass']['develop'] = develop_js_css\nsetup_args['install_requires'] = install_requires = []\n\nwith open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 68e529ed52..4ffb77536a 100755
--- a/setup.py
+++ b/setup.py
@@ -66,6 +66,7 @@ def get_package_data():
"""
package_data = {}
package_data['jupyterhub'] = [
+ 'alembic.ini',
'alembic/*',
'alembic/versions/*',
]
|
paperless-ngx__paperless-ngx-1091 | [BUG] Constant moderate CPU usage when idle (a.k.a. paperless is contributing to global warming)
### Description
paperless-ngx relies on Redis to move data between the processes. The problem is that somehow, something somewhere is not configured correctly, resulting in a permanent 1-3% CPU usage (see below).
It may not seem like much, but given that:
- it is idling, as in, not doing anything meaningful, so CPU usage should be 0%
- it is using CPU on everyone's instances all the time (thousands and thousands of instances)
- multiply these two together (instanceCount * Watts) and we are wasting useful energy to do... absolutely nothing (or to contribute to climate change, depending on how we obtain the energy to do nothing); see the sketch below
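
A back-of-envelope sketch of the aggregate waste (all numbers here are illustrative assumptions, not measurements):

```python
# All figures are hypothetical assumptions chosen for illustration.
instances = 10_000        # assumed number of idle paperless-ngx installs
watts_per_instance = 0.5  # assumed extra draw from a constant 1-3% CPU
kilowatts_wasted = instances * watts_per_instance / 1000
print(f"~{kilowatts_wasted:.1f} kW burned doing nothing")  # prints ~5.0 kW
```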
### Steps to reproduce
Start up the docker image `linuxserver/paperless-ngx` with the embedded redis.
This is the typical usage when idle:
```
top - 06:49:06 up 39 days, 7:55, 0 users, load average: 0.01, 0.02, 0.01
Tasks: 19 total, 1 running, 18 sleeping, 0 stopped, 0 zombie
%Cpu(s): 1.2 us, 1.0 sy, 0.0 ni, 97.8 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
MiB Mem : 3820.6 total, 644.3 free, 1353.6 used, 1822.7 buff/cache
MiB Swap: 2048.0 total, 1244.2 free, 803.8 used. 2175.0 avail Mem
scroll coordinates: y = 1/19 (tasks), x = 1/12 (fields)
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
401 abc 20 0 50188 4204 3400 S 0.7 0.1 1:42.51 redis-server
459 abc 20 0 248844 62192 8032 S 0.7 1.6 2:02.26 python3
405 abc 20 0 181140 65540 13416 S 0.3 1.7 0:06.16 python3
434 abc 20 0 1091836 128732 22712 S 0.3 3.3 1:32.53 gunicorn
435 abc 20 0 1161544 140780 24080 S 0.3 3.6 1:44.55 gunicorn
2891 root 20 0 9124 3688 3180 R 0.3 0.1 0:01.57 top
1 root 20 0 196 0 0 S 0.0 0.0 0:00.07 s6-svscan
35 root 20 0 196 0 0 S 0.0 0.0 0:00.00 s6-supervise
396 root 20 0 196 0 0 S 0.0 0.0 0:00.00 s6-supervise
397 root 20 0 196 0 0 S 0.0 0.0 0:00.00 s6-supervise
398 root 20 0 196 0 0 S 0.0 0.0 0:00.00 s6-supervise
399 root 20 0 196 0 0 S 0.0 0.0 0:00.00 s6-supervise
```
### Webserver logs
_No response_
### Paperless-ngx version
1.7.1
### Host OS
Ubuntu
### Installation method
Docker
### Browser
None
### Configuration changes
_No response_
### Other
_No response_
| [
{
"content": "import datetime\nimport json\nimport math\nimport multiprocessing\nimport os\nimport re\nfrom typing import Final\nfrom typing import Optional\nfrom typing import Set\nfrom urllib.parse import urlparse\n\nfrom concurrent_log_handler.queue import setup_logging_queues\nfrom django.utils.translation import gettext_lazy as _\nfrom dotenv import load_dotenv\n\n# Tap paperless.conf if it's available\nif os.path.exists(\"../paperless.conf\"):\n load_dotenv(\"../paperless.conf\")\nelif os.path.exists(\"/etc/paperless.conf\"):\n load_dotenv(\"/etc/paperless.conf\")\nelif os.path.exists(\"/usr/local/etc/paperless.conf\"):\n load_dotenv(\"/usr/local/etc/paperless.conf\")\n\n# There are multiple levels of concurrency in paperless:\n# - Multiple consumers may be run in parallel.\n# - Each consumer may process multiple pages in parallel.\n# - Each Tesseract OCR run may spawn multiple threads to process a single page\n# slightly faster.\n# The performance gains from having tesseract use multiple threads are minimal.\n# However, when multiple pages are processed in parallel, the total number of\n# OCR threads may exceed the number of available cpu cores, which will\n# dramatically slow down the consumption process. This settings limits each\n# Tesseract process to one thread.\nos.environ[\"OMP_THREAD_LIMIT\"] = \"1\"\n\n\ndef __get_boolean(key: str, default: str = \"NO\") -> bool:\n \"\"\"\n Return a boolean value based on whatever the user has supplied in the\n environment based on whether the value \"looks like\" it's True or not.\n \"\"\"\n return bool(os.getenv(key, default).lower() in (\"yes\", \"y\", \"1\", \"t\", \"true\"))\n\n\ndef __get_int(key: str, default: int) -> int:\n \"\"\"\n Return an integer value based on the environment variable or a default\n \"\"\"\n return int(os.getenv(key, default))\n\n\ndef __get_float(key: str, default: float) -> float:\n \"\"\"\n Return an integer value based on the environment variable or a default\n \"\"\"\n return float(os.getenv(key, default))\n\n\n# NEVER RUN WITH DEBUG IN PRODUCTION.\nDEBUG = __get_boolean(\"PAPERLESS_DEBUG\", \"NO\")\n\n\n###############################################################################\n# Directories #\n###############################################################################\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"PAPERLESS_STATICDIR\", os.path.join(BASE_DIR, \"..\", \"static\"))\n\nMEDIA_ROOT = os.getenv(\"PAPERLESS_MEDIA_ROOT\", os.path.join(BASE_DIR, \"..\", \"media\"))\nORIGINALS_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"originals\")\nARCHIVE_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"archive\")\nTHUMBNAIL_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"thumbnails\")\n\nDATA_DIR = os.getenv(\"PAPERLESS_DATA_DIR\", os.path.join(BASE_DIR, \"..\", \"data\"))\n\nTRASH_DIR = os.getenv(\"PAPERLESS_TRASH_DIR\")\n\n# Lock file for synchronizing changes to the MEDIA directory across multiple\n# threads.\nMEDIA_LOCK = os.path.join(MEDIA_ROOT, \"media.lock\")\nINDEX_DIR = os.path.join(DATA_DIR, \"index\")\nMODEL_FILE = os.path.join(DATA_DIR, \"classification_model.pickle\")\n\nLOGGING_DIR = os.getenv(\"PAPERLESS_LOGGING_DIR\", os.path.join(DATA_DIR, \"log\"))\n\nCONSUMPTION_DIR = os.getenv(\n \"PAPERLESS_CONSUMPTION_DIR\",\n os.path.join(BASE_DIR, \"..\", \"consume\"),\n)\n\n# This will be created if it doesn't exist\nSCRATCH_DIR = os.getenv(\"PAPERLESS_SCRATCH_DIR\", 
\"/tmp/paperless\")\n\n###############################################################################\n# Application Definition #\n###############################################################################\n\nenv_apps = os.getenv(\"PAPERLESS_APPS\").split(\",\") if os.getenv(\"PAPERLESS_APPS\") else []\n\nINSTALLED_APPS = [\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"corsheaders\",\n \"django_extensions\",\n \"paperless\",\n \"documents.apps.DocumentsConfig\",\n \"paperless_tesseract.apps.PaperlessTesseractConfig\",\n \"paperless_text.apps.PaperlessTextConfig\",\n \"paperless_mail.apps.PaperlessMailConfig\",\n \"django.contrib.admin\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"django_filters\",\n \"django_q\",\n] + env_apps\n\nif DEBUG:\n INSTALLED_APPS.append(\"channels\")\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n ],\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.AcceptHeaderVersioning\",\n \"DEFAULT_VERSION\": \"1\",\n # Make sure these are ordered and that the most recent version appears\n # last\n \"ALLOWED_VERSIONS\": [\"1\", \"2\"],\n}\n\nif DEBUG:\n REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"].append(\n \"paperless.auth.AngularApiAuthenticationOverride\",\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"paperless.middleware.ApiVersionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"paperless.urls\"\n\nFORCE_SCRIPT_NAME = os.getenv(\"PAPERLESS_FORCE_SCRIPT_NAME\")\nBASE_URL = (FORCE_SCRIPT_NAME or \"\") + \"/\"\nLOGIN_URL = BASE_URL + \"accounts/login/\"\nLOGOUT_REDIRECT_URL = os.getenv(\"PAPERLESS_LOGOUT_REDIRECT_URL\")\n\nWSGI_APPLICATION = \"paperless.wsgi.application\"\nASGI_APPLICATION = \"paperless.asgi.application\"\n\nSTATIC_URL = os.getenv(\"PAPERLESS_STATIC_URL\", BASE_URL + \"static/\")\nWHITENOISE_STATIC_PREFIX = \"/static/\"\n\n# TODO: what is this used for?\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [os.getenv(\"PAPERLESS_REDIS\", \"redis://localhost:6379\")],\n \"capacity\": 2000, # default 100\n \"expiry\": 15, # default 60\n },\n },\n}\n\n###############################################################################\n# Security 
#\n###############################################################################\n\nAUTO_LOGIN_USERNAME = os.getenv(\"PAPERLESS_AUTO_LOGIN_USERNAME\")\n\nif AUTO_LOGIN_USERNAME:\n _index = MIDDLEWARE.index(\"django.contrib.auth.middleware.AuthenticationMiddleware\")\n # This overrides everything the auth middleware is doing but still allows\n # regular login in case the provided user does not exist.\n MIDDLEWARE.insert(_index + 1, \"paperless.auth.AutoLoginMiddleware\")\n\nENABLE_HTTP_REMOTE_USER = __get_boolean(\"PAPERLESS_ENABLE_HTTP_REMOTE_USER\")\nHTTP_REMOTE_USER_HEADER_NAME = os.getenv(\n \"PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME\",\n \"HTTP_REMOTE_USER\",\n)\n\nif ENABLE_HTTP_REMOTE_USER:\n MIDDLEWARE.append(\"paperless.auth.HttpRemoteUserMiddleware\")\n AUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.RemoteUserBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n ]\n REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"].append(\n \"rest_framework.authentication.RemoteUserAuthentication\",\n )\n\n# X-Frame options for embedded PDF display:\nif DEBUG:\n X_FRAME_OPTIONS = \"ANY\"\nelse:\n X_FRAME_OPTIONS = \"SAMEORIGIN\"\n\n\n# The next 3 settings can also be set using just PAPERLESS_URL\n_csrf_origins = os.getenv(\"PAPERLESS_CSRF_TRUSTED_ORIGINS\")\nif _csrf_origins:\n CSRF_TRUSTED_ORIGINS = _csrf_origins.split(\",\")\nelse:\n CSRF_TRUSTED_ORIGINS = []\n\n# We allow CORS from localhost:8000\nCORS_ALLOWED_ORIGINS = tuple(\n os.getenv(\"PAPERLESS_CORS_ALLOWED_HOSTS\", \"http://localhost:8000\").split(\",\"),\n)\n\nif DEBUG:\n # Allow access from the angular development server during debugging\n CORS_ALLOWED_ORIGINS += (\"http://localhost:4200\",)\n\n_allowed_hosts = os.getenv(\"PAPERLESS_ALLOWED_HOSTS\")\nif _allowed_hosts:\n ALLOWED_HOSTS = _allowed_hosts.split(\",\")\nelse:\n ALLOWED_HOSTS = [\"*\"]\n\n_paperless_url = os.getenv(\"PAPERLESS_URL\")\nif _paperless_url:\n _paperless_uri = urlparse(_paperless_url)\n CSRF_TRUSTED_ORIGINS.append(_paperless_url)\n CORS_ALLOWED_ORIGINS += (_paperless_url,)\n if _allowed_hosts:\n ALLOWED_HOSTS.append(_paperless_uri.hostname)\n else:\n # always allow localhost. Necessary e.g. for healthcheck in docker.\n ALLOWED_HOSTS = [_paperless_uri.hostname] + [\"localhost\"]\n\n# The secret key has a default that should be fine so long as you're hosting\n# Paperless on a closed network. However, if you're putting this anywhere\n# public, you should change the key to something unique and verbose.\nSECRET_KEY = os.getenv(\n \"PAPERLESS_SECRET_KEY\",\n \"e11fl1oa-*ytql8p)(06fbj4ukrlo+n7k&q5+$1md7i+mge=ee\",\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Disable Django's artificial limit on the number of form fields to submit at\n# once. 
This is a protection against overloading the server, but since this is\n# a self-hosted sort of gig, the benefits of being able to mass-delete a tonne\n# of log entries outweight the benefits of such a safeguard.\n\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\nCOOKIE_PREFIX = os.getenv(\"PAPERLESS_COOKIE_PREFIX\", \"\")\n\nCSRF_COOKIE_NAME = f\"{COOKIE_PREFIX}csrftoken\"\nSESSION_COOKIE_NAME = f\"{COOKIE_PREFIX}sessionid\"\nLANGUAGE_COOKIE_NAME = f\"{COOKIE_PREFIX}django_language\"\n\n###############################################################################\n# Database #\n###############################################################################\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(DATA_DIR, \"db.sqlite3\"),\n },\n}\n\nif os.getenv(\"PAPERLESS_DBHOST\"):\n # Have sqlite available as a second option for management commands\n # This is important when migrating to/from sqlite\n DATABASES[\"sqlite\"] = DATABASES[\"default\"].copy()\n\n DATABASES[\"default\"] = {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"HOST\": os.getenv(\"PAPERLESS_DBHOST\"),\n \"NAME\": os.getenv(\"PAPERLESS_DBNAME\", \"paperless\"),\n \"USER\": os.getenv(\"PAPERLESS_DBUSER\", \"paperless\"),\n \"PASSWORD\": os.getenv(\"PAPERLESS_DBPASS\", \"paperless\"),\n \"OPTIONS\": {\"sslmode\": os.getenv(\"PAPERLESS_DBSSLMODE\", \"prefer\")},\n }\n if os.getenv(\"PAPERLESS_DBPORT\"):\n DATABASES[\"default\"][\"PORT\"] = os.getenv(\"PAPERLESS_DBPORT\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n###############################################################################\n# Internationalization #\n###############################################################################\n\nLANGUAGE_CODE = \"en-us\"\n\nLANGUAGES = [\n (\"en-us\", _(\"English (US)\")), # needs to be first to act as fallback language\n (\"be-by\", _(\"Belarusian\")),\n (\"cs-cz\", _(\"Czech\")),\n (\"da-dk\", _(\"Danish\")),\n (\"de-de\", _(\"German\")),\n (\"en-gb\", _(\"English (GB)\")),\n (\"es-es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"it-it\", _(\"Italian\")),\n (\"lb-lu\", _(\"Luxembourgish\")),\n (\"nl-nl\", _(\"Dutch\")),\n (\"pl-pl\", _(\"Polish\")),\n (\"pt-br\", _(\"Portuguese (Brazil)\")),\n (\"pt-pt\", _(\"Portuguese\")),\n (\"ro-ro\", _(\"Romanian\")),\n (\"ru-ru\", _(\"Russian\")),\n (\"sl-si\", _(\"Slovenian\")),\n (\"sr-cs\", _(\"Serbian\")),\n (\"sv-se\", _(\"Swedish\")),\n (\"tr-tr\", _(\"Turkish\")),\n (\"zh-cn\", _(\"Chinese Simplified\")),\n]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"locale\")]\n\nTIME_ZONE = os.getenv(\"PAPERLESS_TIME_ZONE\", \"UTC\")\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n###############################################################################\n# Logging #\n###############################################################################\n\nsetup_logging_queues()\n\nos.makedirs(LOGGING_DIR, exist_ok=True)\n\nLOGROTATE_MAX_SIZE = os.getenv(\"PAPERLESS_LOGROTATE_MAX_SIZE\", 1024 * 1024)\nLOGROTATE_MAX_BACKUPS = os.getenv(\"PAPERLESS_LOGROTATE_MAX_BACKUPS\", 20)\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[{asctime}] [{levelname}] [{name}] {message}\",\n \"style\": \"{\",\n },\n \"simple\": {\n \"format\": \"{levelname} {message}\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\" if DEBUG else \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": 
\"verbose\",\n },\n \"file_paperless\": {\n \"class\": \"concurrent_log_handler.ConcurrentRotatingFileHandler\",\n \"formatter\": \"verbose\",\n \"filename\": os.path.join(LOGGING_DIR, \"paperless.log\"),\n \"maxBytes\": LOGROTATE_MAX_SIZE,\n \"backupCount\": LOGROTATE_MAX_BACKUPS,\n },\n \"file_mail\": {\n \"class\": \"concurrent_log_handler.ConcurrentRotatingFileHandler\",\n \"formatter\": \"verbose\",\n \"filename\": os.path.join(LOGGING_DIR, \"mail.log\"),\n \"maxBytes\": LOGROTATE_MAX_SIZE,\n \"backupCount\": LOGROTATE_MAX_BACKUPS,\n },\n },\n \"root\": {\"handlers\": [\"console\"]},\n \"loggers\": {\n \"paperless\": {\"handlers\": [\"file_paperless\"], \"level\": \"DEBUG\"},\n \"paperless_mail\": {\"handlers\": [\"file_mail\"], \"level\": \"DEBUG\"},\n },\n}\n\n###############################################################################\n# Task queue #\n###############################################################################\n\n\n# Sensible defaults for multitasking:\n# use a fair balance between worker processes and threads epr worker so that\n# both consuming many documents in parallel and consuming large documents is\n# reasonably fast.\n# Favors threads per worker on smaller systems and never exceeds cpu_count()\n# in total.\n\n\ndef default_task_workers() -> int:\n # always leave one core open\n available_cores = max(multiprocessing.cpu_count(), 1)\n try:\n if available_cores < 4:\n return available_cores\n return max(math.floor(math.sqrt(available_cores)), 1)\n except NotImplementedError:\n return 1\n\n\nTASK_WORKERS = __get_int(\"PAPERLESS_TASK_WORKERS\", default_task_workers())\n\nPAPERLESS_WORKER_TIMEOUT: Final[int] = __get_int(\"PAPERLESS_WORKER_TIMEOUT\", 1800)\n\n# Per django-q docs, timeout must be smaller than retry\n# We default retry to 10s more than the timeout\nPAPERLESS_WORKER_RETRY: Final[int] = __get_int(\n \"PAPERLESS_WORKER_RETRY\",\n PAPERLESS_WORKER_TIMEOUT + 10,\n)\n\nQ_CLUSTER = {\n \"name\": \"paperless\",\n \"catch_up\": False,\n \"recycle\": 1,\n \"retry\": PAPERLESS_WORKER_RETRY,\n \"timeout\": PAPERLESS_WORKER_TIMEOUT,\n \"workers\": TASK_WORKERS,\n \"redis\": os.getenv(\"PAPERLESS_REDIS\", \"redis://localhost:6379\"),\n \"log_level\": \"DEBUG\" if DEBUG else \"INFO\",\n}\n\n\ndef default_threads_per_worker(task_workers) -> int:\n # always leave one core open\n available_cores = max(multiprocessing.cpu_count(), 1)\n try:\n return max(math.floor(available_cores / task_workers), 1)\n except NotImplementedError:\n return 1\n\n\nTHREADS_PER_WORKER = os.getenv(\n \"PAPERLESS_THREADS_PER_WORKER\",\n default_threads_per_worker(TASK_WORKERS),\n)\n\n###############################################################################\n# Paperless Specific Settings #\n###############################################################################\n\nCONSUMER_POLLING = int(os.getenv(\"PAPERLESS_CONSUMER_POLLING\", 0))\n\nCONSUMER_POLLING_DELAY = int(os.getenv(\"PAPERLESS_CONSUMER_POLLING_DELAY\", 5))\n\nCONSUMER_POLLING_RETRY_COUNT = int(\n os.getenv(\"PAPERLESS_CONSUMER_POLLING_RETRY_COUNT\", 5),\n)\n\nCONSUMER_INOTIFY_DELAY: Final[float] = __get_float(\n \"PAPERLESS_CONSUMER_INOTIFY_DELAY\",\n 0.5,\n)\n\nCONSUMER_DELETE_DUPLICATES = __get_boolean(\"PAPERLESS_CONSUMER_DELETE_DUPLICATES\")\n\nCONSUMER_RECURSIVE = __get_boolean(\"PAPERLESS_CONSUMER_RECURSIVE\")\n\n# Ignore glob patterns, relative to PAPERLESS_CONSUMPTION_DIR\nCONSUMER_IGNORE_PATTERNS = list(\n json.loads(\n os.getenv(\n \"PAPERLESS_CONSUMER_IGNORE_PATTERNS\",\n '[\".DS_STORE/*\", \"._*\", 
\".stfolder/*\", \".stversions/*\", \".localized/*\", \"desktop.ini\"]',\n ),\n ),\n)\n\nCONSUMER_SUBDIRS_AS_TAGS = __get_boolean(\"PAPERLESS_CONSUMER_SUBDIRS_AS_TAGS\")\n\nCONSUMER_ENABLE_BARCODES = __get_boolean(\n \"PAPERLESS_CONSUMER_ENABLE_BARCODES\",\n)\n\nCONSUMER_BARCODE_TIFF_SUPPORT = __get_boolean(\n \"PAPERLESS_CONSUMER_BARCODE_TIFF_SUPPORT\",\n)\n\nCONSUMER_BARCODE_STRING = os.getenv(\"PAPERLESS_CONSUMER_BARCODE_STRING\", \"PATCHT\")\n\nOPTIMIZE_THUMBNAILS = __get_boolean(\"PAPERLESS_OPTIMIZE_THUMBNAILS\", \"true\")\n\nOCR_PAGES = int(os.getenv(\"PAPERLESS_OCR_PAGES\", 0))\n\n# The default language that tesseract will attempt to use when parsing\n# documents. It should be a 3-letter language code consistent with ISO 639.\nOCR_LANGUAGE = os.getenv(\"PAPERLESS_OCR_LANGUAGE\", \"eng\")\n\n# OCRmyPDF --output-type options are available.\n# TODO: validate this setting.\nOCR_OUTPUT_TYPE = os.getenv(\"PAPERLESS_OCR_OUTPUT_TYPE\", \"pdfa\")\n\n# skip. redo, force\n# TODO: validate this.\nOCR_MODE = os.getenv(\"PAPERLESS_OCR_MODE\", \"skip\")\n\nOCR_IMAGE_DPI = os.getenv(\"PAPERLESS_OCR_IMAGE_DPI\")\n\nOCR_CLEAN = os.getenv(\"PAPERLESS_OCR_CLEAN\", \"clean\")\n\nOCR_DESKEW = __get_boolean(\"PAPERLESS_OCR_DESKEW\", \"true\")\n\nOCR_ROTATE_PAGES = __get_boolean(\"PAPERLESS_OCR_ROTATE_PAGES\", \"true\")\n\nOCR_ROTATE_PAGES_THRESHOLD = float(\n os.getenv(\"PAPERLESS_OCR_ROTATE_PAGES_THRESHOLD\", 12.0),\n)\n\nOCR_MAX_IMAGE_PIXELS: Optional[int] = None\nif os.environ.get(\"PAPERLESS_OCR_MAX_IMAGE_PIXELS\") is not None:\n OCR_MAX_IMAGE_PIXELS: int = int(os.environ.get(\"PAPERLESS_OCR_MAX_IMAGE_PIXELS\"))\n\nOCR_USER_ARGS = os.getenv(\"PAPERLESS_OCR_USER_ARGS\", \"{}\")\n\n# GNUPG needs a home directory for some reason\nGNUPG_HOME = os.getenv(\"HOME\", \"/tmp\")\n\n# Convert is part of the ImageMagick package\nCONVERT_BINARY = os.getenv(\"PAPERLESS_CONVERT_BINARY\", \"convert\")\nCONVERT_TMPDIR = os.getenv(\"PAPERLESS_CONVERT_TMPDIR\")\nCONVERT_MEMORY_LIMIT = os.getenv(\"PAPERLESS_CONVERT_MEMORY_LIMIT\")\n\nGS_BINARY = os.getenv(\"PAPERLESS_GS_BINARY\", \"gs\")\n\nOPTIPNG_BINARY = os.getenv(\"PAPERLESS_OPTIPNG_BINARY\", \"optipng\")\n\n\n# Pre-2.x versions of Paperless stored your documents locally with GPG\n# encryption, but that is no longer the default. This behaviour is still\n# available, but it must be explicitly enabled by setting\n# `PAPERLESS_PASSPHRASE` in your environment or config file. 
The default is to\n# store these files unencrypted.\n#\n# Translation:\n# * If you're a new user, you can safely ignore this setting.\n# * If you're upgrading from 1.x, this must be set, OR you can run\n# `./manage.py change_storage_type gpg unencrypted` to decrypt your files,\n# after which you can unset this value.\nPASSPHRASE = os.getenv(\"PAPERLESS_PASSPHRASE\")\n\n# Trigger a script after every successful document consumption?\nPRE_CONSUME_SCRIPT = os.getenv(\"PAPERLESS_PRE_CONSUME_SCRIPT\")\nPOST_CONSUME_SCRIPT = os.getenv(\"PAPERLESS_POST_CONSUME_SCRIPT\")\n\n# Specify the default date order (for autodetected dates)\nDATE_ORDER = os.getenv(\"PAPERLESS_DATE_ORDER\", \"DMY\")\nFILENAME_DATE_ORDER = os.getenv(\"PAPERLESS_FILENAME_DATE_ORDER\")\n\n# Transformations applied before filename parsing\nFILENAME_PARSE_TRANSFORMS = []\nfor t in json.loads(os.getenv(\"PAPERLESS_FILENAME_PARSE_TRANSFORMS\", \"[]\")):\n FILENAME_PARSE_TRANSFORMS.append((re.compile(t[\"pattern\"]), t[\"repl\"]))\n\n# Specify the filename format for out files\nFILENAME_FORMAT = os.getenv(\"PAPERLESS_FILENAME_FORMAT\")\n\n# If this is enabled, variables in filename format will resolve to empty-string instead of 'none'.\n# Directories with 'empty names' are omitted, too.\nFILENAME_FORMAT_REMOVE_NONE = __get_boolean(\n \"PAPERLESS_FILENAME_FORMAT_REMOVE_NONE\",\n \"NO\",\n)\n\nTHUMBNAIL_FONT_NAME = os.getenv(\n \"PAPERLESS_THUMBNAIL_FONT_NAME\",\n \"/usr/share/fonts/liberation/LiberationSerif-Regular.ttf\",\n)\n\n# TODO: this should not have a prefix.\n# Tika settings\nPAPERLESS_TIKA_ENABLED = __get_boolean(\"PAPERLESS_TIKA_ENABLED\", \"NO\")\nPAPERLESS_TIKA_ENDPOINT = os.getenv(\"PAPERLESS_TIKA_ENDPOINT\", \"http://localhost:9998\")\nPAPERLESS_TIKA_GOTENBERG_ENDPOINT = os.getenv(\n \"PAPERLESS_TIKA_GOTENBERG_ENDPOINT\",\n \"http://localhost:3000\",\n)\n\nif PAPERLESS_TIKA_ENABLED:\n INSTALLED_APPS.append(\"paperless_tika.apps.PaperlessTikaConfig\")\n\n\ndef _parse_ignore_dates(\n env_ignore: str,\n date_order: str = DATE_ORDER,\n) -> Set[datetime.datetime]:\n \"\"\"\n If the PAPERLESS_IGNORE_DATES environment variable is set, parse the\n user provided string(s) into dates\n\n Args:\n env_ignore (str): The value of the environment variable, comma seperated dates\n date_order (str, optional): The format of the date strings. Defaults to DATE_ORDER.\n\n Returns:\n Set[datetime.datetime]: The set of parsed date objects\n \"\"\"\n import dateparser\n\n ignored_dates = set()\n for s in env_ignore.split(\",\"):\n d = dateparser.parse(\n s,\n settings={\n \"DATE_ORDER\": date_order,\n },\n )\n if d:\n ignored_dates.add(d.date())\n return ignored_dates\n\n\n# List dates that should be ignored when trying to parse date from document text\nIGNORE_DATES: Set[datetime.date] = set()\n\nif os.getenv(\"PAPERLESS_IGNORE_DATES\") is not None:\n IGNORE_DATES = _parse_ignore_dates(os.getenv(\"PAPERLESS_IGNORE_DATES\"))\n\nENABLE_UPDATE_CHECK = os.getenv(\"PAPERLESS_ENABLE_UPDATE_CHECK\", \"default\")\nif ENABLE_UPDATE_CHECK != \"default\":\n ENABLE_UPDATE_CHECK = __get_boolean(\"PAPERLESS_ENABLE_UPDATE_CHECK\")\n",
"path": "src/paperless/settings.py"
}
] | [
{
"content": "import datetime\nimport json\nimport math\nimport multiprocessing\nimport os\nimport re\nfrom typing import Final\nfrom typing import Optional\nfrom typing import Set\nfrom urllib.parse import urlparse\n\nfrom concurrent_log_handler.queue import setup_logging_queues\nfrom django.utils.translation import gettext_lazy as _\nfrom dotenv import load_dotenv\n\n# Tap paperless.conf if it's available\nif os.path.exists(\"../paperless.conf\"):\n load_dotenv(\"../paperless.conf\")\nelif os.path.exists(\"/etc/paperless.conf\"):\n load_dotenv(\"/etc/paperless.conf\")\nelif os.path.exists(\"/usr/local/etc/paperless.conf\"):\n load_dotenv(\"/usr/local/etc/paperless.conf\")\n\n# There are multiple levels of concurrency in paperless:\n# - Multiple consumers may be run in parallel.\n# - Each consumer may process multiple pages in parallel.\n# - Each Tesseract OCR run may spawn multiple threads to process a single page\n# slightly faster.\n# The performance gains from having tesseract use multiple threads are minimal.\n# However, when multiple pages are processed in parallel, the total number of\n# OCR threads may exceed the number of available cpu cores, which will\n# dramatically slow down the consumption process. This settings limits each\n# Tesseract process to one thread.\nos.environ[\"OMP_THREAD_LIMIT\"] = \"1\"\n\n\ndef __get_boolean(key: str, default: str = \"NO\") -> bool:\n \"\"\"\n Return a boolean value based on whatever the user has supplied in the\n environment based on whether the value \"looks like\" it's True or not.\n \"\"\"\n return bool(os.getenv(key, default).lower() in (\"yes\", \"y\", \"1\", \"t\", \"true\"))\n\n\ndef __get_int(key: str, default: int) -> int:\n \"\"\"\n Return an integer value based on the environment variable or a default\n \"\"\"\n return int(os.getenv(key, default))\n\n\ndef __get_float(key: str, default: float) -> float:\n \"\"\"\n Return an integer value based on the environment variable or a default\n \"\"\"\n return float(os.getenv(key, default))\n\n\n# NEVER RUN WITH DEBUG IN PRODUCTION.\nDEBUG = __get_boolean(\"PAPERLESS_DEBUG\", \"NO\")\n\n\n###############################################################################\n# Directories #\n###############################################################################\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"PAPERLESS_STATICDIR\", os.path.join(BASE_DIR, \"..\", \"static\"))\n\nMEDIA_ROOT = os.getenv(\"PAPERLESS_MEDIA_ROOT\", os.path.join(BASE_DIR, \"..\", \"media\"))\nORIGINALS_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"originals\")\nARCHIVE_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"archive\")\nTHUMBNAIL_DIR = os.path.join(MEDIA_ROOT, \"documents\", \"thumbnails\")\n\nDATA_DIR = os.getenv(\"PAPERLESS_DATA_DIR\", os.path.join(BASE_DIR, \"..\", \"data\"))\n\nTRASH_DIR = os.getenv(\"PAPERLESS_TRASH_DIR\")\n\n# Lock file for synchronizing changes to the MEDIA directory across multiple\n# threads.\nMEDIA_LOCK = os.path.join(MEDIA_ROOT, \"media.lock\")\nINDEX_DIR = os.path.join(DATA_DIR, \"index\")\nMODEL_FILE = os.path.join(DATA_DIR, \"classification_model.pickle\")\n\nLOGGING_DIR = os.getenv(\"PAPERLESS_LOGGING_DIR\", os.path.join(DATA_DIR, \"log\"))\n\nCONSUMPTION_DIR = os.getenv(\n \"PAPERLESS_CONSUMPTION_DIR\",\n os.path.join(BASE_DIR, \"..\", \"consume\"),\n)\n\n# This will be created if it doesn't exist\nSCRATCH_DIR = os.getenv(\"PAPERLESS_SCRATCH_DIR\", 
\"/tmp/paperless\")\n\n###############################################################################\n# Application Definition #\n###############################################################################\n\nenv_apps = os.getenv(\"PAPERLESS_APPS\").split(\",\") if os.getenv(\"PAPERLESS_APPS\") else []\n\nINSTALLED_APPS = [\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"corsheaders\",\n \"django_extensions\",\n \"paperless\",\n \"documents.apps.DocumentsConfig\",\n \"paperless_tesseract.apps.PaperlessTesseractConfig\",\n \"paperless_text.apps.PaperlessTextConfig\",\n \"paperless_mail.apps.PaperlessMailConfig\",\n \"django.contrib.admin\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"django_filters\",\n \"django_q\",\n] + env_apps\n\nif DEBUG:\n INSTALLED_APPS.append(\"channels\")\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n ],\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.AcceptHeaderVersioning\",\n \"DEFAULT_VERSION\": \"1\",\n # Make sure these are ordered and that the most recent version appears\n # last\n \"ALLOWED_VERSIONS\": [\"1\", \"2\"],\n}\n\nif DEBUG:\n REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"].append(\n \"paperless.auth.AngularApiAuthenticationOverride\",\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"paperless.middleware.ApiVersionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"paperless.urls\"\n\nFORCE_SCRIPT_NAME = os.getenv(\"PAPERLESS_FORCE_SCRIPT_NAME\")\nBASE_URL = (FORCE_SCRIPT_NAME or \"\") + \"/\"\nLOGIN_URL = BASE_URL + \"accounts/login/\"\nLOGOUT_REDIRECT_URL = os.getenv(\"PAPERLESS_LOGOUT_REDIRECT_URL\")\n\nWSGI_APPLICATION = \"paperless.wsgi.application\"\nASGI_APPLICATION = \"paperless.asgi.application\"\n\nSTATIC_URL = os.getenv(\"PAPERLESS_STATIC_URL\", BASE_URL + \"static/\")\nWHITENOISE_STATIC_PREFIX = \"/static/\"\n\n# TODO: what is this used for?\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [os.getenv(\"PAPERLESS_REDIS\", \"redis://localhost:6379\")],\n \"capacity\": 2000, # default 100\n \"expiry\": 15, # default 60\n },\n },\n}\n\n###############################################################################\n# Security 
#\n###############################################################################\n\nAUTO_LOGIN_USERNAME = os.getenv(\"PAPERLESS_AUTO_LOGIN_USERNAME\")\n\nif AUTO_LOGIN_USERNAME:\n _index = MIDDLEWARE.index(\"django.contrib.auth.middleware.AuthenticationMiddleware\")\n # This overrides everything the auth middleware is doing but still allows\n # regular login in case the provided user does not exist.\n MIDDLEWARE.insert(_index + 1, \"paperless.auth.AutoLoginMiddleware\")\n\nENABLE_HTTP_REMOTE_USER = __get_boolean(\"PAPERLESS_ENABLE_HTTP_REMOTE_USER\")\nHTTP_REMOTE_USER_HEADER_NAME = os.getenv(\n \"PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME\",\n \"HTTP_REMOTE_USER\",\n)\n\nif ENABLE_HTTP_REMOTE_USER:\n MIDDLEWARE.append(\"paperless.auth.HttpRemoteUserMiddleware\")\n AUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.RemoteUserBackend\",\n \"django.contrib.auth.backends.ModelBackend\",\n ]\n REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"].append(\n \"rest_framework.authentication.RemoteUserAuthentication\",\n )\n\n# X-Frame options for embedded PDF display:\nif DEBUG:\n X_FRAME_OPTIONS = \"ANY\"\nelse:\n X_FRAME_OPTIONS = \"SAMEORIGIN\"\n\n\n# The next 3 settings can also be set using just PAPERLESS_URL\n_csrf_origins = os.getenv(\"PAPERLESS_CSRF_TRUSTED_ORIGINS\")\nif _csrf_origins:\n CSRF_TRUSTED_ORIGINS = _csrf_origins.split(\",\")\nelse:\n CSRF_TRUSTED_ORIGINS = []\n\n# We allow CORS from localhost:8000\nCORS_ALLOWED_ORIGINS = tuple(\n os.getenv(\"PAPERLESS_CORS_ALLOWED_HOSTS\", \"http://localhost:8000\").split(\",\"),\n)\n\nif DEBUG:\n # Allow access from the angular development server during debugging\n CORS_ALLOWED_ORIGINS += (\"http://localhost:4200\",)\n\n_allowed_hosts = os.getenv(\"PAPERLESS_ALLOWED_HOSTS\")\nif _allowed_hosts:\n ALLOWED_HOSTS = _allowed_hosts.split(\",\")\nelse:\n ALLOWED_HOSTS = [\"*\"]\n\n_paperless_url = os.getenv(\"PAPERLESS_URL\")\nif _paperless_url:\n _paperless_uri = urlparse(_paperless_url)\n CSRF_TRUSTED_ORIGINS.append(_paperless_url)\n CORS_ALLOWED_ORIGINS += (_paperless_url,)\n if _allowed_hosts:\n ALLOWED_HOSTS.append(_paperless_uri.hostname)\n else:\n # always allow localhost. Necessary e.g. for healthcheck in docker.\n ALLOWED_HOSTS = [_paperless_uri.hostname] + [\"localhost\"]\n\n# The secret key has a default that should be fine so long as you're hosting\n# Paperless on a closed network. However, if you're putting this anywhere\n# public, you should change the key to something unique and verbose.\nSECRET_KEY = os.getenv(\n \"PAPERLESS_SECRET_KEY\",\n \"e11fl1oa-*ytql8p)(06fbj4ukrlo+n7k&q5+$1md7i+mge=ee\",\n)\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Disable Django's artificial limit on the number of form fields to submit at\n# once. 
This is a protection against overloading the server, but since this is\n# a self-hosted sort of gig, the benefits of being able to mass-delete a tonne\n# of log entries outweight the benefits of such a safeguard.\n\nDATA_UPLOAD_MAX_NUMBER_FIELDS = None\n\nCOOKIE_PREFIX = os.getenv(\"PAPERLESS_COOKIE_PREFIX\", \"\")\n\nCSRF_COOKIE_NAME = f\"{COOKIE_PREFIX}csrftoken\"\nSESSION_COOKIE_NAME = f\"{COOKIE_PREFIX}sessionid\"\nLANGUAGE_COOKIE_NAME = f\"{COOKIE_PREFIX}django_language\"\n\n###############################################################################\n# Database #\n###############################################################################\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(DATA_DIR, \"db.sqlite3\"),\n },\n}\n\nif os.getenv(\"PAPERLESS_DBHOST\"):\n # Have sqlite available as a second option for management commands\n # This is important when migrating to/from sqlite\n DATABASES[\"sqlite\"] = DATABASES[\"default\"].copy()\n\n DATABASES[\"default\"] = {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"HOST\": os.getenv(\"PAPERLESS_DBHOST\"),\n \"NAME\": os.getenv(\"PAPERLESS_DBNAME\", \"paperless\"),\n \"USER\": os.getenv(\"PAPERLESS_DBUSER\", \"paperless\"),\n \"PASSWORD\": os.getenv(\"PAPERLESS_DBPASS\", \"paperless\"),\n \"OPTIONS\": {\"sslmode\": os.getenv(\"PAPERLESS_DBSSLMODE\", \"prefer\")},\n }\n if os.getenv(\"PAPERLESS_DBPORT\"):\n DATABASES[\"default\"][\"PORT\"] = os.getenv(\"PAPERLESS_DBPORT\")\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n###############################################################################\n# Internationalization #\n###############################################################################\n\nLANGUAGE_CODE = \"en-us\"\n\nLANGUAGES = [\n (\"en-us\", _(\"English (US)\")), # needs to be first to act as fallback language\n (\"be-by\", _(\"Belarusian\")),\n (\"cs-cz\", _(\"Czech\")),\n (\"da-dk\", _(\"Danish\")),\n (\"de-de\", _(\"German\")),\n (\"en-gb\", _(\"English (GB)\")),\n (\"es-es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"it-it\", _(\"Italian\")),\n (\"lb-lu\", _(\"Luxembourgish\")),\n (\"nl-nl\", _(\"Dutch\")),\n (\"pl-pl\", _(\"Polish\")),\n (\"pt-br\", _(\"Portuguese (Brazil)\")),\n (\"pt-pt\", _(\"Portuguese\")),\n (\"ro-ro\", _(\"Romanian\")),\n (\"ru-ru\", _(\"Russian\")),\n (\"sl-si\", _(\"Slovenian\")),\n (\"sr-cs\", _(\"Serbian\")),\n (\"sv-se\", _(\"Swedish\")),\n (\"tr-tr\", _(\"Turkish\")),\n (\"zh-cn\", _(\"Chinese Simplified\")),\n]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"locale\")]\n\nTIME_ZONE = os.getenv(\"PAPERLESS_TIME_ZONE\", \"UTC\")\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n###############################################################################\n# Logging #\n###############################################################################\n\nsetup_logging_queues()\n\nos.makedirs(LOGGING_DIR, exist_ok=True)\n\nLOGROTATE_MAX_SIZE = os.getenv(\"PAPERLESS_LOGROTATE_MAX_SIZE\", 1024 * 1024)\nLOGROTATE_MAX_BACKUPS = os.getenv(\"PAPERLESS_LOGROTATE_MAX_BACKUPS\", 20)\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"[{asctime}] [{levelname}] [{name}] {message}\",\n \"style\": \"{\",\n },\n \"simple\": {\n \"format\": \"{levelname} {message}\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\" if DEBUG else \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": 
\"verbose\",\n },\n \"file_paperless\": {\n \"class\": \"concurrent_log_handler.ConcurrentRotatingFileHandler\",\n \"formatter\": \"verbose\",\n \"filename\": os.path.join(LOGGING_DIR, \"paperless.log\"),\n \"maxBytes\": LOGROTATE_MAX_SIZE,\n \"backupCount\": LOGROTATE_MAX_BACKUPS,\n },\n \"file_mail\": {\n \"class\": \"concurrent_log_handler.ConcurrentRotatingFileHandler\",\n \"formatter\": \"verbose\",\n \"filename\": os.path.join(LOGGING_DIR, \"mail.log\"),\n \"maxBytes\": LOGROTATE_MAX_SIZE,\n \"backupCount\": LOGROTATE_MAX_BACKUPS,\n },\n },\n \"root\": {\"handlers\": [\"console\"]},\n \"loggers\": {\n \"paperless\": {\"handlers\": [\"file_paperless\"], \"level\": \"DEBUG\"},\n \"paperless_mail\": {\"handlers\": [\"file_mail\"], \"level\": \"DEBUG\"},\n },\n}\n\n###############################################################################\n# Task queue #\n###############################################################################\n\n\n# Sensible defaults for multitasking:\n# use a fair balance between worker processes and threads epr worker so that\n# both consuming many documents in parallel and consuming large documents is\n# reasonably fast.\n# Favors threads per worker on smaller systems and never exceeds cpu_count()\n# in total.\n\n\ndef default_task_workers() -> int:\n # always leave one core open\n available_cores = max(multiprocessing.cpu_count(), 1)\n try:\n if available_cores < 4:\n return available_cores\n return max(math.floor(math.sqrt(available_cores)), 1)\n except NotImplementedError:\n return 1\n\n\nTASK_WORKERS = __get_int(\"PAPERLESS_TASK_WORKERS\", default_task_workers())\n\nPAPERLESS_WORKER_TIMEOUT: Final[int] = __get_int(\"PAPERLESS_WORKER_TIMEOUT\", 1800)\n\n# Per django-q docs, timeout must be smaller than retry\n# We default retry to 10s more than the timeout\nPAPERLESS_WORKER_RETRY: Final[int] = __get_int(\n \"PAPERLESS_WORKER_RETRY\",\n PAPERLESS_WORKER_TIMEOUT + 10,\n)\n\nQ_CLUSTER = {\n \"name\": \"paperless\",\n \"guard_cycle\": 5,\n \"catch_up\": False,\n \"recycle\": 1,\n \"retry\": PAPERLESS_WORKER_RETRY,\n \"timeout\": PAPERLESS_WORKER_TIMEOUT,\n \"workers\": TASK_WORKERS,\n \"redis\": os.getenv(\"PAPERLESS_REDIS\", \"redis://localhost:6379\"),\n \"log_level\": \"DEBUG\" if DEBUG else \"INFO\",\n}\n\n\ndef default_threads_per_worker(task_workers) -> int:\n # always leave one core open\n available_cores = max(multiprocessing.cpu_count(), 1)\n try:\n return max(math.floor(available_cores / task_workers), 1)\n except NotImplementedError:\n return 1\n\n\nTHREADS_PER_WORKER = os.getenv(\n \"PAPERLESS_THREADS_PER_WORKER\",\n default_threads_per_worker(TASK_WORKERS),\n)\n\n###############################################################################\n# Paperless Specific Settings #\n###############################################################################\n\nCONSUMER_POLLING = int(os.getenv(\"PAPERLESS_CONSUMER_POLLING\", 0))\n\nCONSUMER_POLLING_DELAY = int(os.getenv(\"PAPERLESS_CONSUMER_POLLING_DELAY\", 5))\n\nCONSUMER_POLLING_RETRY_COUNT = int(\n os.getenv(\"PAPERLESS_CONSUMER_POLLING_RETRY_COUNT\", 5),\n)\n\nCONSUMER_INOTIFY_DELAY: Final[float] = __get_float(\n \"PAPERLESS_CONSUMER_INOTIFY_DELAY\",\n 0.5,\n)\n\nCONSUMER_DELETE_DUPLICATES = __get_boolean(\"PAPERLESS_CONSUMER_DELETE_DUPLICATES\")\n\nCONSUMER_RECURSIVE = __get_boolean(\"PAPERLESS_CONSUMER_RECURSIVE\")\n\n# Ignore glob patterns, relative to PAPERLESS_CONSUMPTION_DIR\nCONSUMER_IGNORE_PATTERNS = list(\n json.loads(\n os.getenv(\n \"PAPERLESS_CONSUMER_IGNORE_PATTERNS\",\n 
'[\".DS_STORE/*\", \"._*\", \".stfolder/*\", \".stversions/*\", \".localized/*\", \"desktop.ini\"]',\n ),\n ),\n)\n\nCONSUMER_SUBDIRS_AS_TAGS = __get_boolean(\"PAPERLESS_CONSUMER_SUBDIRS_AS_TAGS\")\n\nCONSUMER_ENABLE_BARCODES = __get_boolean(\n \"PAPERLESS_CONSUMER_ENABLE_BARCODES\",\n)\n\nCONSUMER_BARCODE_TIFF_SUPPORT = __get_boolean(\n \"PAPERLESS_CONSUMER_BARCODE_TIFF_SUPPORT\",\n)\n\nCONSUMER_BARCODE_STRING = os.getenv(\"PAPERLESS_CONSUMER_BARCODE_STRING\", \"PATCHT\")\n\nOPTIMIZE_THUMBNAILS = __get_boolean(\"PAPERLESS_OPTIMIZE_THUMBNAILS\", \"true\")\n\nOCR_PAGES = int(os.getenv(\"PAPERLESS_OCR_PAGES\", 0))\n\n# The default language that tesseract will attempt to use when parsing\n# documents. It should be a 3-letter language code consistent with ISO 639.\nOCR_LANGUAGE = os.getenv(\"PAPERLESS_OCR_LANGUAGE\", \"eng\")\n\n# OCRmyPDF --output-type options are available.\n# TODO: validate this setting.\nOCR_OUTPUT_TYPE = os.getenv(\"PAPERLESS_OCR_OUTPUT_TYPE\", \"pdfa\")\n\n# skip. redo, force\n# TODO: validate this.\nOCR_MODE = os.getenv(\"PAPERLESS_OCR_MODE\", \"skip\")\n\nOCR_IMAGE_DPI = os.getenv(\"PAPERLESS_OCR_IMAGE_DPI\")\n\nOCR_CLEAN = os.getenv(\"PAPERLESS_OCR_CLEAN\", \"clean\")\n\nOCR_DESKEW = __get_boolean(\"PAPERLESS_OCR_DESKEW\", \"true\")\n\nOCR_ROTATE_PAGES = __get_boolean(\"PAPERLESS_OCR_ROTATE_PAGES\", \"true\")\n\nOCR_ROTATE_PAGES_THRESHOLD = float(\n os.getenv(\"PAPERLESS_OCR_ROTATE_PAGES_THRESHOLD\", 12.0),\n)\n\nOCR_MAX_IMAGE_PIXELS: Optional[int] = None\nif os.environ.get(\"PAPERLESS_OCR_MAX_IMAGE_PIXELS\") is not None:\n OCR_MAX_IMAGE_PIXELS: int = int(os.environ.get(\"PAPERLESS_OCR_MAX_IMAGE_PIXELS\"))\n\nOCR_USER_ARGS = os.getenv(\"PAPERLESS_OCR_USER_ARGS\", \"{}\")\n\n# GNUPG needs a home directory for some reason\nGNUPG_HOME = os.getenv(\"HOME\", \"/tmp\")\n\n# Convert is part of the ImageMagick package\nCONVERT_BINARY = os.getenv(\"PAPERLESS_CONVERT_BINARY\", \"convert\")\nCONVERT_TMPDIR = os.getenv(\"PAPERLESS_CONVERT_TMPDIR\")\nCONVERT_MEMORY_LIMIT = os.getenv(\"PAPERLESS_CONVERT_MEMORY_LIMIT\")\n\nGS_BINARY = os.getenv(\"PAPERLESS_GS_BINARY\", \"gs\")\n\nOPTIPNG_BINARY = os.getenv(\"PAPERLESS_OPTIPNG_BINARY\", \"optipng\")\n\n\n# Pre-2.x versions of Paperless stored your documents locally with GPG\n# encryption, but that is no longer the default. This behaviour is still\n# available, but it must be explicitly enabled by setting\n# `PAPERLESS_PASSPHRASE` in your environment or config file. 
The default is to\n# store these files unencrypted.\n#\n# Translation:\n# * If you're a new user, you can safely ignore this setting.\n# * If you're upgrading from 1.x, this must be set, OR you can run\n# `./manage.py change_storage_type gpg unencrypted` to decrypt your files,\n# after which you can unset this value.\nPASSPHRASE = os.getenv(\"PAPERLESS_PASSPHRASE\")\n\n# Trigger a script after every successful document consumption?\nPRE_CONSUME_SCRIPT = os.getenv(\"PAPERLESS_PRE_CONSUME_SCRIPT\")\nPOST_CONSUME_SCRIPT = os.getenv(\"PAPERLESS_POST_CONSUME_SCRIPT\")\n\n# Specify the default date order (for autodetected dates)\nDATE_ORDER = os.getenv(\"PAPERLESS_DATE_ORDER\", \"DMY\")\nFILENAME_DATE_ORDER = os.getenv(\"PAPERLESS_FILENAME_DATE_ORDER\")\n\n# Transformations applied before filename parsing\nFILENAME_PARSE_TRANSFORMS = []\nfor t in json.loads(os.getenv(\"PAPERLESS_FILENAME_PARSE_TRANSFORMS\", \"[]\")):\n FILENAME_PARSE_TRANSFORMS.append((re.compile(t[\"pattern\"]), t[\"repl\"]))\n\n# Specify the filename format for out files\nFILENAME_FORMAT = os.getenv(\"PAPERLESS_FILENAME_FORMAT\")\n\n# If this is enabled, variables in filename format will resolve to empty-string instead of 'none'.\n# Directories with 'empty names' are omitted, too.\nFILENAME_FORMAT_REMOVE_NONE = __get_boolean(\n \"PAPERLESS_FILENAME_FORMAT_REMOVE_NONE\",\n \"NO\",\n)\n\nTHUMBNAIL_FONT_NAME = os.getenv(\n \"PAPERLESS_THUMBNAIL_FONT_NAME\",\n \"/usr/share/fonts/liberation/LiberationSerif-Regular.ttf\",\n)\n\n# TODO: this should not have a prefix.\n# Tika settings\nPAPERLESS_TIKA_ENABLED = __get_boolean(\"PAPERLESS_TIKA_ENABLED\", \"NO\")\nPAPERLESS_TIKA_ENDPOINT = os.getenv(\"PAPERLESS_TIKA_ENDPOINT\", \"http://localhost:9998\")\nPAPERLESS_TIKA_GOTENBERG_ENDPOINT = os.getenv(\n \"PAPERLESS_TIKA_GOTENBERG_ENDPOINT\",\n \"http://localhost:3000\",\n)\n\nif PAPERLESS_TIKA_ENABLED:\n INSTALLED_APPS.append(\"paperless_tika.apps.PaperlessTikaConfig\")\n\n\ndef _parse_ignore_dates(\n env_ignore: str,\n date_order: str = DATE_ORDER,\n) -> Set[datetime.datetime]:\n \"\"\"\n If the PAPERLESS_IGNORE_DATES environment variable is set, parse the\n user provided string(s) into dates\n\n Args:\n env_ignore (str): The value of the environment variable, comma seperated dates\n date_order (str, optional): The format of the date strings. Defaults to DATE_ORDER.\n\n Returns:\n Set[datetime.datetime]: The set of parsed date objects\n \"\"\"\n import dateparser\n\n ignored_dates = set()\n for s in env_ignore.split(\",\"):\n d = dateparser.parse(\n s,\n settings={\n \"DATE_ORDER\": date_order,\n },\n )\n if d:\n ignored_dates.add(d.date())\n return ignored_dates\n\n\n# List dates that should be ignored when trying to parse date from document text\nIGNORE_DATES: Set[datetime.date] = set()\n\nif os.getenv(\"PAPERLESS_IGNORE_DATES\") is not None:\n IGNORE_DATES = _parse_ignore_dates(os.getenv(\"PAPERLESS_IGNORE_DATES\"))\n\nENABLE_UPDATE_CHECK = os.getenv(\"PAPERLESS_ENABLE_UPDATE_CHECK\", \"default\")\nif ENABLE_UPDATE_CHECK != \"default\":\n ENABLE_UPDATE_CHECK = __get_boolean(\"PAPERLESS_ENABLE_UPDATE_CHECK\")\n",
"path": "src/paperless/settings.py"
}
] | diff --git a/src/paperless/settings.py b/src/paperless/settings.py
index c512b1d47c2..9a5d9453d77 100644
--- a/src/paperless/settings.py
+++ b/src/paperless/settings.py
@@ -458,6 +458,7 @@ def default_task_workers() -> int:
Q_CLUSTER = {
"name": "paperless",
+ "guard_cycle": 5,
"catch_up": False,
"recycle": 1,
"retry": PAPERLESS_WORKER_RETRY,
|
tensorflow__addons-1770 | Enable multiprocessing when testing with GPU and support distributed strategies in the tests.
**Describe the feature and the current behavior/state.**
Here I'm not going to discuss the bazel case as it's much more complicated to handle, and we currently advertise using pytest to run the tests anyway. We can of course make sure everything is compatible though.
This revamping of gpu testing has multiple objectives:
* The tests should behave the same whether the contributor has a gpu or not, meaning we shouldn't run all the tests on a gpu just because a gpu is available; otherwise it hurts reproducibility.
* The test suite should be able to run with multiple workers in kokoro or when a user has multiple gpus. Pytest should use all gpus visible to the main process.
* We need to support testing with distributed strategies. Currently it doesn't work. A fix has been started in #1209 but we need to update it for pytest.
* Making the whole thing simple to use and to maintain. Notably, we would get rid of this file: https://github.com/tensorflow/addons/blob/master/tools/testing/parallel_gpu_execute.sh which is quite hard to work on.
To do all that, here is my proposal:
Stuff to know:
* Pytest-xdist uses multiprocessing to run the tests, not multithreading.
* Two environment variables are available in each of the workers to identify them (see the sketch after this list). https://github.com/pytest-dev/pytest-xdist#identifying-the-worker-process-during-a-test
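For illustration, here is a minimal sketch, assuming pytest-xdist's documented `PYTEST_XDIST_WORKER` variable, of how a conftest could derive a numeric worker index (the parsing is my assumption of a reasonable approach, not something pytest-xdist provides itself):

```python
import os

# pytest-xdist names its workers "gw0", "gw1", ...; the variable is unset
# when pytest runs without xdist, so treat that case as worker 0.
WORKER_ID = int(os.environ.get("PYTEST_XDIST_WORKER", "gw0")[2:])
```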
### Test workers
Suppose we have a machine with 10 CPUs and 4 GPUs: 10 processes will start to run the test suite. Workers 0 to 3 will each own one GPU (we can use CUDA_VISIBLE_DEVICES to enforce that, but I'm not even sure that's needed with the proposed implementation). Workers 4 to 9 will have no gpu available, as sketched below.
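A rough sketch of that worker-to-gpu mapping, using the 10-CPU/4-GPU example above (the hardcoded gpu count and the use of `CUDA_VISIBLE_DEVICES` are assumptions for this proposal; the variable would have to be set before TensorFlow initializes CUDA):

```python
import os

NUMBER_OF_GPUS = 4  # the example above; real code would detect this
worker_id = int(os.environ.get("PYTEST_XDIST_WORKER", "gw0")[2:])

if worker_id < NUMBER_OF_GPUS:
    # Workers 0 to 3 each own exactly one gpu.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(worker_id)
else:
    # Workers 4 to 9 see no gpu at all.
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
```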
### Virtual devices
Each of those processes, when starting, will split its physical device into 2 virtual devices. Tests that just need to run on gpu will use the first of those virtual devices. Processes which need to test distributed strategies will use both of them. We assume here that 2 virtual devices are enough to test distributed strategies.
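A minimal sketch of that split using TensorFlow's public virtual-device API (the 100MB memory limit is a placeholder value, not a recommendation):

```python
import tensorflow as tf

physical_gpus = tf.config.list_physical_devices("GPU")
if physical_gpus:
    # Split the first visible gpu into two small logical devices.
    tf.config.set_logical_device_configuration(
        physical_gpus[0],
        [
            tf.config.LogicalDeviceConfiguration(memory_limit=100),
            tf.config.LogicalDeviceConfiguration(memory_limit=100),
        ],
    )
    # Plain gpu tests then use "GPU:0"; distributed-strategy tests can use
    # both logical devices, e.g. tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]).
```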
### Impact on the contributors:
For this whole machinery to work, we need to know which tests need to run on CPU, GPU, or in distributed strategies. To do that we'll use pytest markers: `@pytest.mark.....`
* By default, if no marker is found, the test will run on CPU: `with device("CPU:0")`. It's equivalent to
`@pytest.mark.run_on(["cpu"])`.
* To run with gpu only: `@pytest.mark.run_on(["gpu"])`.
* To run on the cpu and gpu: `@pytest.mark.run_on(["cpu", "gpu"])` (test runs twice)
* To run within a distributed strategy `@pytest.mark.run_on(["distributed strategy"])`. (runs once here).
* To run with everything `@pytest.mark.run_on(["cpu", "gpu", "distributed strategy"])`
* To do crazy stuff without running the test in any device scope: `@pytest.mark.no_device_scope`. Then the contributor can do whatever they want in the test.
Of course, if no gpu is available, we just skip the tests needing a distributed strategy or the gpu. Contributors who handle the devices manually have to make sure to skip the test themselves if the gpu is used. A simplified sketch of how the marker machinery could work is shown below.
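To make the proposal concrete, here is a deliberately simplified sketch of how the marker could drive parametrization and skipping (the hook usage is standard pytest, but the `run_on` marker, the `device` fixture name, and the skipping logic are assumptions of this proposal; a real implementation would also handle `no_device_scope`, distributed strategies, unmarked tests defaulting to cpu, and marker registration):

```python
import pytest
import tensorflow as tf


def pytest_generate_tests(metafunc):
    marker = metafunc.definition.get_closest_marker("run_on")
    if marker is not None:
        # Run the test once per requested device, e.g. ["cpu", "gpu"].
        # Assumes the test function requests the `device` fixture.
        metafunc.parametrize("device", marker.args[0], indirect=True)


@pytest.fixture
def device(request):
    requested = request.param
    if requested == "gpu" and not tf.config.list_physical_devices("GPU"):
        pytest.skip("No gpu available, skipping gpu-only test.")
    # Wrap the whole test in the requested device scope ("CPU:0" or "GPU:0").
    with tf.device(requested.upper() + ":0"):
        yield requested
```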
Since gpus are often the scarcest resource (nb gpus << nb cpus), tests needing the gpu will also be marked with `@pytest.mark.tryfirst` to ensure that we don't have worker starvation at the end (to get maximum speed).
To implement that, we first need to convert all tests to pytest (as opposed to unittest); it's currently 80% done, and thanks a lot @autoih for putting a LOT of work into that.
**Relevant information**
- Are you willing to contribute it (yes/no): yes
- Are you willing to maintain it going forward? (yes/no): yes
- Is there a relevant academic paper? (if so, where): no
- Is there already an implementation in another framework? (if so, where): no
- Was it part of tf.contrib? (if so, where): no
**Which API type would this fall under (layer, metric, optimizer, etc.)**
Testing
**Who will benefit from this feature?**
Contributors with gpus, and the CI.
**Any other info.**
I believe that the implementation will first go in tensorflow addons because we have 4 GPUs available in the CI. Later on when it's stable we can split it from tensorflow addons and make it a separate pytest plugin with a public API.
Comments welcome, especially from @Squadrick, @hyang0129 and @seanpmorgan, since I'm not a ninja of tf.device.
| [
{
"content": "from tensorflow_addons.utils.test_utils import ( # noqa: F401\n maybe_run_functions_eagerly,\n pytest_make_parametrize_id,\n data_format,\n set_seeds,\n pytest_addoption,\n set_global_variables,\n pytest_configure,\n device,\n pytest_generate_tests,\n)\n\n# fixtures present in this file will be available\n# when running tests and can be referenced with strings\n# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions\n",
"path": "tensorflow_addons/conftest.py"
}
] | [
{
"content": "from tensorflow_addons.utils.test_utils import ( # noqa: F401\n maybe_run_functions_eagerly,\n pytest_make_parametrize_id,\n data_format,\n set_seeds,\n pytest_addoption,\n set_global_variables,\n pytest_configure,\n device,\n pytest_generate_tests,\n pytest_collection_modifyitems,\n)\n\n# fixtures present in this file will be available\n# when running tests and can be referenced with strings\n# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions\n",
"path": "tensorflow_addons/conftest.py"
}
] | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e3d04ddb67..9e0836d26e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -428,6 +428,8 @@ the test twice, on CPU and on GPU, or only on GPU. Here is how to do it.
```python
import pytest
+import tensorflow as tf
+from tensorflow_addons.utils import test_utils
@pytest.mark.with_device(["cpu", "gpu"])
def test_something():
@@ -445,6 +447,24 @@ def test_something2(device):
print("do something else.")
+
[email protected]_device(["cpu", "gpu", tf.distribute.MirroredStrategy])
+def test_something3(device):
+ # the code here will run three times, once on gpu, once on cpu and once with
+ # a mirror distributed strategy.
+ # device will be "cpu:0" or "gpu:0" or the strategy.
+ # with the MirroredStrategy, it's equivalent to:
+ # strategy = tf.distribute.MirroredStrategy(...)
+ # with strategy.scope():
+ # test_function(strategy)
+ if "cpu" in device:
+ print("do something.")
+ if "gpu" in device:
+ print("do something else.")
+ if isinstance(device, tf.distribute.Strategy):
+ device.run(...)
+
+
@pytest.mark.with_device(["gpu"])
def test_something_else():
# This test will be only run on gpu.
@@ -458,9 +478,20 @@ def test_something_more():
@pytest.mark.with_device(["no_device"])
[email protected]_gpu
def test_something_more2():
# When running the function, there will be no `with tf.device` wrapper.
# You are free to do whatever you wish with the devices in there.
+ # Make sure to use only the cpu, or only gpus available to the current process with
+ # test_utils.gpu_for_testing() , otherwise, it might not play nice with
+ # pytest's multiprocessing.
+ # If you use a gpu, mark the test with @pytest.mark.needs_gpu , otherwise the
+ # test will fail if no gpu is available on the system.
+ # for example
+ ...
+ strategy = tf.distribute.MirroredStrategy(test_utils.gpus_for_testing())
+ with strategy.scope():
+ print("I'm doing whatever I want.")
...
```
diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py
index 85b035a9ee..e6f9f7a6ef 100644
--- a/tensorflow_addons/conftest.py
+++ b/tensorflow_addons/conftest.py
@@ -8,6 +8,7 @@
pytest_configure,
device,
pytest_generate_tests,
+ pytest_collection_modifyitems,
)
# fixtures present in this file will be available
diff --git a/tensorflow_addons/optimizers/tests/conditional_gradient_test.py b/tensorflow_addons/optimizers/tests/conditional_gradient_test.py
index 884b1a29ec..b3f7ac37be 100644
--- a/tensorflow_addons/optimizers/tests/conditional_gradient_test.py
+++ b/tensorflow_addons/optimizers/tests/conditional_gradient_test.py
@@ -190,7 +190,7 @@ def test_basic_nuclear(use_resource):
# to address issue #36764
for i, dtype in enumerate(
_dtypes_with_checking_system(
- use_gpu=tf.test.is_gpu_available(), system=platform.system()
+ use_gpu=test_utils.is_gpu_available(), system=platform.system()
)
):
@@ -276,7 +276,7 @@ def test_minimize_sparse_resource_variable_nuclear():
# TODO:
# to address issue #347 and #36764.
for dtype in _dtypes_with_checking_system(
- use_gpu=tf.test.is_gpu_available(), system=platform.system()
+ use_gpu=test_utils.is_gpu_available(), system=platform.system()
):
var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
@@ -316,7 +316,7 @@ def loss():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_tensor_learning_rate_and_conditional_gradient_nuclear():
for dtype in _dtypes_with_checking_system(
- use_gpu=tf.test.is_gpu_available(), system=platform.system()
+ use_gpu=test_utils.is_gpu_available(), system=platform.system()
):
# TODO:
# Based on issue #36764 in the following link,
@@ -845,7 +845,7 @@ def test_like_dist_belief_frobenius_cg01():
def test_sparse_frobenius():
# TODO:
# To address the issue #347.
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype))
var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2]))
grads0 = tf.IndexedSlices(
@@ -1001,7 +1001,7 @@ def test_sharing_nuclear():
# TODO:
# To address the issue #36764.
for dtype in _dtypes_with_checking_system(
- use_gpu=tf.test.is_gpu_available(), system=platform.system()
+ use_gpu=test_utils.is_gpu_available(), system=platform.system()
):
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
@@ -1340,7 +1340,7 @@ def test_sparse_nuclear():
# TODO:
# To address the issue #347 and issue #36764.
for dtype in _dtypes_with_checking_system(
- use_gpu=tf.test.is_gpu_available(), system=platform.system()
+ use_gpu=test_utils.is_gpu_available(), system=platform.system()
):
var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype))
var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2]))
diff --git a/tensorflow_addons/optimizers/tests/lamb_test.py b/tensorflow_addons/optimizers/tests/lamb_test.py
index 06113d7ad3..9c497600e2 100644
--- a/tensorflow_addons/optimizers/tests/lamb_test.py
+++ b/tensorflow_addons/optimizers/tests/lamb_test.py
@@ -67,7 +67,7 @@ def get_beta_accumulators(opt, dtype):
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_sparse():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize tf for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -113,7 +113,7 @@ def test_sparse():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_basic_with_learning_rate_decay():
- for i, dtype in enumerate(_dtypes_to_test(use_gpu=tf.test.is_gpu_available())):
+ for i, dtype in enumerate(_dtypes_to_test(use_gpu=test_utils.is_gpu_available())):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -162,7 +162,7 @@ def test_basic_with_learning_rate_decay():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_basic_with_learning_rate_inverse_time_decay():
- for i, dtype in enumerate(_dtypes_to_test(use_gpu=tf.test.is_gpu_available())):
+ for i, dtype in enumerate(_dtypes_to_test(use_gpu=test_utils.is_gpu_available())):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -204,7 +204,7 @@ def test_basic_with_learning_rate_inverse_time_decay():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_tensor_learning_rate():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -239,7 +239,7 @@ def test_tensor_learning_rate():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_sharing():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -293,7 +293,7 @@ def loss():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_resource():
- for i, dtype in enumerate(_dtypes_to_test(use_gpu=tf.test.is_gpu_available())):
+ for i, dtype in enumerate(_dtypes_to_test(use_gpu=test_utils.is_gpu_available())):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py b/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py
index 98f1379b2a..d1653206f2 100644
--- a/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py
+++ b/tensorflow_addons/optimizers/tests/weight_decay_optimizers_test.py
@@ -53,7 +53,7 @@ def do_test(
the optimizer_params in the update_fn.
"""
# TODO: Fix #347 issue
- if do_sparse and tf.test.is_gpu_available():
+ if do_sparse and test_utils.is_gpu_available():
pytest.skip("Wait #347 to be fixed")
# Initialize variables for numpy implementation.
@@ -112,7 +112,7 @@ def do_test_sparse_repeated_indices(dtype, optimizer, **optimizer_kwargs):
the optimizer_params in the update_fn.
"""
# TODO: Fix #347 issue
- if tf.test.is_gpu_available():
+ if test_utils.is_gpu_available():
pytest.skip("Wait #347 to be fixed")
repeated_index_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
diff --git a/tensorflow_addons/optimizers/tests/yogi_test.py b/tensorflow_addons/optimizers/tests/yogi_test.py
index 1cc0498eff..fcd98d49bb 100644
--- a/tensorflow_addons/optimizers/tests/yogi_test.py
+++ b/tensorflow_addons/optimizers/tests/yogi_test.py
@@ -96,7 +96,7 @@ def _dtypes_to_test(use_gpu):
def do_test_sparse(beta1=0.0, l1reg=0.0, l2reg=0.0):
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -169,7 +169,7 @@ def test_sparse_momentum_regularization():
def test_sparse_repeated_indices():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
repeated_index_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
grad_repeated_index = tf.IndexedSlices(
@@ -199,7 +199,7 @@ def test_sparse_repeated_indices():
def do_test_basic(beta1=0.0, l1reg=0.0, l2reg=0.0):
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -265,7 +265,7 @@ def test_basic_momentum_regularization():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_tensor_learning_rate():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
@@ -301,7 +301,7 @@ def test_tensor_learning_rate():
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_sharing():
- for dtype in _dtypes_to_test(use_gpu=tf.test.is_gpu_available()):
+ for dtype in _dtypes_to_test(use_gpu=test_utils.is_gpu_available()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 1.0, 0.0, 1.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
diff --git a/tensorflow_addons/utils/test_utils.py b/tensorflow_addons/utils/test_utils.py
index 4b3c082fe3..fad6fcc22a 100644
--- a/tensorflow_addons/utils/test_utils.py
+++ b/tensorflow_addons/utils/test_utils.py
@@ -31,6 +31,11 @@
WORKER_ID = int(os.environ.get("PYTEST_XDIST_WORKER", "gw0")[2])
NUMBER_OF_GPUS = len(tf.config.list_physical_devices("GPU"))
+
+def is_gpu_available():
+ return NUMBER_OF_GPUS >= 1
+
+
# Some configuration before starting the tests.
# we only need one core per worker.
@@ -39,18 +44,22 @@
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(1)
-if NUMBER_OF_GPUS != 0:
+if is_gpu_available():
# We use only the first gpu at the moment. That's enough for most use cases.
- # split the first gpu into chunks of 100MB per pytest worker.
+ # split the first gpu into chunks of 100MB per virtual device.
# It's the user's job to limit the amount of pytest workers depending
# on the available memory.
# In practice, each process takes a bit more memory.
# There must be some kind of overhead but it's not very big (~200MB more)
+ # Each worker has two virtual devices.
+ # When running on gpu, only the first device is used. The other one is used
+ # in distributed strategies.
first_gpu = tf.config.list_physical_devices("GPU")[0]
+ virtual_gpus = [
+ tf.config.LogicalDeviceConfiguration(memory_limit=100) for _ in range(2)
+ ]
- tf.config.set_logical_device_configuration(
- first_gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=100)],
- )
+ tf.config.set_logical_device_configuration(first_gpu, virtual_gpus)
def finalizer():
@@ -96,6 +105,20 @@ def pytest_addoption(parser):
)
+def gpus_for_testing():
+ """For the moment it's very simple, but it might change in the future,
+ with multiple physical gpus for example. So it's better if this function
+ is called rather than hardcoding the gpu devices in the tests.
+ """
+ if not is_gpu_available():
+ raise SystemError(
+ "You are trying to get some gpus for testing but no gpu is available on "
+ "your system. \nDid you forget to use `@pytest.mark.needs_gpu` on your test"
+ " so that it's skipped automatically when no gpu is available?"
+ )
+ return ["gpu:0", "gpu:1"]
+
+
@pytest.fixture(scope="session", autouse=True)
def set_global_variables(request):
if request.config.getoption("--skip-custom-ops"):
@@ -114,7 +137,11 @@ def device(request):
requested_device = request.param
if requested_device == "no_device":
yield requested_device
- else:
+ elif requested_device == tf.distribute.MirroredStrategy:
+ strategy = requested_device(gpus_for_testing())
+ with strategy.scope():
+ yield strategy
+ elif isinstance(requested_device, str):
if requested_device in ["cpu", "gpu"]:
# we use GPU:0 because the virtual device we created is the
# only one in the first GPU (so first in the list of virtual devices).
@@ -126,13 +153,10 @@ def device(request):
def get_marks(device_name):
- marks = []
- if device_name == "gpu":
- marks.append(pytest.mark.needs_gpu)
- if NUMBER_OF_GPUS == 0:
- skip_message = "The gpu is not available."
- marks.append(pytest.mark.skip(reason=skip_message))
- return marks
+ if device_name == "gpu" or device_name == tf.distribute.MirroredStrategy:
+ return [pytest.mark.needs_gpu]
+ else:
+ return []
def pytest_generate_tests(metafunc):
@@ -149,6 +173,13 @@ def pytest_generate_tests(metafunc):
metafunc.parametrize("device", parameters, indirect=True)
+def pytest_collection_modifyitems(items):
+ for item in items:
+ if item.get_closest_marker("needs_gpu") is not None:
+ if not is_gpu_available():
+ item.add_marker(pytest.mark.skip("The gpu is not available."))
+
+
def assert_allclose_according_to_type(
a,
b,
diff --git a/tensorflow_addons/utils/tests/test_utils_test.py b/tensorflow_addons/utils/tests/test_utils_test.py
index 0220ef3747..3886ce2eef 100644
--- a/tensorflow_addons/utils/tests/test_utils_test.py
+++ b/tensorflow_addons/utils/tests/test_utils_test.py
@@ -1,10 +1,45 @@
import random
import numpy as np
+import pytest
import tensorflow as tf
+from tensorflow_addons.utils import test_utils
def test_seed_is_set():
assert random.randint(0, 10000) == 6311
assert np.random.randint(0, 10000) == 2732
assert tf.random.uniform([], 0, 10000, dtype=tf.int64).numpy() == 9457
+
+
[email protected]_device(["cpu", "gpu", tf.distribute.MirroredStrategy])
+def test_all_scopes(device):
+ assert isinstance(device, str) or isinstance(device, tf.distribute.Strategy)
+
+
+def train_small_model():
+ model_input = tf.keras.layers.Input((3,))
+ model_output = tf.keras.layers.Dense(4)(model_input)
+ model = tf.keras.Model(model_input, model_output)
+ model.compile(loss="mse")
+
+ x = np.random.uniform(size=(5, 3))
+ y = np.random.uniform(size=(5, 4))
+ model.fit(x, y, epochs=1)
+
+
[email protected]_device([tf.distribute.MirroredStrategy])
+def test_distributed_strategy(device):
+ assert isinstance(device, tf.distribute.Strategy)
+ train_small_model()
+
+
[email protected]_device(["no_device"])
[email protected]_gpu
+def test_custom_device_placement():
+ with tf.device(test_utils.gpus_for_testing()[0]):
+ train_small_model()
+
+ strategy = tf.distribute.MirroredStrategy(test_utils.gpus_for_testing())
+ with strategy.scope():
+ train_small_model()
|
PaddlePaddle__models-3094 | Bug in PaddleCV/Research/astar2019/reader.py
There is a bug at line 74 of [PaddleCV/Research/astar2019/reader.py](https://github.com/PaddlePaddle/models/blob/develop/PaddleCV/Research/astar2019/reader.py); the code is as follows:
```
@property
def apply_distort(self):  # line 74: judging by its function, this should be def apply_expand(self):
    return self._apply_expand
@property
def apply_distort(self):
    return self._apply_distort
```
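The first property shadows the second (same name, twice), so `apply_expand` is never exposed. The fix, confirmed by the PR diff further down, is simply to rename the first property so each flag is reachable under its own name:

```python
@property
def apply_expand(self):
    # expose the expand flag under its own name instead of shadowing apply_distort
    return self._apply_expand

@property
def apply_distort(self):
    return self._apply_distort
```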
| [
{
"content": "# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport xml.etree.ElementTree\nimport os\nimport time\nimport copy\nimport six\nimport math\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport image_util\nimport paddle\n\n\nclass Settings(object):\n def __init__(self,\n dataset=None,\n data_dir=None,\n label_file=None,\n resize_h=300,\n resize_w=300,\n mean_value=[127.5, 127.5, 127.5],\n apply_distort=True,\n apply_expand=True,\n ap_version='11point'):\n self._dataset = dataset\n self._ap_version = ap_version\n self._data_dir = data_dir\n if 'pascalvoc' in dataset:\n self._label_list = []\n label_fpath = os.path.join(data_dir, label_file)\n for line in open(label_fpath):\n self._label_list.append(line.strip())\n\n self._apply_distort = apply_distort\n self._apply_expand = apply_expand\n self._resize_height = resize_h\n self._resize_width = resize_w\n self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(\n 'float32')\n self._expand_prob = 0.5\n self._expand_max_ratio = 4\n self._hue_prob = 0.5\n self._hue_delta = 18\n self._contrast_prob = 0.5\n self._contrast_delta = 0.5\n self._saturation_prob = 0.5\n self._saturation_delta = 0.5\n self._brightness_prob = 0.5\n self._brightness_delta = 0.125\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def ap_version(self):\n return self._ap_version\n\n @property\n def apply_distort(self):\n return self._apply_expand\n\n @property\n def apply_distort(self):\n return self._apply_distort\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, data_dir):\n self._data_dir = data_dir\n\n @property\n def label_list(self):\n return self._label_list\n\n @property\n def resize_h(self):\n return self._resize_height\n\n @property\n def resize_w(self):\n return self._resize_width\n\n @property\n def img_mean(self):\n return self._img_mean\n\n\ndef preprocess(img, bbox_labels, mode, settings):\n img_width, img_height = img.size\n sampled_labels = bbox_labels\n if mode == 'train':\n if settings._apply_distort:\n img = image_util.distort_image(img, settings)\n if settings._apply_expand:\n img, bbox_labels, img_width, img_height = image_util.expand_image(\n img, bbox_labels, img_width, img_height, settings)\n # sampling\n batch_sampler = []\n # hard-code here\n batch_sampler.append(\n image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))\n sampled_bbox = 
image_util.generate_batch_samples(batch_sampler,\n bbox_labels)\n\n img = np.array(img)\n if len(sampled_bbox) > 0:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n img, sampled_labels = image_util.crop_image(\n img, bbox_labels, sampled_bbox[idx], img_width, img_height)\n\n img = Image.fromarray(img)\n img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)\n img = np.array(img)\n\n if mode == 'train':\n mirror = int(np.random.uniform(0, 2))\n if mirror == 1:\n img = img[:, ::-1, :]\n for i in six.moves.xrange(len(sampled_labels)):\n tmp = sampled_labels[i][1]\n sampled_labels[i][1] = 1 - sampled_labels[i][3]\n sampled_labels[i][3] = 1 - tmp\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n return img, sampled_labels\n\n\ndef coco(settings, coco_api, file_list, mode, batch_size, shuffle, data_dir):\n from pycocotools.coco import COCO\n\n def reader():\n if mode == 'train' and shuffle:\n np.random.shuffle(file_list)\n batch_out = []\n for image in file_list:\n image_name = image['file_name']\n image_path = os.path.join(data_dir, image_name)\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n im = Image.open(image_path)\n if im.mode == 'L':\n im = im.convert('RGB')\n im_width, im_height = im.size\n im_id = image['id']\n\n # layout: category_id | xmin | ymin | xmax | ymax | iscrowd\n bbox_labels = []\n annIds = coco_api.getAnnIds(imgIds=image['id'])\n anns = coco_api.loadAnns(annIds)\n for ann in anns:\n bbox_sample = []\n # start from 1, leave 0 to background\n bbox_sample.append(float(ann['category_id']))\n bbox = ann['bbox']\n xmin, ymin, w, h = bbox\n xmax = xmin + w\n ymax = ymin + h\n bbox_sample.append(float(xmin) / im_width)\n bbox_sample.append(float(ymin) / im_height)\n bbox_sample.append(float(xmax) / im_width)\n bbox_sample.append(float(ymax) / im_height)\n bbox_sample.append(float(ann['iscrowd']))\n bbox_labels.append(bbox_sample)\n im, sample_labels = preprocess(im, bbox_labels, mode, settings)\n sample_labels = np.array(sample_labels)\n if len(sample_labels) == 0: continue\n im = im.astype('float32')\n boxes = sample_labels[:, 1:5]\n lbls = sample_labels[:, 0].astype('int32')\n iscrowd = sample_labels[:, -1].astype('int32')\n if 'cocoMAP' in settings.ap_version:\n batch_out.append((im, boxes, lbls, iscrowd,\n [im_id, im_width, im_height]))\n else:\n batch_out.append((im, boxes, lbls, iscrowd))\n\n if len(batch_out) == batch_size:\n yield batch_out\n batch_out = []\n\n if mode == 'test' and len(batch_out) > 1:\n yield batch_out\n batch_out = []\n\n return reader\n\n\ndef pascalvoc(settings, file_list, mode, batch_size, shuffle):\n def reader():\n if mode == 'train' and shuffle:\n np.random.shuffle(file_list)\n batch_out = []\n cnt = 0\n for image in file_list:\n image_path, label_path = image.split()\n image_path = os.path.join(settings.data_dir, image_path)\n label_path = os.path.join(settings.data_dir, label_path)\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n im = Image.open(image_path)\n if im.mode == 'L':\n im = im.convert('RGB')\n im_width, im_height = im.size\n\n # layout: label | xmin | ymin | xmax | ymax | difficult\n bbox_labels = []\n root = 
xml.etree.ElementTree.parse(label_path).getroot()\n for object in root.findall('object'):\n bbox_sample = []\n # start from 1\n bbox_sample.append(\n float(settings.label_list.index(object.find('name').text)))\n bbox = object.find('bndbox')\n difficult = float(object.find('difficult').text)\n bbox_sample.append(float(bbox.find('xmin').text) / im_width)\n bbox_sample.append(float(bbox.find('ymin').text) / im_height)\n bbox_sample.append(float(bbox.find('xmax').text) / im_width)\n bbox_sample.append(float(bbox.find('ymax').text) / im_height)\n bbox_sample.append(difficult)\n bbox_labels.append(bbox_sample)\n im, sample_labels = preprocess(im, bbox_labels, mode, settings)\n sample_labels = np.array(sample_labels)\n if len(sample_labels) == 0: continue\n im = im.astype('float32')\n boxes = sample_labels[:, 1:5]\n lbls = sample_labels[:, 0].astype('int32')\n difficults = sample_labels[:, -1].astype('int32')\n\n batch_out.append((im, boxes, lbls, difficults))\n if len(batch_out) == batch_size:\n yield batch_out\n cnt += len(batch_out)\n batch_out = []\n\n if mode == 'test' and len(batch_out) > 1:\n yield batch_out\n cnt += len(batch_out)\n batch_out = []\n\n return reader\n\n\ndef train(settings,\n file_list,\n batch_size,\n shuffle=True,\n num_workers=8,\n enable_ce=False):\n file_path = os.path.join(settings.data_dir, file_list)\n readers = []\n if 'coco' in settings.dataset:\n # cocoapi\n from pycocotools.coco import COCO\n coco_api = COCO(file_path)\n image_ids = coco_api.getImgIds()\n images = coco_api.loadImgs(image_ids)\n np.random.shuffle(images)\n n = int(math.ceil(len(images) // num_workers))\n image_lists = [images[i:i + n] for i in range(0, len(images), n)]\n\n if '2014' in file_list:\n sub_dir = \"train2014\"\n elif '2017' in file_list:\n sub_dir = \"train2017\"\n data_dir = os.path.join(settings.data_dir, sub_dir)\n for l in image_lists:\n readers.append(\n coco(settings, coco_api, l, 'train', batch_size, shuffle,\n data_dir))\n else:\n images = [line.strip() for line in open(file_path)]\n np.random.shuffle(images)\n n = int(math.ceil(len(images) // num_workers))\n image_lists = [images[i:i + n] for i in range(0, len(images), n)]\n for l in image_lists:\n readers.append(pascalvoc(settings, l, 'train', batch_size, shuffle))\n return paddle.reader.multiprocess_reader(readers, False)\n\n\ndef test(settings, file_list, batch_size):\n file_list = os.path.join(settings.data_dir, file_list)\n if 'coco' in settings.dataset:\n from pycocotools.coco import COCO\n coco_api = COCO(file_list)\n image_ids = coco_api.getImgIds()\n images = coco_api.loadImgs(image_ids)\n if '2014' in file_list:\n sub_dir = \"val2014\"\n elif '2017' in file_list:\n sub_dir = \"val2017\"\n data_dir = os.path.join(settings.data_dir, sub_dir)\n return coco(settings, coco_api, images, 'test', batch_size, False,\n data_dir)\n else:\n image_list = [line.strip() for line in open(file_list)]\n return pascalvoc(settings, image_list, 'test', batch_size, False)\n\n\ndef infer(settings, image_path):\n def reader():\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n img = Image.open(image_path)\n if img.mode == 'L':\n img = img.convert('RGB')\n im_width, im_height = img.size\n img = img.resize((settings.resize_w, settings.resize_h),\n Image.ANTIALIAS)\n img = np.array(img)\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = 
img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n return img\n\n return reader\n",
"path": "PaddleCV/Research/astar2019/reader.py"
}
] | [
{
"content": "# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport xml.etree.ElementTree\nimport os\nimport time\nimport copy\nimport six\nimport math\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport image_util\nimport paddle\n\n\nclass Settings(object):\n def __init__(self,\n dataset=None,\n data_dir=None,\n label_file=None,\n resize_h=300,\n resize_w=300,\n mean_value=[127.5, 127.5, 127.5],\n apply_distort=True,\n apply_expand=True,\n ap_version='11point'):\n self._dataset = dataset\n self._ap_version = ap_version\n self._data_dir = data_dir\n if 'pascalvoc' in dataset:\n self._label_list = []\n label_fpath = os.path.join(data_dir, label_file)\n for line in open(label_fpath):\n self._label_list.append(line.strip())\n\n self._apply_distort = apply_distort\n self._apply_expand = apply_expand\n self._resize_height = resize_h\n self._resize_width = resize_w\n self._img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(\n 'float32')\n self._expand_prob = 0.5\n self._expand_max_ratio = 4\n self._hue_prob = 0.5\n self._hue_delta = 18\n self._contrast_prob = 0.5\n self._contrast_delta = 0.5\n self._saturation_prob = 0.5\n self._saturation_delta = 0.5\n self._brightness_prob = 0.5\n self._brightness_delta = 0.125\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def ap_version(self):\n return self._ap_version\n\n @property\n def apply_expand(self):\n return self._apply_expand\n\n @property\n def apply_distort(self):\n return self._apply_distort\n\n @property\n def data_dir(self):\n return self._data_dir\n\n @data_dir.setter\n def data_dir(self, data_dir):\n self._data_dir = data_dir\n\n @property\n def label_list(self):\n return self._label_list\n\n @property\n def resize_h(self):\n return self._resize_height\n\n @property\n def resize_w(self):\n return self._resize_width\n\n @property\n def img_mean(self):\n return self._img_mean\n\n\ndef preprocess(img, bbox_labels, mode, settings):\n img_width, img_height = img.size\n sampled_labels = bbox_labels\n if mode == 'train':\n if settings._apply_distort:\n img = image_util.distort_image(img, settings)\n if settings._apply_expand:\n img, bbox_labels, img_width, img_height = image_util.expand_image(\n img, bbox_labels, img_width, img_height, settings)\n # sampling\n batch_sampler = []\n # hard-code here\n batch_sampler.append(\n image_util.sampler(1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 0.0))\n batch_sampler.append(\n image_util.sampler(1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0))\n sampled_bbox = 
image_util.generate_batch_samples(batch_sampler,\n bbox_labels)\n\n img = np.array(img)\n if len(sampled_bbox) > 0:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n img, sampled_labels = image_util.crop_image(\n img, bbox_labels, sampled_bbox[idx], img_width, img_height)\n\n img = Image.fromarray(img)\n img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)\n img = np.array(img)\n\n if mode == 'train':\n mirror = int(np.random.uniform(0, 2))\n if mirror == 1:\n img = img[:, ::-1, :]\n for i in six.moves.xrange(len(sampled_labels)):\n tmp = sampled_labels[i][1]\n sampled_labels[i][1] = 1 - sampled_labels[i][3]\n sampled_labels[i][3] = 1 - tmp\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n return img, sampled_labels\n\n\ndef coco(settings, coco_api, file_list, mode, batch_size, shuffle, data_dir):\n from pycocotools.coco import COCO\n\n def reader():\n if mode == 'train' and shuffle:\n np.random.shuffle(file_list)\n batch_out = []\n for image in file_list:\n image_name = image['file_name']\n image_path = os.path.join(data_dir, image_name)\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n im = Image.open(image_path)\n if im.mode == 'L':\n im = im.convert('RGB')\n im_width, im_height = im.size\n im_id = image['id']\n\n # layout: category_id | xmin | ymin | xmax | ymax | iscrowd\n bbox_labels = []\n annIds = coco_api.getAnnIds(imgIds=image['id'])\n anns = coco_api.loadAnns(annIds)\n for ann in anns:\n bbox_sample = []\n # start from 1, leave 0 to background\n bbox_sample.append(float(ann['category_id']))\n bbox = ann['bbox']\n xmin, ymin, w, h = bbox\n xmax = xmin + w\n ymax = ymin + h\n bbox_sample.append(float(xmin) / im_width)\n bbox_sample.append(float(ymin) / im_height)\n bbox_sample.append(float(xmax) / im_width)\n bbox_sample.append(float(ymax) / im_height)\n bbox_sample.append(float(ann['iscrowd']))\n bbox_labels.append(bbox_sample)\n im, sample_labels = preprocess(im, bbox_labels, mode, settings)\n sample_labels = np.array(sample_labels)\n if len(sample_labels) == 0: continue\n im = im.astype('float32')\n boxes = sample_labels[:, 1:5]\n lbls = sample_labels[:, 0].astype('int32')\n iscrowd = sample_labels[:, -1].astype('int32')\n if 'cocoMAP' in settings.ap_version:\n batch_out.append((im, boxes, lbls, iscrowd,\n [im_id, im_width, im_height]))\n else:\n batch_out.append((im, boxes, lbls, iscrowd))\n\n if len(batch_out) == batch_size:\n yield batch_out\n batch_out = []\n\n if mode == 'test' and len(batch_out) > 1:\n yield batch_out\n batch_out = []\n\n return reader\n\n\ndef pascalvoc(settings, file_list, mode, batch_size, shuffle):\n def reader():\n if mode == 'train' and shuffle:\n np.random.shuffle(file_list)\n batch_out = []\n cnt = 0\n for image in file_list:\n image_path, label_path = image.split()\n image_path = os.path.join(settings.data_dir, image_path)\n label_path = os.path.join(settings.data_dir, label_path)\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n im = Image.open(image_path)\n if im.mode == 'L':\n im = im.convert('RGB')\n im_width, im_height = im.size\n\n # layout: label | xmin | ymin | xmax | ymax | difficult\n bbox_labels = []\n root = 
xml.etree.ElementTree.parse(label_path).getroot()\n for object in root.findall('object'):\n bbox_sample = []\n # start from 1\n bbox_sample.append(\n float(settings.label_list.index(object.find('name').text)))\n bbox = object.find('bndbox')\n difficult = float(object.find('difficult').text)\n bbox_sample.append(float(bbox.find('xmin').text) / im_width)\n bbox_sample.append(float(bbox.find('ymin').text) / im_height)\n bbox_sample.append(float(bbox.find('xmax').text) / im_width)\n bbox_sample.append(float(bbox.find('ymax').text) / im_height)\n bbox_sample.append(difficult)\n bbox_labels.append(bbox_sample)\n im, sample_labels = preprocess(im, bbox_labels, mode, settings)\n sample_labels = np.array(sample_labels)\n if len(sample_labels) == 0: continue\n im = im.astype('float32')\n boxes = sample_labels[:, 1:5]\n lbls = sample_labels[:, 0].astype('int32')\n difficults = sample_labels[:, -1].astype('int32')\n\n batch_out.append((im, boxes, lbls, difficults))\n if len(batch_out) == batch_size:\n yield batch_out\n cnt += len(batch_out)\n batch_out = []\n\n if mode == 'test' and len(batch_out) > 1:\n yield batch_out\n cnt += len(batch_out)\n batch_out = []\n\n return reader\n\n\ndef train(settings,\n file_list,\n batch_size,\n shuffle=True,\n num_workers=8,\n enable_ce=False):\n file_path = os.path.join(settings.data_dir, file_list)\n readers = []\n if 'coco' in settings.dataset:\n # cocoapi\n from pycocotools.coco import COCO\n coco_api = COCO(file_path)\n image_ids = coco_api.getImgIds()\n images = coco_api.loadImgs(image_ids)\n np.random.shuffle(images)\n n = int(math.ceil(len(images) // num_workers))\n image_lists = [images[i:i + n] for i in range(0, len(images), n)]\n\n if '2014' in file_list:\n sub_dir = \"train2014\"\n elif '2017' in file_list:\n sub_dir = \"train2017\"\n data_dir = os.path.join(settings.data_dir, sub_dir)\n for l in image_lists:\n readers.append(\n coco(settings, coco_api, l, 'train', batch_size, shuffle,\n data_dir))\n else:\n images = [line.strip() for line in open(file_path)]\n np.random.shuffle(images)\n n = int(math.ceil(len(images) // num_workers))\n image_lists = [images[i:i + n] for i in range(0, len(images), n)]\n for l in image_lists:\n readers.append(pascalvoc(settings, l, 'train', batch_size, shuffle))\n return paddle.reader.multiprocess_reader(readers, False)\n\n\ndef test(settings, file_list, batch_size):\n file_list = os.path.join(settings.data_dir, file_list)\n if 'coco' in settings.dataset:\n from pycocotools.coco import COCO\n coco_api = COCO(file_list)\n image_ids = coco_api.getImgIds()\n images = coco_api.loadImgs(image_ids)\n if '2014' in file_list:\n sub_dir = \"val2014\"\n elif '2017' in file_list:\n sub_dir = \"val2017\"\n data_dir = os.path.join(settings.data_dir, sub_dir)\n return coco(settings, coco_api, images, 'test', batch_size, False,\n data_dir)\n else:\n image_list = [line.strip() for line in open(file_list)]\n return pascalvoc(settings, image_list, 'test', batch_size, False)\n\n\ndef infer(settings, image_path):\n def reader():\n if not os.path.exists(image_path):\n raise ValueError(\"%s is not exist, you should specify \"\n \"data path correctly.\" % image_path)\n img = Image.open(image_path)\n if img.mode == 'L':\n img = img.convert('RGB')\n im_width, im_height = img.size\n img = img.resize((settings.resize_w, settings.resize_h),\n Image.ANTIALIAS)\n img = np.array(img)\n # HWC to CHW\n if len(img.shape) == 3:\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 1, 0)\n # RBG to BGR\n img = img[[2, 1, 0], :, :]\n img = 
img.astype('float32')\n img -= settings.img_mean\n img = img * 0.007843\n return img\n\n return reader\n",
"path": "PaddleCV/Research/astar2019/reader.py"
}
] | diff --git a/PaddleCV/Research/astar2019/reader.py b/PaddleCV/Research/astar2019/reader.py
index 8024bf067f..39efa44aa0 100644
--- a/PaddleCV/Research/astar2019/reader.py
+++ b/PaddleCV/Research/astar2019/reader.py
@@ -71,7 +71,7 @@ def ap_version(self):
return self._ap_version
@property
- def apply_distort(self):
+ def apply_expand(self):
return self._apply_expand
@property
|
adamchainz__django-perf-rec-469 | overwrite MODE can result in invalid snapshot files
### Python Version
3.9
### Django Version
3.2
### Package Version
4.20.0
### Description
If the new snapshot data are smaller than the previous ones, then `overwrite` mode can result in the file contents containing trailing data left over from the previous snapshots. In turn, that data can end up being invalid YAML, which then causes issues when the file is next read.
It looks like `set_and_save` needs to call [`fp.truncate()`](https://docs.python.org/3/library/io.html#io.IOBase.truncate) after `yaml.safe_dump`.
If you agree with this approach I'd be happy to put together a PR.
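To illustrate the failure mode outside the project, here is a minimal, hypothetical rewrite-in-place helper (names are illustrative, not django-perf-rec's actual API; it assumes the file already exists) showing where `truncate()` belongs:

```python
import yaml

def rewrite_yaml_in_place(path: str, data: dict) -> None:
    # Assumes the file exists; open for in-place read/write.
    with open(path, "r+") as fp:
        fp.seek(0)
        yaml.safe_dump(data, fp, default_flow_style=False)
        # If the new dump is shorter than the old contents, the bytes beyond
        # the new end would otherwise survive and corrupt the YAML.
        fp.truncate()
```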
| [
{
"content": "from __future__ import annotations\n\nimport errno\nimport os\nfrom typing import Any\n\nimport yaml\nfrom django.core.files import locks\n\nfrom django_perf_rec.types import PerformanceRecord\n\n\nclass KVFile:\n def __init__(self, file_name: str) -> None:\n self.file_name = file_name\n self.data = self.load(file_name)\n\n def __len__(self) -> int:\n return len(self.data)\n\n LOAD_CACHE: dict[str, dict[str, Any]] = {}\n\n @classmethod\n def load(cls, file_name: str) -> dict[str, PerformanceRecord]:\n if file_name not in cls.LOAD_CACHE:\n cls.LOAD_CACHE[file_name] = cls.load_file(file_name)\n return cls.LOAD_CACHE[file_name]\n\n @classmethod\n def load_file(cls, file_name: str) -> dict[str, PerformanceRecord]:\n try:\n with open(file_name) as fp:\n locks.lock(fp, locks.LOCK_EX)\n content = fp.read()\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n content = \"{}\"\n else:\n raise\n\n data = yaml.safe_load(content)\n\n if data is None:\n return {}\n elif not isinstance(data, dict):\n raise TypeError(f\"YAML content of {file_name} is not a dictionary\")\n\n return data\n\n @classmethod\n def _clear_load_cache(cls) -> None:\n # Should really only be used in testing this class\n cls.LOAD_CACHE = {}\n\n def get(\n self, key: str, default: PerformanceRecord | None\n ) -> PerformanceRecord | None:\n return self.data.get(key, default)\n\n def set_and_save(self, key: str, value: PerformanceRecord) -> None:\n if self.data.get(key, object()) == value:\n return\n\n fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT, mode=0o666)\n with os.fdopen(fd, \"r+\") as fp:\n locks.lock(fd, locks.LOCK_EX)\n\n data = yaml.safe_load(fp)\n if data is None:\n data = {}\n\n self.data[key] = value\n data[key] = value\n\n fp.seek(0)\n yaml.safe_dump(\n data, fp, default_flow_style=False, allow_unicode=True, width=10000\n )\n",
"path": "src/django_perf_rec/yaml.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport errno\nimport os\nfrom typing import Any\n\nimport yaml\nfrom django.core.files import locks\n\nfrom django_perf_rec.types import PerformanceRecord\n\n\nclass KVFile:\n def __init__(self, file_name: str) -> None:\n self.file_name = file_name\n self.data = self.load(file_name)\n\n def __len__(self) -> int:\n return len(self.data)\n\n LOAD_CACHE: dict[str, dict[str, Any]] = {}\n\n @classmethod\n def load(cls, file_name: str) -> dict[str, PerformanceRecord]:\n if file_name not in cls.LOAD_CACHE:\n cls.LOAD_CACHE[file_name] = cls.load_file(file_name)\n return cls.LOAD_CACHE[file_name]\n\n @classmethod\n def load_file(cls, file_name: str) -> dict[str, PerformanceRecord]:\n try:\n with open(file_name) as fp:\n locks.lock(fp, locks.LOCK_EX)\n content = fp.read()\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n content = \"{}\"\n else:\n raise\n\n data = yaml.safe_load(content)\n\n if data is None:\n return {}\n elif not isinstance(data, dict):\n raise TypeError(f\"YAML content of {file_name} is not a dictionary\")\n\n return data\n\n @classmethod\n def _clear_load_cache(cls) -> None:\n # Should really only be used in testing this class\n cls.LOAD_CACHE = {}\n\n def get(\n self, key: str, default: PerformanceRecord | None\n ) -> PerformanceRecord | None:\n return self.data.get(key, default)\n\n def set_and_save(self, key: str, value: PerformanceRecord) -> None:\n if self.data.get(key, object()) == value:\n return\n\n fd = os.open(self.file_name, os.O_RDWR | os.O_CREAT, mode=0o666)\n with os.fdopen(fd, \"r+\") as fp:\n locks.lock(fd, locks.LOCK_EX)\n\n data = yaml.safe_load(fp)\n if data is None:\n data = {}\n\n self.data[key] = value\n data[key] = value\n\n fp.seek(0)\n yaml.safe_dump(\n data, fp, default_flow_style=False, allow_unicode=True, width=10000\n )\n fp.truncate()\n",
"path": "src/django_perf_rec/yaml.py"
}
] | diff --git a/HISTORY.rst b/HISTORY.rst
index 331d0a69..a3ebf194 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,6 +2,10 @@
History
=======
+* Fix 'overwrite' mode to prevent file corruption.
+
+ Thanks to Peter Law for the report in `Issue #468 <https://github.com/adamchainz/django-perf-rec/issues/468>`__.
+
4.20.0 (2022-05-10)
-------------------
diff --git a/src/django_perf_rec/yaml.py b/src/django_perf_rec/yaml.py
index cc58265e..2dd6da6c 100644
--- a/src/django_perf_rec/yaml.py
+++ b/src/django_perf_rec/yaml.py
@@ -76,3 +76,4 @@ def set_and_save(self, key: str, value: PerformanceRecord) -> None:
yaml.safe_dump(
data, fp, default_flow_style=False, allow_unicode=True, width=10000
)
+ fp.truncate()
diff --git a/tests/test_api.py b/tests/test_api.py
index 54419fee..9abd5374 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -3,6 +3,7 @@
import os
import pytest
+import yaml
from django.core.cache import caches
from django.db.models import F, Q
from django.db.models.functions import Upper
@@ -243,19 +244,19 @@ def test_mode_overwrite(self):
with record(path="perf_files/api/", record_name="test_mode_overwrite"):
caches["default"].get("foo")
+ caches["default"].get("bar")
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
assert os.path.exists(full_path)
with record(path="perf_files/api/", record_name="test_mode_overwrite"):
- caches["default"].get("bar")
+ caches["default"].get("baz")
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
with open(full_path) as f:
- text = f.read()
+ data = yaml.safe_load(f.read())
- assert "bar" in text
- assert "foo" not in text
+ assert data == {"test_mode_overwrite": [{"cache|get": "baz"}]}
def test_delete_on_cascade_called_twice(self):
arthur = Author.objects.create(name="Arthur", age=42)
|
spyder-ide__spyder-6089 | Spyder not starting on macOS because pyopengl is present
I updated Python and Spyder using "conda update python" and "conda update spyder" respectively on macOS. Is this a Python error or a Spyder error?
Please find attached the conda list output listing the versions.
Thanks for your support.
--------
```
Traceback (most recent call last):
  File "/Users/Nagraj/anaconda3/bin/spyder", line 7, in <module>
    from spyder.app.start import main
  File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/spyder/app/start.py", line 19, in <module>
    from OpenGL import GL
  File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/GL/__init__.py", line 3, in <module>
    from OpenGL import error as _error
  File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/error.py", line 12, in <module>
    from OpenGL import platform, _configflags
  File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py", line 35, in <module>
    _load()
  File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py", line 29, in _load
    plugin = plugin_class()
TypeError: 'NoneType' object is not callable
```

[conda_list.txt](https://github.com/spyder-ide/spyder/files/1590875/conda_list.txt)
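Note the exception raised here is a `TypeError`, not an `ImportError`, so Spyder's existing import guard (`except ImportError`) never catches it; the patch below simply widens the guard to a bare `except`. A minimal sketch of the pattern (the `GL = None` fallback is an assumption for this sketch; Spyder itself just `pass`es):

```python
# Tolerate any failure while probing for pyopengl, not just ImportError:
# on this setup, OpenGL's platform plugin loader raises TypeError at import.
try:
    from OpenGL import GL
except Exception:
    GL = None  # sketch-only fallback; callers would need to check for None
```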
| [
{
"content": "# -*- coding: utf-8 -*-\r\n\r\n# Std imports\r\nimport os\r\nimport os.path as osp\r\nimport random\r\nimport socket\r\nimport sys\r\nimport time\r\n\r\n# To prevent a race condition with ZMQ\r\n# See issue 5324\r\nimport zmq\r\n\r\n# This import is needed to fix errors with OpenGL when installed using pip\r\n# See issue 3332\r\ntry:\r\n from OpenGL import GL\r\nexcept ImportError:\r\n # pyopengl is not present when installed using conda\r\n pass\r\n\r\n# Local imports\r\nfrom spyder.app.cli_options import get_options\r\nfrom spyder.config.base import get_conf_path, running_in_mac_app\r\nfrom spyder.config.main import CONF\r\nfrom spyder.utils.external import lockfile\r\nfrom spyder.py3compat import is_unicode\r\n\r\n\r\ndef send_args_to_spyder(args):\r\n \"\"\"\r\n Simple socket client used to send the args passed to the Spyder \r\n executable to an already running instance.\r\n\r\n Args can be Python scripts or files with these extensions: .spydata, .mat,\r\n .npy, or .h5, which can be imported by the Variable Explorer.\r\n \"\"\"\r\n port = CONF.get('main', 'open_files_port')\r\n\r\n # Wait ~50 secs for the server to be up\r\n # Taken from http://stackoverflow.com/a/4766598/438386\r\n for _x in range(200):\r\n try:\r\n for arg in args:\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\r\n socket.IPPROTO_TCP)\r\n client.connect((\"127.0.0.1\", port))\r\n if is_unicode(arg):\r\n arg = arg.encode('utf-8')\r\n client.send(osp.abspath(arg))\r\n client.close()\r\n except socket.error:\r\n time.sleep(0.25)\r\n continue\r\n break\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Start Spyder application.\r\n\r\n If single instance mode is turned on (default behavior) and an instance of\r\n Spyder is already running, this will just parse and send command line\r\n options to the application.\r\n \"\"\"\r\n # Parse command line options\r\n options, args = get_options()\r\n\r\n # Store variable to be used in self.restart (restart spyder instance)\r\n os.environ['SPYDER_ARGS'] = str(sys.argv[1:])\r\n\r\n #==========================================================================\r\n # Proper high DPI scaling is available in Qt >= 5.6.0. 
This attibute must\r\n # be set before creating the application.\r\n #==========================================================================\r\n if CONF.get('main', 'high_dpi_custom_scale_factor'):\r\n factors = str(CONF.get('main', 'high_dpi_custom_scale_factors'))\r\n f = list(filter(None, factors.split(';')))\r\n if len(f) == 1:\r\n os.environ['QT_SCALE_FACTOR'] = f[0]\r\n else:\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = factors\r\n else:\r\n os.environ['QT_SCALE_FACTOR'] = ''\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = ''\r\n\r\n # Prevent Spyder from crashing in macOS if locale is not defined\r\n if sys.platform == 'darwin':\r\n LANG = os.environ.get('LANG')\r\n LC_ALL = os.environ.get('LC_ALL')\r\n if bool(LANG) and not bool(LC_ALL):\r\n LC_ALL = LANG\r\n elif not bool(LANG) and bool(LC_ALL):\r\n LANG = LC_ALL\r\n else:\r\n LANG = LC_ALL = 'en_US.UTF-8'\r\n\r\n os.environ['LANG'] = LANG\r\n os.environ['LC_ALL'] = LC_ALL\r\n\r\n if CONF.get('main', 'single_instance') and not options.new_instance \\\r\n and not options.reset_config_files and not running_in_mac_app():\r\n # Minimal delay (0.1-0.2 secs) to avoid that several\r\n # instances started at the same time step in their\r\n # own foots while trying to create the lock file\r\n time.sleep(random.randrange(1000, 2000, 90)/10000.)\r\n\r\n # Lock file creation\r\n lock_file = get_conf_path('spyder.lock')\r\n lock = lockfile.FilesystemLock(lock_file)\r\n\r\n # Try to lock spyder.lock. If it's *possible* to do it, then\r\n # there is no previous instance running and we can start a\r\n # new one. If *not*, then there is an instance already\r\n # running, which is locking that file\r\n try:\r\n lock_created = lock.lock()\r\n except:\r\n # If locking fails because of errors in the lockfile\r\n # module, try to remove a possibly stale spyder.lock.\r\n # This is reported to solve all problems with\r\n # lockfile (See issue 2363)\r\n try:\r\n if os.name == 'nt':\r\n if osp.isdir(lock_file):\r\n import shutil\r\n shutil.rmtree(lock_file, ignore_errors=True)\r\n else:\r\n if osp.islink(lock_file):\r\n os.unlink(lock_file)\r\n except:\r\n pass\r\n\r\n # Then start Spyder as usual and *don't* continue\r\n # executing this script because it doesn't make\r\n # sense\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n return\r\n\r\n if lock_created:\r\n # Start a new instance\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n else:\r\n # Pass args to Spyder or print an informative\r\n # message\r\n if args:\r\n send_args_to_spyder(args)\r\n else:\r\n print(\"Spyder is already running. If you want to open a new \\n\"\r\n \"instance, please pass to it the --new-instance option\")\r\n else:\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"path": "spyder/app/start.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\r\n\r\n# Std imports\r\nimport os\r\nimport os.path as osp\r\nimport random\r\nimport socket\r\nimport sys\r\nimport time\r\n\r\n# To prevent a race condition with ZMQ\r\n# See issue 5324\r\nimport zmq\r\n\r\n# This import is needed to fix errors with OpenGL when installed using pip\r\n# See issue 3332\r\ntry:\r\n from OpenGL import GL\r\nexcept:\r\n pass\r\n\r\n# Local imports\r\nfrom spyder.app.cli_options import get_options\r\nfrom spyder.config.base import get_conf_path, running_in_mac_app\r\nfrom spyder.config.main import CONF\r\nfrom spyder.utils.external import lockfile\r\nfrom spyder.py3compat import is_unicode\r\n\r\n\r\ndef send_args_to_spyder(args):\r\n \"\"\"\r\n Simple socket client used to send the args passed to the Spyder \r\n executable to an already running instance.\r\n\r\n Args can be Python scripts or files with these extensions: .spydata, .mat,\r\n .npy, or .h5, which can be imported by the Variable Explorer.\r\n \"\"\"\r\n port = CONF.get('main', 'open_files_port')\r\n\r\n # Wait ~50 secs for the server to be up\r\n # Taken from http://stackoverflow.com/a/4766598/438386\r\n for _x in range(200):\r\n try:\r\n for arg in args:\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\r\n socket.IPPROTO_TCP)\r\n client.connect((\"127.0.0.1\", port))\r\n if is_unicode(arg):\r\n arg = arg.encode('utf-8')\r\n client.send(osp.abspath(arg))\r\n client.close()\r\n except socket.error:\r\n time.sleep(0.25)\r\n continue\r\n break\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Start Spyder application.\r\n\r\n If single instance mode is turned on (default behavior) and an instance of\r\n Spyder is already running, this will just parse and send command line\r\n options to the application.\r\n \"\"\"\r\n # Parse command line options\r\n options, args = get_options()\r\n\r\n # Store variable to be used in self.restart (restart spyder instance)\r\n os.environ['SPYDER_ARGS'] = str(sys.argv[1:])\r\n\r\n #==========================================================================\r\n # Proper high DPI scaling is available in Qt >= 5.6.0. 
This attibute must\r\n # be set before creating the application.\r\n #==========================================================================\r\n if CONF.get('main', 'high_dpi_custom_scale_factor'):\r\n factors = str(CONF.get('main', 'high_dpi_custom_scale_factors'))\r\n f = list(filter(None, factors.split(';')))\r\n if len(f) == 1:\r\n os.environ['QT_SCALE_FACTOR'] = f[0]\r\n else:\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = factors\r\n else:\r\n os.environ['QT_SCALE_FACTOR'] = ''\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = ''\r\n\r\n # Prevent Spyder from crashing in macOS if locale is not defined\r\n if sys.platform == 'darwin':\r\n LANG = os.environ.get('LANG')\r\n LC_ALL = os.environ.get('LC_ALL')\r\n if bool(LANG) and not bool(LC_ALL):\r\n LC_ALL = LANG\r\n elif not bool(LANG) and bool(LC_ALL):\r\n LANG = LC_ALL\r\n else:\r\n LANG = LC_ALL = 'en_US.UTF-8'\r\n\r\n os.environ['LANG'] = LANG\r\n os.environ['LC_ALL'] = LC_ALL\r\n\r\n if CONF.get('main', 'single_instance') and not options.new_instance \\\r\n and not options.reset_config_files and not running_in_mac_app():\r\n # Minimal delay (0.1-0.2 secs) to avoid that several\r\n # instances started at the same time step in their\r\n # own foots while trying to create the lock file\r\n time.sleep(random.randrange(1000, 2000, 90)/10000.)\r\n\r\n # Lock file creation\r\n lock_file = get_conf_path('spyder.lock')\r\n lock = lockfile.FilesystemLock(lock_file)\r\n\r\n # Try to lock spyder.lock. If it's *possible* to do it, then\r\n # there is no previous instance running and we can start a\r\n # new one. If *not*, then there is an instance already\r\n # running, which is locking that file\r\n try:\r\n lock_created = lock.lock()\r\n except:\r\n # If locking fails because of errors in the lockfile\r\n # module, try to remove a possibly stale spyder.lock.\r\n # This is reported to solve all problems with\r\n # lockfile (See issue 2363)\r\n try:\r\n if os.name == 'nt':\r\n if osp.isdir(lock_file):\r\n import shutil\r\n shutil.rmtree(lock_file, ignore_errors=True)\r\n else:\r\n if osp.islink(lock_file):\r\n os.unlink(lock_file)\r\n except:\r\n pass\r\n\r\n # Then start Spyder as usual and *don't* continue\r\n # executing this script because it doesn't make\r\n # sense\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n return\r\n\r\n if lock_created:\r\n # Start a new instance\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n else:\r\n # Pass args to Spyder or print an informative\r\n # message\r\n if args:\r\n send_args_to_spyder(args)\r\n else:\r\n print(\"Spyder is already running. If you want to open a new \\n\"\r\n \"instance, please pass to it the --new-instance option\")\r\n else:\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"path": "spyder/app/start.py"
}
] | diff --git a/spyder/app/start.py b/spyder/app/start.py
index 8d0a33f8156..542afde3420 100644
--- a/spyder/app/start.py
+++ b/spyder/app/start.py
@@ -16,8 +16,7 @@
# See issue 3332
try:
from OpenGL import GL
-except ImportError:
- # pyopengl is not present when installed using conda
+except:
pass
# Local imports
|
ros__ros_comm-683 | unregister() method for message_filters.Subscriber in Python
Do you have plans to implement this?
Or is there another way to achieve this?
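Until such a method landed, the wrapped `rospy.Subscriber` was already reachable via the filter's `sub` attribute, so a workaround looks like this (topic name and message type are illustrative):

```python
import rospy
import message_filters
from std_msgs.msg import String  # illustrative message type

rospy.init_node("example")
f = message_filters.Subscriber("chatter", String)
# ... use the filter ...
# Workaround before unregister() existed: delegate to the wrapped subscriber.
f.sub.unregister()
```

The eventual fix (see the diff below) adds exactly this one-line delegation as an `unregister()` method on `message_filters.Subscriber`.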
| [
{
"content": "# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nMessage Filter Objects\n======================\n\"\"\"\n\nimport itertools\nimport threading\nimport rospy\n\nclass SimpleFilter:\n\n def __init__(self):\n self.callbacks = {}\n\n def registerCallback(self, cb, *args):\n \"\"\"\n Register a callback function `cb` to be called when this filter\n has output.\n The filter calls the function ``cb`` with a filter-dependent list of arguments,\n followed by the call-supplied arguments ``args``.\n \"\"\"\n\n conn = len(self.callbacks)\n self.callbacks[conn] = (cb, args)\n return conn\n\n def signalMessage(self, *msg):\n for (cb, args) in self.callbacks.values():\n cb(*(msg + args))\n\nclass Subscriber(SimpleFilter):\n \n \"\"\"\n ROS subscription filter. 
Identical arguments as :class:`rospy.Subscriber`.\n\n This class acts as a highest-level filter, simply passing messages\n from a ROS subscription through to the filters which have connected\n to it.\n \"\"\"\n def __init__(self, *args, **kwargs):\n SimpleFilter.__init__(self)\n self.topic = args[0]\n kwargs['callback'] = self.callback\n self.sub = rospy.Subscriber(*args, **kwargs)\n\n def callback(self, msg):\n self.signalMessage(msg)\n\n def getTopic(self):\n return self.topic\n\nclass Cache(SimpleFilter):\n\n \"\"\"\n Stores a time history of messages.\n\n Given a stream of messages, the most recent ``cache_size`` messages\n are cached in a ring buffer, from which time intervals of the cache\n can then be retrieved by the client.\n \"\"\"\n\n def __init__(self, f, cache_size = 1):\n SimpleFilter.__init__(self)\n self.connectInput(f)\n self.cache_size = cache_size\n # Array to store messages\n self.cache_msgs = []\n # Array to store msgs times, auxiliary structure to facilitate\n # sorted insertion\n self.cache_times = []\n\n def connectInput(self, f):\n self.incoming_connection = f.registerCallback(self.add)\n\n def add(self, msg):\n # Cannot use message filters with non-stamped messages\n if not hasattr(msg, 'header') or not hasattr(msg.header, 'stamp'):\n rospy.logwarn(\"Cannot use message filters with non-stamped messages\")\n return\n\n # Insert sorted\n stamp = msg.header.stamp\n self.cache_times.append(stamp)\n self.cache_msgs.append(msg)\n\n # Implement a ring buffer, discard older if oversized\n if (len(self.cache_msgs) > self.cache_size):\n del self.cache_msgs[0]\n del self.cache_times[0]\n\n # Signal new input\n self.signalMessage(msg)\n\n def getInterval(self, from_stamp, to_stamp):\n \"\"\"Query the current cache content between from_stamp to to_stamp.\"\"\"\n assert from_stamp <= to_stamp\n return [m for m in self.cache_msgs\n if m.header.stamp >= from_stamp and m.header.stamp <= to_stamp]\n\n def getElemAfterTime(self, stamp):\n \"\"\"Return the oldest element after or equal the passed time stamp.\"\"\"\n newer = [m for m in self.cache_msgs if m.header.stamp >= stamp]\n if not newer:\n return None\n return newer[0]\n\n def getElemBeforeTime(self, stamp):\n \"\"\"Return the newest element before or equal the passed time stamp.\"\"\"\n older = [m for m in self.cache_msgs if m.header.stamp <= stamp]\n if not older:\n return None\n return older[-1]\n\n def getLastestTime(self):\n \"\"\"Return the newest recorded timestamp.\"\"\"\n if not self.cache_times:\n return None\n return self.cache_times[-1]\n\n def getOldestTime(self):\n \"\"\"Return the oldest recorded timestamp.\"\"\"\n if not self.cache_times:\n return None\n return self.cache_times[0]\n\n\nclass TimeSynchronizer(SimpleFilter):\n\n \"\"\"\n Synchronizes messages by their timestamps.\n\n :class:`TimeSynchronizer` synchronizes incoming message filters by the\n timestamps contained in their messages' headers. TimeSynchronizer\n listens on multiple input message filters ``fs``, and invokes the callback\n when it has a collection of messages with matching timestamps.\n\n The signature of the callback function is::\n\n def callback(msg1, ... 
msgN):\n\n where N is the number of input message filters, and each message is\n the output of the corresponding filter in ``fs``.\n The required ``queue size`` parameter specifies how many sets of\n messages it should store from each input filter (by timestamp)\n while waiting for messages to arrive and complete their \"set\".\n \"\"\"\n\n def __init__(self, fs, queue_size):\n SimpleFilter.__init__(self)\n self.connectInput(fs)\n self.queue_size = queue_size\n self.lock = threading.Lock()\n\n def connectInput(self, fs):\n self.queues = [{} for f in fs]\n self.input_connections = [f.registerCallback(self.add, q) for (f, q) in zip(fs, self.queues)]\n\n def add(self, msg, my_queue):\n self.lock.acquire()\n my_queue[msg.header.stamp] = msg\n while len(my_queue) > self.queue_size:\n del my_queue[min(my_queue)]\n # common is the set of timestamps that occur in all queues\n common = reduce(set.intersection, [set(q) for q in self.queues])\n for t in sorted(common):\n # msgs is list of msgs (one from each queue) with stamp t\n msgs = [q[t] for q in self.queues]\n self.signalMessage(*msgs)\n for q in self.queues:\n del q[t]\n self.lock.release()\n\nclass ApproximateTimeSynchronizer(TimeSynchronizer):\n\n \"\"\"\n Approximately synchronizes messages by their timestamps.\n\n :class:`ApproximateTimeSynchronizer` synchronizes incoming message filters by the\n timestamps contained in their messages' headers. The API is the same as TimeSynchronizer\n except for an extra `slop` parameter in the constructor that defines the delay (in seconds)\n with which messages can be synchronized\n \"\"\"\n\n def __init__(self, fs, queue_size, slop):\n TimeSynchronizer.__init__(self, fs, queue_size)\n self.slop = rospy.Duration.from_sec(slop)\n\n def add(self, msg, my_queue):\n self.lock.acquire()\n my_queue[msg.header.stamp] = msg\n while len(my_queue) > self.queue_size:\n del my_queue[min(my_queue)]\n for vv in itertools.product(*[list(q.keys()) for q in self.queues]):\n qt = list(zip(self.queues, vv))\n if ( ((max(vv) - min(vv)) < self.slop) and\n (len([1 for q,t in qt if t not in q]) == 0) ):\n msgs = [q[t] for q,t in qt]\n self.signalMessage(*msgs)\n for q,t in qt:\n del q[t]\n self.lock.release()\n",
"path": "utilities/message_filters/src/message_filters/__init__.py"
}
] | [
{
"content": "# Copyright (c) 2009, Willow Garage, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nMessage Filter Objects\n======================\n\"\"\"\n\nimport itertools\nimport threading\nimport rospy\n\nclass SimpleFilter:\n\n def __init__(self):\n self.callbacks = {}\n\n def registerCallback(self, cb, *args):\n \"\"\"\n Register a callback function `cb` to be called when this filter\n has output.\n The filter calls the function ``cb`` with a filter-dependent list of arguments,\n followed by the call-supplied arguments ``args``.\n \"\"\"\n\n conn = len(self.callbacks)\n self.callbacks[conn] = (cb, args)\n return conn\n\n def signalMessage(self, *msg):\n for (cb, args) in self.callbacks.values():\n cb(*(msg + args))\n\nclass Subscriber(SimpleFilter):\n \n \"\"\"\n ROS subscription filter. 
Identical arguments as :class:`rospy.Subscriber`.\n\n This class acts as a highest-level filter, simply passing messages\n from a ROS subscription through to the filters which have connected\n to it.\n \"\"\"\n def __init__(self, *args, **kwargs):\n SimpleFilter.__init__(self)\n self.topic = args[0]\n kwargs['callback'] = self.callback\n self.sub = rospy.Subscriber(*args, **kwargs)\n\n def callback(self, msg):\n self.signalMessage(msg)\n\n def getTopic(self):\n return self.topic\n\n def unregister(self):\n self.sub.unregister()\n\nclass Cache(SimpleFilter):\n\n \"\"\"\n Stores a time history of messages.\n\n Given a stream of messages, the most recent ``cache_size`` messages\n are cached in a ring buffer, from which time intervals of the cache\n can then be retrieved by the client.\n \"\"\"\n\n def __init__(self, f, cache_size = 1):\n SimpleFilter.__init__(self)\n self.connectInput(f)\n self.cache_size = cache_size\n # Array to store messages\n self.cache_msgs = []\n # Array to store msgs times, auxiliary structure to facilitate\n # sorted insertion\n self.cache_times = []\n\n def connectInput(self, f):\n self.incoming_connection = f.registerCallback(self.add)\n\n def add(self, msg):\n # Cannot use message filters with non-stamped messages\n if not hasattr(msg, 'header') or not hasattr(msg.header, 'stamp'):\n rospy.logwarn(\"Cannot use message filters with non-stamped messages\")\n return\n\n # Insert sorted\n stamp = msg.header.stamp\n self.cache_times.append(stamp)\n self.cache_msgs.append(msg)\n\n # Implement a ring buffer, discard older if oversized\n if (len(self.cache_msgs) > self.cache_size):\n del self.cache_msgs[0]\n del self.cache_times[0]\n\n # Signal new input\n self.signalMessage(msg)\n\n def getInterval(self, from_stamp, to_stamp):\n \"\"\"Query the current cache content between from_stamp to to_stamp.\"\"\"\n assert from_stamp <= to_stamp\n return [m for m in self.cache_msgs\n if m.header.stamp >= from_stamp and m.header.stamp <= to_stamp]\n\n def getElemAfterTime(self, stamp):\n \"\"\"Return the oldest element after or equal the passed time stamp.\"\"\"\n newer = [m for m in self.cache_msgs if m.header.stamp >= stamp]\n if not newer:\n return None\n return newer[0]\n\n def getElemBeforeTime(self, stamp):\n \"\"\"Return the newest element before or equal the passed time stamp.\"\"\"\n older = [m for m in self.cache_msgs if m.header.stamp <= stamp]\n if not older:\n return None\n return older[-1]\n\n def getLastestTime(self):\n \"\"\"Return the newest recorded timestamp.\"\"\"\n if not self.cache_times:\n return None\n return self.cache_times[-1]\n\n def getOldestTime(self):\n \"\"\"Return the oldest recorded timestamp.\"\"\"\n if not self.cache_times:\n return None\n return self.cache_times[0]\n\n\nclass TimeSynchronizer(SimpleFilter):\n\n \"\"\"\n Synchronizes messages by their timestamps.\n\n :class:`TimeSynchronizer` synchronizes incoming message filters by the\n timestamps contained in their messages' headers. TimeSynchronizer\n listens on multiple input message filters ``fs``, and invokes the callback\n when it has a collection of messages with matching timestamps.\n\n The signature of the callback function is::\n\n def callback(msg1, ... 
msgN):\n\n where N is the number of input message filters, and each message is\n the output of the corresponding filter in ``fs``.\n The required ``queue size`` parameter specifies how many sets of\n messages it should store from each input filter (by timestamp)\n while waiting for messages to arrive and complete their \"set\".\n \"\"\"\n\n def __init__(self, fs, queue_size):\n SimpleFilter.__init__(self)\n self.connectInput(fs)\n self.queue_size = queue_size\n self.lock = threading.Lock()\n\n def connectInput(self, fs):\n self.queues = [{} for f in fs]\n self.input_connections = [f.registerCallback(self.add, q) for (f, q) in zip(fs, self.queues)]\n\n def add(self, msg, my_queue):\n self.lock.acquire()\n my_queue[msg.header.stamp] = msg\n while len(my_queue) > self.queue_size:\n del my_queue[min(my_queue)]\n # common is the set of timestamps that occur in all queues\n common = reduce(set.intersection, [set(q) for q in self.queues])\n for t in sorted(common):\n # msgs is list of msgs (one from each queue) with stamp t\n msgs = [q[t] for q in self.queues]\n self.signalMessage(*msgs)\n for q in self.queues:\n del q[t]\n self.lock.release()\n\nclass ApproximateTimeSynchronizer(TimeSynchronizer):\n\n \"\"\"\n Approximately synchronizes messages by their timestamps.\n\n :class:`ApproximateTimeSynchronizer` synchronizes incoming message filters by the\n timestamps contained in their messages' headers. The API is the same as TimeSynchronizer\n except for an extra `slop` parameter in the constructor that defines the delay (in seconds)\n with which messages can be synchronized\n \"\"\"\n\n def __init__(self, fs, queue_size, slop):\n TimeSynchronizer.__init__(self, fs, queue_size)\n self.slop = rospy.Duration.from_sec(slop)\n\n def add(self, msg, my_queue):\n self.lock.acquire()\n my_queue[msg.header.stamp] = msg\n while len(my_queue) > self.queue_size:\n del my_queue[min(my_queue)]\n for vv in itertools.product(*[list(q.keys()) for q in self.queues]):\n qt = list(zip(self.queues, vv))\n if ( ((max(vv) - min(vv)) < self.slop) and\n (len([1 for q,t in qt if t not in q]) == 0) ):\n msgs = [q[t] for q,t in qt]\n self.signalMessage(*msgs)\n for q,t in qt:\n del q[t]\n self.lock.release()\n",
"path": "utilities/message_filters/src/message_filters/__init__.py"
}
] | diff --git a/utilities/message_filters/src/message_filters/__init__.py b/utilities/message_filters/src/message_filters/__init__.py
index dc7d7cf105..ae6b929ee1 100644
--- a/utilities/message_filters/src/message_filters/__init__.py
+++ b/utilities/message_filters/src/message_filters/__init__.py
@@ -76,6 +76,9 @@ def callback(self, msg):
def getTopic(self):
return self.topic
+ def unregister(self):
+ self.sub.unregister()
+
class Cache(SimpleFilter):
"""
|
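The diff above adds an `unregister()` method to the Python `Subscriber` filter, delegating to the wrapped `rospy.Subscriber`. A minimal usage sketch, assuming a running ROS1 master; the node name, topic, and message type are illustrative:

```python
# Minimal sketch of the newly added unregister() on a message_filters
# Subscriber (ROS1/rospy; the names below are illustrative).
import rospy
import message_filters
from sensor_msgs.msg import Image

rospy.init_node("filter_demo")
sub = message_filters.Subscriber("/camera/image_raw", Image)
sub.registerCallback(lambda msg: rospy.loginfo("got a frame"))
rospy.sleep(5.0)   # receive messages for a while
sub.unregister()   # stop the underlying rospy subscription
```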
Flexget__Flexget-2652 | Dependencies on non pure Python modules
Hi,
Sorry for [x-posting](https://discuss.flexget.com/t/dependencies-on-non-pure-python-modules/5086). This is kind of a development issue so it might fit here better than in the forum. Feel free to just close this and reply on Discourse.
When upgrading Flexget I stumbled upon the fact that it now depends on Pillow, as per:
https://github.com/Flexget/Flexget/pull/2621
My Flexget runs on an embedded aarch64 device, so every time you add a dependency on a module that links against C libraries (Pillow depends on zlib and libjpeg) I have to cross-compile the necessary native extensions, distribute them, etc. Either that, or manually install a trimmed-down `requirements.txt` and finally install Flexget passing `--no-dependencies` to `pip install`.
Does adding a tray icon justify this hassle for users? Not saying that's wrong, of course; just trying to bring this use case, and the potential annoyance for users, to your attention.
Could perhaps these things be made optional at install/build time by relaxing the hard dependency?
Thanks.
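One way to relax the hard dependency, as suggested above, is a setuptools extra, so that a plain `pip install flexget` stays pure Python and desktop users opt in explicitly. The sketch below is hypothetical: the `tray` extra name and the stripped-down `setup()` call are illustrative, not Flexget's actual packaging.

```python
# Hypothetical sketch: declare the GUI-only dependencies as an optional
# extra. Embedded/headless installs run `pip install flexget` and never
# build native extensions; desktop users run `pip install "flexget[tray]"`.
from setuptools import setup, find_packages

setup(
    name="flexget",
    packages=find_packages(),
    install_requires=[
        # pure-Python core requirements only
    ],
    extras_require={
        # Pillow links against zlib/libjpeg; pystray needs a GUI backend
        "tray": ["Pillow", "pystray"],
    },
)
```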
| [
{
"content": "import logging\nimport webbrowser\nfrom functools import partial, wraps\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom loguru import logger\n\nfrom flexget import __version__\n\nlogger = logger.bind(name='tray_icon')\n\ntry:\n # If we are running outside of a graphical environment, these imports will fail\n from PIL import Image\n from pystray import Icon, Menu, MenuItem\n\n _import_success = True\nexcept Exception as e:\n logger.debug('Could not import pystray: {}', e)\n _import_success = False\n\n\ndef check_if_tray_is_active(f):\n @wraps(f)\n def wrapped(self, *args, **kwargs):\n if not self.active:\n return\n return f(self, *args, **kwargs)\n\n return wrapped\n\n\nimage_path = Path(__file__).parent / 'resources' / 'flexget.png'\n\n\nclass TrayIcon:\n def __init__(self, path_to_image: Path = image_path):\n # Silence PIL noisy logging\n logging.getLogger('PIL.PngImagePlugin').setLevel(logging.INFO)\n logging.getLogger('PIL.Image').setLevel(logging.INFO)\n\n self.path_to_image: Path = path_to_image\n self.icon: Optional['Icon'] = None\n self._menu: Optional['Menu'] = None\n self.menu_items: List['MenuItem'] = []\n\n self.active: bool = _import_success\n self.running: bool = False\n\n self.add_core_menu_items()\n\n @check_if_tray_is_active\n def add_menu_item(\n self,\n text: str = None,\n action: callable = None,\n menu_item: 'MenuItem' = None,\n index: int = None,\n **kwargs,\n ):\n \"\"\"\n Add a menu item byt passing its text and function, or pass a created MenuItem. Force position by sending index\n \"\"\"\n if not any(v for v in (menu_item, text)):\n raise ValueError(f\"Either 'text' or 'menu_item' are required\")\n\n menu_item = menu_item or MenuItem(text=text, action=action, **kwargs)\n if index is not None:\n self.menu_items.insert(index, menu_item)\n else:\n self.menu_items.append(menu_item)\n\n @check_if_tray_is_active\n def add_menu_separator(self, index: int = None):\n self.add_menu_item(menu_item=Menu.SEPARATOR, index=index)\n\n def add_core_menu_items(self):\n open_web = partial(webbrowser.open)\n self.add_menu_item(text=f'Flexget {__version__}', enabled=False)\n self.add_menu_separator()\n self.add_menu_item(text='Homepage', action=partial(open_web, 'https://flexget.com/'))\n self.add_menu_item(text='Forum', action=partial(open_web, 'https://discuss.flexget.com/'))\n\n @property\n def menu(self) -> 'Menu':\n # This is lazy loaded since we'd like to delay the menu build until the tray is requested to run\n if not self._menu:\n self._menu = Menu(*self.menu_items)\n return self._menu\n\n @check_if_tray_is_active\n def run(self):\n \"\"\"Run the tray icon. Must be run from the main thread and is blocking\"\"\"\n try:\n logger.verbose('Starting tray icon')\n self.icon = Icon('Flexget', Image.open(self.path_to_image), menu=self.menu)\n self.running = True\n self.icon.run()\n except Exception as e:\n logger.warning('Could not run tray icon: {}', e)\n self.running = False\n\n @check_if_tray_is_active\n def stop(self):\n if not self.running:\n return\n\n logger.verbose('Stopping tray icon')\n self.icon.stop()\n self.running = False\n\n\ntray_icon = TrayIcon()\n",
"path": "flexget/tray_icon.py"
}
] | [
{
"content": "import logging\nimport webbrowser\nfrom functools import partial, wraps\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom loguru import logger\n\nfrom flexget import __version__\n\nlogger = logger.bind(name='tray_icon')\n\ntry:\n # If we are running outside of a graphical environment, these imports will fail\n from PIL import Image\n from pystray import Icon, Menu, MenuItem\n\n _import_success = True\nexcept Exception as e:\n logger.debug('Could not load tray icon: {}', e)\n _import_success = False\n\n\ndef check_if_tray_is_active(f):\n @wraps(f)\n def wrapped(self, *args, **kwargs):\n if not self.active:\n return\n return f(self, *args, **kwargs)\n\n return wrapped\n\n\nimage_path = Path(__file__).parent / 'resources' / 'flexget.png'\n\n\nclass TrayIcon:\n def __init__(self, path_to_image: Path = image_path):\n # Silence PIL noisy logging\n logging.getLogger('PIL.PngImagePlugin').setLevel(logging.INFO)\n logging.getLogger('PIL.Image').setLevel(logging.INFO)\n\n self.path_to_image: Path = path_to_image\n self.icon: Optional['Icon'] = None\n self._menu: Optional['Menu'] = None\n self.menu_items: List['MenuItem'] = []\n\n self.active: bool = _import_success\n self.running: bool = False\n\n self.add_core_menu_items()\n\n @check_if_tray_is_active\n def add_menu_item(\n self,\n text: str = None,\n action: callable = None,\n menu_item: 'MenuItem' = None,\n index: int = None,\n **kwargs,\n ):\n \"\"\"\n Add a menu item byt passing its text and function, or pass a created MenuItem. Force position by sending index\n \"\"\"\n if not any(v for v in (menu_item, text)):\n raise ValueError(f\"Either 'text' or 'menu_item' are required\")\n\n menu_item = menu_item or MenuItem(text=text, action=action, **kwargs)\n if index is not None:\n self.menu_items.insert(index, menu_item)\n else:\n self.menu_items.append(menu_item)\n\n @check_if_tray_is_active\n def add_menu_separator(self, index: int = None):\n self.add_menu_item(menu_item=Menu.SEPARATOR, index=index)\n\n def add_core_menu_items(self):\n open_web = partial(webbrowser.open)\n self.add_menu_item(text=f'Flexget {__version__}', enabled=False)\n self.add_menu_separator()\n self.add_menu_item(text='Homepage', action=partial(open_web, 'https://flexget.com/'))\n self.add_menu_item(text='Forum', action=partial(open_web, 'https://discuss.flexget.com/'))\n\n @property\n def menu(self) -> 'Menu':\n # This is lazy loaded since we'd like to delay the menu build until the tray is requested to run\n if not self._menu:\n self._menu = Menu(*self.menu_items)\n return self._menu\n\n @check_if_tray_is_active\n def run(self):\n \"\"\"Run the tray icon. Must be run from the main thread and is blocking\"\"\"\n try:\n logger.verbose('Starting tray icon')\n self.icon = Icon('Flexget', Image.open(self.path_to_image), menu=self.menu)\n self.running = True\n self.icon.run()\n except Exception as e:\n logger.warning('Could not run tray icon: {}', e)\n self.running = False\n\n @check_if_tray_is_active\n def stop(self):\n if not self.running:\n return\n\n logger.verbose('Stopping tray icon')\n self.icon.stop()\n self.running = False\n\n\ntray_icon = TrayIcon()\n",
"path": "flexget/tray_icon.py"
}
] | diff --git a/flexget/tray_icon.py b/flexget/tray_icon.py
index 36cb008561..c669515fbc 100644
--- a/flexget/tray_icon.py
+++ b/flexget/tray_icon.py
@@ -17,7 +17,7 @@
_import_success = True
except Exception as e:
- logger.debug('Could not import pystray: {}', e)
+ logger.debug('Could not load tray icon: {}', e)
_import_success = False
diff --git a/requirements.in b/requirements.in
index fc2b21595e..b9226b8afa 100644
--- a/requirements.in
+++ b/requirements.in
@@ -30,4 +30,3 @@ pyparsing>=2.0.3
zxcvbn-python
progressbar==2.5
more-itertools
-Pillow
diff --git a/requirements.txt b/requirements.txt
index d3e2ac288b..bd674aa440 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,7 +31,6 @@ jsonschema==2.6.0 # via -r requirements.in, flask-restplus
loguru==0.4.1 # via -r requirements.in
markupsafe==1.1.1 # via jinja2
more-itertools==7.2.0 # via -r requirements.in, cheroot, cherrypy, jaraco.functools
-pillow==7.0.0 # via -r requirements.in
plumbum==1.6.3 # via rpyc
portend==2.6 # via cherrypy
progressbar==2.5 # via -r requirements.in
@@ -55,7 +54,6 @@ webencodings==0.5.1 # via html5lib
werkzeug==0.15.6 # via flask
zc.lockfile==2.0 # via cherrypy
zxcvbn-python==4.4.15 # via -r requirements.in
-pystray==0.15.0
# The following packages are considered to be unsafe in a requirements file:
# setuptools
|
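As the diff above shows, the change that landed keeps the `try`/`except` import guard and simply drops Pillow and pystray from the pinned requirements, so the tray icon disables itself when the packages are missing. A minimal usage sketch, assuming the `tray_icon` module as shown: callers gate on the `active` flag instead of importing PIL or pystray themselves.

```python
# Minimal sketch: tray_icon.active is False when PIL/pystray failed to
# import (see the guard in flexget/tray_icon.py above), so slim or
# headless installs degrade gracefully instead of crashing.
from flexget.tray_icon import tray_icon

if tray_icon.active:
    tray_icon.run()  # blocking; per its docstring, must run on the main thread
else:
    print("tray icon unavailable; continuing without it")
```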
vaexio__vaex-2186 | [FEATURE-REQUEST]: Expose `vaex.DataFrame` for type hinting
I would like to add type annotations to my code. Currently I can do `from vaex.dataframe import DataFrame`, but it would be nice if the DataFrame was exposed at the top level so I could just do `import vaex` and then `df: vaex.DataFrame = ...`
I like being able to namespace it like this to distinguish it from other DataFrames, e.g. `import pandas as pd` and then `pdf: pd.DataFrame = ...`
I can submit a PR if you want. Thank you for this awesome library, I'm excited to get started with it! Better string processing is a big requirement of mine and this is 💯
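For reference, the change that later merged does exactly this by re-exporting the class from the package root (visible in the updated `__init__.py` below). A minimal sketch of the resulting usage; the helper function is illustrative only:

```python
# The merged change adds, in packages/vaex-core/vaex/__init__.py:
#     from vaex.dataframe import DataFrame as DataFrame
# so downstream code can annotate with the namespaced name:
import vaex

def first_rows(df: vaex.DataFrame, n: int = 5) -> vaex.DataFrame:
    """Illustrative helper: slice off the first n rows."""
    return df[:n]
```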
| [
{
"content": "\"\"\"\nVaex is a library for dealing with larger than memory DataFrames (out of core).\n\nThe most important class (datastructure) in vaex is the :class:`.DataFrame`. A DataFrame is obtained by either opening\nthe example dataset:\n\n>>> import vaex\n>>> df = vaex.example()\n\nOr using :func:`open` to open a file.\n\n>>> df1 = vaex.open(\"somedata.hdf5\")\n>>> df2 = vaex.open(\"somedata.fits\")\n>>> df2 = vaex.open(\"somedata.arrow\")\n>>> df4 = vaex.open(\"somedata.csv\")\n\nOr connecting to a remove server:\n\n>>> df_remote = vaex.open(\"http://try.vaex.io/nyc_taxi_2015\")\n\n\nA few strong features of vaex are:\n\n- Performance: works with huge tabular data, process over a billion (> 10\\\\ :sup:`9`\\\\ ) rows/second.\n- Expression system / Virtual columns: compute on the fly, without wasting ram.\n- Memory efficient: no memory copies when doing filtering/selections/subsets.\n- Visualization: directly supported, a one-liner is often enough.\n- User friendly API: you will only need to deal with a DataFrame object, and tab completion + docstring will help you out: `ds.mean<tab>`, feels very similar to Pandas.\n- Very fast statistics on N dimensional grids such as histograms, running mean, heatmaps.\n\n\nFollow the tutorial at https://docs.vaex.io/en/latest/tutorial.html to learn how to use vaex.\n\n\"\"\" # -*- coding: utf-8 -*-\nimport logging as root_logging\nimport os\nimport pkg_resources\nfrom typing import Dict, List\nfrom urllib.parse import urlparse, parse_qs\n\n# first configure logging, which also imports vaex.settings\nimport vaex.logging\n# import this to be explicit\nimport vaex.settings\n\nimport vaex.dataframe\nimport vaex.dataset\nfrom vaex.docstrings import docsubst\nfrom vaex.registry import register_function\nfrom vaex import functions, struct\nfrom . import stat\n# import vaex.file\n# import vaex.export\nfrom .delayed import delayed\nfrom .groupby import *\nfrom . import agg\nimport vaex.datasets\n\n\n\n\nimport vaex.progress\n\ntry:\n from . import version\nexcept:\n import sys\n print(\"version file not found, please run git/hooks/post-commit or git/hooks/post-checkout and/or install them as hooks (see git/README)\", file=sys.stderr)\n raise\n\nlogger = root_logging.getLogger('vaex')\nDEBUG_MODE = os.environ.get('VAEX_DEBUG', '')\n__version__ = version.get_versions()\n\n\ndef app(*args, **kwargs):\n \"\"\"Create a vaex app, the QApplication mainloop must be started.\n\n In ipython notebook/jupyter do the following:\n\n >>> import vaex.ui.main # this causes the qt api level to be set properly\n >>> import vaex\n\n Next cell:\n\n >>> %gui qt\n\n Next cell:\n\n >>> app = vaex.app()\n\n From now on, you can run the app along with jupyter\n\n \"\"\"\n\n import vaex.ui.main\n return vaex.ui.main.VaexApp()\n\n\n@docsubst\ndef open(path, convert=False, progress=None, shuffle=False, fs_options={}, fs=None, *args, **kwargs):\n \"\"\"Open a DataFrame from file given by path.\n\n Example:\n\n >>> df = vaex.open('sometable.hdf5')\n >>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')\n\n :param str or list path: local or absolute path to file, or glob string, or list of paths\n :param convert: Uses `dataframe.export` when convert is a path. 
If True, ``convert=path+'.hdf5'``\n The conversion is skipped if the input file or conversion argument did not change.\n :param progress: (*Only applies when convert is not False*) {progress}\n :param bool shuffle: shuffle converted DataFrame or not\n :param dict fs_options: Extra arguments passed to an optional file system if needed. See below\n :param group: (optional) Specify the group to be read from and HDF5 file. By default this is set to \"/table\".\n :param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object, if specified, fs_options should be empty.\n :param args: extra arguments for file readers that need it\n :param kwargs: extra keyword arguments\n :return: return a DataFrame on success, otherwise None\n :rtype: DataFrame\n\n Cloud storage support:\n\n Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.\n Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access\n is as fast as native disk access.\n\n Amazon AWS S3 options:\n\n The following common fs_options are used for S3 access:\n\n * `anon`: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)\n * `anonymous` - Alias for `anon`\n * `cache`: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)\n * `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file\n * `secret_key` - AWS secret key, similar to `access_key`\n * `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html\n * `region` - AWS Region, e.g. 'us-east-1`, will be determined automatically if not provided.\n * `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio\n\n All fs_options can also be encoded in the file path as a query string.\n\n Examples:\n\n >>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={{'anonymous': True}})\n >>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')\n >>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={{'access_key': my_key, 'secret_key': my_secret_key}})\n >>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')\n >>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')\n\n Google Cloud Storage options:\n\n The following fs_options are used for GCP access:\n\n * token: Authentication method for GCP. Use 'anon' for annonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.\n * cache: Use the disk cache or not, only set to false if the data should be accessed once. 
(Allowed values are: true,True,1,false,False,0).\n * project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`\n\n Examples:\n\n >>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={{'token': None}})\n >>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')\n >>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')\n \"\"\"\n import vaex\n import vaex.convert\n try:\n if not isinstance(path, (list, tuple)):\n # remote and clusters only support single path, not a list\n path = vaex.file.stringyfy(path)\n if path in aliases:\n path = aliases[path]\n path = vaex.file.stringyfy(path)\n if path.startswith(\"http://\") or path.startswith(\"ws://\") or \\\n path.startswith(\"vaex+wss://\") or path.startswith(\"wss://\") or \\\n path.startswith(\"vaex+http://\") or path.startswith(\"vaex+ws://\"):\n server, name = path.rsplit(\"/\", 1)\n url = urlparse(path)\n if '?' in name:\n name = name[:name.index('?')]\n extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}\n if 'token' in extra_args:\n kwargs['token'] = extra_args['token']\n if 'token_trusted' in extra_args:\n kwargs['token_trusted'] = extra_args['token_trusted']\n client = vaex.connect(server, **kwargs)\n return client[name]\n if path.startswith(\"cluster\"):\n import vaex.enterprise.distributed\n return vaex.enterprise.distributed.open(path, *args, **kwargs)\n\n import vaex.file\n import glob\n if isinstance(path, str):\n paths = [path]\n else:\n paths = path\n filenames = []\n for path in paths:\n path = vaex.file.stringyfy(path)\n if path in aliases:\n path = aliases[path]\n path = vaex.file.stringyfy(path)\n naked_path, options = vaex.file.split_options(path)\n if glob.has_magic(naked_path):\n filenames.extend(list(sorted(vaex.file.glob(path, fs_options=fs_options, fs=fs))))\n else:\n filenames.append(path)\n df = None\n if len(filenames) == 0:\n raise IOError(f'File pattern did not match anything {path}')\n filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)\n filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)\n if len(filenames) == 1:\n path = filenames[0]\n # # naked_path, _ = vaex.file.split_options(path, fs_options)\n _, ext, _ = vaex.file.split_ext(path)\n if ext == '.csv': # special case for csv\n return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, progress=progress, **kwargs)\n if convert:\n path_output = convert if isinstance(convert, str) else filename_hdf5\n vaex.convert.convert(\n path_input=path, fs_options_input=fs_options, fs_input=fs,\n path_output=path_output, fs_options_output=fs_options, fs_output=fs,\n progress=progress,\n *args, **kwargs\n )\n ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)\n else:\n ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)\n df = vaex.from_dataset(ds)\n if df is None:\n if os.path.exists(path):\n raise IOError('Could not open file: {}, did you install vaex-hdf5? 
Is the format supported?'.format(path))\n elif len(filenames) > 1:\n if convert not in [True, False]:\n filename_hdf5 = convert\n else:\n filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)\n if os.path.exists(filename_hdf5) and convert: # also check mtime\n df = vaex.open(filename_hdf5)\n else:\n dfs = []\n for filename in filenames:\n dfs.append(vaex.open(filename, fs_options=fs_options, fs=fs, convert=bool(convert), shuffle=shuffle, **kwargs))\n df = vaex.concat(dfs)\n if convert:\n if shuffle:\n df = df.shuffle()\n df.export_hdf5(filename_hdf5, progress=progress)\n df = vaex.open(filename_hdf5)\n\n if df is None:\n raise IOError('Unknown error opening: {}'.format(path))\n return df\n except:\n logger.exception(\"error opening %r\" % path)\n raise\n\n\ndef open_many(filenames):\n \"\"\"Open a list of filenames, and return a DataFrame with all DataFrames concatenated.\n\n :param list[str] filenames: list of filenames/paths\n :rtype: DataFrame\n \"\"\"\n dfs = []\n for filename in filenames:\n filename = filename.strip()\n if filename and filename[0] != \"#\":\n dfs.append(open(filename))\n return concat(dfs)\n\n\ndef from_samp(username=None, password=None):\n \"\"\"Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.\n\n Useful if you want to send a single table from say TOPCAT to vaex in a python console or notebook.\n \"\"\"\n print(\"Waiting for SAMP message...\")\n import vaex.samp\n t = vaex.samp.single_table(username=username, password=password)\n return from_astropy_table(t.to_table())\n\n\ndef from_astropy_table(table):\n \"\"\"Create a vaex DataFrame from an Astropy Table.\"\"\"\n from vaex.astro.astropy_table import DatasetAstropyTable\n ds = DatasetAstropyTable(table=table)\n return vaex.dataframe.DataFrameLocal(ds)\n\n\ndef from_dict(data):\n \"\"\"Create an in memory dataset from a dict with column names as keys and list/numpy-arrays as values\n\n Example\n\n >>> data = {'A':[1,2,3],'B':['a','b','c']}\n >>> vaex.from_dict(data)\n # A B\n 0 1 'a'\n 1 2 'b'\n 2 3 'c'\n\n :param data: A dict of {column:[value, value,...]}\n :rtype: DataFrame\n\n \"\"\"\n return vaex.from_arrays(**data)\n\n\ndef from_items(*items):\n \"\"\"Create an in memory DataFrame from numpy arrays, in contrast to from_arrays this keeps the order of columns intact (for Python < 3.6).\n\n Example\n\n >>> import vaex, numpy as np\n >>> x = np.arange(5)\n >>> y = x ** 2\n >>> vaex.from_items(('x', x), ('y', y))\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n\n :param items: list of [(name, numpy array), ...]\n :rtype: DataFrame\n\n \"\"\"\n return from_dict(dict(items))\n\n\ndef from_arrays(**arrays) -> vaex.dataframe.DataFrameLocal:\n \"\"\"Create an in memory DataFrame from numpy arrays.\n\n Example\n\n >>> import vaex, numpy as np\n >>> x = np.arange(5)\n >>> y = x ** 2\n >>> vaex.from_arrays(x=x, y=y)\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n >>> some_dict = {'x': x, 'y': y}\n >>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n\n :param arrays: keyword arguments with arrays\n :rtype: DataFrame\n \"\"\"\n import numpy as np\n import six\n dataset = vaex.dataset.DatasetArrays(arrays)\n return vaex.dataframe.DataFrameLocal(dataset)\n\n\ndef from_arrow_table(table) -> vaex.dataframe.DataFrame:\n \"\"\"Creates a vaex DataFrame from an arrow Table.\n\n :param as_numpy: Will lazily cast columns to a NumPy ndarray.\n :rtype: DataFrame\n \"\"\"\n 
from vaex.arrow.dataset import from_table\n return from_dataset(from_table(table=table))\n\n\ndef from_arrow_dataset(arrow_dataset) -> vaex.dataframe.DataFrame:\n '''Create a DataFrame from an Apache Arrow dataset'''\n import vaex.arrow.dataset\n return from_dataset(vaex.arrow.dataset.DatasetArrow(arrow_dataset))\n\n\ndef from_dataset(dataset: vaex.dataset.Dataset) -> vaex.dataframe.DataFrame:\n '''Create a Vaex DataFrame from a Vaex Dataset'''\n return vaex.dataframe.DataFrameLocal(dataset)\n\n\ndef from_scalars(**kwargs):\n \"\"\"Similar to from_arrays, but convenient for a DataFrame of length 1.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n\n :rtype: DataFrame\n \"\"\"\n import numpy as np\n return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})\n\n\ndef from_pandas(df, name=\"pandas\", copy_index=False, index_name=\"index\"):\n \"\"\"Create an in memory DataFrame from a pandas DataFrame.\n\n :param: pandas.DataFrame df: Pandas DataFrame\n :param: name: unique for the DataFrame\n\n >>> import vaex, pandas as pd\n >>> df_pandas = pd.from_csv('test.csv')\n >>> df = vaex.from_pandas(df_pandas)\n\n :rtype: DataFrame\n \"\"\"\n import six\n import pandas as pd\n import numpy as np\n import pyarrow as pa\n columns = {}\n\n def add(name, column):\n values = column.values\n # the first test is to support (partially) pandas 0.23\n if hasattr(pd.core.arrays, 'integer') and isinstance(values, pd.core.arrays.integer.IntegerArray):\n values = np.ma.array(values._data, mask=values._mask)\n elif hasattr(pd.core.arrays, 'StringArray') and isinstance(values, pd.core.arrays.StringArray):\n values = pa.array(values)\n elif hasattr(pd.core.arrays, 'FloatingArray') and isinstance(values, pd.core.arrays.FloatingArray):\n values = np.ma.array(values._data, mask=values._mask)\n try:\n columns[name] = vaex.dataset.to_supported_array(values)\n except Exception as e:\n print(\"could not convert column %s, error: %r, will try to convert it to string\" % (name, e))\n try:\n values = values.astype(\"S\")\n columns[name] = vaex.dataset.to_supported_array(values)\n except Exception as e:\n print(\"Giving up column %s, error: %r\" % (name, e))\n for name in df.columns:\n add(str(name), df[name])\n if copy_index:\n add(index_name, df.index)\n return from_dict(columns)\n\n\ndef from_ascii(path, seperator=None, names=True, skip_lines=0, skip_after=0, **kwargs):\n \"\"\"\n Create an in memory DataFrame from an ascii file (whitespace seperated by default).\n\n >>> ds = vx.from_ascii(\"table.asc\")\n >>> ds = vx.from_ascii(\"table.csv\", seperator=\",\", names=[\"x\", \"y\", \"z\"])\n\n :param path: file path\n :param seperator: value seperator, by default whitespace, use \",\" for comma seperated values.\n :param names: If True, the first line is used for the column names, otherwise provide a list of strings with names\n :param skip_lines: skip lines at the start of the file\n :param skip_after: skip lines at the end of the file\n :param kwargs:\n :rtype: DataFrame\n \"\"\"\n\n import vaex.ext.readcol as rc\n ds = vaex.dataframe.DataFrameLocal()\n if names not in [True, False]:\n namelist = names\n names = False\n else:\n namelist = None\n data = rc.readcol(path, fsep=seperator, asdict=namelist is None, names=names, skipline=skip_lines, skipafter=skip_after, **kwargs)\n if namelist:\n for name, array in zip(namelist, data.T):\n ds.add_column(name, array)\n else:\n for name, array in data.items():\n ds.add_column(name, array)\n return ds\n\n\ndef from_json(path_or_buffer, orient=None, 
precise_float=False, lines=False, copy_index=False, **kwargs):\n \"\"\" A method to read a JSON file using pandas, and convert to a DataFrame directly.\n\n :param str path_or_buffer: a valid JSON string or file-like, default: None\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gcs, and file. For file URLs, a host is expected. For instance, a local\n file could be ``file://localhost/path/to/table.json``\n :param str orient: Indication of expected JSON string format. Allowed values are\n ``split``, ``records``, ``index``, ``columns``, and ``values``.\n :param bool precise_float: Set to enable usage of higher precision (strtod) function when\n decoding string to double values. Default (False) is to use fast but less precise builtin functionality\n :param bool lines: Read the file as a json object per line.\n\n :rtype: DataFrame\n \"\"\"\n # Check for unsupported kwargs\n if kwargs.get('typ') == 'series':\n raise ValueError('`typ` must be set to `\"frame\"`.')\n if kwargs.get('numpy') == True:\n raise ValueError('`numpy` must be set to `False`.')\n if kwargs.get('chunksize') is not None:\n raise ValueError('`chunksize` must be `None`.')\n\n import pandas as pd\n return from_pandas(pd.read_json(path_or_buffer, orient=orient, precise_float=precise_float, lines=lines, **kwargs),\n copy_index=copy_index)\n\n\n@docsubst\ndef from_records(records : List[Dict], array_type=\"arrow\", defaults={}) -> vaex.dataframe.DataFrame:\n '''Create a dataframe from a list of dict.\n\n .. warning:: This is for convenience only, for performance pass arrays to :func:`from_arrays` for instance.\n\n :param str array_type: {array_type}\n :param dict defaults: default values if a record has a missing entry\n '''\n arrays = dict()\n for i, record in enumerate(records):\n for name, value in record.items():\n if name not in arrays:\n # prepend None's\n arrays[name] = [defaults.get(name)] * i\n arrays[name].append(value)\n for name in arrays:\n if name not in record:\n # missing values get replaced\n arrays[name].append(defaults.get(name))\n arrays = {k: vaex.array_types.convert(v, array_type) for k, v in arrays.items()}\n return vaex.from_dict(arrays)\n\n\n@docsubst\ndef from_csv(filename_or_buffer, copy_index=False, chunk_size=None, convert=False, fs_options={}, fs=None, progress=None, **kwargs):\n \"\"\"\n Read a CSV file as a DataFrame, and optionally convert to an hdf5 file.\n\n :param str or file filename_or_buffer: CSV file path or file-like\n :param bool copy_index: copy index when source is read via Pandas\n :param int chunk_size: if the CSV file is too big to fit in the memory this parameter can be used to read\n CSV file in chunks. For example:\n\n >>> import vaex\n >>> for i, df in enumerate(vaex.from_csv('taxi.csv', chunk_size=100_000)):\n >>> df = df[df.passenger_count < 6]\n >>> df.export_hdf5(f'taxi_{{i:02}}.hdf5')\n\n :param bool or str convert: convert files to an hdf5 file for optimization, can also be a path. The CSV\n file will be read in chunks: either using the provided chunk_size argument, or a default size. Each chunk will\n be saved as a separate hdf5 file, then all of them will be combined into one hdf5 file. So for a big CSV file\n you will need at least double of extra space on the disk. 
Default chunk_size for converting is 5 million rows,\n which corresponds to around 1Gb memory on an example of NYC Taxi dataset.\n :param progress: (*Only applies when convert is not False*) {progress}\n :param kwargs: extra keyword arguments, currently passed to Pandas read_csv function, but the implementation might\n change in future versions.\n :returns: DataFrame\n \"\"\"\n if not convert:\n return _from_csv_read(filename_or_buffer=filename_or_buffer, copy_index=copy_index,\n fs_options=fs_options, fs=fs, chunk_size=chunk_size, **kwargs)\n else:\n if chunk_size is None:\n # make it memory efficient by default\n chunk_size = 5_000_000\n import vaex.convert\n path_output = convert if isinstance(convert, str) else vaex.convert._convert_name(filename_or_buffer)\n vaex.convert.convert_csv(\n path_input=filename_or_buffer, fs_options_input=fs_options, fs_input=fs,\n path_output=path_output, fs_options_output=fs_options, fs_output=fs,\n chunk_size=chunk_size,\n copy_index=copy_index,\n progress=progress,\n **kwargs\n )\n return open(path_output, fs_options=fs_options, fs=fs)\n\n\ndef _from_csv_read(filename_or_buffer, copy_index, chunk_size, fs_options={}, fs=None, **kwargs):\n import pandas as pd\n if not chunk_size:\n with vaex.file.open(filename_or_buffer, fs_options=fs_options, fs=fs, for_arrow=True) as f:\n full_df = pd.read_csv(f, **kwargs)\n return from_pandas(full_df, copy_index=copy_index)\n else:\n def iterator():\n chunk_iterator = pd.read_csv(filename_or_buffer, chunksize=chunk_size, **kwargs)\n for chunk_df in chunk_iterator:\n yield from_pandas(chunk_df, copy_index=copy_index)\n return iterator()\n\n\ndef read_csv(filepath_or_buffer, **kwargs):\n '''Alias to from_csv.'''\n return from_csv(filepath_or_buffer, **kwargs)\n\naliases = vaex.settings.aliases\n\n\ndef connect(url, **kwargs):\n \"\"\"Connect to hostname supporting the vaex web api.\n\n :param str hostname: hostname or ip address of server\n :rtype: vaex.server.client.Client\n \"\"\"\n # dispatch to vaex.server package\n from vaex.server import connect\n return connect(url, **kwargs)\n\ndef example():\n '''Result of an N-body simulation of the accretion of 33 satellite galaxies into a Milky Way dark matter halo.\n\n Data was greated by Helmi & de Zeeuw 2000.\n The data contains the position (x, y, z), velocitie (vx, vy, vz), the energy (E),\n the angular momentum (L, Lz) and iron content (FeH) of the particles.\n\n :rtype: DataFrame\n '''\n return vaex.datasets.helmi_simulation_data()\n\n\n\n# there are kept for backwards compatibility\n# TODO: remove in vaex v5?\n\ndef set_log_level_debug(loggers=[\"vaex\"]):\n \"\"\"set log level to debug\"\"\"\n vaex.logging.set_log_level_debug(loggers)\n\n\ndef set_log_level_info(loggers=[\"vaex\"]):\n \"\"\"set log level to info\"\"\"\n vaex.logging.set_log_level_info(loggers)\n\n\ndef set_log_level_warning(loggers=[\"vaex\"]):\n \"\"\"set log level to warning\"\"\"\n vaex.logging.set_log_level_warning(loggers)\n\n\ndef set_log_level_exception(loggers=[\"vaex\"]):\n \"\"\"set log level to exception/error\"\"\"\n vaex.logging.set_log_level_error(loggers)\n\n\ndef set_log_level_off():\n \"\"\"Disabled logging\"\"\"\n vaex.logging.set_log_level_off()\n\n\nimport_script = os.path.expanduser(\"~/.vaex/vaex_import.py\")\nif os.path.exists(import_script):\n try:\n with open(import_script) as f:\n code = compile(f.read(), import_script, 'exec')\n exec(code)\n except:\n import traceback\n traceback.print_stack()\n\n\ndef register_dataframe_accessor(name, cls=None, override=False):\n 
\"\"\"Registers a new accessor for a dataframe\n\n See vaex.geo for an example.\n \"\"\"\n def wrapper(cls):\n old_value = getattr(vaex.dataframe.DataFrame, name, None)\n if old_value is not None and override is False:\n raise ValueError(\"DataFrame already has a property/accessor named %r (%r)\" % (name, old_value) )\n\n def get_accessor(self):\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n self.__dict__[name] = cls(self)\n return self.__dict__[name]\n setattr(vaex.dataframe.DataFrame, name, property(get_accessor))\n return cls\n if cls is None:\n return wrapper\n else:\n return wrapper(cls)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.namespace'):\n logger.warning('(DEPRECATED, use vaex.dataframe.accessor) adding vaex namespace: ' + entry.name)\n try:\n add_namespace = entry.load()\n add_namespace()\n except Exception:\n logger.exception('issue loading ' + entry.name)\n\n_lazy_accessors_map = {}\n\n\nclass _lazy_accessor(object):\n def __init__(self, name, scope, loader, lazy_accessors):\n \"\"\"When adding an accessor geo.cone, scope=='geo', name='cone', scope may be falsy\"\"\"\n self.loader = loader\n self.name = name\n self.scope = scope\n self.lazy_accessors = lazy_accessors\n\n def __call__(self, obj):\n if self.name in obj.__dict__:\n return obj.__dict__[self.name]\n else:\n cls = self.loader()\n accessor = cls(obj)\n obj.__dict__[self.name] = accessor\n fullname = self.name\n if self.scope:\n fullname = self.scope + '.' + self.name\n if fullname in self.lazy_accessors:\n for name, scope, loader, lazy_accessors in self.lazy_accessors[fullname]:\n assert fullname == scope\n setattr(cls, name, property(_lazy_accessor(name, scope, loader, lazy_accessors)))\n return obj.__dict__[self.name]\n\n\ndef _add_lazy_accessor(name, loader, target_class=vaex.dataframe.DataFrame):\n \"\"\"Internal use see tests/internal/accessor_test.py for usage\n\n This enables us to have df.foo.bar accessors that lazily loads the modules.\n \"\"\"\n parts = name.split('.')\n if target_class not in _lazy_accessors_map:\n _lazy_accessors_map[target_class] = {}\n lazy_accessors = _lazy_accessors_map[target_class]\n if len(parts) == 1:\n setattr(target_class, parts[0], property(_lazy_accessor(name, None, loader, lazy_accessors)))\n else:\n scope = \".\".join(parts[:-1])\n if scope not in lazy_accessors:\n lazy_accessors[scope] = []\n lazy_accessors[scope].append((parts[-1], scope, loader, lazy_accessors))\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.dataframe.accessor'):\n logger.debug('adding vaex accessor: ' + entry.name)\n def loader(entry=entry):\n return entry.load()\n _add_lazy_accessor(entry.name, loader)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.expression.accessor'):\n logger.debug('adding vaex expression accessor: ' + entry.name)\n def loader(entry=entry):\n return entry.load()\n _add_lazy_accessor(entry.name, loader, vaex.expression.Expression)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.plugin'):\n if entry.module_name == 'vaex_arrow.opener':\n # if vaex_arrow package is installed, we ignore it\n continue\n logger.debug('adding vaex plugin: ' + entry.name)\n try:\n add_namespace = entry.load()\n add_namespace()\n except Exception:\n logger.exception('issue loading ' + entry.name)\n\n\ndef concat(dfs, resolver='flexible') -> vaex.dataframe.DataFrame:\n '''Concatenate a list of DataFrames.\n\n :param resolver: How to resolve schema conflicts, see :meth:`DataFrame.concat`.\n '''\n df, *tail = dfs\n return 
df.concat(*tail, resolver=resolver)\n\ndef vrange(start, stop, step=1, dtype='f8'):\n \"\"\"Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory\n\n :param int start: Start of interval. The interval includes this value.\n :param int stop: End of interval. The interval does not include this value,\n :param int step: Spacing between values.\n :dtype: The preferred dtype for the column.\n \"\"\"\n from .column import ColumnVirtualRange\n return ColumnVirtualRange(start, stop, step, dtype)\n\ndef vconstant(value, length, dtype=None, chunk_size=1024):\n \"\"\"Creates a virtual column with constant values, which uses 0 memory.\n\n :param value: The value with which to fill the column\n :param length: The length of the column, i.e. the number of rows it should contain.\n :param dtype: The preferred dtype for the column.\n :param chunk_size: Could be used to optimize the performance (evaluation) of this column.\n \"\"\"\n from .column import ColumnVirtualConstant\n return ColumnVirtualConstant(value=value, length=length, dtype=dtype, chunk_size=chunk_size)\n\ndef string_column(strings):\n import pyarrow as pa\n return pa.array(strings)\n\n\ndef dtype(type):\n '''Creates a Vaex DataType based on a NumPy or Arrow type'''\n return vaex.datatype.DataType(type)\n\ndef dtype_of(ar) -> vaex.datatype.DataType:\n '''Creates a Vaex DataType from a NumPy or Arrow array'''\n if isinstance(ar, vaex.dataset.Column):\n return dtype(ar.dtype)\n elif vaex.array_types.is_arrow_array(ar):\n return dtype(ar.type)\n elif vaex.array_types.is_numpy_array(ar) or isinstance(ar, vaex.column.supported_column_types):\n return dtype(ar.dtype)\n else:\n raise TypeError(f'{ar} is not a an Arrow or NumPy array')\n\n\nclass RowLimitException(ValueError):\n pass\n",
"path": "packages/vaex-core/vaex/__init__.py"
}
] | [
{
"content": "\"\"\"\nVaex is a library for dealing with larger than memory DataFrames (out of core).\n\nThe most important class (datastructure) in vaex is the :class:`.DataFrame`. A DataFrame is obtained by either opening\nthe example dataset:\n\n>>> import vaex\n>>> df = vaex.example()\n\nOr using :func:`open` to open a file.\n\n>>> df1 = vaex.open(\"somedata.hdf5\")\n>>> df2 = vaex.open(\"somedata.fits\")\n>>> df2 = vaex.open(\"somedata.arrow\")\n>>> df4 = vaex.open(\"somedata.csv\")\n\nOr connecting to a remove server:\n\n>>> df_remote = vaex.open(\"http://try.vaex.io/nyc_taxi_2015\")\n\n\nA few strong features of vaex are:\n\n- Performance: works with huge tabular data, process over a billion (> 10\\\\ :sup:`9`\\\\ ) rows/second.\n- Expression system / Virtual columns: compute on the fly, without wasting ram.\n- Memory efficient: no memory copies when doing filtering/selections/subsets.\n- Visualization: directly supported, a one-liner is often enough.\n- User friendly API: you will only need to deal with a DataFrame object, and tab completion + docstring will help you out: `ds.mean<tab>`, feels very similar to Pandas.\n- Very fast statistics on N dimensional grids such as histograms, running mean, heatmaps.\n\n\nFollow the tutorial at https://docs.vaex.io/en/latest/tutorial.html to learn how to use vaex.\n\n\"\"\" # -*- coding: utf-8 -*-\nimport logging as root_logging\nimport os\nimport pkg_resources\nfrom typing import Dict, List\nfrom urllib.parse import urlparse, parse_qs\n\n# first configure logging, which also imports vaex.settings\nimport vaex.logging\n# import this to be explicit\nimport vaex.settings\n\nimport vaex.dataframe\nimport vaex.dataset\nfrom vaex.docstrings import docsubst\nfrom vaex.registry import register_function\nfrom vaex import functions, struct\nfrom . import stat\n# import vaex.file\n# import vaex.export\nfrom .delayed import delayed\nfrom .groupby import *\nfrom . import agg\nimport vaex.datasets\n\n# Re-export these so users can type hint with eg vaex.DataFrame\nfrom vaex.dataframe import DataFrame as DataFrame\nfrom vaex.expression import Expression as Expression\n\n\n\nimport vaex.progress\n\ntry:\n from . import version\nexcept:\n import sys\n print(\"version file not found, please run git/hooks/post-commit or git/hooks/post-checkout and/or install them as hooks (see git/README)\", file=sys.stderr)\n raise\n\nlogger = root_logging.getLogger('vaex')\nDEBUG_MODE = os.environ.get('VAEX_DEBUG', '')\n__version__ = version.get_versions()\n\n\ndef app(*args, **kwargs):\n \"\"\"Create a vaex app, the QApplication mainloop must be started.\n\n In ipython notebook/jupyter do the following:\n\n >>> import vaex.ui.main # this causes the qt api level to be set properly\n >>> import vaex\n\n Next cell:\n\n >>> %gui qt\n\n Next cell:\n\n >>> app = vaex.app()\n\n From now on, you can run the app along with jupyter\n\n \"\"\"\n\n import vaex.ui.main\n return vaex.ui.main.VaexApp()\n\n\n@docsubst\ndef open(path, convert=False, progress=None, shuffle=False, fs_options={}, fs=None, *args, **kwargs):\n \"\"\"Open a DataFrame from file given by path.\n\n Example:\n\n >>> df = vaex.open('sometable.hdf5')\n >>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')\n\n :param str or list path: local or absolute path to file, or glob string, or list of paths\n :param convert: Uses `dataframe.export` when convert is a path. 
If True, ``convert=path+'.hdf5'``\n The conversion is skipped if the input file or conversion argument did not change.\n :param progress: (*Only applies when convert is not False*) {progress}\n :param bool shuffle: shuffle converted DataFrame or not\n :param dict fs_options: Extra arguments passed to an optional file system if needed. See below\n :param group: (optional) Specify the group to be read from and HDF5 file. By default this is set to \"/table\".\n :param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object, if specified, fs_options should be empty.\n :param args: extra arguments for file readers that need it\n :param kwargs: extra keyword arguments\n :return: return a DataFrame on success, otherwise None\n :rtype: DataFrame\n\n Cloud storage support:\n\n Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.\n Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access\n is as fast as native disk access.\n\n Amazon AWS S3 options:\n\n The following common fs_options are used for S3 access:\n\n * `anon`: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)\n * `anonymous` - Alias for `anon`\n * `cache`: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)\n * `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file\n * `secret_key` - AWS secret key, similar to `access_key`\n * `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html\n * `region` - AWS Region, e.g. 'us-east-1`, will be determined automatically if not provided.\n * `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio\n\n All fs_options can also be encoded in the file path as a query string.\n\n Examples:\n\n >>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={{'anonymous': True}})\n >>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')\n >>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={{'access_key': my_key, 'secret_key': my_secret_key}})\n >>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')\n >>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')\n\n Google Cloud Storage options:\n\n The following fs_options are used for GCP access:\n\n * token: Authentication method for GCP. Use 'anon' for annonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.\n * cache: Use the disk cache or not, only set to false if the data should be accessed once. 
(Allowed values are: true,True,1,false,False,0).\n * project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`\n\n Examples:\n\n >>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={{'token': None}})\n >>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')\n >>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')\n \"\"\"\n import vaex\n import vaex.convert\n try:\n if not isinstance(path, (list, tuple)):\n # remote and clusters only support single path, not a list\n path = vaex.file.stringyfy(path)\n if path in aliases:\n path = aliases[path]\n path = vaex.file.stringyfy(path)\n if path.startswith(\"http://\") or path.startswith(\"ws://\") or \\\n path.startswith(\"vaex+wss://\") or path.startswith(\"wss://\") or \\\n path.startswith(\"vaex+http://\") or path.startswith(\"vaex+ws://\"):\n server, name = path.rsplit(\"/\", 1)\n url = urlparse(path)\n if '?' in name:\n name = name[:name.index('?')]\n extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}\n if 'token' in extra_args:\n kwargs['token'] = extra_args['token']\n if 'token_trusted' in extra_args:\n kwargs['token_trusted'] = extra_args['token_trusted']\n client = vaex.connect(server, **kwargs)\n return client[name]\n if path.startswith(\"cluster\"):\n import vaex.enterprise.distributed\n return vaex.enterprise.distributed.open(path, *args, **kwargs)\n\n import vaex.file\n import glob\n if isinstance(path, str):\n paths = [path]\n else:\n paths = path\n filenames = []\n for path in paths:\n path = vaex.file.stringyfy(path)\n if path in aliases:\n path = aliases[path]\n path = vaex.file.stringyfy(path)\n naked_path, options = vaex.file.split_options(path)\n if glob.has_magic(naked_path):\n filenames.extend(list(sorted(vaex.file.glob(path, fs_options=fs_options, fs=fs))))\n else:\n filenames.append(path)\n df = None\n if len(filenames) == 0:\n raise IOError(f'File pattern did not match anything {path}')\n filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)\n filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)\n if len(filenames) == 1:\n path = filenames[0]\n # # naked_path, _ = vaex.file.split_options(path, fs_options)\n _, ext, _ = vaex.file.split_ext(path)\n if ext == '.csv': # special case for csv\n return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, progress=progress, **kwargs)\n if convert:\n path_output = convert if isinstance(convert, str) else filename_hdf5\n vaex.convert.convert(\n path_input=path, fs_options_input=fs_options, fs_input=fs,\n path_output=path_output, fs_options_output=fs_options, fs_output=fs,\n progress=progress,\n *args, **kwargs\n )\n ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)\n else:\n ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)\n df = vaex.from_dataset(ds)\n if df is None:\n if os.path.exists(path):\n raise IOError('Could not open file: {}, did you install vaex-hdf5? 
Is the format supported?'.format(path))\n elif len(filenames) > 1:\n if convert not in [True, False]:\n filename_hdf5 = convert\n else:\n filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)\n if os.path.exists(filename_hdf5) and convert: # also check mtime\n df = vaex.open(filename_hdf5)\n else:\n dfs = []\n for filename in filenames:\n dfs.append(vaex.open(filename, fs_options=fs_options, fs=fs, convert=bool(convert), shuffle=shuffle, **kwargs))\n df = vaex.concat(dfs)\n if convert:\n if shuffle:\n df = df.shuffle()\n df.export_hdf5(filename_hdf5, progress=progress)\n df = vaex.open(filename_hdf5)\n\n if df is None:\n raise IOError('Unknown error opening: {}'.format(path))\n return df\n except:\n logger.exception(\"error opening %r\" % path)\n raise\n\n\ndef open_many(filenames):\n \"\"\"Open a list of filenames, and return a DataFrame with all DataFrames concatenated.\n\n :param list[str] filenames: list of filenames/paths\n :rtype: DataFrame\n \"\"\"\n dfs = []\n for filename in filenames:\n filename = filename.strip()\n if filename and filename[0] != \"#\":\n dfs.append(open(filename))\n return concat(dfs)\n\n\ndef from_samp(username=None, password=None):\n \"\"\"Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.\n\n Useful if you want to send a single table from say TOPCAT to vaex in a python console or notebook.\n \"\"\"\n print(\"Waiting for SAMP message...\")\n import vaex.samp\n t = vaex.samp.single_table(username=username, password=password)\n return from_astropy_table(t.to_table())\n\n\ndef from_astropy_table(table):\n \"\"\"Create a vaex DataFrame from an Astropy Table.\"\"\"\n from vaex.astro.astropy_table import DatasetAstropyTable\n ds = DatasetAstropyTable(table=table)\n return vaex.dataframe.DataFrameLocal(ds)\n\n\ndef from_dict(data):\n \"\"\"Create an in memory dataset from a dict with column names as keys and list/numpy-arrays as values\n\n Example\n\n >>> data = {'A':[1,2,3],'B':['a','b','c']}\n >>> vaex.from_dict(data)\n # A B\n 0 1 'a'\n 1 2 'b'\n 2 3 'c'\n\n :param data: A dict of {column:[value, value,...]}\n :rtype: DataFrame\n\n \"\"\"\n return vaex.from_arrays(**data)\n\n\ndef from_items(*items):\n \"\"\"Create an in memory DataFrame from numpy arrays, in contrast to from_arrays this keeps the order of columns intact (for Python < 3.6).\n\n Example\n\n >>> import vaex, numpy as np\n >>> x = np.arange(5)\n >>> y = x ** 2\n >>> vaex.from_items(('x', x), ('y', y))\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n\n :param items: list of [(name, numpy array), ...]\n :rtype: DataFrame\n\n \"\"\"\n return from_dict(dict(items))\n\n\ndef from_arrays(**arrays) -> vaex.dataframe.DataFrameLocal:\n \"\"\"Create an in memory DataFrame from numpy arrays.\n\n Example\n\n >>> import vaex, numpy as np\n >>> x = np.arange(5)\n >>> y = x ** 2\n >>> vaex.from_arrays(x=x, y=y)\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n >>> some_dict = {'x': x, 'y': y}\n >>> vaex.from_arrays(**some_dict) # in case you have your columns in a dict\n # x y\n 0 0 0\n 1 1 1\n 2 2 4\n 3 3 9\n 4 4 16\n\n :param arrays: keyword arguments with arrays\n :rtype: DataFrame\n \"\"\"\n import numpy as np\n import six\n dataset = vaex.dataset.DatasetArrays(arrays)\n return vaex.dataframe.DataFrameLocal(dataset)\n\n\ndef from_arrow_table(table) -> vaex.dataframe.DataFrame:\n \"\"\"Creates a vaex DataFrame from an arrow Table.\n\n :param as_numpy: Will lazily cast columns to a NumPy ndarray.\n :rtype: DataFrame\n \"\"\"\n 
from vaex.arrow.dataset import from_table\n return from_dataset(from_table(table=table))\n\n\ndef from_arrow_dataset(arrow_dataset) -> vaex.dataframe.DataFrame:\n '''Create a DataFrame from an Apache Arrow dataset'''\n import vaex.arrow.dataset\n return from_dataset(vaex.arrow.dataset.DatasetArrow(arrow_dataset))\n\n\ndef from_dataset(dataset: vaex.dataset.Dataset) -> vaex.dataframe.DataFrame:\n '''Create a Vaex DataFrame from a Vaex Dataset'''\n return vaex.dataframe.DataFrameLocal(dataset)\n\n\ndef from_scalars(**kwargs):\n \"\"\"Similar to from_arrays, but convenient for a DataFrame of length 1.\n\n Example:\n\n >>> import vaex\n >>> df = vaex.from_scalars(x=1, y=2)\n\n :rtype: DataFrame\n \"\"\"\n import numpy as np\n return from_arrays(**{k: np.array([v]) for k, v in kwargs.items()})\n\n\ndef from_pandas(df, name=\"pandas\", copy_index=False, index_name=\"index\"):\n \"\"\"Create an in memory DataFrame from a pandas DataFrame.\n\n :param: pandas.DataFrame df: Pandas DataFrame\n :param: name: unique for the DataFrame\n\n >>> import vaex, pandas as pd\n >>> df_pandas = pd.from_csv('test.csv')\n >>> df = vaex.from_pandas(df_pandas)\n\n :rtype: DataFrame\n \"\"\"\n import six\n import pandas as pd\n import numpy as np\n import pyarrow as pa\n columns = {}\n\n def add(name, column):\n values = column.values\n # the first test is to support (partially) pandas 0.23\n if hasattr(pd.core.arrays, 'integer') and isinstance(values, pd.core.arrays.integer.IntegerArray):\n values = np.ma.array(values._data, mask=values._mask)\n elif hasattr(pd.core.arrays, 'StringArray') and isinstance(values, pd.core.arrays.StringArray):\n values = pa.array(values)\n elif hasattr(pd.core.arrays, 'FloatingArray') and isinstance(values, pd.core.arrays.FloatingArray):\n values = np.ma.array(values._data, mask=values._mask)\n try:\n columns[name] = vaex.dataset.to_supported_array(values)\n except Exception as e:\n print(\"could not convert column %s, error: %r, will try to convert it to string\" % (name, e))\n try:\n values = values.astype(\"S\")\n columns[name] = vaex.dataset.to_supported_array(values)\n except Exception as e:\n print(\"Giving up column %s, error: %r\" % (name, e))\n for name in df.columns:\n add(str(name), df[name])\n if copy_index:\n add(index_name, df.index)\n return from_dict(columns)\n\n\ndef from_ascii(path, seperator=None, names=True, skip_lines=0, skip_after=0, **kwargs):\n \"\"\"\n Create an in memory DataFrame from an ascii file (whitespace seperated by default).\n\n >>> ds = vx.from_ascii(\"table.asc\")\n >>> ds = vx.from_ascii(\"table.csv\", seperator=\",\", names=[\"x\", \"y\", \"z\"])\n\n :param path: file path\n :param seperator: value seperator, by default whitespace, use \",\" for comma seperated values.\n :param names: If True, the first line is used for the column names, otherwise provide a list of strings with names\n :param skip_lines: skip lines at the start of the file\n :param skip_after: skip lines at the end of the file\n :param kwargs:\n :rtype: DataFrame\n \"\"\"\n\n import vaex.ext.readcol as rc\n ds = vaex.dataframe.DataFrameLocal()\n if names not in [True, False]:\n namelist = names\n names = False\n else:\n namelist = None\n data = rc.readcol(path, fsep=seperator, asdict=namelist is None, names=names, skipline=skip_lines, skipafter=skip_after, **kwargs)\n if namelist:\n for name, array in zip(namelist, data.T):\n ds.add_column(name, array)\n else:\n for name, array in data.items():\n ds.add_column(name, array)\n return ds\n\n\ndef from_json(path_or_buffer, orient=None, 
precise_float=False, lines=False, copy_index=False, **kwargs):\n \"\"\" A method to read a JSON file using pandas, and convert to a DataFrame directly.\n\n :param str path_or_buffer: a valid JSON string or file-like, default: None\n The string could be a URL. Valid URL schemes include http, ftp, s3,\n gcs, and file. For file URLs, a host is expected. For instance, a local\n file could be ``file://localhost/path/to/table.json``\n :param str orient: Indication of expected JSON string format. Allowed values are\n ``split``, ``records``, ``index``, ``columns``, and ``values``.\n :param bool precise_float: Set to enable usage of higher precision (strtod) function when\n decoding string to double values. Default (False) is to use fast but less precise builtin functionality\n :param bool lines: Read the file as a json object per line.\n\n :rtype: DataFrame\n \"\"\"\n # Check for unsupported kwargs\n if kwargs.get('typ') == 'series':\n raise ValueError('`typ` must be set to `\"frame\"`.')\n if kwargs.get('numpy') == True:\n raise ValueError('`numpy` must be set to `False`.')\n if kwargs.get('chunksize') is not None:\n raise ValueError('`chunksize` must be `None`.')\n\n import pandas as pd\n return from_pandas(pd.read_json(path_or_buffer, orient=orient, precise_float=precise_float, lines=lines, **kwargs),\n copy_index=copy_index)\n\n\n@docsubst\ndef from_records(records : List[Dict], array_type=\"arrow\", defaults={}) -> vaex.dataframe.DataFrame:\n '''Create a dataframe from a list of dict.\n\n .. warning:: This is for convenience only, for performance pass arrays to :func:`from_arrays` for instance.\n\n :param str array_type: {array_type}\n :param dict defaults: default values if a record has a missing entry\n '''\n arrays = dict()\n for i, record in enumerate(records):\n for name, value in record.items():\n if name not in arrays:\n # prepend None's\n arrays[name] = [defaults.get(name)] * i\n arrays[name].append(value)\n for name in arrays:\n if name not in record:\n # missing values get replaced\n arrays[name].append(defaults.get(name))\n arrays = {k: vaex.array_types.convert(v, array_type) for k, v in arrays.items()}\n return vaex.from_dict(arrays)\n\n\n@docsubst\ndef from_csv(filename_or_buffer, copy_index=False, chunk_size=None, convert=False, fs_options={}, fs=None, progress=None, **kwargs):\n \"\"\"\n Read a CSV file as a DataFrame, and optionally convert to an hdf5 file.\n\n :param str or file filename_or_buffer: CSV file path or file-like\n :param bool copy_index: copy index when source is read via Pandas\n :param int chunk_size: if the CSV file is too big to fit in the memory this parameter can be used to read\n CSV file in chunks. For example:\n\n >>> import vaex\n >>> for i, df in enumerate(vaex.from_csv('taxi.csv', chunk_size=100_000)):\n >>> df = df[df.passenger_count < 6]\n >>> df.export_hdf5(f'taxi_{{i:02}}.hdf5')\n\n :param bool or str convert: convert files to an hdf5 file for optimization, can also be a path. The CSV\n file will be read in chunks: either using the provided chunk_size argument, or a default size. Each chunk will\n be saved as a separate hdf5 file, then all of them will be combined into one hdf5 file. So for a big CSV file\n you will need at least double of extra space on the disk. 
Default chunk_size for converting is 5 million rows,\n which corresponds to around 1Gb memory on an example of NYC Taxi dataset.\n :param progress: (*Only applies when convert is not False*) {progress}\n :param kwargs: extra keyword arguments, currently passed to Pandas read_csv function, but the implementation might\n change in future versions.\n :returns: DataFrame\n \"\"\"\n if not convert:\n return _from_csv_read(filename_or_buffer=filename_or_buffer, copy_index=copy_index,\n fs_options=fs_options, fs=fs, chunk_size=chunk_size, **kwargs)\n else:\n if chunk_size is None:\n # make it memory efficient by default\n chunk_size = 5_000_000\n import vaex.convert\n path_output = convert if isinstance(convert, str) else vaex.convert._convert_name(filename_or_buffer)\n vaex.convert.convert_csv(\n path_input=filename_or_buffer, fs_options_input=fs_options, fs_input=fs,\n path_output=path_output, fs_options_output=fs_options, fs_output=fs,\n chunk_size=chunk_size,\n copy_index=copy_index,\n progress=progress,\n **kwargs\n )\n return open(path_output, fs_options=fs_options, fs=fs)\n\n\ndef _from_csv_read(filename_or_buffer, copy_index, chunk_size, fs_options={}, fs=None, **kwargs):\n import pandas as pd\n if not chunk_size:\n with vaex.file.open(filename_or_buffer, fs_options=fs_options, fs=fs, for_arrow=True) as f:\n full_df = pd.read_csv(f, **kwargs)\n return from_pandas(full_df, copy_index=copy_index)\n else:\n def iterator():\n chunk_iterator = pd.read_csv(filename_or_buffer, chunksize=chunk_size, **kwargs)\n for chunk_df in chunk_iterator:\n yield from_pandas(chunk_df, copy_index=copy_index)\n return iterator()\n\n\ndef read_csv(filepath_or_buffer, **kwargs):\n '''Alias to from_csv.'''\n return from_csv(filepath_or_buffer, **kwargs)\n\naliases = vaex.settings.aliases\n\n\ndef connect(url, **kwargs):\n \"\"\"Connect to hostname supporting the vaex web api.\n\n :param str hostname: hostname or ip address of server\n :rtype: vaex.server.client.Client\n \"\"\"\n # dispatch to vaex.server package\n from vaex.server import connect\n return connect(url, **kwargs)\n\ndef example():\n '''Result of an N-body simulation of the accretion of 33 satellite galaxies into a Milky Way dark matter halo.\n\n Data was greated by Helmi & de Zeeuw 2000.\n The data contains the position (x, y, z), velocitie (vx, vy, vz), the energy (E),\n the angular momentum (L, Lz) and iron content (FeH) of the particles.\n\n :rtype: DataFrame\n '''\n return vaex.datasets.helmi_simulation_data()\n\n\n\n# there are kept for backwards compatibility\n# TODO: remove in vaex v5?\n\ndef set_log_level_debug(loggers=[\"vaex\"]):\n \"\"\"set log level to debug\"\"\"\n vaex.logging.set_log_level_debug(loggers)\n\n\ndef set_log_level_info(loggers=[\"vaex\"]):\n \"\"\"set log level to info\"\"\"\n vaex.logging.set_log_level_info(loggers)\n\n\ndef set_log_level_warning(loggers=[\"vaex\"]):\n \"\"\"set log level to warning\"\"\"\n vaex.logging.set_log_level_warning(loggers)\n\n\ndef set_log_level_exception(loggers=[\"vaex\"]):\n \"\"\"set log level to exception/error\"\"\"\n vaex.logging.set_log_level_error(loggers)\n\n\ndef set_log_level_off():\n \"\"\"Disabled logging\"\"\"\n vaex.logging.set_log_level_off()\n\n\nimport_script = os.path.expanduser(\"~/.vaex/vaex_import.py\")\nif os.path.exists(import_script):\n try:\n with open(import_script) as f:\n code = compile(f.read(), import_script, 'exec')\n exec(code)\n except:\n import traceback\n traceback.print_stack()\n\n\ndef register_dataframe_accessor(name, cls=None, override=False):\n 
\"\"\"Registers a new accessor for a dataframe\n\n See vaex.geo for an example.\n \"\"\"\n def wrapper(cls):\n old_value = getattr(vaex.dataframe.DataFrame, name, None)\n if old_value is not None and override is False:\n raise ValueError(\"DataFrame already has a property/accessor named %r (%r)\" % (name, old_value) )\n\n def get_accessor(self):\n if name in self.__dict__:\n return self.__dict__[name]\n else:\n self.__dict__[name] = cls(self)\n return self.__dict__[name]\n setattr(vaex.dataframe.DataFrame, name, property(get_accessor))\n return cls\n if cls is None:\n return wrapper\n else:\n return wrapper(cls)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.namespace'):\n logger.warning('(DEPRECATED, use vaex.dataframe.accessor) adding vaex namespace: ' + entry.name)\n try:\n add_namespace = entry.load()\n add_namespace()\n except Exception:\n logger.exception('issue loading ' + entry.name)\n\n_lazy_accessors_map = {}\n\n\nclass _lazy_accessor(object):\n def __init__(self, name, scope, loader, lazy_accessors):\n \"\"\"When adding an accessor geo.cone, scope=='geo', name='cone', scope may be falsy\"\"\"\n self.loader = loader\n self.name = name\n self.scope = scope\n self.lazy_accessors = lazy_accessors\n\n def __call__(self, obj):\n if self.name in obj.__dict__:\n return obj.__dict__[self.name]\n else:\n cls = self.loader()\n accessor = cls(obj)\n obj.__dict__[self.name] = accessor\n fullname = self.name\n if self.scope:\n fullname = self.scope + '.' + self.name\n if fullname in self.lazy_accessors:\n for name, scope, loader, lazy_accessors in self.lazy_accessors[fullname]:\n assert fullname == scope\n setattr(cls, name, property(_lazy_accessor(name, scope, loader, lazy_accessors)))\n return obj.__dict__[self.name]\n\n\ndef _add_lazy_accessor(name, loader, target_class=vaex.dataframe.DataFrame):\n \"\"\"Internal use see tests/internal/accessor_test.py for usage\n\n This enables us to have df.foo.bar accessors that lazily loads the modules.\n \"\"\"\n parts = name.split('.')\n if target_class not in _lazy_accessors_map:\n _lazy_accessors_map[target_class] = {}\n lazy_accessors = _lazy_accessors_map[target_class]\n if len(parts) == 1:\n setattr(target_class, parts[0], property(_lazy_accessor(name, None, loader, lazy_accessors)))\n else:\n scope = \".\".join(parts[:-1])\n if scope not in lazy_accessors:\n lazy_accessors[scope] = []\n lazy_accessors[scope].append((parts[-1], scope, loader, lazy_accessors))\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.dataframe.accessor'):\n logger.debug('adding vaex accessor: ' + entry.name)\n def loader(entry=entry):\n return entry.load()\n _add_lazy_accessor(entry.name, loader)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.expression.accessor'):\n logger.debug('adding vaex expression accessor: ' + entry.name)\n def loader(entry=entry):\n return entry.load()\n _add_lazy_accessor(entry.name, loader, vaex.expression.Expression)\n\n\nfor entry in pkg_resources.iter_entry_points(group='vaex.plugin'):\n if entry.module_name == 'vaex_arrow.opener':\n # if vaex_arrow package is installed, we ignore it\n continue\n logger.debug('adding vaex plugin: ' + entry.name)\n try:\n add_namespace = entry.load()\n add_namespace()\n except Exception:\n logger.exception('issue loading ' + entry.name)\n\n\ndef concat(dfs, resolver='flexible') -> vaex.dataframe.DataFrame:\n '''Concatenate a list of DataFrames.\n\n :param resolver: How to resolve schema conflicts, see :meth:`DataFrame.concat`.\n '''\n df, *tail = dfs\n return 
df.concat(*tail, resolver=resolver)\n\ndef vrange(start, stop, step=1, dtype='f8'):\n \"\"\"Creates a virtual column which is the equivalent of numpy.arange, but uses 0 memory\n\n :param int start: Start of interval. The interval includes this value.\n :param int stop: End of interval. The interval does not include this value,\n :param int step: Spacing between values.\n :dtype: The preferred dtype for the column.\n \"\"\"\n from .column import ColumnVirtualRange\n return ColumnVirtualRange(start, stop, step, dtype)\n\ndef vconstant(value, length, dtype=None, chunk_size=1024):\n \"\"\"Creates a virtual column with constant values, which uses 0 memory.\n\n :param value: The value with which to fill the column\n :param length: The length of the column, i.e. the number of rows it should contain.\n :param dtype: The preferred dtype for the column.\n :param chunk_size: Could be used to optimize the performance (evaluation) of this column.\n \"\"\"\n from .column import ColumnVirtualConstant\n return ColumnVirtualConstant(value=value, length=length, dtype=dtype, chunk_size=chunk_size)\n\ndef string_column(strings):\n import pyarrow as pa\n return pa.array(strings)\n\n\ndef dtype(type):\n '''Creates a Vaex DataType based on a NumPy or Arrow type'''\n return vaex.datatype.DataType(type)\n\ndef dtype_of(ar) -> vaex.datatype.DataType:\n '''Creates a Vaex DataType from a NumPy or Arrow array'''\n if isinstance(ar, vaex.dataset.Column):\n return dtype(ar.dtype)\n elif vaex.array_types.is_arrow_array(ar):\n return dtype(ar.type)\n elif vaex.array_types.is_numpy_array(ar) or isinstance(ar, vaex.column.supported_column_types):\n return dtype(ar.dtype)\n else:\n raise TypeError(f'{ar} is not a an Arrow or NumPy array')\n\n\nclass RowLimitException(ValueError):\n pass\n",
"path": "packages/vaex-core/vaex/__init__.py"
}
] | diff --git a/packages/vaex-core/vaex/__init__.py b/packages/vaex-core/vaex/__init__.py
index f7a91b5fc6..50bbb0b921 100644
--- a/packages/vaex-core/vaex/__init__.py
+++ b/packages/vaex-core/vaex/__init__.py
@@ -56,6 +56,9 @@
from . import agg
import vaex.datasets
+# Re-export these so users can type hint with eg vaex.DataFrame
+from vaex.dataframe import DataFrame as DataFrame
+from vaex.expression import Expression as Expression
|
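A hedged usage sketch of what this re-export enables — annotating with the top-level `vaex.DataFrame` / `vaex.Expression` names instead of importing them from the submodules (the helper function is illustrative, not part of vaex):

```python
import vaex


def mean_of(df: vaex.DataFrame, expr: vaex.Expression) -> float:
    # Type checkers can now resolve both annotations via the package root.
    return float(df.mean(expr))


df = vaex.from_scalars(x=1.0, y=2.0)
print(mean_of(df, df.x))
```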
Textualize__textual-2755 | A lone `Static` results in a `TooManyMatches` error when using `query_one`
I've not dived into this beyond knocking up this example to isolate what I saw (about to head out of the door but wanted to record this as a reminder). With 0.27.0 (perhaps before too, just noting the version here for the record), this code:
```python
from textual.app import App, ComposeResult
from textual.widgets import Static

class OneStatic( App[ None ] ):

    def compose( self ) -> ComposeResult:
        yield Static()

    def on_mount( self ) -> None:
        self.query_one( Static ).update( "Hello, World!" )

if __name__ == "__main__":
    OneStatic().run()
```
results in a `TooManyMatches` error being raised from the `query_one` call. From very early testing, this only seems to be the case with `Static` (at least, I tested with `Label` and `Button` and they're fine).
I think most people would rightly find this surprising.
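For context, the eventual fix (see the diff later in this record) declares `Tooltip` with `inherit_css=False` so it no longer matches the `Static` type selector; the second match comes from the tooltip widget that Textual mounts internally, which subclasses `Static`. Until that lands, a workaround sketch — assuming you can give your widget an id — is to query by id instead of by bare type:

```python
from textual.app import App, ComposeResult
from textual.widgets import Static

class OneStatic( App[ None ] ):

    def compose( self ) -> ComposeResult:
        yield Static( id="greeting" )  # give the widget an id we can target

    def on_mount( self ) -> None:
        # An id selector only matches our widget, not the internal Tooltip.
        self.query_one( "#greeting", Static ).update( "Hello, World!" )

if __name__ == "__main__":
    OneStatic().run()
```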
| [
{
"content": "from __future__ import annotations\n\nfrom textual.widgets import Static\n\n\nclass Tooltip(Static):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n margin: 1 2;\n padding: 1 2;\n background: $panel;\n width: auto;\n height: auto;\n constrain: inflect;\n max-width: 40;\n display: none;\n }\n \"\"\"\n",
"path": "src/textual/widgets/_tooltip.py"
}
] | [
{
"content": "from __future__ import annotations\n\nfrom textual.widgets import Static\n\n\nclass Tooltip(Static, inherit_css=False):\n DEFAULT_CSS = \"\"\"\n Tooltip {\n layer: _tooltips;\n margin: 1 2;\n padding: 1 2;\n background: $panel;\n width: auto;\n height: auto;\n constrain: inflect;\n max-width: 40;\n display: none;\n }\n \"\"\"\n",
"path": "src/textual/widgets/_tooltip.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 23739959d7..ce7c782824 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- Fixed setting `TreeNode.label` on an existing `Tree` node not immediately https://github.com/Textualize/textual/pull/2713
- Correctly implement `__eq__` protocol in DataTable https://github.com/Textualize/textual/pull/2705
+- Fixed `Tooltip` causing a `query_one` on a lone `Static` to fail https://github.com/Textualize/textual/issues/2723
### Changed
diff --git a/src/textual/widgets/_tooltip.py b/src/textual/widgets/_tooltip.py
index c00b57be62..94664edb47 100644
--- a/src/textual/widgets/_tooltip.py
+++ b/src/textual/widgets/_tooltip.py
@@ -3,7 +3,7 @@
from textual.widgets import Static
-class Tooltip(Static):
+class Tooltip(Static, inherit_css=False):
DEFAULT_CSS = """
Tooltip {
layer: _tooltips;
|
openfun__marsha-2411 | 🐛(backend) licence not saved during video resource creation
## Bug Report
**Problematic Behavior**
When we create a video and set a licence, the licence is not saved.
[error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6)
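A toy sketch (not Marsha's actual code) of the likely mechanism: a Django `ModelForm` only persists the fields whitelisted in `Meta.fields`, so a submitted `license` value is silently dropped until it is added to that list — which is exactly what the fix in the attached diff does.

```python
# Illustrative whitelisting; the field names mirror the real VideoForm.
FIELDS = ["description", "is_public", "lti_id", "playlist", "title", "upload_state"]


def clean_payload(payload: dict) -> dict:
    # Keys missing from the whitelist are discarded without any error.
    return {key: value for key, value in payload.items() if key in FIELDS}


print(clean_payload({"title": "Some video", "license": "CC_BY-ND"}))
# {'title': 'Some video'} -- the licence vanishes until "license" is listed
```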
| [
{
"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n",
"path": "src/backend/marsha/core/forms.py"
}
] | [
{
"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n \"license\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n",
"path": "src/backend/marsha/core/forms.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6c580de24..3705a01655 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,10 @@ Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
+### Fixed
+
+- Save license when a video is created
+
## [4.4.0] - 2023-09-08
### Added
diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py
index 8be51131dd..f1ab78baef 100644
--- a/src/backend/marsha/core/forms.py
+++ b/src/backend/marsha/core/forms.py
@@ -35,6 +35,7 @@ class Meta:
"playlist",
"title",
"upload_state",
+ "license",
]
def clean_upload_state(self):
diff --git a/src/backend/marsha/core/tests/api/video/test_create.py b/src/backend/marsha/core/tests/api/video/test_create.py
index b8191bbbab..748b1879bd 100644
--- a/src/backend/marsha/core/tests/api/video/test_create.py
+++ b/src/backend/marsha/core/tests/api/video/test_create.py
@@ -78,6 +78,7 @@ def test_api_video_create_by_playlist_admin(self):
"lti_id": "video_one",
"playlist": str(playlist.id),
"title": "Some video",
+ "license": "CC_BY-ND",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
@@ -127,7 +128,7 @@ def test_api_video_create_by_playlist_admin(self):
"urls": None,
"xmpp": None,
"tags": [],
- "license": None,
+ "license": "CC_BY-ND",
},
)
@@ -174,6 +175,7 @@ def test_api_video_create_by_playlist_instructor(self):
"lti_id": "video_one",
"playlist": str(playlist.id),
"title": "Some video",
+ "license": "CC_BY-ND",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
@@ -223,7 +225,7 @@ def test_api_video_create_by_playlist_instructor(self):
"urls": None,
"xmpp": None,
"tags": [],
- "license": None,
+ "license": "CC_BY-ND",
},
)
|
dbt-labs__dbt-core-2537 | Python 3.6.2 doesn't work with dbt 0.17.0
### Describe the bug
Running dbt on Python <= 3.6.2 results in the error `name 'TimestampSnapshotConfig' is not defined`; Python 3.6.3 is unaffected.
### Steps To Reproduce
Install python 3.6.2
Install dbt
Try to use dbt
### Expected behavior
dbt should run, not crash, etc.
### System information
**Which database are you using dbt with?**
Any
**The output of `dbt --version`:**
```
0.17.0
```
**The operating system you're using:**
macos, linux
**The output of `python --version`:**
`Python 3.6.2`
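The shipped fix (see the attached diff) simply raises the floor to `python_requires=">=3.6.3"` so pip refuses to install dbt on the broken point releases. A hedged sketch of an equivalent runtime guard, for anyone who cannot rely on pip's check:

```python
import sys

# Fail fast with an actionable message instead of a confusing NameError.
if sys.version_info < (3, 6, 3):
    sys.exit("dbt requires Python 3.6.3 or newer; found %s" % sys.version.split()[0])
```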
| [
{
"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.17.1a1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.5',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<0.17',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.7',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.23.0',\n 'idna<2.9',\n 'cffi>=1.9,<1.14',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.2\",\n)\n",
"path": "core/setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.17.1a1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.5',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<0.17',\n 'dataclasses==0.6;python_version<\"3.7\"',\n 'hologram==0.0.7',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.23.0',\n 'idna<2.9',\n 'cffi>=1.9,<1.14',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n python_requires=\">=3.6.3\",\n)\n",
"path": "core/setup.py"
}
] | diff --git a/core/setup.py b/core/setup.py
index b63daad5d0a..82fc31ae8ba 100644
--- a/core/setup.py
+++ b/core/setup.py
@@ -86,5 +86,5 @@ def read(fname):
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
- python_requires=">=3.6.2",
+ python_requires=">=3.6.3",
)
|
cowrie__cowrie-1063 | Can't enter using "oracle" or "tomcat" users
Hello.
I'm using the stock userdb.txt file, where the "oracle" and "tomcat" users are defined with any password. When using these users, the ssh client gives me a "Permission denied (publickey,password)" error after entering any password three times.
The ugly thing is that entries of type "cowrie.login.success" appear in the cowrie.json file with the message "login attempt [oracle/password] succeeded", which is, obviously, incorrect.
Thanks a lot
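Judging from the fix in the attached diff, the failure comes from `setpwentry()` in `src/cowrie/shell/pwd.py`, which seeds its RNG with `crc32(name)` for users absent from the fake `/etc/passwd` (such as `oracle` and `tomcat`); on Python 3, `crc32` only accepts bytes, so authentication is logged as successful but the session setup then crashes. A minimal reproduction sketch:

```python
from binascii import crc32

try:
    crc32("oracle")  # Python 2 accepted str; Python 3 does not
except TypeError as exc:
    print("unpatched call fails:", exc)

print("patched call:", crc32("oracle".encode("utf-8")))  # mirrors the fix
```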
| [
{
"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import absolute_import, division\n\nfrom binascii import crc32\nfrom random import randint, seed\n\nfrom cowrie.core.config import CONFIG\n\n\nclass Passwd(object):\n \"\"\"\n This class contains code to handle the users and their properties in\n /etc/passwd. Note that contrary to the name, it does not handle any\n passwords.\n \"\"\"\n\n def __init__(self):\n self.passwd_file = '%s/etc/passwd' % (CONFIG.get('honeypot', 'contents_path'),)\n self.load()\n\n def load(self):\n \"\"\"\n Load /etc/passwd\n \"\"\"\n self.passwd = []\n with open(self.passwd_file, 'r') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith('#'):\n continue\n\n (pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir,\n pw_shell) = line.split(':')\n\n e = {}\n e[\"pw_name\"] = pw_name\n e[\"pw_passwd\"] = pw_passwd\n e[\"pw_gecos\"] = pw_gecos\n e[\"pw_dir\"] = pw_dir\n e[\"pw_shell\"] = pw_shell\n try:\n e[\"pw_uid\"] = int(pw_uid)\n except ValueError:\n e[\"pw_uid\"] = 1001\n try:\n e[\"pw_gid\"] = int(pw_gid)\n except ValueError:\n e[\"pw_gid\"] = 1001\n\n self.passwd.append(e)\n\n def save(self):\n \"\"\"\n Save the user db\n Note: this is subject to races between cowrie instances, but hey ...\n \"\"\"\n # with open(self.passwd_file, 'w') as f:\n # for (login, uid, passwd) in self.userdb:\n # f.write('%s:%d:%s\\n' % (login, uid, passwd))\n raise NotImplementedError\n\n def getpwnam(self, name):\n \"\"\"\n Get passwd entry for username\n \"\"\"\n for _ in self.passwd:\n if name == _[\"pw_name\"]:\n return _\n raise KeyError(\"getpwnam(): name not found in passwd file: \" + name)\n\n def getpwuid(self, uid):\n \"\"\"\n Get passwd entry for uid\n \"\"\"\n for _ in self.passwd:\n if uid == _[\"pw_uid\"]:\n return _\n raise KeyError(\"getpwuid(): uid not found in passwd file: \" + str(uid))\n\n def setpwentry(self, name):\n \"\"\"\n If the user is not in /etc/passwd, creates a new user entry for the session\n \"\"\"\n\n # ensure consistent 
uid and gid\n seed_id = crc32(name)\n seed(seed_id)\n\n e = {}\n e[\"pw_name\"] = name\n e[\"pw_passwd\"] = \"x\"\n e[\"pw_gecos\"] = 0\n e[\"pw_dir\"] = \"/home/\" + name\n e[\"pw_shell\"] = \"/bin/bash\"\n e[\"pw_uid\"] = randint(1500, 10000)\n e[\"pw_gid\"] = e[\"pw_uid\"]\n self.passwd.append(e)\n return e\n\n\nclass Group(object):\n \"\"\"\n This class contains code to handle the groups and their properties in\n /etc/group.\n \"\"\"\n\n def __init__(self):\n self.group_file = '%s/etc/group' % (CONFIG.get('honeypot', 'contents_path'),)\n self.load()\n\n def load(self):\n \"\"\"\n Load /etc/group\n \"\"\"\n self.group = []\n with open(self.group_file, 'r') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith('#'):\n continue\n\n (gr_name, gr_passwd, gr_gid, gr_mem) = line.split(':')\n\n e = {}\n e[\"gr_name\"] = gr_name\n try:\n e[\"gr_gid\"] = int(gr_gid)\n except ValueError:\n e[\"gr_gid\"] = 1001\n e[\"gr_mem\"] = gr_mem\n\n self.group.append(e)\n\n def save(self):\n \"\"\"\n Save the group db\n Note: this is subject to races between cowrie instances, but hey ...\n \"\"\"\n # with open(self.group_file, 'w') as f:\n # for (login, uid, passwd) in self.userdb:\n # f.write('%s:%d:%s\\n' % (login, uid, passwd))\n raise NotImplementedError\n\n def getgrnam(self, name):\n \"\"\"\n Get group entry for groupname\n \"\"\"\n for _ in self.group:\n if name == _[\"gr_name\"]:\n return _\n raise KeyError(\"getgrnam(): name not found in group file: \" + name)\n\n def getgrgid(self, uid):\n \"\"\"\n Get group entry for gid\n \"\"\"\n for _ in self.group:\n if uid == _[\"gr_gid\"]:\n return _\n raise KeyError(\"getgruid(): uid not found in group file: \" + str(uid))\n",
"path": "src/cowrie/shell/pwd.py"
}
] | [
{
"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import absolute_import, division\n\nfrom binascii import crc32\nfrom random import randint, seed\n\nfrom cowrie.core.config import CONFIG\n\n\nclass Passwd(object):\n \"\"\"\n This class contains code to handle the users and their properties in\n /etc/passwd. Note that contrary to the name, it does not handle any\n passwords.\n \"\"\"\n\n def __init__(self):\n self.passwd_file = '%s/etc/passwd' % (CONFIG.get('honeypot', 'contents_path'),)\n self.load()\n\n def load(self):\n \"\"\"\n Load /etc/passwd\n \"\"\"\n self.passwd = []\n with open(self.passwd_file, 'r') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith('#'):\n continue\n\n (pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir,\n pw_shell) = line.split(':')\n\n e = {}\n e[\"pw_name\"] = pw_name\n e[\"pw_passwd\"] = pw_passwd\n e[\"pw_gecos\"] = pw_gecos\n e[\"pw_dir\"] = pw_dir\n e[\"pw_shell\"] = pw_shell\n try:\n e[\"pw_uid\"] = int(pw_uid)\n except ValueError:\n e[\"pw_uid\"] = 1001\n try:\n e[\"pw_gid\"] = int(pw_gid)\n except ValueError:\n e[\"pw_gid\"] = 1001\n\n self.passwd.append(e)\n\n def save(self):\n \"\"\"\n Save the user db\n Note: this is subject to races between cowrie instances, but hey ...\n \"\"\"\n # with open(self.passwd_file, 'w') as f:\n # for (login, uid, passwd) in self.userdb:\n # f.write('%s:%d:%s\\n' % (login, uid, passwd))\n raise NotImplementedError\n\n def getpwnam(self, name):\n \"\"\"\n Get passwd entry for username\n \"\"\"\n for _ in self.passwd:\n if name == _[\"pw_name\"]:\n return _\n raise KeyError(\"getpwnam(): name not found in passwd file: \" + name)\n\n def getpwuid(self, uid):\n \"\"\"\n Get passwd entry for uid\n \"\"\"\n for _ in self.passwd:\n if uid == _[\"pw_uid\"]:\n return _\n raise KeyError(\"getpwuid(): uid not found in passwd file: \" + str(uid))\n\n def setpwentry(self, name):\n \"\"\"\n If the user is not in /etc/passwd, creates a new user entry for the session\n \"\"\"\n\n # ensure consistent 
uid and gid\n seed_id = crc32(name.encode(\"utf-8\"))\n seed(seed_id)\n\n e = {}\n e[\"pw_name\"] = name\n e[\"pw_passwd\"] = \"x\"\n e[\"pw_gecos\"] = 0\n e[\"pw_dir\"] = \"/home/\" + name\n e[\"pw_shell\"] = \"/bin/bash\"\n e[\"pw_uid\"] = randint(1500, 10000)\n e[\"pw_gid\"] = e[\"pw_uid\"]\n self.passwd.append(e)\n return e\n\n\nclass Group(object):\n \"\"\"\n This class contains code to handle the groups and their properties in\n /etc/group.\n \"\"\"\n\n def __init__(self):\n self.group_file = '%s/etc/group' % (CONFIG.get('honeypot', 'contents_path'),)\n self.load()\n\n def load(self):\n \"\"\"\n Load /etc/group\n \"\"\"\n self.group = []\n with open(self.group_file, 'r') as f:\n while True:\n rawline = f.readline()\n if not rawline:\n break\n\n line = rawline.strip()\n if not line:\n continue\n\n if line.startswith('#'):\n continue\n\n (gr_name, gr_passwd, gr_gid, gr_mem) = line.split(':')\n\n e = {}\n e[\"gr_name\"] = gr_name\n try:\n e[\"gr_gid\"] = int(gr_gid)\n except ValueError:\n e[\"gr_gid\"] = 1001\n e[\"gr_mem\"] = gr_mem\n\n self.group.append(e)\n\n def save(self):\n \"\"\"\n Save the group db\n Note: this is subject to races between cowrie instances, but hey ...\n \"\"\"\n # with open(self.group_file, 'w') as f:\n # for (login, uid, passwd) in self.userdb:\n # f.write('%s:%d:%s\\n' % (login, uid, passwd))\n raise NotImplementedError\n\n def getgrnam(self, name):\n \"\"\"\n Get group entry for groupname\n \"\"\"\n for _ in self.group:\n if name == _[\"gr_name\"]:\n return _\n raise KeyError(\"getgrnam(): name not found in group file: \" + name)\n\n def getgrgid(self, uid):\n \"\"\"\n Get group entry for gid\n \"\"\"\n for _ in self.group:\n if uid == _[\"gr_gid\"]:\n return _\n raise KeyError(\"getgruid(): uid not found in group file: \" + str(uid))\n",
"path": "src/cowrie/shell/pwd.py"
}
] | diff --git a/src/cowrie/shell/pwd.py b/src/cowrie/shell/pwd.py
index 394a95899c..c3a3842a74 100644
--- a/src/cowrie/shell/pwd.py
+++ b/src/cowrie/shell/pwd.py
@@ -117,7 +117,7 @@ def setpwentry(self, name):
"""
# ensure consistent uid and gid
- seed_id = crc32(name)
+ seed_id = crc32(name.encode("utf-8"))
seed(seed_id)
e = {}
|
strawberry-graphql__strawberry-2528 | Additional type hints for strawberry.field - so that it can be used with async resolvers
If I use strawberry.field for async resolvers as explained in the documentation, I get the following warning from Pylance.
```python
import asyncio
import strawberry


async def resolve_hello(root) -> str:
    await asyncio.sleep(1)
    return "Hello world"


@strawberry.type
class Query:
    hello: str = strawberry.field(resolver=resolve_hello)


schema = strawberry.Schema(Query)
```
```
(module) strawberry
Expression of type "StrawberryField" cannot be assigned to declared type "str"
"StrawberryField" is incompatible with "str"
Pylance[reportGeneralTypeIssues]
```
It would be nice if you could enhance the overloaded `field` definitions in field.py for use with coroutines.
Used Version: 0.155.2
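For reference, the change that resolves this (mirrored in the attached diff) widens the union of accepted resolver types with an `Awaitable`-returning callable, so coroutine functions type-check. A minimal sketch of that typing shape:

```python
from typing import Awaitable, Callable, TypeVar, Union

T = TypeVar("T")

# Sync resolvers are plain callables; async resolvers are callables whose
# declared return type is Awaitable[T] (what `async def ... -> str` implies).
ResolverType = Union[
    Callable[..., T],
    Callable[..., Awaitable[T]],
]
```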
| [
{
"content": "import builtins\nimport dataclasses\nimport inspect\nimport sys\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Awaitable,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n overload,\n)\nfrom typing_extensions import Literal\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.arguments import StrawberryArgument\nfrom strawberry.exceptions import InvalidArgumentTypeError, InvalidDefaultFactoryError\nfrom strawberry.type import StrawberryType\nfrom strawberry.types.info import Info\nfrom strawberry.union import StrawberryUnion\nfrom strawberry.utils.cached_property import cached_property\n\nfrom .permission import BasePermission\nfrom .types.fields.resolver import StrawberryResolver\n\nif TYPE_CHECKING:\n from .object_type import TypeDefinition\n\nT = TypeVar(\"T\")\n\n\n_RESOLVER_TYPE = Union[\n StrawberryResolver[T],\n Callable[..., T],\n \"staticmethod[T]\",\n \"classmethod[T]\",\n]\n\n\nUNRESOLVED = object()\n\n\ndef _is_generic(resolver_type: Union[StrawberryType, type]) -> bool:\n \"\"\"Returns True if `resolver_type` is generic else False\"\"\"\n if isinstance(resolver_type, StrawberryType):\n return resolver_type.is_generic\n\n # solves the Generic subclass case\n if hasattr(resolver_type, \"_type_definition\"):\n return resolver_type._type_definition.is_generic\n\n return False\n\n\nclass StrawberryField(dataclasses.Field):\n type_annotation: Optional[StrawberryAnnotation]\n default_resolver: Callable[[Any, str], object] = getattr\n\n def __init__(\n self,\n python_name: Optional[str] = None,\n graphql_name: Optional[str] = None,\n type_annotation: Optional[StrawberryAnnotation] = None,\n origin: Optional[Union[Type, Callable, staticmethod, classmethod]] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n base_resolver: Optional[StrawberryResolver] = None,\n permission_classes: List[Type[BasePermission]] = (), # type: ignore\n default: object = dataclasses.MISSING,\n default_factory: Union[Callable[[], Any], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n deprecation_reason: Optional[str] = None,\n directives: Sequence[object] = (),\n ):\n # basic fields are fields with no provided resolver\n is_basic_field = not base_resolver\n\n kwargs: Dict[str, Any] = {}\n\n # kw_only was added to python 3.10 and it is required\n if sys.version_info >= (3, 10):\n kwargs[\"kw_only\"] = dataclasses.MISSING\n\n super().__init__(\n default=default,\n default_factory=default_factory, # type: ignore\n init=is_basic_field,\n repr=is_basic_field,\n compare=is_basic_field,\n hash=None,\n metadata=metadata or {},\n **kwargs,\n )\n\n self.graphql_name = graphql_name\n if python_name is not None:\n self.python_name = python_name\n\n self.type_annotation = type_annotation\n\n self.description: Optional[str] = description\n self.origin = origin\n\n self._base_resolver: Optional[StrawberryResolver] = None\n if base_resolver is not None:\n self.base_resolver = base_resolver\n\n # Note: StrawberryField.default is the same as\n # StrawberryField.default_value except that `.default` uses\n # `dataclasses.MISSING` to represent an \"undefined\" value and\n # `.default_value` uses `UNSET`\n self.default_value = default\n if callable(default_factory):\n try:\n self.default_value = default_factory()\n except TypeError as exc:\n raise InvalidDefaultFactoryError() from exc\n\n self.is_subscription = is_subscription\n\n self.permission_classes: List[Type[BasePermission]] = 
list(permission_classes)\n self.directives = directives\n\n self.deprecation_reason = deprecation_reason\n\n def __call__(self, resolver: _RESOLVER_TYPE) -> \"StrawberryField\":\n \"\"\"Add a resolver to the field\"\"\"\n\n # Allow for StrawberryResolvers or bare functions to be provided\n if not isinstance(resolver, StrawberryResolver):\n resolver = StrawberryResolver(resolver)\n\n for argument in resolver.arguments:\n if isinstance(argument.type_annotation.annotation, str):\n continue\n elif isinstance(argument.type, StrawberryUnion):\n raise InvalidArgumentTypeError(\n resolver,\n argument,\n )\n elif getattr(argument.type, \"_type_definition\", False):\n if argument.type._type_definition.is_interface: # type: ignore\n raise InvalidArgumentTypeError(\n resolver,\n argument,\n )\n\n self.base_resolver = resolver\n\n return self\n\n def get_result(\n self, source: Any, info: Optional[Info], args: List[Any], kwargs: Dict[str, Any]\n ) -> Union[Awaitable[Any], Any]:\n \"\"\"\n Calls the resolver defined for the StrawberryField.\n If the field doesn't have a resolver defined we default\n to using the default resolver specified in StrawberryConfig.\n \"\"\"\n\n if self.base_resolver:\n return self.base_resolver(*args, **kwargs)\n\n return self.default_resolver(source, self.python_name)\n\n @property\n def is_basic_field(self) -> bool:\n \"\"\"\n Flag indicating if this is a \"basic\" field that has no resolver or\n permission classes, i.e. it just returns the relevant attribute from\n the source object. If it is a basic field we can avoid constructing\n an `Info` object and running any permission checks in the resolver\n which improves performance.\n \"\"\"\n return not self.base_resolver and not self.permission_classes\n\n @property\n def arguments(self) -> List[StrawberryArgument]:\n if not self.base_resolver:\n return []\n\n return self.base_resolver.arguments\n\n def _python_name(self) -> Optional[str]:\n if self.name:\n return self.name\n\n if self.base_resolver:\n return self.base_resolver.name\n\n return None\n\n def _set_python_name(self, name: str) -> None:\n self.name = name\n\n python_name: str = property(_python_name, _set_python_name) # type: ignore[assignment] # noqa: E501\n\n @property\n def base_resolver(self) -> Optional[StrawberryResolver]:\n return self._base_resolver\n\n @base_resolver.setter\n def base_resolver(self, resolver: StrawberryResolver) -> None:\n self._base_resolver = resolver\n\n # Don't add field to __init__, __repr__ and __eq__ once it has a resolver\n self.init = False\n self.compare = False\n self.repr = False\n\n # TODO: See test_resolvers.test_raises_error_when_argument_annotation_missing\n # (https://github.com/strawberry-graphql/strawberry/blob/8e102d3/tests/types/test_resolvers.py#L89-L98)\n #\n # Currently we expect the exception to be thrown when the StrawberryField\n # is constructed, but this only happens if we explicitly retrieve the\n # arguments.\n #\n # If we want to change when the exception is thrown, this line can be\n # removed.\n _ = resolver.arguments\n\n @property # type: ignore\n def type(self) -> Union[StrawberryType, type, Literal[UNRESOLVED]]: # type: ignore\n # We are catching NameError because dataclasses tries to fetch the type\n # of the field from the class before the class is fully defined.\n # This triggers a NameError error when using forward references because\n # our `type` property tries to find the field type from the global namespace\n # but it is not yet defined.\n try:\n # Prioritise the field type over the resolver 
return type\n if self.type_annotation is not None:\n return self.type_annotation.resolve()\n\n if self.base_resolver is not None:\n # Handle unannotated functions (such as lambdas)\n if self.base_resolver.type is not None:\n\n # Generics will raise MissingTypesForGenericError later\n # on if we let it be returned. So use `type_annotation` instead\n # which is the same behaviour as having no type information.\n if not _is_generic(self.base_resolver.type):\n return self.base_resolver.type\n\n # If we get this far it means that we don't have a field type and\n # the resolver doesn't have a return type so all we can do is return\n # UNRESOLVED here.\n # This case will raise a MissingReturnAnnotationError exception in the\n # _check_field_annotations function:\n # https://github.com/strawberry-graphql/strawberry/blob/846f060a63cb568b3cdc0deb26c308a8d0718190/strawberry/object_type.py#L76-L80\n return UNRESOLVED\n\n except NameError:\n return UNRESOLVED\n\n @type.setter\n def type(self, type_: Any) -> None:\n # Note: we aren't setting a namespace here for the annotation. That\n # happens in the `_get_fields` function in `types/type_resolver` so\n # that we have access to the correct namespace for the object type\n # the field is attached to.\n self.type_annotation = StrawberryAnnotation.from_annotation(\n type_, namespace=None\n )\n\n # TODO: add this to arguments (and/or move it to StrawberryType)\n @property\n def type_params(self) -> List[TypeVar]:\n if hasattr(self.type, \"_type_definition\"):\n parameters = getattr(self.type, \"__parameters__\", None)\n\n return list(parameters) if parameters else []\n\n # TODO: Consider making leaf types always StrawberryTypes, maybe a\n # StrawberryBaseType or something\n if isinstance(self.type, StrawberryType):\n return self.type.type_params\n return []\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]\n ) -> \"StrawberryField\":\n new_type: Union[StrawberryType, type] = self.type\n\n # TODO: Remove with creation of StrawberryObject. 
Will act same as other\n # StrawberryTypes\n if hasattr(self.type, \"_type_definition\"):\n type_definition: TypeDefinition = self.type._type_definition\n\n if type_definition.is_generic:\n type_ = type_definition\n new_type = type_.copy_with(type_var_map)\n elif isinstance(self.type, StrawberryType):\n new_type = self.type.copy_with(type_var_map)\n\n new_resolver = (\n self.base_resolver.copy_with(type_var_map)\n if self.base_resolver is not None\n else None\n )\n\n return StrawberryField(\n python_name=self.python_name,\n graphql_name=self.graphql_name,\n # TODO: do we need to wrap this in `StrawberryAnnotation`?\n # see comment related to dataclasses above\n type_annotation=StrawberryAnnotation(new_type),\n origin=self.origin,\n is_subscription=self.is_subscription,\n description=self.description,\n base_resolver=new_resolver,\n permission_classes=self.permission_classes,\n default=self.default_value,\n # ignored because of https://github.com/python/mypy/issues/6910\n default_factory=self.default_factory,\n deprecation_reason=self.deprecation_reason,\n )\n\n @property\n def _has_async_permission_classes(self) -> bool:\n for permission_class in self.permission_classes:\n if inspect.iscoroutinefunction(permission_class.has_permission):\n return True\n return False\n\n @property\n def _has_async_base_resolver(self) -> bool:\n return self.base_resolver is not None and self.base_resolver.is_async\n\n @cached_property\n def is_async(self) -> bool:\n return self._has_async_permission_classes or self._has_async_base_resolver\n\n\n@overload\ndef field(\n *,\n resolver: _RESOLVER_TYPE[T],\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n init: Literal[False] = False,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> T:\n ...\n\n\n@overload\ndef field(\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n init: Literal[True] = True,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> Any:\n ...\n\n\n@overload\ndef field(\n resolver: _RESOLVER_TYPE[T],\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> StrawberryField:\n ...\n\n\ndef field(\n resolver: Optional[_RESOLVER_TYPE[Any]] = None,\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: 
Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n # This init parameter is used by PyRight to determine whether this field\n # is added in the constructor or not. It is not used to change\n # any behavior at the moment.\n init: Literal[True, False, None] = None,\n) -> Any:\n \"\"\"Annotates a method or property as a GraphQL field.\n\n This is normally used inside a type declaration:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = strawberry.field(description=\"ABC\")\n\n >>> @strawberry.field(description=\"ABC\")\n >>> def field_with_resolver(self) -> str:\n >>> return \"abc\"\n\n it can be used both as decorator and as a normal function.\n \"\"\"\n\n type_annotation = StrawberryAnnotation.from_annotation(graphql_type)\n\n field_ = StrawberryField(\n python_name=None,\n graphql_name=name,\n type_annotation=type_annotation,\n description=description,\n is_subscription=is_subscription,\n permission_classes=permission_classes or [],\n deprecation_reason=deprecation_reason,\n default=default,\n default_factory=default_factory,\n metadata=metadata,\n directives=directives or (),\n )\n\n if resolver:\n assert init is not True, \"Can't set init as True when passing a resolver.\"\n return field_(resolver)\n return field_\n\n\n__all__ = [\"StrawberryField\", \"field\"]\n",
"path": "strawberry/field.py"
}
] | [
{
"content": "import builtins\nimport dataclasses\nimport inspect\nimport sys\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Awaitable,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n overload,\n)\nfrom typing_extensions import Literal\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.arguments import StrawberryArgument\nfrom strawberry.exceptions import InvalidArgumentTypeError, InvalidDefaultFactoryError\nfrom strawberry.type import StrawberryType\nfrom strawberry.types.info import Info\nfrom strawberry.union import StrawberryUnion\nfrom strawberry.utils.cached_property import cached_property\n\nfrom .permission import BasePermission\nfrom .types.fields.resolver import StrawberryResolver\n\nif TYPE_CHECKING:\n from .object_type import TypeDefinition\n\nT = TypeVar(\"T\")\n\n\n_RESOLVER_TYPE = Union[\n StrawberryResolver[T],\n Callable[..., T],\n Callable[..., Awaitable[T]],\n \"staticmethod[T]\",\n \"classmethod[T]\",\n]\n\n\nUNRESOLVED = object()\n\n\ndef _is_generic(resolver_type: Union[StrawberryType, type]) -> bool:\n \"\"\"Returns True if `resolver_type` is generic else False\"\"\"\n if isinstance(resolver_type, StrawberryType):\n return resolver_type.is_generic\n\n # solves the Generic subclass case\n if hasattr(resolver_type, \"_type_definition\"):\n return resolver_type._type_definition.is_generic\n\n return False\n\n\nclass StrawberryField(dataclasses.Field):\n type_annotation: Optional[StrawberryAnnotation]\n default_resolver: Callable[[Any, str], object] = getattr\n\n def __init__(\n self,\n python_name: Optional[str] = None,\n graphql_name: Optional[str] = None,\n type_annotation: Optional[StrawberryAnnotation] = None,\n origin: Optional[Union[Type, Callable, staticmethod, classmethod]] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n base_resolver: Optional[StrawberryResolver] = None,\n permission_classes: List[Type[BasePermission]] = (), # type: ignore\n default: object = dataclasses.MISSING,\n default_factory: Union[Callable[[], Any], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n deprecation_reason: Optional[str] = None,\n directives: Sequence[object] = (),\n ):\n # basic fields are fields with no provided resolver\n is_basic_field = not base_resolver\n\n kwargs: Dict[str, Any] = {}\n\n # kw_only was added to python 3.10 and it is required\n if sys.version_info >= (3, 10):\n kwargs[\"kw_only\"] = dataclasses.MISSING\n\n super().__init__(\n default=default,\n default_factory=default_factory, # type: ignore\n init=is_basic_field,\n repr=is_basic_field,\n compare=is_basic_field,\n hash=None,\n metadata=metadata or {},\n **kwargs,\n )\n\n self.graphql_name = graphql_name\n if python_name is not None:\n self.python_name = python_name\n\n self.type_annotation = type_annotation\n\n self.description: Optional[str] = description\n self.origin = origin\n\n self._base_resolver: Optional[StrawberryResolver] = None\n if base_resolver is not None:\n self.base_resolver = base_resolver\n\n # Note: StrawberryField.default is the same as\n # StrawberryField.default_value except that `.default` uses\n # `dataclasses.MISSING` to represent an \"undefined\" value and\n # `.default_value` uses `UNSET`\n self.default_value = default\n if callable(default_factory):\n try:\n self.default_value = default_factory()\n except TypeError as exc:\n raise InvalidDefaultFactoryError() from exc\n\n self.is_subscription = is_subscription\n\n self.permission_classes: 
List[Type[BasePermission]] = list(permission_classes)\n self.directives = directives\n\n self.deprecation_reason = deprecation_reason\n\n def __call__(self, resolver: _RESOLVER_TYPE) -> \"StrawberryField\":\n \"\"\"Add a resolver to the field\"\"\"\n\n # Allow for StrawberryResolvers or bare functions to be provided\n if not isinstance(resolver, StrawberryResolver):\n resolver = StrawberryResolver(resolver)\n\n for argument in resolver.arguments:\n if isinstance(argument.type_annotation.annotation, str):\n continue\n elif isinstance(argument.type, StrawberryUnion):\n raise InvalidArgumentTypeError(\n resolver,\n argument,\n )\n elif getattr(argument.type, \"_type_definition\", False):\n if argument.type._type_definition.is_interface: # type: ignore\n raise InvalidArgumentTypeError(\n resolver,\n argument,\n )\n\n self.base_resolver = resolver\n\n return self\n\n def get_result(\n self, source: Any, info: Optional[Info], args: List[Any], kwargs: Dict[str, Any]\n ) -> Union[Awaitable[Any], Any]:\n \"\"\"\n Calls the resolver defined for the StrawberryField.\n If the field doesn't have a resolver defined we default\n to using the default resolver specified in StrawberryConfig.\n \"\"\"\n\n if self.base_resolver:\n return self.base_resolver(*args, **kwargs)\n\n return self.default_resolver(source, self.python_name)\n\n @property\n def is_basic_field(self) -> bool:\n \"\"\"\n Flag indicating if this is a \"basic\" field that has no resolver or\n permission classes, i.e. it just returns the relevant attribute from\n the source object. If it is a basic field we can avoid constructing\n an `Info` object and running any permission checks in the resolver\n which improves performance.\n \"\"\"\n return not self.base_resolver and not self.permission_classes\n\n @property\n def arguments(self) -> List[StrawberryArgument]:\n if not self.base_resolver:\n return []\n\n return self.base_resolver.arguments\n\n def _python_name(self) -> Optional[str]:\n if self.name:\n return self.name\n\n if self.base_resolver:\n return self.base_resolver.name\n\n return None\n\n def _set_python_name(self, name: str) -> None:\n self.name = name\n\n python_name: str = property(_python_name, _set_python_name) # type: ignore[assignment] # noqa: E501\n\n @property\n def base_resolver(self) -> Optional[StrawberryResolver]:\n return self._base_resolver\n\n @base_resolver.setter\n def base_resolver(self, resolver: StrawberryResolver) -> None:\n self._base_resolver = resolver\n\n # Don't add field to __init__, __repr__ and __eq__ once it has a resolver\n self.init = False\n self.compare = False\n self.repr = False\n\n # TODO: See test_resolvers.test_raises_error_when_argument_annotation_missing\n # (https://github.com/strawberry-graphql/strawberry/blob/8e102d3/tests/types/test_resolvers.py#L89-L98)\n #\n # Currently we expect the exception to be thrown when the StrawberryField\n # is constructed, but this only happens if we explicitly retrieve the\n # arguments.\n #\n # If we want to change when the exception is thrown, this line can be\n # removed.\n _ = resolver.arguments\n\n @property # type: ignore\n def type(self) -> Union[StrawberryType, type, Literal[UNRESOLVED]]: # type: ignore\n # We are catching NameError because dataclasses tries to fetch the type\n # of the field from the class before the class is fully defined.\n # This triggers a NameError error when using forward references because\n # our `type` property tries to find the field type from the global namespace\n # but it is not yet defined.\n try:\n # Prioritise the 
field type over the resolver return type\n if self.type_annotation is not None:\n return self.type_annotation.resolve()\n\n if self.base_resolver is not None:\n # Handle unannotated functions (such as lambdas)\n if self.base_resolver.type is not None:\n\n # Generics will raise MissingTypesForGenericError later\n # on if we let it be returned. So use `type_annotation` instead\n # which is the same behaviour as having no type information.\n if not _is_generic(self.base_resolver.type):\n return self.base_resolver.type\n\n # If we get this far it means that we don't have a field type and\n # the resolver doesn't have a return type so all we can do is return\n # UNRESOLVED here.\n # This case will raise a MissingReturnAnnotationError exception in the\n # _check_field_annotations function:\n # https://github.com/strawberry-graphql/strawberry/blob/846f060a63cb568b3cdc0deb26c308a8d0718190/strawberry/object_type.py#L76-L80\n return UNRESOLVED\n\n except NameError:\n return UNRESOLVED\n\n @type.setter\n def type(self, type_: Any) -> None:\n # Note: we aren't setting a namespace here for the annotation. That\n # happens in the `_get_fields` function in `types/type_resolver` so\n # that we have access to the correct namespace for the object type\n # the field is attached to.\n self.type_annotation = StrawberryAnnotation.from_annotation(\n type_, namespace=None\n )\n\n # TODO: add this to arguments (and/or move it to StrawberryType)\n @property\n def type_params(self) -> List[TypeVar]:\n if hasattr(self.type, \"_type_definition\"):\n parameters = getattr(self.type, \"__parameters__\", None)\n\n return list(parameters) if parameters else []\n\n # TODO: Consider making leaf types always StrawberryTypes, maybe a\n # StrawberryBaseType or something\n if isinstance(self.type, StrawberryType):\n return self.type.type_params\n return []\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]\n ) -> \"StrawberryField\":\n new_type: Union[StrawberryType, type] = self.type\n\n # TODO: Remove with creation of StrawberryObject. 
Will act same as other\n # StrawberryTypes\n if hasattr(self.type, \"_type_definition\"):\n type_definition: TypeDefinition = self.type._type_definition\n\n if type_definition.is_generic:\n type_ = type_definition\n new_type = type_.copy_with(type_var_map)\n elif isinstance(self.type, StrawberryType):\n new_type = self.type.copy_with(type_var_map)\n\n new_resolver = (\n self.base_resolver.copy_with(type_var_map)\n if self.base_resolver is not None\n else None\n )\n\n return StrawberryField(\n python_name=self.python_name,\n graphql_name=self.graphql_name,\n # TODO: do we need to wrap this in `StrawberryAnnotation`?\n # see comment related to dataclasses above\n type_annotation=StrawberryAnnotation(new_type),\n origin=self.origin,\n is_subscription=self.is_subscription,\n description=self.description,\n base_resolver=new_resolver,\n permission_classes=self.permission_classes,\n default=self.default_value,\n # ignored because of https://github.com/python/mypy/issues/6910\n default_factory=self.default_factory,\n deprecation_reason=self.deprecation_reason,\n )\n\n @property\n def _has_async_permission_classes(self) -> bool:\n for permission_class in self.permission_classes:\n if inspect.iscoroutinefunction(permission_class.has_permission):\n return True\n return False\n\n @property\n def _has_async_base_resolver(self) -> bool:\n return self.base_resolver is not None and self.base_resolver.is_async\n\n @cached_property\n def is_async(self) -> bool:\n return self._has_async_permission_classes or self._has_async_base_resolver\n\n\n@overload\ndef field(\n *,\n resolver: _RESOLVER_TYPE[T],\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n init: Literal[False] = False,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> T:\n ...\n\n\n@overload\ndef field(\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n init: Literal[True] = True,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> Any:\n ...\n\n\n@overload\ndef field(\n resolver: _RESOLVER_TYPE[T],\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n) -> StrawberryField:\n ...\n\n\ndef field(\n resolver: Optional[_RESOLVER_TYPE[Any]] = None,\n *,\n name: Optional[str] = None,\n is_subscription: bool = False,\n description: Optional[str] = None,\n permission_classes: Optional[List[Type[BasePermission]]] = None,\n deprecation_reason: Optional[str] = None,\n default: Any = dataclasses.MISSING,\n default_factory: 
Union[Callable[..., object], object] = dataclasses.MISSING,\n metadata: Optional[Mapping[Any, Any]] = None,\n directives: Optional[Sequence[object]] = (),\n graphql_type: Optional[Any] = None,\n # This init parameter is used by PyRight to determine whether this field\n # is added in the constructor or not. It is not used to change\n # any behavior at the moment.\n init: Literal[True, False, None] = None,\n) -> Any:\n \"\"\"Annotates a method or property as a GraphQL field.\n\n This is normally used inside a type declaration:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = strawberry.field(description=\"ABC\")\n\n >>> @strawberry.field(description=\"ABC\")\n >>> def field_with_resolver(self) -> str:\n >>> return \"abc\"\n\n it can be used both as decorator and as a normal function.\n \"\"\"\n\n type_annotation = StrawberryAnnotation.from_annotation(graphql_type)\n\n field_ = StrawberryField(\n python_name=None,\n graphql_name=name,\n type_annotation=type_annotation,\n description=description,\n is_subscription=is_subscription,\n permission_classes=permission_classes or [],\n deprecation_reason=deprecation_reason,\n default=default,\n default_factory=default_factory,\n metadata=metadata,\n directives=directives or (),\n )\n\n if resolver:\n assert init is not True, \"Can't set init as True when passing a resolver.\"\n return field_(resolver)\n return field_\n\n\n__all__ = [\"StrawberryField\", \"field\"]\n",
"path": "strawberry/field.py"
}
] | diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000000..9a29a22c51
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,20 @@
+Release type: patch
+
+This release updates the typing for the resolver argument in
+`strawberry.field` to support async resolvers.
+This means that you will no longer get a type
+error from Pyright when using an async resolver, as in the following example:
+
+```python
+import strawberry
+
+
+async def get_user_age() -> int:
+ return 0
+
+
[email protected]
+class User:
+ name: str
+ age: int = strawberry.field(resolver=get_user_age)
+```
diff --git a/strawberry/field.py b/strawberry/field.py
index b6593c0f2a..a7ad30c355 100644
--- a/strawberry/field.py
+++ b/strawberry/field.py
@@ -39,6 +39,7 @@
_RESOLVER_TYPE = Union[
StrawberryResolver[T],
Callable[..., T],
+ Callable[..., Awaitable[T]],
"staticmethod[T]",
"classmethod[T]",
]
diff --git a/tests/pyright/test_fields_resolver_async.py b/tests/pyright/test_fields_resolver_async.py
new file mode 100644
index 0000000000..85da5db557
--- /dev/null
+++ b/tests/pyright/test_fields_resolver_async.py
@@ -0,0 +1,54 @@
+from .utils import Result, requires_pyright, run_pyright, skip_on_windows
+
+pytestmark = [skip_on_windows, requires_pyright]
+
+CODE = """
+import strawberry
+
+async def get_user_age() -> int:
+ return 0
+
+
[email protected]
+class User:
+ name: str
+ age: int = strawberry.field(resolver=get_user_age)
+
+
+User(name="Patrick")
+User(n="Patrick")
+
+reveal_type(User)
+reveal_type(User.__init__)
+"""
+
+
+def test_pyright():
+ results = run_pyright(CODE)
+
+ assert results == [
+ Result(
+ type="error",
+ message='No parameter named "n" (reportGeneralTypeIssues)',
+ line=15,
+ column=6,
+ ),
+ Result(
+ type="error",
+ message='Argument missing for parameter "name" (reportGeneralTypeIssues)',
+ line=15,
+ column=1,
+ ),
+ Result(
+ type="information",
+ message='Type of "User" is "Type[User]"',
+ line=17,
+ column=13,
+ ),
+ Result(
+ type="information",
+ message='Type of "User.__init__" is "(self: User, *, name: str) -> None"',
+ line=18,
+ column=13,
+ ),
+ ]
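
For context on the `field.py` hunk above: to a type checker, an `async def` resolver annotated `-> int` has the call signature `Callable[..., Coroutine[Any, Any, int]]`, which does not match `Callable[..., T]` when the expected field type is `int`; the added `Callable[..., Awaitable[T]]` union member is what lets Pyright solve `T = int` for async resolvers. Below is a self-contained sketch of the same idea — `ResolverType` and the `field` stub are illustrative stand-ins, not Strawberry's actual API:

```python
from typing import Awaitable, Callable, TypeVar, Union, cast

T = TypeVar("T")

# Mirrors the union in the field.py hunk above: accept sync and async resolvers.
ResolverType = Union[Callable[..., T], Callable[..., Awaitable[T]]]


def field(resolver: ResolverType[T]) -> T:
    # Illustrative stub only; a real implementation would store the resolver
    # and call (and await) it later when the GraphQL field is executed.
    return cast(T, None)


async def get_user_age() -> int:
    # Pyright sees this as Callable[[], Coroutine[Any, Any, int]], which is
    # assignable to Callable[..., Awaitable[T]] with T = int.
    return 0


def get_user_name() -> str:
    return "Patrick"


age: int = field(resolver=get_user_age)    # accepted via the Awaitable member
name: str = field(resolver=get_user_name)  # accepted via the plain Callable
```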
|
inventree__InvenTree-1525 | [BUG] no attribute MPN
# System
Version: 0.2.2 pre
Commit-Date: 2021-04-27
Commit-Hash: 4d2e23f
Plattform: Win H2 - WSL2 Debian Buster
Deployment Method: runserver
# Steps to reproduce
- open a supplier part
- add a price break (button)
- enter any data (does not matter; in my case: quantity 1, price 1)
-> results in an 'Internal Server Error' (error code 500)
# Message
AttributeError at /price-break/new/
'SupplierPart' object has no attribute 'MPN'
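
The traceback comes from `SupplierPriceBreak.__str__` in `InvenTree/company/models.py` (shown in full below): its `part` foreign key points at a `SupplierPart`, which defines a `SKU` field but no `MPN` — the MPN lives on the linked `ManufacturerPart` — so rendering any price break raises the error above. A runnable plain-Python repro of the pattern (Django stripped out; the field values here are made up):

```python
# Plain-Python stand-ins for the Django models shown below; names mirror
# InvenTree/company/models.py.
class SupplierPart:
    SKU = "ABC-123"  # SupplierPart defines SKU ...
    # ... but no MPN attribute -- the MPN lives on the linked ManufacturerPart.


class SupplierPriceBreak:
    def __init__(self, part, price, quantity):
        self.part, self.price, self.quantity = part, price, quantity

    def __str__(self):
        # Buggy version raised: AttributeError: 'SupplierPart' object has no
        # attribute 'MPN'
        #   return f'{self.part.MPN} - {self.price} @ {self.quantity}'
        # Fixed version (matches the pr_diff below): use the supplier's SKU.
        return f'{self.part.SKU} - {self.price} @ {self.quantity}'


print(SupplierPriceBreak(SupplierPart(), 1, 1))  # ABC-123 - 1 @ 1
```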
| [
{
"content": "\"\"\"\nCompany database model definitions\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os\n\nimport math\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import MinValueValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.utils import IntegrityError\nfrom django.db.models import Sum, Q, UniqueConstraint\n\nfrom django.apps import apps\nfrom django.urls import reverse\n\nfrom moneyed import CURRENCIES\n\nfrom markdownx.models import MarkdownxField\n\nfrom stdimage.models import StdImageField\n\nfrom InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail\nfrom InvenTree.helpers import normalize\nfrom InvenTree.fields import InvenTreeURLField\nfrom InvenTree.status_codes import PurchaseOrderStatus\n\nimport InvenTree.validators\n\nimport common.models\nimport common.settings\n\n\ndef rename_company_image(instance, filename):\n \"\"\" Function to rename a company image after upload\n\n Args:\n instance: Company object\n filename: uploaded image filename\n\n Returns:\n New image filename\n \"\"\"\n\n base = 'company_images'\n\n if filename.count('.') > 0:\n ext = filename.split('.')[-1]\n else:\n ext = ''\n\n fn = 'company_{pk}_img'.format(pk=instance.pk)\n\n if ext:\n fn += '.' + ext\n\n return os.path.join(base, fn)\n\n\nclass Company(models.Model):\n \"\"\" A Company object represents an external company.\n It may be a supplier or a customer or a manufacturer (or a combination)\n\n - A supplier is a company from which parts can be purchased\n - A customer is a company to which parts can be sold\n - A manufacturer is a company which manufactures a raw good (they may or may not be a \"supplier\" also)\n\n\n Attributes:\n name: Brief name of the company\n description: Longer form description\n website: URL for the company website\n address: Postal address\n phone: contact phone number\n email: contact email address\n link: Secondary URL e.g. 
for link to internal Wiki page\n image: Company image / logo\n notes: Extra notes about the company\n is_customer: boolean value, is this company a customer\n is_supplier: boolean value, is this company a supplier\n is_manufacturer: boolean value, is this company a manufacturer\n currency_code: Specifies the default currency for the company\n \"\"\"\n\n class Meta:\n ordering = ['name', ]\n constraints = [\n UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')\n ]\n\n name = models.CharField(max_length=100, blank=False,\n help_text=_('Company name'),\n verbose_name=_('Company name'))\n\n description = models.CharField(\n max_length=500,\n verbose_name=_('Company description'),\n help_text=_('Description of the company'),\n blank=True,\n )\n\n website = models.URLField(blank=True, verbose_name=_('Website'), help_text=_('Company website URL'))\n\n address = models.CharField(max_length=200,\n verbose_name=_('Address'),\n blank=True, help_text=_('Company address'))\n\n phone = models.CharField(max_length=50,\n verbose_name=_('Phone number'),\n blank=True, help_text=_('Contact phone number'))\n\n email = models.EmailField(blank=True, null=True,\n verbose_name=_('Email'), help_text=_('Contact email address'))\n\n contact = models.CharField(max_length=100,\n verbose_name=_('Contact'),\n blank=True, help_text=_('Point of contact'))\n\n link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))\n\n image = StdImageField(\n upload_to=rename_company_image,\n null=True,\n blank=True,\n variations={'thumbnail': (128, 128)},\n delete_orphans=True,\n verbose_name=_('Image'),\n )\n\n notes = MarkdownxField(blank=True, verbose_name=_('Notes'))\n\n is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))\n\n is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))\n\n is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))\n\n currency = models.CharField(\n max_length=3,\n verbose_name=_('Currency'),\n blank=True,\n help_text=_('Default currency used for this company'),\n validators=[InvenTree.validators.validate_currency_code],\n )\n\n @property\n def currency_code(self):\n \"\"\"\n Return the currency code associated with this company.\n \n - If the currency code is invalid, use the default currency\n - If the currency code is not specified, use the default currency\n \"\"\"\n\n code = self.currency\n\n if code not in CURRENCIES:\n code = common.settings.currency_code_default()\n\n return code\n\n def __str__(self):\n \"\"\" Get string representation of a Company \"\"\"\n return \"{n} - {d}\".format(n=self.name, d=self.description)\n\n def get_absolute_url(self):\n \"\"\" Get the web URL for the detail view for this Company \"\"\"\n return reverse('company-detail', kwargs={'pk': self.id})\n\n def get_image_url(self):\n \"\"\" Return the URL of the image for this company \"\"\"\n\n if self.image:\n return getMediaUrl(self.image.url)\n else:\n return getBlankImage()\n\n def get_thumbnail_url(self):\n \"\"\" Return the URL for the thumbnail image for this Company \"\"\"\n\n if self.image:\n return getMediaUrl(self.image.thumbnail.url)\n else:\n return getBlankThumbnail()\n \n @property\n def manufactured_part_count(self):\n \"\"\" The number of parts manufactured by this company \"\"\"\n return 
self.manufactured_parts.count()\n\n @property\n def has_manufactured_parts(self):\n return self.manufactured_part_count > 0\n\n @property\n def supplied_part_count(self):\n \"\"\" The number of parts supplied by this company \"\"\"\n return self.supplied_parts.count()\n\n @property\n def has_supplied_parts(self):\n \"\"\" Return True if this company supplies any parts \"\"\"\n return self.supplied_part_count > 0\n\n @property\n def parts(self):\n \"\"\" Return SupplierPart objects which are supplied or manufactured by this company \"\"\"\n return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))\n\n @property\n def part_count(self):\n \"\"\" The number of parts manufactured (or supplied) by this Company \"\"\"\n return self.parts.count()\n\n @property\n def has_parts(self):\n return self.part_count > 0\n\n @property\n def stock_items(self):\n \"\"\" Return a list of all stock items supplied or manufactured by this company \"\"\"\n stock = apps.get_model('stock', 'StockItem')\n return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()\n\n @property\n def stock_count(self):\n \"\"\" Return the number of stock items supplied or manufactured by this company \"\"\"\n return self.stock_items.count()\n\n def outstanding_purchase_orders(self):\n \"\"\" Return purchase orders which are 'outstanding' \"\"\"\n return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)\n\n def pending_purchase_orders(self):\n \"\"\" Return purchase orders which are PENDING (not yet issued) \"\"\"\n return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)\n\n def closed_purchase_orders(self):\n \"\"\" Return purchase orders which are not 'outstanding'\n\n - Complete\n - Failed / lost\n - Returned\n \"\"\"\n\n return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)\n\n def complete_purchase_orders(self):\n return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)\n\n def failed_purchase_orders(self):\n \"\"\" Return any purchase orders which were not successful \"\"\"\n\n return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)\n\n\nclass Contact(models.Model):\n \"\"\" A Contact represents a person who works at a particular company.\n A Company may have zero or more associated Contact objects.\n\n Attributes:\n company: Company link for this contact\n name: Name of the contact\n phone: contact phone number\n email: contact email\n role: position in company\n \"\"\"\n\n company = models.ForeignKey(Company, related_name='contacts',\n on_delete=models.CASCADE)\n\n name = models.CharField(max_length=100)\n\n phone = models.CharField(max_length=100, blank=True)\n\n email = models.EmailField(blank=True)\n\n role = models.CharField(max_length=100, blank=True)\n\n company = models.ForeignKey(Company, related_name='contacts',\n on_delete=models.CASCADE)\n\n\nclass ManufacturerPart(models.Model):\n \"\"\" Represents a unique part as provided by a Manufacturer\n Each ManufacturerPart is identified by a MPN (Manufacturer Part Number)\n Each ManufacturerPart is also linked to a Part object.\n A Part may be available from multiple manufacturers\n\n Attributes:\n part: Link to the master Part\n manufacturer: Company that manufactures the ManufacturerPart\n MPN: Manufacture part number\n link: Link to external website for this manufacturer part\n description: Descriptive notes field\n \"\"\"\n\n class Meta:\n unique_together = ('part', 
'manufacturer', 'MPN')\n \n part = models.ForeignKey('part.Part', on_delete=models.CASCADE,\n related_name='manufacturer_parts',\n verbose_name=_('Base Part'),\n limit_choices_to={\n 'purchaseable': True,\n },\n help_text=_('Select part'),\n )\n \n manufacturer = models.ForeignKey(\n Company,\n on_delete=models.CASCADE,\n null=True,\n related_name='manufactured_parts',\n limit_choices_to={\n 'is_manufacturer': True\n },\n verbose_name=_('Manufacturer'),\n help_text=_('Select manufacturer'),\n )\n\n MPN = models.CharField(\n null=True,\n max_length=100,\n verbose_name=_('MPN'),\n help_text=_('Manufacturer Part Number')\n )\n\n link = InvenTreeURLField(\n blank=True, null=True,\n verbose_name=_('Link'),\n help_text=_('URL for external manufacturer part link')\n )\n\n description = models.CharField(\n max_length=250, blank=True, null=True,\n verbose_name=_('Description'),\n help_text=_('Manufacturer part description')\n )\n\n @classmethod\n def create(cls, part, manufacturer, mpn, description, link=None):\n \"\"\" Check if ManufacturerPart instance does not already exist\n then create it\n \"\"\"\n\n manufacturer_part = None\n\n try:\n manufacturer_part = ManufacturerPart.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)\n except ManufacturerPart.DoesNotExist:\n pass\n\n if not manufacturer_part:\n manufacturer_part = ManufacturerPart(part=part, manufacturer=manufacturer, MPN=mpn, description=description, link=link)\n manufacturer_part.save()\n \n return manufacturer_part\n\n def __str__(self):\n s = ''\n\n if self.manufacturer:\n s += f'{self.manufacturer.name}'\n s += ' | '\n\n s += f'{self.MPN}'\n\n return s\n\n\nclass SupplierPart(models.Model):\n \"\"\" Represents a unique part as provided by a Supplier\n Each SupplierPart is identified by a SKU (Supplier Part Number)\n Each SupplierPart is also linked to a Part or ManufacturerPart object.\n A Part may be available from multiple suppliers\n\n Attributes:\n part: Link to the master Part (Obsolete)\n source_item: The sourcing item linked to this SupplierPart instance\n supplier: Company that supplies this SupplierPart object\n SKU: Stock keeping unit (supplier part number)\n link: Link to external website for this supplier part\n description: Descriptive notes field\n note: Longer form note field\n base_cost: Base charge added to order independent of quantity e.g. \"Reeling Fee\"\n multiple: Multiple that the part is provided in\n lead_time: Supplier lead time\n packaging: packaging that the part is supplied in, e.g. 
\"Reel\"\n \"\"\"\n\n def get_absolute_url(self):\n return reverse('supplier-part-detail', kwargs={'pk': self.id})\n\n def save(self, *args, **kwargs):\n \"\"\" Overriding save method to process the linked ManufacturerPart\n \"\"\"\n\n if 'manufacturer' in kwargs:\n manufacturer_id = kwargs.pop('manufacturer')\n\n try:\n manufacturer = Company.objects.get(pk=int(manufacturer_id))\n except (ValueError, Company.DoesNotExist):\n manufacturer = None\n else:\n manufacturer = None\n if 'MPN' in kwargs:\n MPN = kwargs.pop('MPN')\n else:\n MPN = None\n \n if manufacturer or MPN:\n if not self.manufacturer_part:\n # Create ManufacturerPart\n manufacturer_part = ManufacturerPart.create(part=self.part,\n manufacturer=manufacturer,\n mpn=MPN,\n description=self.description)\n self.manufacturer_part = manufacturer_part\n else:\n # Update ManufacturerPart (if ID exists)\n try:\n manufacturer_part_id = self.manufacturer_part.id\n except AttributeError:\n manufacturer_part_id = None\n \n if manufacturer_part_id:\n try:\n (manufacturer_part, created) = ManufacturerPart.objects.update_or_create(part=self.part,\n manufacturer=manufacturer,\n MPN=MPN)\n except IntegrityError:\n manufacturer_part = None\n raise ValidationError(f'ManufacturerPart linked to {self.part} from manufacturer {manufacturer.name}'\n f'with part number {MPN} already exists!')\n\n if manufacturer_part:\n self.manufacturer_part = manufacturer_part\n\n self.clean()\n self.validate_unique()\n\n super().save(*args, **kwargs)\n\n class Meta:\n unique_together = ('part', 'supplier', 'SKU')\n\n # This model was moved from the 'Part' app\n db_table = 'part_supplierpart'\n\n part = models.ForeignKey('part.Part', on_delete=models.CASCADE,\n related_name='supplier_parts',\n verbose_name=_('Base Part'),\n limit_choices_to={\n 'purchaseable': True,\n },\n help_text=_('Select part'),\n )\n\n supplier = models.ForeignKey(Company, on_delete=models.CASCADE,\n related_name='supplied_parts',\n limit_choices_to={'is_supplier': True},\n verbose_name=_('Supplier'),\n help_text=_('Select supplier'),\n )\n\n SKU = models.CharField(\n max_length=100,\n verbose_name=_('SKU'),\n help_text=_('Supplier stock keeping unit')\n )\n\n manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,\n blank=True, null=True,\n related_name='supplier_parts',\n verbose_name=_('Manufacturer Part'),\n help_text=_('Select manufacturer part'),\n )\n\n link = InvenTreeURLField(\n blank=True, null=True,\n verbose_name=_('Link'),\n help_text=_('URL for external supplier part link')\n )\n\n description = models.CharField(\n max_length=250, blank=True, null=True,\n verbose_name=_('Description'),\n help_text=_('Supplier part description')\n )\n\n note = models.CharField(\n max_length=100, blank=True, null=True,\n verbose_name=_('Note'),\n help_text=_('Notes')\n )\n\n base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. 
stocking fee)'))\n\n packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))\n \n multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))\n\n # TODO - Reimplement lead-time as a charfield with special validation (pattern matching).\n # lead_time = models.DurationField(blank=True, null=True)\n\n @property\n def manufacturer_string(self):\n \"\"\" Format a MPN string for this SupplierPart.\n Concatenates manufacture name and part number.\n \"\"\"\n\n items = []\n\n if self.manufacturer_part:\n if self.manufacturer_part.manufacturer:\n items.append(self.manufacturer_part.manufacturer.name)\n if self.manufacturer_part.MPN:\n items.append(self.manufacturer_part.MPN)\n\n return ' | '.join(items)\n\n @property\n def has_price_breaks(self):\n return self.price_breaks.count() > 0\n\n @property\n def price_breaks(self):\n \"\"\" Return the associated price breaks in the correct order \"\"\"\n return self.pricebreaks.order_by('quantity').all()\n\n @property\n def unit_pricing(self):\n return self.get_price(1)\n\n def add_price_break(self, quantity, price):\n \"\"\"\n Create a new price break for this part\n\n args:\n quantity - Numerical quantity\n price - Must be a Money object\n \"\"\"\n\n # Check if a price break at that quantity already exists...\n if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():\n return\n\n SupplierPriceBreak.objects.create(\n part=self,\n quantity=quantity,\n price=price\n )\n\n def get_price(self, quantity, moq=True, multiples=True, currency=None):\n \"\"\" Calculate the supplier price based on quantity price breaks.\n\n - Don't forget to add in flat-fee cost (base_cost field)\n - If MOQ (minimum order quantity) is required, bump quantity\n - If order multiples are to be observed, then we need to calculate based on that, too\n \"\"\"\n\n price_breaks = self.price_breaks.filter(quantity__lte=quantity)\n\n # No price break information available?\n if len(price_breaks) == 0:\n return None\n\n # Order multiples\n if multiples:\n quantity = int(math.ceil(quantity / self.multiple) * self.multiple)\n\n pb_found = False\n pb_quantity = -1\n pb_cost = 0.0\n\n if currency is None:\n # Default currency selection\n currency = common.models.InvenTreeSetting.get_setting('INVENTREE_DEFAULT_CURRENCY')\n\n for pb in self.price_breaks.all():\n # Ignore this pricebreak (quantity is too high)\n if pb.quantity > quantity:\n continue\n\n pb_found = True\n\n # If this price-break quantity is the largest so far, use it!\n if pb.quantity > pb_quantity:\n pb_quantity = pb.quantity\n\n # Convert everything to the selected currency\n pb_cost = pb.convert_to(currency)\n\n if pb_found:\n cost = pb_cost * quantity\n return normalize(cost + self.base_cost)\n else:\n return None\n\n def open_orders(self):\n \"\"\" Return a database query for PO line items for this SupplierPart,\n limited to purchase orders that are open / outstanding.\n \"\"\"\n\n return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)\n\n def on_order(self):\n \"\"\" Return the total quantity of items currently on order.\n\n Subtract partially received stock as appropriate\n \"\"\"\n\n totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))\n\n # Quantity on order\n q = totals.get('quantity__sum', 0)\n\n # Quantity received\n r = totals.get('received__sum', 0)\n\n if q is None or r is 
None:\n return 0\n else:\n return max(q - r, 0)\n\n def purchase_orders(self):\n \"\"\" Returns a list of purchase orders relating to this supplier part \"\"\"\n\n return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]\n\n @property\n def pretty_name(self):\n return str(self)\n\n def __str__(self):\n s = ''\n\n if self.part.IPN:\n s += f'{self.part.IPN}'\n s += ' | '\n\n s += f'{self.supplier.name} | {self.SKU}'\n\n if self.manufacturer_string:\n s = s + ' | ' + self.manufacturer_string\n \n return s\n\n\nclass SupplierPriceBreak(common.models.PriceBreak):\n \"\"\" Represents a quantity price break for a SupplierPart.\n - Suppliers can offer discounts at larger quantities\n - SupplierPart(s) may have zero-or-more associated SupplierPriceBreak(s)\n\n Attributes:\n part: Link to a SupplierPart object that this price break applies to\n quantity: Quantity required for price break\n cost: Cost at specified quantity\n currency: Reference to the currency of this pricebreak (leave empty for base currency)\n \"\"\"\n\n part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)\n\n class Meta:\n unique_together = (\"part\", \"quantity\")\n\n # This model was moved from the 'Part' app\n db_table = 'part_supplierpricebreak'\n\n def __str__(self):\n return f'{self.part.MPN} - {self.price} @ {self.quantity}'\n",
"path": "InvenTree/company/models.py"
}
] | [
{
"content": "\"\"\"\nCompany database model definitions\n\"\"\"\n\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport os\n\nimport math\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.validators import MinValueValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.utils import IntegrityError\nfrom django.db.models import Sum, Q, UniqueConstraint\n\nfrom django.apps import apps\nfrom django.urls import reverse\n\nfrom moneyed import CURRENCIES\n\nfrom markdownx.models import MarkdownxField\n\nfrom stdimage.models import StdImageField\n\nfrom InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail\nfrom InvenTree.helpers import normalize\nfrom InvenTree.fields import InvenTreeURLField\nfrom InvenTree.status_codes import PurchaseOrderStatus\n\nimport InvenTree.validators\n\nimport common.models\nimport common.settings\n\n\ndef rename_company_image(instance, filename):\n \"\"\" Function to rename a company image after upload\n\n Args:\n instance: Company object\n filename: uploaded image filename\n\n Returns:\n New image filename\n \"\"\"\n\n base = 'company_images'\n\n if filename.count('.') > 0:\n ext = filename.split('.')[-1]\n else:\n ext = ''\n\n fn = 'company_{pk}_img'.format(pk=instance.pk)\n\n if ext:\n fn += '.' + ext\n\n return os.path.join(base, fn)\n\n\nclass Company(models.Model):\n \"\"\" A Company object represents an external company.\n It may be a supplier or a customer or a manufacturer (or a combination)\n\n - A supplier is a company from which parts can be purchased\n - A customer is a company to which parts can be sold\n - A manufacturer is a company which manufactures a raw good (they may or may not be a \"supplier\" also)\n\n\n Attributes:\n name: Brief name of the company\n description: Longer form description\n website: URL for the company website\n address: Postal address\n phone: contact phone number\n email: contact email address\n link: Secondary URL e.g. 
for link to internal Wiki page\n image: Company image / logo\n notes: Extra notes about the company\n is_customer: boolean value, is this company a customer\n is_supplier: boolean value, is this company a supplier\n is_manufacturer: boolean value, is this company a manufacturer\n currency_code: Specifies the default currency for the company\n \"\"\"\n\n class Meta:\n ordering = ['name', ]\n constraints = [\n UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')\n ]\n\n name = models.CharField(max_length=100, blank=False,\n help_text=_('Company name'),\n verbose_name=_('Company name'))\n\n description = models.CharField(\n max_length=500,\n verbose_name=_('Company description'),\n help_text=_('Description of the company'),\n blank=True,\n )\n\n website = models.URLField(blank=True, verbose_name=_('Website'), help_text=_('Company website URL'))\n\n address = models.CharField(max_length=200,\n verbose_name=_('Address'),\n blank=True, help_text=_('Company address'))\n\n phone = models.CharField(max_length=50,\n verbose_name=_('Phone number'),\n blank=True, help_text=_('Contact phone number'))\n\n email = models.EmailField(blank=True, null=True,\n verbose_name=_('Email'), help_text=_('Contact email address'))\n\n contact = models.CharField(max_length=100,\n verbose_name=_('Contact'),\n blank=True, help_text=_('Point of contact'))\n\n link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))\n\n image = StdImageField(\n upload_to=rename_company_image,\n null=True,\n blank=True,\n variations={'thumbnail': (128, 128)},\n delete_orphans=True,\n verbose_name=_('Image'),\n )\n\n notes = MarkdownxField(blank=True, verbose_name=_('Notes'))\n\n is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))\n\n is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))\n\n is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))\n\n currency = models.CharField(\n max_length=3,\n verbose_name=_('Currency'),\n blank=True,\n help_text=_('Default currency used for this company'),\n validators=[InvenTree.validators.validate_currency_code],\n )\n\n @property\n def currency_code(self):\n \"\"\"\n Return the currency code associated with this company.\n \n - If the currency code is invalid, use the default currency\n - If the currency code is not specified, use the default currency\n \"\"\"\n\n code = self.currency\n\n if code not in CURRENCIES:\n code = common.settings.currency_code_default()\n\n return code\n\n def __str__(self):\n \"\"\" Get string representation of a Company \"\"\"\n return \"{n} - {d}\".format(n=self.name, d=self.description)\n\n def get_absolute_url(self):\n \"\"\" Get the web URL for the detail view for this Company \"\"\"\n return reverse('company-detail', kwargs={'pk': self.id})\n\n def get_image_url(self):\n \"\"\" Return the URL of the image for this company \"\"\"\n\n if self.image:\n return getMediaUrl(self.image.url)\n else:\n return getBlankImage()\n\n def get_thumbnail_url(self):\n \"\"\" Return the URL for the thumbnail image for this Company \"\"\"\n\n if self.image:\n return getMediaUrl(self.image.thumbnail.url)\n else:\n return getBlankThumbnail()\n \n @property\n def manufactured_part_count(self):\n \"\"\" The number of parts manufactured by this company \"\"\"\n return 
self.manufactured_parts.count()\n\n @property\n def has_manufactured_parts(self):\n return self.manufactured_part_count > 0\n\n @property\n def supplied_part_count(self):\n \"\"\" The number of parts supplied by this company \"\"\"\n return self.supplied_parts.count()\n\n @property\n def has_supplied_parts(self):\n \"\"\" Return True if this company supplies any parts \"\"\"\n return self.supplied_part_count > 0\n\n @property\n def parts(self):\n \"\"\" Return SupplierPart objects which are supplied or manufactured by this company \"\"\"\n return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))\n\n @property\n def part_count(self):\n \"\"\" The number of parts manufactured (or supplied) by this Company \"\"\"\n return self.parts.count()\n\n @property\n def has_parts(self):\n return self.part_count > 0\n\n @property\n def stock_items(self):\n \"\"\" Return a list of all stock items supplied or manufactured by this company \"\"\"\n stock = apps.get_model('stock', 'StockItem')\n return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()\n\n @property\n def stock_count(self):\n \"\"\" Return the number of stock items supplied or manufactured by this company \"\"\"\n return self.stock_items.count()\n\n def outstanding_purchase_orders(self):\n \"\"\" Return purchase orders which are 'outstanding' \"\"\"\n return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)\n\n def pending_purchase_orders(self):\n \"\"\" Return purchase orders which are PENDING (not yet issued) \"\"\"\n return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)\n\n def closed_purchase_orders(self):\n \"\"\" Return purchase orders which are not 'outstanding'\n\n - Complete\n - Failed / lost\n - Returned\n \"\"\"\n\n return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)\n\n def complete_purchase_orders(self):\n return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)\n\n def failed_purchase_orders(self):\n \"\"\" Return any purchase orders which were not successful \"\"\"\n\n return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)\n\n\nclass Contact(models.Model):\n \"\"\" A Contact represents a person who works at a particular company.\n A Company may have zero or more associated Contact objects.\n\n Attributes:\n company: Company link for this contact\n name: Name of the contact\n phone: contact phone number\n email: contact email\n role: position in company\n \"\"\"\n\n company = models.ForeignKey(Company, related_name='contacts',\n on_delete=models.CASCADE)\n\n name = models.CharField(max_length=100)\n\n phone = models.CharField(max_length=100, blank=True)\n\n email = models.EmailField(blank=True)\n\n role = models.CharField(max_length=100, blank=True)\n\n company = models.ForeignKey(Company, related_name='contacts',\n on_delete=models.CASCADE)\n\n\nclass ManufacturerPart(models.Model):\n \"\"\" Represents a unique part as provided by a Manufacturer\n Each ManufacturerPart is identified by a MPN (Manufacturer Part Number)\n Each ManufacturerPart is also linked to a Part object.\n A Part may be available from multiple manufacturers\n\n Attributes:\n part: Link to the master Part\n manufacturer: Company that manufactures the ManufacturerPart\n MPN: Manufacture part number\n link: Link to external website for this manufacturer part\n description: Descriptive notes field\n \"\"\"\n\n class Meta:\n unique_together = ('part', 
'manufacturer', 'MPN')\n \n part = models.ForeignKey('part.Part', on_delete=models.CASCADE,\n related_name='manufacturer_parts',\n verbose_name=_('Base Part'),\n limit_choices_to={\n 'purchaseable': True,\n },\n help_text=_('Select part'),\n )\n \n manufacturer = models.ForeignKey(\n Company,\n on_delete=models.CASCADE,\n null=True,\n related_name='manufactured_parts',\n limit_choices_to={\n 'is_manufacturer': True\n },\n verbose_name=_('Manufacturer'),\n help_text=_('Select manufacturer'),\n )\n\n MPN = models.CharField(\n null=True,\n max_length=100,\n verbose_name=_('MPN'),\n help_text=_('Manufacturer Part Number')\n )\n\n link = InvenTreeURLField(\n blank=True, null=True,\n verbose_name=_('Link'),\n help_text=_('URL for external manufacturer part link')\n )\n\n description = models.CharField(\n max_length=250, blank=True, null=True,\n verbose_name=_('Description'),\n help_text=_('Manufacturer part description')\n )\n\n @classmethod\n def create(cls, part, manufacturer, mpn, description, link=None):\n \"\"\" Check if ManufacturerPart instance does not already exist\n then create it\n \"\"\"\n\n manufacturer_part = None\n\n try:\n manufacturer_part = ManufacturerPart.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)\n except ManufacturerPart.DoesNotExist:\n pass\n\n if not manufacturer_part:\n manufacturer_part = ManufacturerPart(part=part, manufacturer=manufacturer, MPN=mpn, description=description, link=link)\n manufacturer_part.save()\n \n return manufacturer_part\n\n def __str__(self):\n s = ''\n\n if self.manufacturer:\n s += f'{self.manufacturer.name}'\n s += ' | '\n\n s += f'{self.MPN}'\n\n return s\n\n\nclass SupplierPart(models.Model):\n \"\"\" Represents a unique part as provided by a Supplier\n Each SupplierPart is identified by a SKU (Supplier Part Number)\n Each SupplierPart is also linked to a Part or ManufacturerPart object.\n A Part may be available from multiple suppliers\n\n Attributes:\n part: Link to the master Part (Obsolete)\n source_item: The sourcing item linked to this SupplierPart instance\n supplier: Company that supplies this SupplierPart object\n SKU: Stock keeping unit (supplier part number)\n link: Link to external website for this supplier part\n description: Descriptive notes field\n note: Longer form note field\n base_cost: Base charge added to order independent of quantity e.g. \"Reeling Fee\"\n multiple: Multiple that the part is provided in\n lead_time: Supplier lead time\n packaging: packaging that the part is supplied in, e.g. 
\"Reel\"\n \"\"\"\n\n def get_absolute_url(self):\n return reverse('supplier-part-detail', kwargs={'pk': self.id})\n\n def save(self, *args, **kwargs):\n \"\"\" Overriding save method to process the linked ManufacturerPart\n \"\"\"\n\n if 'manufacturer' in kwargs:\n manufacturer_id = kwargs.pop('manufacturer')\n\n try:\n manufacturer = Company.objects.get(pk=int(manufacturer_id))\n except (ValueError, Company.DoesNotExist):\n manufacturer = None\n else:\n manufacturer = None\n if 'MPN' in kwargs:\n MPN = kwargs.pop('MPN')\n else:\n MPN = None\n \n if manufacturer or MPN:\n if not self.manufacturer_part:\n # Create ManufacturerPart\n manufacturer_part = ManufacturerPart.create(part=self.part,\n manufacturer=manufacturer,\n mpn=MPN,\n description=self.description)\n self.manufacturer_part = manufacturer_part\n else:\n # Update ManufacturerPart (if ID exists)\n try:\n manufacturer_part_id = self.manufacturer_part.id\n except AttributeError:\n manufacturer_part_id = None\n \n if manufacturer_part_id:\n try:\n (manufacturer_part, created) = ManufacturerPart.objects.update_or_create(part=self.part,\n manufacturer=manufacturer,\n MPN=MPN)\n except IntegrityError:\n manufacturer_part = None\n raise ValidationError(f'ManufacturerPart linked to {self.part} from manufacturer {manufacturer.name}'\n f'with part number {MPN} already exists!')\n\n if manufacturer_part:\n self.manufacturer_part = manufacturer_part\n\n self.clean()\n self.validate_unique()\n\n super().save(*args, **kwargs)\n\n class Meta:\n unique_together = ('part', 'supplier', 'SKU')\n\n # This model was moved from the 'Part' app\n db_table = 'part_supplierpart'\n\n part = models.ForeignKey('part.Part', on_delete=models.CASCADE,\n related_name='supplier_parts',\n verbose_name=_('Base Part'),\n limit_choices_to={\n 'purchaseable': True,\n },\n help_text=_('Select part'),\n )\n\n supplier = models.ForeignKey(Company, on_delete=models.CASCADE,\n related_name='supplied_parts',\n limit_choices_to={'is_supplier': True},\n verbose_name=_('Supplier'),\n help_text=_('Select supplier'),\n )\n\n SKU = models.CharField(\n max_length=100,\n verbose_name=_('SKU'),\n help_text=_('Supplier stock keeping unit')\n )\n\n manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,\n blank=True, null=True,\n related_name='supplier_parts',\n verbose_name=_('Manufacturer Part'),\n help_text=_('Select manufacturer part'),\n )\n\n link = InvenTreeURLField(\n blank=True, null=True,\n verbose_name=_('Link'),\n help_text=_('URL for external supplier part link')\n )\n\n description = models.CharField(\n max_length=250, blank=True, null=True,\n verbose_name=_('Description'),\n help_text=_('Supplier part description')\n )\n\n note = models.CharField(\n max_length=100, blank=True, null=True,\n verbose_name=_('Note'),\n help_text=_('Notes')\n )\n\n base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. 
stocking fee)'))\n\n packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))\n \n multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))\n\n # TODO - Reimplement lead-time as a charfield with special validation (pattern matching).\n # lead_time = models.DurationField(blank=True, null=True)\n\n @property\n def manufacturer_string(self):\n \"\"\" Format a MPN string for this SupplierPart.\n Concatenates manufacture name and part number.\n \"\"\"\n\n items = []\n\n if self.manufacturer_part:\n if self.manufacturer_part.manufacturer:\n items.append(self.manufacturer_part.manufacturer.name)\n if self.manufacturer_part.MPN:\n items.append(self.manufacturer_part.MPN)\n\n return ' | '.join(items)\n\n @property\n def has_price_breaks(self):\n return self.price_breaks.count() > 0\n\n @property\n def price_breaks(self):\n \"\"\" Return the associated price breaks in the correct order \"\"\"\n return self.pricebreaks.order_by('quantity').all()\n\n @property\n def unit_pricing(self):\n return self.get_price(1)\n\n def add_price_break(self, quantity, price):\n \"\"\"\n Create a new price break for this part\n\n args:\n quantity - Numerical quantity\n price - Must be a Money object\n \"\"\"\n\n # Check if a price break at that quantity already exists...\n if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():\n return\n\n SupplierPriceBreak.objects.create(\n part=self,\n quantity=quantity,\n price=price\n )\n\n def get_price(self, quantity, moq=True, multiples=True, currency=None):\n \"\"\" Calculate the supplier price based on quantity price breaks.\n\n - Don't forget to add in flat-fee cost (base_cost field)\n - If MOQ (minimum order quantity) is required, bump quantity\n - If order multiples are to be observed, then we need to calculate based on that, too\n \"\"\"\n\n price_breaks = self.price_breaks.filter(quantity__lte=quantity)\n\n # No price break information available?\n if len(price_breaks) == 0:\n return None\n\n # Order multiples\n if multiples:\n quantity = int(math.ceil(quantity / self.multiple) * self.multiple)\n\n pb_found = False\n pb_quantity = -1\n pb_cost = 0.0\n\n if currency is None:\n # Default currency selection\n currency = common.models.InvenTreeSetting.get_setting('INVENTREE_DEFAULT_CURRENCY')\n\n for pb in self.price_breaks.all():\n # Ignore this pricebreak (quantity is too high)\n if pb.quantity > quantity:\n continue\n\n pb_found = True\n\n # If this price-break quantity is the largest so far, use it!\n if pb.quantity > pb_quantity:\n pb_quantity = pb.quantity\n\n # Convert everything to the selected currency\n pb_cost = pb.convert_to(currency)\n\n if pb_found:\n cost = pb_cost * quantity\n return normalize(cost + self.base_cost)\n else:\n return None\n\n def open_orders(self):\n \"\"\" Return a database query for PO line items for this SupplierPart,\n limited to purchase orders that are open / outstanding.\n \"\"\"\n\n return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)\n\n def on_order(self):\n \"\"\" Return the total quantity of items currently on order.\n\n Subtract partially received stock as appropriate\n \"\"\"\n\n totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))\n\n # Quantity on order\n q = totals.get('quantity__sum', 0)\n\n # Quantity received\n r = totals.get('received__sum', 0)\n\n if q is None or r is 
None:\n return 0\n else:\n return max(q - r, 0)\n\n def purchase_orders(self):\n \"\"\" Returns a list of purchase orders relating to this supplier part \"\"\"\n\n return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]\n\n @property\n def pretty_name(self):\n return str(self)\n\n def __str__(self):\n s = ''\n\n if self.part.IPN:\n s += f'{self.part.IPN}'\n s += ' | '\n\n s += f'{self.supplier.name} | {self.SKU}'\n\n if self.manufacturer_string:\n s = s + ' | ' + self.manufacturer_string\n \n return s\n\n\nclass SupplierPriceBreak(common.models.PriceBreak):\n \"\"\" Represents a quantity price break for a SupplierPart.\n - Suppliers can offer discounts at larger quantities\n - SupplierPart(s) may have zero-or-more associated SupplierPriceBreak(s)\n\n Attributes:\n part: Link to a SupplierPart object that this price break applies to\n quantity: Quantity required for price break\n cost: Cost at specified quantity\n currency: Reference to the currency of this pricebreak (leave empty for base currency)\n \"\"\"\n\n part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)\n\n class Meta:\n unique_together = (\"part\", \"quantity\")\n\n # This model was moved from the 'Part' app\n db_table = 'part_supplierpricebreak'\n\n def __str__(self):\n return f'{self.part.SKU} - {self.price} @ {self.quantity}'\n",
"path": "InvenTree/company/models.py"
}
] | diff --git a/InvenTree/company/models.py b/InvenTree/company/models.py
index 3ea50b162219..c743bf44343c 100644
--- a/InvenTree/company/models.py
+++ b/InvenTree/company/models.py
@@ -675,4 +675,4 @@ class Meta:
db_table = 'part_supplierpricebreak'
def __str__(self):
- return f'{self.part.MPN} - {self.price} @ {self.quantity}'
+ return f'{self.part.SKU} - {self.price} @ {self.quantity}'
|