repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
enobayram/MHFlib | MHFPython/scripts/figures.py | 1 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 11:30:49 2012
@author: eba
"""
from numpy import *
from scipy import *
from matplotlib.pyplot import *
from MHFPython import *
from plotGaussians import *
from numpy.linalg import inv, eig, cholesky  # explicit import of the linear-algebra helpers used below
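# This script visualises how a 2D Gaussian is split into a mixture of smaller
# components: unit circles are mapped through (scaled) covariance factors to draw
# the original Gaussian, the maximum component size and the resulting components.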
def rotate(x,y,rot):
return [x*rot[0,0]+y*rot[0,1],x*rot[1,0]+y*rot[1,1]]
theta = arange(-pi,pi,0.01)
r = 1;
limit = 3.;
[x,y] = [r*cos(theta), r*sin(theta)]
rotOrig = array([[1.,0.],[1.,1.]])
[xorig,yorig] = rotate(x,y,rotOrig)
variances = [0.025,1]
stddevs = sqrt(variances)
rotMax = array([[stddevs[0],0],[0,stddevs[1]]])
[xmax,ymax] = rotate(x,y,rotMax)
figure(1)
hold(False)
Orig = plot(xorig,yorig)
hold(True)
Max = plot(xmax,ymax)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([Orig,Max],["Original Gaussian","Maximum Component Size"])
title("2D Gaussian to Split")
rotOrigScaled = inv(rotMax).dot(rotOrig)
[xorigs,yorigs] = rotate(x,y,rotOrigScaled)
rotMaxScaled = inv(rotMax).dot(rotMax)
[xmaxs,ymaxs] = rotate(x,y,rotMaxScaled)
figure(2)
hold(False)
OrigS = plot(xorigs,yorigs)
hold(True)
MaxS = plot(xmaxs,ymaxs)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([OrigS,MaxS],["Original Gaussian","Maximum Component Size"])
title("Scaled Coordinates")
POrigScaled = rotOrigScaled.dot(rotOrigScaled.transpose());
eigs,rotDecompose = eig(POrigScaled)
rotIndependent = inv(rotDecompose).dot(rotOrigScaled)
[xind,yind] = rotate(x,y,rotIndependent)
figure(3)
hold(False)
OrigI = plot(xind,yind)
hold(True)
MaxI = plot(xmaxs,ymaxs)
ylim([-limit,limit])
xlim([-limit,limit])
grid(True)
legend([OrigI,MaxI],["Original Gaussian","Maximum Component Size"])
table = SplitTable1('tables/kl1e-1table');
plotvar = eigs[0];
#1D plot of the table entries
lim1d = sqrt(plotvar)*4;
x = arange(-lim1d,lim1d,lim1d/500)
#y = 1/sqrt(2*pi*originalVar)*exp(-1/2*(x*x)/originalVar)
y = 1/sqrt(2*pi*plotvar)*exp(-x*x/(2*plotvar))
fig=figure(4)
hold(False)
orig1d = plot(x, y)
hold(True)
y = zeros_like(x)
entry = table.getUpperEntry(plotvar)
entryvar = entry.variance;
varRatio = plotvar/entryvar;
hyp1d = entry.hypotheses
for gh in hyp1d:
var = gh.cov(0)*varRatio;
mean = gh.mean(0)*sqrt(varRatio);
y=1/sqrt(2*pi*var)*exp(-(x-mean)*(x-mean)/(2*var))*gh.weight
components = plot(x, y, color = 'green')
#savefig('figures/split')
legend([OrigI,MaxI],["Original","Components"])
vO = rotOrig.dot(rotOrig.transpose())
original = GaussianHypothesis3();
assign(original.mean,[0,0,0]);
assign(original.cov,[vO[0,0],vO[0,1],0, vO[1,0],vO[1,1],0, 0,0,1]);
original.weight = 1;
variancesMat = MeanMatrix();
assign(variancesMat, [variances[0],variances[1],2]);
result = GH3list();
mhf = MultiHypDist3();
split(original, result, variancesMat, table);
[x,y] = [r*cos(theta), r*sin(theta)]
figure(5)
hold(False)
Orig = plot(xorig,yorig)
hold(True)
for gh in result:
mean = pyArray(gh.mean)
rotGh = cholesky(pyArray(gh.cov))
[xgh,ygh] = rotate(x,y,rotGh[0:2,0:2])
[xghm, yghm] = [xgh+mean[0], ygh+mean[1]]
plot(xghm,yghm, color='green')
ylim([-limit,limit])
xlim([-limit,limit])
legend([OrigI,MaxI],["Original","Components"])
grid(True)
steps = 100
plotGaussian(original,[0,1],limit,steps,6)
plotGaussians(result,[0,1], limit, steps, 7, 0.)
| bsd-2-clause | -8,608,220,335,965,278,000 | 21.047945 | 67 | 0.680646 | false |
monopole/test-infra | config/jobs/kubernetes/kops/build_jobs.py | 1 | 42219 | # Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import zlib
import yaml
import boto3 # pylint: disable=import-error
import jinja2 # pylint: disable=import-error
periodic_template = """
- name: {{job_name}}
cron: '{{cron}}'
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
decorate: true
decoration_config:
timeout: {{job_timeout}}
extra_refs:
- org: kubernetes
repo: kops
base_ref: master
workdir: true
path_alias: k8s.io/kops
spec:
containers:
- command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --down \\
--cloud-provider=aws \\
--create-args="{{create_args}}" \\
{%- if kops_feature_flags %}
--env=KOPS_FEATURE_FLAGS={{kops_feature_flags}} \\
{%- endif %}
--kops-version-marker={{kops_deploy_url}} \\
{%- if publish_version_marker %}
--publish-version-marker={{publish_version_marker}} \\
{%- endif %}
--kubernetes-version={{k8s_deploy_url}} \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
{%- if validation_wait %}
--validation-wait={{validation_wait}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
env:
- name: KUBE_SSH_KEY_PATH
value: /etc/aws-ssh/aws-ssh-private
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
image: gcr.io/k8s-testimages/kubekins-e2e:v20210426-51fd28e-master
imagePullPolicy: Always
resources:
limits:
memory: 3Gi
requests:
cpu: "2"
memory: 3Gi
"""
presubmit_template = """
- name: {{job_name}}
branches:
- master
{%- if run_if_changed %}
run_if_changed: '{{run_if_changed}}'
{%- endif %}
always_run: {{always_run}}
skip_report: {{skip_report}}
labels:
{%- if cloud == "aws" %}
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
preset-bazel-scratch-dir: "true"
preset-bazel-remote-cache-enabled: "true"
preset-dind-enabled: "true"
{%- else %}
preset-k8s-ssh: "true"
{%- endif %}
decorate: true
decoration_config:
timeout: {{job_timeout}}
path_alias: k8s.io/kops
spec:
{%- if cloud == "gce" %}
serviceAccountName: k8s-kops-test
{%- endif %}
containers:
- image: gcr.io/k8s-testimages/kubekins-e2e:v20210426-51fd28e-master
imagePullPolicy: Always
command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --build --down \\
--cloud-provider={{cloud}} \\
--create-args="{{create_args}}" \\
{%- if kops_feature_flags %}
--env=KOPS_FEATURE_FLAGS={{kops_feature_flags}} \\
{%- endif %}
--kubernetes-version={{k8s_deploy_url}} \\
--kops-binary-path=/home/prow/go/src/k8s.io/kops/bazel-bin/cmd/kops/linux-amd64/kops \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
securityContext:
privileged: true
env:
- name: KUBE_SSH_KEY_PATH
value: {{kops_ssh_key_path}}
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
- name: GOPATH
value: /home/prow/go
resources:
requests:
cpu: "2"
memory: "6Gi"
"""
# We support rapid focus on a few tests of high concern
# This should be used for temporary tests we are evaluating,
# and ideally linked to a bug, and removed once the bug is fixed
run_hourly = [
]
run_daily = [
'kops-grid-scenario-public-jwks',
'kops-grid-scenario-arm64',
'kops-grid-scenario-aws-cloud-controller-manager',
'kops-grid-scenario-serial-test-for-timeout',
'kops-grid-scenario-terraform',
]
# These are job tab names of unsupported grid combinations
skip_jobs = [
]
def simple_hash(s):
    # & 0xffffffff forces an unsigned 32-bit result (same value on python2 and python3)
return zlib.crc32(s.encode()) & 0xffffffff
def build_cron(key, runs_per_day):
runs_per_week = 0
minute = simple_hash("minutes:" + key) % 60
hour = simple_hash("hours:" + key) % 24
day_of_week = simple_hash("day_of_week:" + key) % 7
if runs_per_day > 0:
hour_denominator = 24 / runs_per_day
hour_offset = simple_hash("hours:" + key) % hour_denominator
return "%d %d-23/%d * * *" % (minute, hour_offset, hour_denominator), (runs_per_day * 7)
# run Ubuntu 20.04 (Focal) jobs more frequently
if "u2004" in key:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
# run hotlist jobs more frequently
if key in run_hourly:
runs_per_week += 24 * 7
return "%d * * * *" % (minute), runs_per_week
if key in run_daily:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
runs_per_week += 1
return "%d %d * * %d" % (minute, hour, day_of_week), runs_per_week
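# build_cron examples: runs_per_day=3 yields a cron string of the form "M H-23/8 * * *",
# a key listed in run_daily yields the daily "M H * * *", and the default case is the
# weekly "M H * * D"; M, H and D are derived deterministically from the key hash.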
def replace_or_remove_line(s, pattern, new_str):
keep = []
for line in s.split('\n'):
if pattern in line:
if new_str:
line = line.replace(pattern, new_str)
keep.append(line)
else:
keep.append(line)
return '\n'.join(keep)
def should_skip_newer_k8s(k8s_version, kops_version):
if kops_version is None:
return False
if k8s_version is None:
return True
return float(k8s_version) > float(kops_version)
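# Note: should_skip_newer_k8s compares minor versions as floats, e.g. ('1.21', '1.20')
# gives 1.21 > 1.20, so a combination is skipped when the requested k8s release is newer
# than the kops release under test; a missing kops version means "latest" and never skips.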
def k8s_version_info(k8s_version):
test_package_bucket = ''
test_package_dir = ''
if k8s_version == 'latest':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/latest.txt"
elif k8s_version == 'ci':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release-dev/ci/latest.txt"
test_package_bucket = 'kubernetes-release-dev'
test_package_dir = 'ci'
elif k8s_version == 'stable':
marker = 'stable.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
elif k8s_version:
marker = f"stable-{k8s_version}.txt"
k8s_deploy_url = f"https://storage.googleapis.com/kubernetes-release/release/stable-{k8s_version}.txt" # pylint: disable=line-too-long
else:
raise Exception('missing required k8s_version')
return marker, k8s_deploy_url, test_package_bucket, test_package_dir
def create_args(kops_channel, networking, container_runtime, extra_flags, kops_image):
args = f"--channel={kops_channel} --networking=" + (networking or "kubenet")
if container_runtime:
args += f" --container-runtime={container_runtime}"
if kops_image:
image_overridden = False
if extra_flags:
for arg in extra_flags:
if "--image=" in arg:
image_overridden = True
args = args + " " + arg
if not image_overridden:
args = f"--image='{kops_image}' {args}"
return args.strip()
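# create_args example (with a hypothetical AMI id 'ami-123'):
#   create_args('alpha', 'calico', 'containerd', None, 'ami-123')
#   -> "--image='ami-123' --channel=alpha --networking=calico --container-runtime=containerd"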
def latest_aws_image(owner, name):
client = boto3.client('ec2', region_name='us-east-1')
response = client.describe_images(
Owners=[owner],
Filters=[
{
'Name': 'name',
'Values': [
name,
],
},
],
)
images = {}
for image in response['Images']:
images[image['CreationDate']] = image['ImageLocation']
return images[sorted(images, reverse=True)[0]]
distro_images = {
'amzn2': latest_aws_image('137112412989', 'amzn2-ami-hvm-*-x86_64-gp2'),
'centos7': latest_aws_image('125523088429', 'CentOS 7.*x86_64'),
'centos8': latest_aws_image('125523088429', 'CentOS 8.*x86_64'),
'deb9': latest_aws_image('379101102735', 'debian-stretch-hvm-x86_64-gp2-*'),
'deb10': latest_aws_image('136693071363', 'debian-10-amd64-*'),
'flatcar': latest_aws_image('075585003325', 'Flatcar-stable-*-hvm'),
'rhel7': latest_aws_image('309956199498', 'RHEL-7.*_HVM_*-x86_64-0-Hourly2-GP2'),
'rhel8': latest_aws_image('309956199498', 'RHEL-8.*_HVM-*-x86_64-0-Hourly2-GP2'),
'u1804': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*'), # pylint: disable=line-too-long
'u2004': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*'), # pylint: disable=line-too-long
'u2004arm64': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*'), # pylint: disable=line-too-long
'u2010': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-groovy-20.10-amd64-server-*'), # pylint: disable=line-too-long
}
distros_ssh_user = {
'amzn2': 'ec2-user',
'centos7': 'centos',
'centos8': 'centos',
'deb9': 'admin',
'deb10': 'admin',
'flatcar': 'core',
'rhel7': 'ec2-user',
'rhel8': 'ec2-user',
'u1804': 'ubuntu',
'u2004': 'ubuntu',
'u2004arm64': 'ubuntu',
'u2010': 'ubuntu',
}
##############
# Build Test #
##############
# Returns a string representing the periodic prow job and the number of job invocations per week
def build_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
kops_version=None,
publish_version_marker=None,
name_override=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
terraform_version=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
runs_per_day=0):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
if kops_version is None:
# TODO: Move to kops-ci/markers/master/ once validated
kops_deploy_url = "https://storage.googleapis.com/kops-ci/bin/latest-ci-updown-green.txt"
elif kops_version.startswith("https://"):
kops_deploy_url = kops_version
kops_version = None
else:
kops_deploy_url = f"https://storage.googleapis.com/kops-ci/markers/release-{kops_version}/latest-ci-updown-green.txt" # pylint: disable=line-too-long
# https://github.com/cilium/cilium/blob/71cfb265d53b63a2be3806fb3fd4425fa36262ff/Documentation/install/system_requirements.rst#centos-foot
if networking == "cilium" and distro not in ["u2004", "u2004arm64", "deb10", "rhel8"]:
return None
if should_skip_newer_k8s(k8s_version, kops_version):
return None
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
validation_wait = '20m' if distro == 'flatcar' else None
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity' # pylint: disable=line-too-long
if networking == "cilium":
# https://github.com/cilium/cilium/issues/10002
skip_regex += r'|TCP.CLOSE_WAIT'
# https://github.com/cilium/cilium/issues/15361
skip_regex += r'|external.IP.is.not.assigned.to.a.node'
if skip_override is not None:
skip_regex = skip_override
    # TODO(rifelpet): Remove once k8s tags have been created that include
# https://github.com/kubernetes/kubernetes/pull/101443
if cloud == 'aws' and k8s_version in ('ci', 'latest', 'stable', '1.21', '1.22') and skip_regex:
skip_regex += r'|Invalid.AWS.KMS.key'
suffix = ""
if cloud and cloud != "aws":
suffix += "-" + cloud
if networking:
suffix += "-" + networking
if distro:
suffix += "-" + distro
if k8s_version:
suffix += "-k" + k8s_version.replace("1.", "")
if kops_version:
suffix += "-ko" + kops_version.replace("1.", "")
if container_runtime:
suffix += "-" + container_runtime
tab = name_override or (f"kops-grid{suffix}")
if tab in skip_jobs:
return None
job_name = f"e2e-{tab}"
cron, runs_per_week = build_cron(tab, runs_per_day)
tmpl = jinja2.Template(periodic_template)
job = tmpl.render(
job_name=job_name,
cron=cron,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
kops_deploy_url=kops_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_regex,
kops_feature_flags=','.join(feature_flags),
terraform_version=terraform_version,
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
validation_wait=validation_wait,
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'kops_version': kops_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'sig-cluster-lifecycle-kops',
'google-aws',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
f"kops-{kops_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}\n{job.strip()}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output, runs_per_week
# Returns a string representing a presubmit prow job YAML
def presubmit_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
name=None,
tab_name=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
run_if_changed=None,
skip_report=False,
always_run=False):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
if cloud == 'aws':
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
kops_ssh_key_path = '/etc/aws-ssh/aws-ssh-private'
        # TODO(rifelpet): Remove once k8s tags have been created that include
# https://github.com/kubernetes/kubernetes/pull/101443
if k8s_version in ('ci', 'latest', 'stable', '1.21', '1.22'):
skip_override += r'|Invalid.AWS.KMS.key'
elif cloud == 'gce':
kops_image = None
kops_ssh_user = 'prow'
kops_ssh_key_path = '/etc/ssh-key-secret/ssh-private'
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
tmpl = jinja2.Template(presubmit_template)
job = tmpl.render(
job_name=name,
cloud=cloud,
kops_ssh_key_path=kops_ssh_key_path,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_override,
kops_feature_flags=','.join(feature_flags),
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
run_if_changed=run_if_changed,
skip_report='true' if skip_report else 'false',
always_run='true' if always_run else 'false',
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'presubmits-kops',
'kops-presubmits',
'sig-cluster-lifecycle-kops',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab_name,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}{job}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output
####################
# Grid Definitions #
####################
networking_options = [
None,
'calico',
'cilium',
'flannel',
'kopeio',
]
distro_options = [
'amzn2',
'deb9',
'deb10',
'flatcar',
'rhel7',
'rhel8',
'u1804',
'u2004',
]
k8s_versions = [
#"latest", # disabled until we're ready to test 1.21
"1.18",
"1.19",
"1.20"
]
kops_versions = [
None, # maps to latest
"1.19",
"1.20",
]
container_runtimes = [
"docker",
"containerd",
]
############################
# kops-periodics-grid.yaml #
############################
def generate_grid():
results = []
# pylint: disable=too-many-nested-blocks
for container_runtime in container_runtimes:
for networking in networking_options:
for distro in distro_options:
for k8s_version in k8s_versions:
for kops_version in kops_versions:
results.append(
build_test(cloud="aws",
distro=distro,
extra_dashboards=['kops-grid'],
k8s_version=k8s_version,
kops_version=kops_version,
networking=networking,
container_runtime=container_runtime)
)
return filter(None, results)
#############################
# kops-periodics-misc2.yaml #
#############################
def generate_misc():
results = [
# A one-off scenario testing arm64
build_test(name_override="kops-grid-scenario-arm64",
cloud="aws",
distro="u2004arm64",
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=['kops-misc']),
# A special test for JWKS
build_test(name_override="kops-grid-scenario-public-jwks",
cloud="aws",
distro="u2004",
feature_flags=["UseServiceAccountIAM", "PublicJWKS"],
extra_flags=['--api-loadbalancer-type=public'],
extra_dashboards=['kops-misc']),
# A special test for AWS Cloud-Controller-Manager
build_test(name_override="kops-grid-scenario-aws-cloud-controller-manager",
cloud="aws",
distro="u2004",
k8s_version="1.19",
feature_flags=["EnableExternalCloudController,SpecOverrideFlag"],
extra_flags=['--override=cluster.spec.cloudControllerManager.cloudProvider=aws',
'--override=cluster.spec.cloudConfig.awsEBSCSIDriver.enabled=true'],
extra_dashboards=['provider-aws-cloud-provider-aws', 'kops-misc']),
build_test(name_override="kops-grid-scenario-terraform",
container_runtime='containerd',
k8s_version="1.20",
terraform_version="0.14.6",
extra_dashboards=['kops-misc']),
build_test(name_override="kops-aws-misc-ha-euwest1",
k8s_version="stable",
networking="calico",
kops_channel="alpha",
runs_per_day=24,
extra_flags=["--master-count=3", "--zones=eu-west-1a,eu-west-1b,eu-west-1c"],
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-release",
k8s_version="latest",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-ci",
k8s_version="ci",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-conformance",
k8s_version="ci",
container_runtime="containerd",
distro="u2004arm64",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-amd64-conformance",
k8s_version="ci",
container_runtime="containerd",
distro='u2004',
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-updown",
k8s_version="stable",
container_runtime="containerd",
networking="calico",
distro='u2004',
kops_channel="alpha",
kops_version="https://storage.googleapis.com/kops-ci/bin/latest-ci.txt",
publish_version_marker="gs://kops-ci/bin/latest-ci-updown-green.txt",
runs_per_day=24,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'',
focus_regex=r'\[k8s.io\]\sNetworking.*\[Conformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-grid-scenario-cilium10-arm64",
cloud="aws",
networking="cilium",
distro="u2004arm64",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc1"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity|TCP.CLOSE_WAIT|external.IP.is.not.assigned.to.a.node|Simple.pod.should.handle.in-cluster.config', # pylint: disable=line-too-long
extra_dashboards=['kops-misc']),
build_test(name_override="kops-grid-scenario-cilium10-amd64",
cloud="aws",
networking="cilium",
distro="u2004",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc1"],
extra_dashboards=['kops-misc']),
]
return results
###############################
# kops-periodics-distros.yaml #
###############################
def generate_distros():
distros = ['debian9', 'debian10', 'ubuntu1804', 'ubuntu2004', 'centos7', 'centos8',
'amazonlinux2', 'rhel7', 'rhel8', 'flatcar']
results = []
for distro in distros:
distro_short = distro.replace('ubuntu', 'u').replace('debian', 'deb').replace('amazonlinux', 'amzn') # pylint: disable=line-too-long
results.append(
build_test(distro=distro_short,
networking='calico',
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-distro-image{distro}",
extra_dashboards=['kops-distros'],
runs_per_day=3,
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
)
)
# pprint.pprint(results)
return results
#######################################
# kops-periodics-network-plugins.yaml #
#######################################
def generate_network_plugins():
plugins = ['amazon-vpc', 'calico', 'canal', 'cilium', 'flannel', 'kopeio', 'kuberouter', 'weave'] # pylint: disable=line-too-long
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler'# pylint: disable=line-too-long
for plugin in plugins:
networking_arg = plugin
skip_regex = skip_base
if plugin == 'amazon-vpc':
networking_arg = 'amazonvpc'
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints'
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
results.append(
build_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-cni-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
runs_per_day=3,
skip_override=skip_regex
)
)
return results
################################
# kops-periodics-versions.yaml #
################################
def generate_versions():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
results = [
build_test(
container_runtime='containerd',
k8s_version='ci',
kops_channel='alpha',
name_override='kops-aws-k8s-latest',
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
# This version marker is only used by the k/k presubmit job
publish_version_marker='gs://kops-ci/bin/latest-ci-green.txt',
skip_override=skip_regex
)
]
for version in ['1.20', '1.19', '1.18', '1.17', '1.16', '1.15']:
distro = 'deb9' if version in ['1.17', '1.16', '1.15'] else 'u2004'
if version == '1.15':
skip_regex += r'|Services.*rejected.*endpoints'
results.append(
build_test(
container_runtime='containerd',
distro=distro,
k8s_version=version,
kops_channel='alpha',
name_override=f"kops-aws-k8s-{version.replace('.', '-')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=8,
skip_override=skip_regex
)
)
return results
######################
# kops-pipeline.yaml #
######################
def generate_pipeline():
results = []
focus_regex = r'\[k8s.io\]\sNetworking.*\[Conformance\]'
for version in ['master', '1.20', '1.19']:
branch = version if version == 'master' else f"release-{version}"
publish_version_marker = f"gs://kops-ci/markers/{branch}/latest-ci-updown-green.txt"
kops_version = f"https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{branch}/latest-ci.txt" # pylint: disable=line-too-long
results.append(
build_test(
container_runtime='containerd',
k8s_version=version.replace('master', 'latest'),
kops_version=kops_version,
kops_channel='alpha',
name_override=f"kops-pipeline-updown-kops{version.replace('.', '')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
skip_override=r'\[Slow\]|\[Serial\]',
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
)
)
return results
########################################
# kops-presubmits-network-plugins.yaml #
########################################
def generate_presubmits_network_plugins():
plugins = {
'amazonvpc': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.amazon-vpc-routed-eni\/|pkg\/model\/(firewall|components\/kubeproxy|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|kubelet).go|upup\/pkg\/fi\/cloudup\/defaults.go)', # pylint: disable=line-too-long
'calico': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\/|pkg\/model\/(firewall.go|pki.go|iam\/iam_builder.go)|nodeup\/pkg\/model\/networking\/calico.go)', # pylint: disable=line-too-long
'canal': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\.canal\/|nodeup\/pkg\/model\/networking\/(flannel|canal).go)', # pylint: disable=line-too-long
'cilium': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.cilium\.io\/|pkg\/model\/(firewall|components\/cilium|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|networking\/cilium).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'flannel': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.flannel\/|nodeup\/pkg\/model\/(sysctls|networking\/flannel).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'kuberouter': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.kuberouter\/|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'weave': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.weave\/|upup\/pkg\/fi\/cloudup\/template_functions.go)' # pylint: disable=line-too-long
}
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
for plugin, run_if_changed in plugins.items():
networking_arg = plugin
skip_regex = skip_base
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints|external.IP.is.not.assigned.to.a.node|hostPort.but.different.hostIP|same.port.number.but.different.protocols' # pylint: disable=line-too-long
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
if plugin in ['canal', 'flannel']:
skip_regex += r'|up\sand\sdown|headless|service-proxy-name'
results.append(
presubmit_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name=f"pull-kops-e2e-cni-{plugin}",
tab_name=f"e2e-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
skip_override=skip_regex,
run_if_changed=run_if_changed,
skip_report=False,
always_run=False,
)
)
return results
############################
# kops-presubmits-e2e.yaml #
############################
def generate_presubmits_e2e():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
return [
presubmit_test(
container_runtime='docker',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-kubernetes-aws',
tab_name='e2e-docker',
always_run=True,
skip_override=skip_regex,
),
presubmit_test(
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-containerd',
networking='calico',
tab_name='e2e-containerd',
always_run=True,
skip_override=skip_regex,
),
presubmit_test(
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-containerd-ha',
networking='calico',
extra_flags=["--master-count=3", "--zones=eu-central-1a,eu-central-1b,eu-central-1c"],
tab_name='e2e-containerd-ha',
always_run=False,
skip_override=skip_regex+'|Multi-AZ',
),
presubmit_test(
distro="u2010",
networking='calico',
container_runtime='crio',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-crio',
tab_name='e2e-crio',
always_run=False,
skip_override=skip_regex,
),
presubmit_test(
cloud='gce',
container_runtime='containerd',
k8s_version='1.21',
kops_channel='alpha',
name='pull-kops-e2e-k8s-gce',
networking='cilium',
tab_name='e2e-gce',
always_run=False,
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Firewall|Dashboard|RuntimeClass|RuntimeHandler|kube-dns|run.a.Pod.requesting.a.RuntimeClass|should.set.TCP.CLOSE_WAIT|Services.*rejected.*endpoints', # pylint: disable=line-too-long
feature_flags=['GoogleCloudBucketACL'],
),
]
########################
# YAML File Generation #
########################
periodics_files = {
'kops-periodics-distros.yaml': generate_distros,
'kops-periodics-grid.yaml': generate_grid,
'kops-periodics-misc2.yaml': generate_misc,
'kops-periodics-network-plugins.yaml': generate_network_plugins,
'kops-periodics-versions.yaml': generate_versions,
'kops-periodics-pipeline.yaml': generate_pipeline,
}
presubmits_files = {
'kops-presubmits-network-plugins.yaml': generate_presubmits_network_plugins,
'kops-presubmits-e2e.yaml': generate_presubmits_e2e,
}
def main():
for filename, generate_func in periodics_files.items():
print(f"Generating {filename}")
output = []
runs_per_week = 0
job_count = 0
for res in generate_func():
output.append(res[0])
runs_per_week += res[1]
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs, total of {runs_per_week} runs per week\n")
output.insert(2, "periodics:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
for filename, generate_func in presubmits_files.items():
print(f"Generating {filename}")
output = []
job_count = 0
for res in generate_func():
output.append(res)
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs\n")
output.insert(2, "presubmits:\n")
output.insert(3, " kubernetes/kops:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
if __name__ == "__main__":
main()
| apache-2.0 | -7,853,174,255,853,810,000 | 38.754237 | 351 | 0.55224 | false |
mocnik-science/osm-python-tools | OSMPythonTools/internal/cacheObject.py | 1 | 4281 | import hashlib
import os
import time
import ujson
import urllib.request
import OSMPythonTools
class CacheObject:
def __init__(self, prefix, endpoint, cacheDir='cache', waitBetweenQueries=None, jsonResult=True):
self._prefix = prefix
self._endpoint = endpoint
self.__cacheDir = cacheDir
self.__waitBetweenQueries = waitBetweenQueries
self.__lastQuery = None
self.__jsonResult = jsonResult
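    # Results are cached on disk as '<cacheDir>/<prefix>-<sha1(query)>' JSON files;
    # query() serves repeated requests from these files and enforces a pause of
    # waitBetweenQueries seconds between consecutive real downloads.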
def query(self, *args, onlyCached=False, shallow=False, **kwargs):
queryString, hashString, params = self._queryString(*args, **kwargs)
filename = self.__cacheDir + '/' + self._prefix + '-' + self.__hash(hashString + ('????' + urllib.parse.urlencode(sorted(params.items())) if params else ''))
if not os.path.exists(self.__cacheDir):
os.makedirs(self.__cacheDir)
if os.path.exists(filename):
with open(filename, 'r') as file:
data = ujson.load(file)
elif onlyCached:
OSMPythonTools.logger.error('[' + self._prefix + '] data not cached: ' + queryString)
return None
elif shallow:
data = shallow
else:
OSMPythonTools.logger.warning('[' + self._prefix + '] downloading data: ' + queryString)
if self._waitForReady() == None:
if self.__lastQuery and self.__waitBetweenQueries and time.time() - self.__lastQuery < self.__waitBetweenQueries:
time.sleep(self.__waitBetweenQueries - time.time() + self.__lastQuery)
self.__lastQuery = time.time()
data = self.__query(queryString, params)
with open(filename, 'w') as file:
ujson.dump(data, file)
result = self._rawToResult(data, queryString, params, shallow=shallow)
if not self._isValid(result):
msg = '[' + self._prefix + '] error in result (' + filename + '): ' + queryString
OSMPythonTools.logger.exception(msg)
raise(Exception(msg))
return result
def deleteQueryFromCache(self, *args, **kwargs):
queryString, hashString, params = self._queryString(*args, **kwargs)
filename = self.__cacheDir + '/' + self._prefix + '-' + self.__hash(hashString + ('????' + urllib.parse.urlencode(sorted(params.items())) if params else ''))
if os.path.exists(filename):
OSMPythonTools.logger.info('[' + self._prefix + '] removing cached data: ' + queryString)
os.remove(filename)
def _queryString(self, *args, **kwargs):
raise(NotImplementedError('Subclass should implement _queryString'))
def _queryRequest(self, endpoint, queryString, params={}):
raise(NotImplementedError('Subclass should implement _queryRequest'))
def _rawToResult(self, data, queryString, params, shallow=False):
raise(NotImplementedError('Subclass should implement _rawToResult'))
def _isValid(self, result):
return True
def _waitForReady(self):
return None
def _userAgent(self):
return '%s/%s (%s)' % (OSMPythonTools.pkgName, OSMPythonTools.pkgVersion, OSMPythonTools.pkgUrl)
def __hash(self, x):
h = hashlib.sha1()
h.update(x.encode('utf-8'))
return h.hexdigest()
def __query(self, requestString, params):
request = self._queryRequest(self._endpoint, requestString, params=params)
if not isinstance(request, urllib.request.Request):
request = urllib.request.Request(request)
request.headers['User-Agent'] = self._userAgent()
try:
response = urllib.request.urlopen(request)
except urllib.request.HTTPError as err:
msg = 'The requested data could not be downloaded. ' + str(err)
OSMPythonTools.logger.exception(msg)
raise Exception(msg)
except:
msg = 'The requested data could not be downloaded. Please check whether your internet connection is working.'
OSMPythonTools.logger.exception(msg)
raise Exception(msg)
encoding = response.info().get_content_charset('utf-8')
r = response.read().decode(encoding)
return ujson.loads(r) if self.__jsonResult else r
| gpl-3.0 | -6,900,467,216,144,160,000 | 44.542553 | 165 | 0.61551 | false |
blechta/dolfin-tape | dolfintape/hat_function.py | 1 | 4970 | # Copyright (C) 2015 Jan Blechta
#
# This file is part of dolfin-tape.
#
# dolfin-tape is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dolfin-tape is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with dolfin-tape. If not, see <http://www.gnu.org/licenses/>.
from dolfin import Expression, cpp, FiniteElement, jit, \
vertices, facets, Vertex, not_working_in_parallel
__all__ = ['hat_function_collection', 'hat_function', 'hat_function_grad']
def hat_function_collection(vertex_colors, color, element=None):
"""Return Expression on given element which takes values:
1 ... if vertex_colors[node] == color
0 ... at other nodes
    This is well defined only on the Lagrange 1 element (default) and the
    Discontinuous Lagrange 1 element.
    NOTE: This expression provides a little hack as it lacks continuity across
    MPI partition boundaries unless vertex_colors is compatible there. In fact,
    this behaviour is needed in FluxReconstructor."""
assert isinstance(vertex_colors, cpp.VertexFunctionSizet)
mesh = vertex_colors.mesh()
if not element:
element = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
assert element.family() in ['Lagrange', 'Discontinuous Lagrange']
assert element.degree() == 1
ufc_element, ufc_dofmap = jit(element, mpi_comm=mesh.mpi_comm())
dolfin_element = cpp.FiniteElement(ufc_element)
e = Expression(hats_cpp_code, element=element, domain=mesh)
e.vertex_colors = vertex_colors
e.color = color
e.dolfin_element = dolfin_element
return e
hats_cpp_code="""
class HatFunctionCollection : public Expression
{
public:
std::shared_ptr<VertexFunction<std::size_t> > vertex_colors;
std::size_t color;
std::shared_ptr<FiniteElement> dolfin_element;
HatFunctionCollection() : Expression() { }
void restrict(double* w, const FiniteElement& element,
const Cell& cell,
const double* vertex_coordinates,
const ufc::cell& ufc_cell) const
{
if ( cell.mesh_id() == vertex_colors->mesh()->id()
&& element.hash() == dolfin_element->hash() )
for (VertexIterator v(cell); !v.end(); ++v)
*(w++) = (*vertex_colors)[v->index()] == color ? 1.0 : 0.0;
else
restrict_as_ufc_function(w, element, cell, vertex_coordinates, ufc_cell);
}
};
"""
def hat_function(vertex):
"""Return Expression on Lagrange degree 1 element which is
1 ... at given 'vertex'
0 ... at other vertices
"""
assert isinstance(vertex, Vertex)
mesh = vertex.mesh()
element = FiniteElement('Lagrange', mesh.ufl_cell(), 1)
ufc_element, ufc_dofmap = jit(element, mpi_comm=mesh.mpi_comm())
ufc_element = make_ufc_finite_element(ufc_element)
dolfin_element = cpp.FiniteElement(ufc_element)
e = Expression(hat_cpp_code, element=element, domain=mesh)
e.vertex = vertex
e.dolfin_element = dolfin_element
return e
hat_cpp_code="""
#include <dolfin/mesh/Vertex.h>
namespace dolfin {
class HatFunction : public Expression
{
public:
MeshEntity vertex;
std::shared_ptr<FiniteElement> dolfin_element;
HatFunction() : Expression() { }
void restrict(double* w, const FiniteElement& element,
const Cell& cell,
const double* vertex_coordinates,
const ufc::cell& ufc_cell) const
{
if ( cell.mesh_id() == vertex.mesh_id()
&& element.hash() == dolfin_element->hash() )
for (VertexIterator v(cell); !v.end(); ++v)
*(w++) = *v == vertex ? 1.0 : 0.0;
else
restrict_as_ufc_function(w, element, cell, vertex_coordinates, ufc_cell);
}
};
}
"""
def hat_function_grad(vertex, cell):
    """Compute the L^\infty-norm of the gradient of the hat function which is
    supported on 'cell' and takes value 1 at 'vertex'."""
# TODO: fix using ghosted mesh
not_working_in_parallel("function 'hat_function_grad'")
assert vertex in vertices(cell), "vertex not in cell!"
# Find adjacent facet
f = [f for f in facets(cell) if not vertex in vertices(f)]
assert len(f) == 1, "Something strange with adjacent cell!"
f = f[0]
# Get unit normal
n = f.normal()
n /= n.norm()
# Pick some vertex on facet
# FIXME: Is it correct index in parallel?
facet_vertex_0 = Vertex(cell.mesh(), f.entities(0)[0])
# Compute signed distance from vertex to facet plane
d = (facet_vertex_0.point() - vertex.point()).dot(n)
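    # The hat function is affine on the cell (1 at 'vertex', 0 on the opposite
    # facet), so its gradient is constant with magnitude 1/|d|.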
# Return norm of gradient
assert d != 0.0, "Degenerate cell!"
return 1.0/abs(d)
| gpl-3.0 | -4,066,114,395,403,508,000 | 30.858974 | 80 | 0.663179 | false |
Acehaidrey/incubator-airflow | tests/cluster_policies/__init__.py | 1 | 2187 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, List
from airflow.configuration import conf
from airflow.exceptions import AirflowClusterPolicyViolation
from airflow.models.baseoperator import BaseOperator
# [START example_cluster_policy_rule]
def task_must_have_owners(task: BaseOperator):
if not task.owner or task.owner.lower() == conf.get('operators', 'default_owner'):
raise AirflowClusterPolicyViolation(
f'''Task must have non-None non-default owner. Current value: {task.owner}'''
)
# [END example_cluster_policy_rule]
# [START example_list_of_cluster_policy_rules]
TASK_RULES: List[Callable[[BaseOperator], None]] = [
task_must_have_owners,
]
def _check_task_rules(current_task: BaseOperator):
"""Check task rules for given task."""
notices = []
for rule in TASK_RULES:
try:
rule(current_task)
except AirflowClusterPolicyViolation as ex:
notices.append(str(ex))
if notices:
notices_list = " * " + "\n * ".join(notices)
raise AirflowClusterPolicyViolation(
f"DAG policy violation (DAG ID: {current_task.dag_id}, Path: {current_task.dag.filepath}):\n"
f"Notices:\n"
f"{notices_list}"
)
def cluster_policy(task: BaseOperator):
"""Ensure Tasks have non-default owners."""
_check_task_rules(task)
# [END example_list_of_cluster_policy_rules]
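# In a deployment such a callable is typically exposed from airflow_local_settings.py
# (e.g. as task_policy) so the scheduler applies it to every task; here it is only
# imported by the test suite.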
| apache-2.0 | 1,157,787,360,719,644,000 | 33.171875 | 105 | 0.703704 | false |
vondrejc/FFTHomPy | tutorials/03_exact_integration_simple.py | 1 | 4521 | from __future__ import division, print_function
print("""
Numerical homogenisation based on exact integration, which is described in
J. Vondrejc, Improved guaranteed computable bounds on homogenized properties
of periodic media by the Fourier-Galerkin method with exact integration,
Int. J. Numer. Methods Eng., 2016.
This is a self-contained tutorial implementing scalar problem in dim=2 or dim=3
on a unit periodic cell Y=(-0.5,0.5)**dim
with a square (2D) or cube (3D) inclusion of size 0.6 (side).
The material is identity I in matrix phase and 11*I in inclusion phase.
""")
import numpy as np
import itertools
from scipy.sparse.linalg import cg, LinearOperator
dim = 3 # number of spatial dimensions
N = dim*(5,) # number of discretization points
dN = tuple(2*np.array(N)-1) # double grid value
vec_shape=(dim,)+dN
# indicator function indicating the phase per grid point (square inclusion)
P = dim*(5,) # material resolution in each spatial dimension
phi = np.zeros(P, dtype='float')
if dim==2:
phi[1:4, 1:4] = 1
elif dim==3:
phi[1:4, 1:4, 1:4] = 1
# material coefficients at grid points
C = np.einsum('ij,...->ij...', 11*np.eye(dim), phi)
C += np.einsum('ij,...->ij...', 1*np.eye(dim), 1-phi)
# tensor products / (inverse) Fourier transform / frequencies
dot = lambda A, B: np.einsum('ij...,j...->i...', A, B)
fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N))/np.prod(np.array(N))
ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N))*np.prod(np.array(N))
freq_fun = lambda N: np.arange(np.fix(-N/2.), np.fix(N/2.+0.5))
freq = [freq_fun(n) for n in dN]
def get_weights(h): # calculation of integral weights of rectangular function
Wphi = np.zeros(dN) # integral weights
for ind in itertools.product(*[range(n) for n in dN]):
Wphi[ind] = np.prod(h)
for ii in range(dim):
Wphi[ind] *= np.sinc(h[ii]*freq[ii][ind[ii]])
return Wphi
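# Note: get_weights returns the Fourier coefficients of a centred box with sides h on
# the unit cell; each 1-D factor h_i*sinc(h_i*xi_i) is the transform of a box function.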
def decrease(val, dN): # auxiliary function to remove unnecesary Fourier freq.
dN=np.array(dN)
N=np.array(val.shape[-dN.size:])
ibeg = np.array(np.fix((N-dN+(dN % 2))/2), dtype=np.int)
iend = np.array(np.fix((N+dN+(dN % 2))/2), dtype=np.int)
if dN.size==2:
return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1]]
elif dN.size==3:
return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1],ibeg[2]:iend[2]]
## GRID-BASED COMPOSITE ######### evaluate the matrix of Galerkin approximation
hC0 = np.prod(np.array(P))*fft(C, P)
if P == dN:
hCex = hC0
elif P > dN:
hCex = decrease(hC0, dN)
elif P < dN:
factor = np.max(np.ceil(np.array(dN) / np.array(P)))
hCper = np.tile(hC0, int(2*factor-1)*np.ones(dim, dtype=np.int))
hCex = decrease(hCper, dN)
Cex = ifft(np.einsum('ij...,...->ij...', hCex, get_weights(1./np.array(P))), dN).real
## INCLUSION-BASED COMPOSITE #### another expression of Cex
Wraw = get_weights(0.6*np.ones(dim))
"""HINT: the size 0.6 corresponds to the size of the square inclusion; it is exactly
the size of the topology generated by phi, i.e. 3x3 pixels in a 5x5 image of the PUC
with PUC size 1; hence 0.6 = 3./5.
"""
char_square = ifft(Wraw, dN).real
Cex2 = np.einsum('ij...,...->ij...', 11*np.eye(dim), char_square)
Cex2 += np.einsum('ij...,...->ij...', 1*np.eye(dim), 1.-char_square)
## checking that the Cex2 is the same
print('zero check:', np.linalg.norm(Cex-Cex2))
Gamma = np.zeros((dim,dim)+ tuple(dN)) # zero initialize
for i,j in itertools.product(range(dim),repeat=2):
for ind in itertools.product(*[range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)]):
q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector
if not q.dot(q) == 0: # zero freq. -> mean
Gamma[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q))
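# Gamma collects the Fourier symbols Gamma_ij(q) = -q_i*q_j/|q|^2 (zero at the mean
# frequency), i.e. (minus) the projection onto zero-mean gradient fields; the operator
# G below applies it as F^-1 Gamma F in real space.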
# - convert to operators
G = lambda X: np.real(ifft(dot(Gamma, fft(X, dN)), dN)).reshape(-1)
A = lambda x: dot(Cex, x.reshape(vec_shape))
GA = lambda x: G(A(x))
# initiate strain/stress (2nd order tensor for each grid point)
X = np.zeros(vec_shape, dtype=np.float)
x = X.reshape(-1)
# macroscopic value
E = np.zeros_like(X); E[0] = 1.
b = -GA(E.reshape(-1))
# iterative solution of the linear system
Alinoper = LinearOperator(shape=(x.size, x.size), matvec=GA, dtype=np.float)
x, info = cg(A=Alinoper, b=b, x0=X.reshape(-1)) # conjugate gradients
state = x.reshape(vec_shape) + E
flux = dot(Cex, state)
AH_11 = np.sum(flux*state)/np.prod(np.array(dN)) # homogenised properties
print('homogenised coefficient (component 11) =', AH_11)
print('END')
| mit | -5,727,445,794,444,432,000 | 38.657895 | 110 | 0.652068 | false |
srio/shadow3-scripts | script1_ID26.py | 1 | 12878 |
import numpy
from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_power_density, xoppy_calc_undulator_spectrum
from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc
from orangecontrib.xoppy.util.fit_gaussian2d import fit_gaussian2d, info_params, twoD_Gaussian
from srxraylib.plot.gol import plot, plot_image
import scipy.constants as codata
def calculate_line(photon_energy,undulator_period,N,K,thickness_diamond_mm,distance,slit_h,slit_v,coating,incident_angle_mrad,
do_plot=False):
print("######################### INPUTS ###################################")
print("photon_energy=",photon_energy)
print("undulator_period=",undulator_period)
print("N=",N)
print("K=",K)
print("thickness_diamond_mm=",thickness_diamond_mm)
print("distance=",distance)
print("slit_h=",slit_h)
print("coating=",coating)
print("incident_angle_mrad=",incident_angle_mrad)
print("#######################################################################")
out_dictionary = {}
#
# Spectrum simulation
#
#ULATTICEFILE S28D.mat
#UEPSILONX 1.3166e-10
#UEPSILONY 5e-12
#BETAX = 6.89997
#BETAY = 2.6447
SIGMAX = 30.1836 * 1e-6
SIGMAY = 3.63641 * 1e-6
SIGMAXP = 4.36821 * 1e-6
SIGMAYP = 1.37498 * 1e-6
METHOD = 2 # US=0 URGENT=1 SRW=2
print("\n\n Computing spectrum \n\n")
e, f, spectral_power, cumulated_power = \
xoppy_calc_undulator_spectrum(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,\
ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,\
ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,\
PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=slit_h,GAPV=slit_v,\
PHOTONENERGYMIN=1000.0,PHOTONENERGYMAX=100000.0,PHOTONENERGYPOINTS=5000,METHOD=2,
USEEMITTANCES=1)
power_in_spectrum = f.sum()*1e3*codata.e*(e[1]-e[0])
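    # f is the spectral flux in photons/s/0.1%bw, so f*1e3*codata.e is the spectral
    # power density in W/eV; multiplying by the energy step and summing gives watts.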
out_dictionary["power_in_spectrum"] = power_in_spectrum
if do_plot:
plot(e,spectral_power,title="E = %d keV"%photon_energy)
#
# optical system
#
# """
# Apply reflectivities/transmittivities of optical elements on a source spectrum
#
# :param energies: the array with photon energies in eV
# :param source: the spectral intensity or spectral power
# :param substance: a list with descriptors of each optical element material
# :param flags: a list with 0 (filter or attenuator) or 1 (mirror) for all optical elements
# :param dens: a list with densities of o.e. materials. "?" is accepted for looking in the database
# :param thick: a list with the thickness in mm for all o.e.'s. Only applicable for filters
# :param angle: a list with the grazing angles in mrad for all o.e.'s. Only applicable for mirrors
# :param roughness:a list with the roughness RMS in A for all o.e.'s. Only applicable for mirrors
# :param output_file: name of the output file (default=None, no output file)
# :return: a dictionary with the results
# """
optical_system_dictionary = xpower_calc(energies=e,source=spectral_power,
substance=["C",coating,coating],flags=[0,1,1],dens=[3.53,2.33,2.33],
thick=[thickness_diamond_mm,1,1],
angle=[0,incident_angle_mrad,incident_angle_mrad],roughness=[0,0,0],
output_file=None)
for key in optical_system_dictionary.keys():
print(key)
print(optical_system_dictionary["info"])
for i,ilabel in enumerate(optical_system_dictionary["labels"]):
print(i,ilabel)
# 0 Photon Energy [eV]
# 1 Source
# 2 [oe 1] Total CS cm2/g
# 3 [oe 1] Mu cm^-1
# 4 [oe 1] Transmitivity
# 5 [oe 1] Absorption
# 6 Intensity after oe #1
# 7 [oe 2] 1-Re[n]=delta
# 8 [oe 2] Im[n]=beta
# 9 [oe 2] delta/beta
# 10 [oe 2] Reflectivity-s
# 11 [oe 2] Transmitivity
# 12 Intensity after oe #2
# 13 [oe 3] 1-Re[n]=delta
# 14 [oe 3] Im[n]=beta
# 15 [oe 3] delta/beta
# 16 [oe 3] Reflectivity-s
# 17 [oe 3] Transmitivity
# 18 Intensity after oe #3
print(optical_system_dictionary["data"].shape)
# I would be interested in:
#
# - Total Power [W] emitted in the slit aperture: power_in_spectrum
#
    # - Absorbed Power [W] by Diamond Window: integral of col1-col6
    #
    # - Absorbed Power [W] for 1st and 2nd mirrors: integral of col6-col12 and integral of col12-col18
#
# - Fitted parameters from the power density distribution calculated in a 5*5 mm slit aperture:
#
# - Maximum value [W/mm2]
#
    # - Gaussian Fit parameters for both axes: FWHM [mm]
I0 = numpy.trapz( optical_system_dictionary["data"][1,:], x=e, axis=-1)
I1 = numpy.trapz( optical_system_dictionary["data"][6,:], x=e, axis=-1)
I2 = numpy.trapz( optical_system_dictionary["data"][12,:], x=e, axis=-1)
I3 = numpy.trapz( optical_system_dictionary["data"][18,:], x=e, axis=-1)
print("Source power: ",I0)
print(" after diamond: ",I1)
print(" after M1: ",I2)
print(" after M2: ",I3)
out_dictionary["diamond_absorbed"] = I0-I1
out_dictionary["m1_absorbed"] = I1-I2
out_dictionary["m2_absorbed"] = I2-I3
#
# power density
#
h, v, p, code = xoppy_calc_undulator_power_density(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,\
ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,\
ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,\
PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=5e-3,GAPV=5e-3,\
HSLITPOINTS=101,VSLITPOINTS=101,METHOD=2,USEEMITTANCES=1)
if do_plot:
plot_image(p,h,v,title="power density E = %d keV"%photon_energy)
#
# fit power density
#
print("============= Fitting power density to a 2D Gaussian. ==============\n")
print("Please use these results with care: check if the original data looks like a Gaussian.")
fit_parameters = fit_gaussian2d(p,h,v)
print(info_params(fit_parameters))
H,V = numpy.meshgrid(h,v)
data_fitted = twoD_Gaussian( (H,V), *fit_parameters)
power_in_spectrum = p.sum()*(h[1]-h[0])*(v[1]-v[0])
print(" Total power in the calculated data [W]: ",power_in_spectrum)
power_in_spectrum_fit = data_fitted.sum()*(h[1]-h[0])*(v[1]-v[0])
print(" Total power in the fitted data [W]: ",power_in_spectrum_fit)
# plot_image(data_fitted.reshape((h.size,v.size)),h, v,title="FIT")
print("====================================================\n")
if do_plot:
data_fitted.shape = (h.size,v.size)
plot_image(data_fitted,h,v,title="FITTED power density E = %d keV"%photon_energy)
out_dictionary["fit_parameters"] = fit_parameters
out_dictionary["fit_percent_difference"] = 100 * (power_in_spectrum_fit - power_in_spectrum) / power_in_spectrum
return out_dictionary
if __name__ == "__main__":
Energy_keV = [ 2.043 , 2.44 , 2.44 , 4 , 4 , 5.9 , 5.9 , 5.9 , 10 , 10 , 15 , 24 , 24 , 30 ]
#Undulator = [ U35 , U35 , U35 , U35 , U35 , U35 ,U35 , U35, U35 , U35, U35, U35, U35, U35 ]
lambda0_cm = [ 3.5 , 3.5 , 3.5 , 3.5 , 3.5 , 3.5 ,3.5 , 3.5, 3.5 , 3.5, 3.5, 3.5, 3.5, 3.5 ]
N = [ 47 , 47 , 47 , 47 , 47 , 47 ,47 , 47, 47 , 47, 47, 47, 47, 47 ]
K = [ 2.75 , 2.45 , 2.45 , 1.67 , 1.67 , 1.12 ,1.12 , 1.12 , 1.94 , 1.94 , 1.36 , 1.41 , 1.41 , 1.09 ]
Diamond_window_thickness_mm = [ 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0 , 0.0, 0.0 , 0.0 ]
Distance_from_source_m = [ 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2 , 29.2, 29.2 , 29.2, 29.2, 29.2, 29.2, 29.2 ]
H_mm = [ 2.8 , 2.8 , 2.2 , 2.2 , 1.4 , 2.00 , 1.4 , 1.00, 1.00 , 1.00, 1.00, 1.00, 1.00, 1.00 ]
V_mm = [ 0.875 , 0.801 , 0.801 , 0.628 , 0.628 , 0.515 , 0.515 , 0.515, 0.403 , 0.403, 0.333, 0.268, 0.268, 0.243 ]
Coating = [ "Si" , "Cr" , "Si" , "Cr" , "Si" , "Cr" , "Cr" , "Si" , "Si" , "Pd" , "Pd" , "Pt", "Pd" , "Pt"]
Incident_angle_mrad = [ 7 , 7 , 5.5 , 5.5 , 3.5 , 5 , 3.5 , 2.5, 2.5 , 2.5, 2.5, 2.5, 2.5, 2.5 ]
#
# calculation loop
#
out_dictionaries = []
for i,photon_energy in enumerate(Energy_keV):
out_dictionary = calculate_line(photon_energy,1e-2*lambda0_cm[i],N[i],K[i],Diamond_window_thickness_mm[i],
Distance_from_source_m[i],1e-3*H_mm[i],1e-3*V_mm[i],Coating[i],Incident_angle_mrad[i],
do_plot=False)
out_dictionaries.append(out_dictionary)
#
# prepare text output
#
text_output = ""
titles = ["energy_kev","power_in_spectrum","diamond_absorbed","m1_absorbed","m2_absorbed"]
text_output += (" %20s %20s %20s %20s %20s \n")%(tuple(titles))
for i in range(len(out_dictionaries)):
text_output += ("%20d %20.3f %20.3f %20.3f %20.3f \n")%( Energy_keV[i],
out_dictionaries[i]["power_in_spectrum"],
out_dictionaries[i]["diamond_absorbed"],
out_dictionaries[i]["m1_absorbed"],
out_dictionaries[i]["m2_absorbed"])
text_fit = ""
titles_fit = ["energy_kev","Height A: ","center x0:","center y0","sigmax","sigmay","angle","offset","fit difference"]
text_fit += ("%20s %20s %20s %20s %20s %20s %20s %20s %20s\n")%(tuple(titles_fit))
for i in range(len(out_dictionaries)):
text_fit += ("%20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f \n")%(
Energy_keV[i],
out_dictionaries[i]["fit_parameters"][0],
out_dictionaries[i]["fit_parameters"][1],
out_dictionaries[i]["fit_parameters"][2],
out_dictionaries[i]["fit_parameters"][3],
out_dictionaries[i]["fit_parameters"][4],
out_dictionaries[i]["fit_parameters"][5],
out_dictionaries[i]["fit_parameters"][6],
out_dictionaries[i]["fit_percent_difference"])
text_full = ""
titles = ["energy_kev","power_in_spectrum","diamond_absorbed","m1_absorbed","m2_absorbed","Height A: ","sigmax","sigmay","offset","fit difference"]
text_full += (" %20s %20s %20s %20s %20s %20s %20s %20s %20s %20s \n")%(tuple(titles))
for i in range(len(out_dictionaries)):
text_full += ("%20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f \n")%( Energy_keV[i],
out_dictionaries[i]["power_in_spectrum"],
out_dictionaries[i]["diamond_absorbed"],
out_dictionaries[i]["m1_absorbed"],
out_dictionaries[i]["m2_absorbed"],
out_dictionaries[i]["fit_parameters"][0],
out_dictionaries[i]["fit_parameters"][3],
out_dictionaries[i]["fit_parameters"][4],
out_dictionaries[i]["fit_parameters"][6],
out_dictionaries[i]["fit_percent_difference"]
)
print(text_output)
print(text_fit)
print(text_full)
#
# dump to file
#
f = open("script1_ID26_v1.txt",'w')
#f.write(text_output)
#f.write("\n\n\n")
#f.write(text_fit)
#f.write("\n\n\n")
f.write(text_full)
f.close()
print("File written to disk: script1_ID26_v1.txt")
| mit | -5,591,580,236,130,774,000 | 44.34507 | 178 | 0.516773 | false |
LxiaoGirl/sqlmapTamper | unicodetobypasswaf.py | 1 | 1384 | #!/usr/bin/env python
"""
Copyright (c) 2015 @xiaoL (http://xlixli.net/)
"""
import os
import string
from lib.core.enums import PRIORITY
from lib.core.common import singleTimeWarnMessage
__priority__ = PRIORITY.LOWEST
def dependencies():
singleTimeWarnMessage("tamper script '%s' is only meant to be run against WAF on IIS")
def tamper(payload, **kwargs):
"""
IIS Unicode-url-encodes
WideChar To MultiByte bypass weak web application firewalls
Requirement:
* IIS
Tested against:
* WAF
Reference:
* http://blog.sina.com.cn/s/blog_85e506df0102vo9s.html
Notes:
* Useful to bypass weak web application firewalls
tamper('SELECT FIELD%20FROM TABLE')
'S%u00F0L%u00F0C%u00DE FI%u00F0L%u00D0%20FR%u00BAM %u00DE%u00AABL%u00F0'
"""
change_char = {'1': 'B9', '2': 'B2', '3': 'B3', 'D': 'D0',
'T': 'DE', 'Y': 'DD', 'a': 'AA', 'e': 'F0',
'o': 'BA', 't': 'FE', 'y': 'FD', '|': 'A6',
'd': 'D0', 'A': 'AA', 'E': 'F0', 'O': 'BA'}
ret_val = payload
if payload:
ret_val = ""
i = 0
while i < len(payload):
if payload[i] in change_char.keys():
ret_val += "%%u00%s" % change_char.get(payload[i])
else:
ret_val += payload[i]
i += 1
return ret_val
| gpl-2.0 | 6,264,521,854,367,625,000 | 23.280702 | 90 | 0.537572 | false |
kebarr/Geist | geist/backends/windows.py | 1 | 14357 | from __future__ import division, absolute_import, print_function
import numpy
import subprocess
import shlex
import ctypes
from ctypes import (
byref,
WINFUNCTYPE,
c_ubyte,
sizeof,
POINTER,
Structure,
)
from ctypes.wintypes import (
POINT,
RECT,
DWORD,
LPARAM,
HWND,
BOOL,
WCHAR,
LONG,
WORD
)
from geist.finders import Location, LocationList
from ._common import BackendActionBuilder
from . import logger
class _ActionsTransaction(object):
def __init__(self, backend):
self._actions_builder = BackendActionBuilder(backend)
def __enter__(self):
return self._actions_builder
def __exit__(self, *args):
self._actions_builder.execute()
return False
_USER32 = ctypes.windll.USER32
_GDI32 = ctypes.windll.GDI32
class _RGBQUAD(Structure):
_fields_ = [
('rgbBlue', c_ubyte),
('rgbGreen', c_ubyte),
('rgbRed', c_ubyte),
('rgbReserved', c_ubyte),
]
class _BITMAPINFOHEADER(Structure):
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPlanes', WORD),
('biBitCount', WORD),
('biCompression', WORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
class _BITMAPINFO(Structure):
_fields_ = [
('bmiHeader', _BITMAPINFOHEADER),
('bmiColors', (_RGBQUAD * 1))
]
_DIB_RGB_COLORS = 0
class GeistWindowsBackend(object):
SRCCOPY = 0xCC0020
SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN = 78, 79
BITSPIXEL = 12
def __init__(self, **kwargs):
self._mouse = _Mouse()
self._keyboard = _KeyBoard()
logger.info("Created GeistWindowsBackend")
def create_process(self, command):
dev_null = open('NUL', 'w')
process = subprocess.Popen(
shlex.split(command), stdout=dev_null, stderr=subprocess.STDOUT
)
return Process(process.pid)
def actions_transaction(self):
return _ActionsTransaction(self)
def capture_locations(self):
hwnd = _USER32.GetDesktopWindow()
width, height = (
_USER32.GetSystemMetrics(GeistWindowsBackend.SM_CXVIRTUALSCREEN),
_USER32.GetSystemMetrics(GeistWindowsBackend.SM_CYVIRTUALSCREEN)
)
desktop_dc = _USER32.GetWindowDC(hwnd)
capture_dc = _GDI32.CreateCompatibleDC(desktop_dc)
# Check that the screen has bit depth of 32
bits = _GDI32.GetDeviceCaps(desktop_dc, GeistWindowsBackend.BITSPIXEL)
if bits != 32:
raise NotImplementedError(
"Geist only supports displays with a bit depth of 32 (%d)"
% bits)
bmp = _GDI32.CreateCompatibleBitmap(desktop_dc, width, height)
_GDI32.SelectObject(capture_dc, bmp)
_GDI32.BitBlt(
capture_dc, 0, 0, width, height, desktop_dc, 0, 0,
GeistWindowsBackend.SRCCOPY
)
bmp_info = _BITMAPINFO()
bmp_info.bmiHeader.biSize = sizeof(bmp_info)
bmp_info.bmiHeader.biPlanes = 1
bmp_info.bmiHeader.biBitCount = 32
bmp_info.bmiHeader.biWidth = width
bmp_info.bmiHeader.biHeight = -height
memarray = numpy.ndarray((height, width), dtype='4B')
_GDI32.GetDIBits(
capture_dc,
bmp,
0,
height,
memarray.ctypes.data_as(POINTER(c_ubyte)),
byref(bmp_info),
_DIB_RGB_COLORS
)
_GDI32.DeleteObject(bmp)
_GDI32.DeleteDC(capture_dc)
_GDI32.DeleteDC(desktop_dc)
#strip alpha and reverse bgr to rgb
image = memarray[:, :, 2::-1]
return LocationList([Location(0, 0, width, height, image=image)])
def key_down(self, name):
self._keyboard.key_down(name)
def key_up(self, name):
self._keyboard.key_up(name)
def button_down(self, button_num):
self._mouse.button_down(button_num)
def button_up(self, button_num):
self._mouse.button_up(button_num)
def move(self, point):
self._mouse.move(point)
def cursor_position(self):
return self._mouse.cursor_position()
def close(self):
pass
def __del__(self):
self.close()
class _KeyBoard(object):
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_KEYDOWN = 0x0000
NAME_TO_VK_MAP = {
'page down': 0x22,
'page up': 0x21,
'end': 0x23,
'home': 0x24,
'shift': 0x10,
'menu': 0x12,
'control': 0x11,
'down': 0x28,
'up': 0x26,
'left': 0x25,
'right': 0x27,
'lshift': 0xA0,
'rshift': 0xA1,
'escape': 0x1B,
}
NAME_TO_CHAR_MAP = {
'return': '\r',
'space': ' ',
'tab': '\t',
'period': '.',
'minus': '-',
'colon': ':',
'backslash': '\\',
'underscore': '_',
'exclam': '!',
'fslash': '/',
'greaterthan':'>',
}
def _convert_keyname_to_virtual_key(self, name):
if name in _KeyBoard.NAME_TO_VK_MAP:
return _KeyBoard.NAME_TO_VK_MAP[name]
elif name in _KeyBoard.NAME_TO_CHAR_MAP:
char = _KeyBoard.NAME_TO_CHAR_MAP[name]
else:
char = name
assert len(char) == 1, "can not convert %r" % (char,)
return _USER32.VkKeyScanW(WCHAR(char)) & 0xFF
def _map_virtual_key(self, key):
return _USER32.MapVirtualKeyA(key & 0xff, 0)
def key_down(self, name):
vkey = self._convert_keyname_to_virtual_key(name)
scan = self._map_virtual_key(vkey)
_USER32.keybd_event(vkey, scan, _KeyBoard.KEYEVENTF_KEYDOWN, None)
def key_up(self, name):
vkey = self._convert_keyname_to_virtual_key(name)
scan = self._map_virtual_key(vkey)
_USER32.keybd_event(vkey, scan, _KeyBoard.KEYEVENTF_KEYUP, None)
class _Mouse(object):
MOUSEEVENTF_MOVE = 0x0001
MOUSEEVENTF_LEFTDOWN = 0x0002
MOUSEEVENTF_LEFTUP = 0x0004
MOUSEEVENTF_RIGHTDOWN = 0x0008
MOUSEEVENTF_RIGHTUP = 0x0010
MOUSEEVENTF_MIDDLEDOWN = 0x0020
MOUSEEVENTF_MIDDLEUP = 0x0040
MOUSEEVENTF_WHEEL = 0x0800
MOUSEEVENTF_VIRTUALDESK = 0x4000
MOUSEEVENTF_ABSOLUTE = 0x8000
SPI_SETMOUSE = 0x0004
SPI_SETMOUSESPEED = 0x0071
SM_CXSCREEN = 0
SM_CYSCREEN = 1
LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON = [1, 2, 3]
def _normalize_coords(self, point):
norm = 65535
x, y = point
w = _USER32.GetSystemMetrics(_Mouse.SM_CXSCREEN)
h = _USER32.GetSystemMetrics(_Mouse.SM_CYSCREEN)
return (int(x * (norm / w)), int(y * (norm/h)))
def move(self, point):
_USER32.SetCursorPos(*point)
def cursor_position(self):
point = POINT()
_USER32.GetCursorPos(byref(point))
return point.x, point.y
def scroll(lines):
_USER32.mouse_event(
_Mouse.MOUSEEVENTF_WHEEL,
0,
0,
int(120 * lines),
None
)
def button_down(self, button):
_USER32.mouse_event(self._map_button_down(button), 0, 0, 0, None)
def button_up(self, button):
_USER32.mouse_event(self._map_button_up(button), 0, 0, 0, None)
def _map_button_down(self, button):
assert button in [
_Mouse.LEFT_BUTTON,
_Mouse.MIDDLE_BUTTON,
_Mouse.RIGHT_BUTTON
]
return [
_Mouse.MOUSEEVENTF_LEFTDOWN,
_Mouse.MOUSEEVENTF_MIDDLEDOWN,
_Mouse.MOUSEEVENTF_RIGHTDOWN
][button - 1]
def _map_button_up(self, button):
assert button in [
_Mouse.LEFT_BUTTON,
_Mouse.MIDDLE_BUTTON,
_Mouse.RIGHT_BUTTON
]
return [
_Mouse.MOUSEEVENTF_LEFTUP,
_Mouse.MOUSEEVENTF_MIDDLEUP,
_Mouse.MOUSEEVENTF_RIGHTUP
][button - 1]
_EnumWindowsProc = WINFUNCTYPE(BOOL, HWND, LPARAM)
def hwnd_to_pid(hwnd):
pid = DWORD()
_USER32.GetWindowThreadProcessId(int(hwnd), byref(pid))
return pid.value
class Process(object):
def __init__(self, pid):
self.__pid = int(pid)
@property
def pid(self):
return self.__pid
def destroy(self):
subprocess.call(
'taskkill /F /T /PID %d' % (self.pid),
shell=True
)
def get_window(self):
found_hwnd = []
def callback(hwnd, data):
found_pid = hwnd_to_pid(hwnd)
if found_pid == self.pid:
found_hwnd.append(hwnd)
return False
return True
_USER32.EnumWindows(_EnumWindowsProc(callback), 0)
if found_hwnd:
return Window(found_hwnd[0]).get_root()
return None
def get_all_windows():
windows = []
def callback(hwnd, data):
windows.append(Window(hwnd))
return True
_USER32.EnumDesktopWindows(None, _EnumWindowsProc(callback), 0)
return windows
def get_window_at(x, y):
point = POINT()
point.x, point.y = x, y
hwnd = ctypes.windll.user32.WindowFromPoint(point)
if not hwnd:
return None
else:
return Window(hwnd)
class Window(object):
SWP_NOOWNERZORDER = 0x0200
SWP_NOSENDCHANGING = 0x0400
SW_SHOWMAXIMIZED = 3
SW_SHOWMINIMIZED = 2
BITSPIXEL = 12
def __init__(self, hwnd):
self.__hwnd = int(hwnd)
def _rect(self):
rect = RECT()
_USER32.GetWindowRect(self.__hwnd, byref(rect))
return (
rect.left,
rect.top,
(rect.right - rect.left),
(rect.bottom - rect.top),
)
def set_position(self, rect):
x, y, w, h = rect
_USER32.SetWindowPos(
self.__hwnd,
0,
x,
y,
w,
h,
Window.SWP_NOSENDCHANGING | Window.SWP_NOOWNERZORDER
)
@property
def title(self):
max_len = 128
text = (WCHAR*max_len)()
_USER32.GetWindowTextW(self.__hwnd, text, max_len)
return text.value
@property
def classname(self):
max_len = 128
text = (WCHAR*max_len)()
_USER32.GetClassNameW(self.__hwnd, text, max_len)
return text.value
@property
def visible(self):
return bool(_USER32.IsWindowVisible(self.__hwnd))
def switch_to(self):
_USER32.SwitchToThisWindow(self.__hwnd, False)
def maximise(self):
"""Maximise the window and return True if previously visible"""
return bool(_USER32.ShowWindow(self.__hwnd, Window.SW_SHOWMAXIMIZED))
def minimise(self):
"""Minimise the window and return True if previously visible"""
return bool(_USER32.ShowWindow(self.__hwnd, Window.SW_SHOWMINIMIZED))
def get_process(self):
return Process(hwnd_to_pid(self.__hwnd))
def get_root(self):
hwnd = self.__hwnd
while _USER32.GetParent(hwnd):
hwnd = _USER32.GetParent(hwnd)
if hwnd == self.__hwnd:
return self
else:
return Window(hwnd)
def __hash__(self):
return self.__hwnd
def __eq__(self, other):
try:
return self.__hwnd == other.__hwnd
except:
return False
def capture_locations(self):
x, y, width, height = self._rect()
window_dc = _USER32.GetWindowDC(self.__hwnd)
capture_dc = _GDI32.CreateCompatibleDC(window_dc)
# Check that the screen has bit depth of 32
bits = _GDI32.GetDeviceCaps(window_dc, Window.BITSPIXEL)
if bits != 32:
raise NotImplementedError(
"Geist only supports displays with a bit depth of 32 (%d)"
% bits)
bmp = _GDI32.CreateCompatibleBitmap(window_dc, width, height)
_GDI32.SelectObject(capture_dc, bmp)
_USER32.PrintWindow(self.__hwnd, capture_dc, 0)
bmp_info = _BITMAPINFO()
bmp_info.bmiHeader.biSize = sizeof(bmp_info)
bmp_info.bmiHeader.biPlanes = 1
bmp_info.bmiHeader.biBitCount = 32
bmp_info.bmiHeader.biWidth = width
bmp_info.bmiHeader.biHeight = -height
memarray = numpy.ndarray((height, width), dtype='4B')
_GDI32.GetDIBits(
capture_dc,
bmp,
0,
height,
memarray.ctypes.data_as(POINTER(c_ubyte)),
byref(bmp_info),
_DIB_RGB_COLORS
)
_GDI32.DeleteObject(bmp)
_GDI32.DeleteDC(capture_dc)
_GDI32.DeleteDC(window_dc)
#strip alpha and reverse bgr to rgb
image = memarray[:, :, 2::-1]
return LocationList([Location(x, y, width, height, image=image)])
def get_child_window_at(self, x, y):
point = POINT()
point.x, point.y = x, y
child_hwnd = ctypes.windll.user32.RealChildWindowFromPoint(
self.__hwnd,
point
)
if not child_hwnd:
return None
else:
return Window(child_hwnd)
def client_area_rect(self):
client_rect = RECT()
win_rect = RECT()
offset = POINT()
_USER32.GetClientRect(self.__hwnd, byref(client_rect))
_USER32.GetWindowRect(self.__hwnd, byref(win_rect))
_USER32.ClientToScreen(self.__hwnd, byref(offset))
x = offset.x - win_rect.left
y = offset.y - win_rect.top
w = client_rect.right
h = client_rect.bottom
return x, y, w, h
def __repr__(self):
return "Window(hwnd=%r, title=%r, classname=%r)" % (
self.__hwnd, self.title, self.classname
)
| mit | -8,654,186,419,630,648,000 | 25.451243 | 78 | 0.543498 | false |
brentd-smith/smolkinsite | songs/migrations/0001_initial.py | 1 | 4209 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-13 19:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='HaftarahReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('s3_obj_key', models.CharField(max_length=2048)),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='ParshaName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
('book_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.BookName')),
],
),
migrations.CreateModel(
name='ServiceName',
fields=[
('name', models.CharField(max_length=64, primary_key=True, serialize=False)),
('display', models.CharField(max_length=64)),
('seq_number', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('display', models.CharField(max_length=128)),
('s3_obj_key', models.CharField(max_length=2048)),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('page_number', models.PositiveSmallIntegerField()),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
('service_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ServiceName')),
],
),
migrations.CreateModel(
name='TorahReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('triennial', models.CharField(choices=[(None, None), ('1st', '1st Triennial'), ('2nd', '2nd Triennial'), ('3rd', '3rd Triennial')], default='1st', max_length=3)),
('aliyah', models.CharField(choices=[(None, None), ('1st', '1st Aliyah'), ('2nd', '2nd Aliyah'), ('3rd', '3rd Aliyah'), ('4th', '4th Aliyah'), ('5th', '5th Aliyah'), ('6th', '6th Aliyah'), ('7th', '7th Aliyah'), ('Maftir', 'Maftir')], default='1st', max_length=6)),
('extension', models.CharField(choices=[('mp3', 'mp3'), ('pdf', 'pdf'), ('jpg', 'jpg')], default='mp3', max_length=3)),
('s3_obj_key', models.CharField(max_length=2048)),
('seq_number', models.PositiveSmallIntegerField()),
('file_name', models.CharField(max_length=128)),
('parsha', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ParshaName')),
],
),
migrations.AddField(
model_name='haftarahreading',
name='parsha',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.ParshaName'),
),
]
| gpl-3.0 | 6,483,843,645,224,168,000 | 49.107143 | 281 | 0.552388 | false |
pkgw/pwkit | pwkit/bblocks.py | 1 | 16546 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2014 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""pwkit.bblocks - Bayesian Blocks analysis, with a few extensions.
Bayesian Blocks analysis for the "time tagged" case described by Scargle+
2013. Inspired by the bayesian_blocks implementation by Jake Vanderplas in the
AstroML package, but that turned out to have some limitations.
We have iterative determination of the best number of blocks (using an ad-hoc
routine described in Scargle+ 2013) and bootstrap-based determination of
uncertainties on the block heights (ditto).
Functions are:
:func:`bin_bblock`
Bayesian Blocks analysis with counts and bins.
:func:`tt_bblock`
BB analysis of time-tagged events.
:func:`bs_tt_bblock`
Like :func:`tt_bblock` with bootstrap-based uncertainty assessment. NOTE:
the uncertainties are not very reliable!
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('nlogn bin_bblock tt_bblock bs_tt_bblock').split ()
from six.moves import range
import numpy as np
from . import Holder
def nlogn (n, dt):
# I really feel like there must be a cleverer way to do this
# scalar-or-vector possible-bad-value masking.
if np.isscalar (n):
if n == 0:
return 0.
return n * (np.log (n) - np.log (dt))
n = np.asarray (n)
mask = (n == 0)
r = n * (np.log (np.where (mask, 1, n)) - np.log (dt))
return np.where (mask, 0, r)
def bin_bblock (widths, counts, p0=0.05):
"""Fundamental Bayesian Blocks algorithm. Arguments:
widths - Array of consecutive cell widths.
counts - Array of numbers of counts in each cell.
p0=0.05 - Probability of preferring solutions with additional bins.
Returns a Holder with:
blockstarts - Start times of output blocks.
counts - Number of events in each output block.
finalp0 - Final value of p0, after iteration to minimize `nblocks`.
nblocks - Number of output blocks.
ncells - Number of input cells/bins.
origp0 - Original value of p0.
rates - Event rate associated with each block.
widths - Width of each output block.
"""
widths = np.asarray (widths)
counts = np.asarray (counts)
ncells = widths.size
origp0 = p0
if np.any (widths <= 0):
raise ValueError ('bin widths must be positive')
if widths.size != counts.size:
raise ValueError ('widths and counts must have same size')
if p0 < 0 or p0 >= 1.:
raise ValueError ('p0 must lie within [0, 1)')
vedges = np.cumsum (np.concatenate (([0], widths))) # size: ncells + 1
block_remainders = vedges[-1] - vedges # size: nedges = ncells + 1
ccounts = np.cumsum (np.concatenate (([0], counts)))
count_remainders = ccounts[-1] - ccounts
prev_blockstarts = None
best = np.zeros (ncells, dtype=np.float)
last = np.zeros (ncells, dtype=np.int)
for _ in range (10):
# Pluggable num-change-points prior-weight expression:
ncp_prior = 4 - np.log (p0 / (0.0136 * ncells**0.478))
for r in range (ncells):
tk = block_remainders[:r+1] - block_remainders[r+1]
nk = count_remainders[:r+1] - count_remainders[r+1]
# Pluggable fitness expression:
fit_vec = nlogn (nk, tk)
# This incrementally penalizes partitions with more blocks:
tmp = fit_vec - ncp_prior
tmp[1:] += best[:r]
imax = np.argmax (tmp)
last[r] = imax
best[r] = tmp[imax]
# different semantics than Scargle impl: our blockstarts is similar to
# their changepoints, but we always finish with blockstarts[0] = 0.
work = np.zeros (ncells, dtype=int)
workidx = 0
ind = last[-1]
while True:
work[workidx] = ind
workidx += 1
if ind == 0:
break
ind = last[ind - 1]
blockstarts = work[:workidx][::-1]
if prev_blockstarts is not None:
if (blockstarts.size == prev_blockstarts.size and
(blockstarts == prev_blockstarts).all ()):
break # converged
if blockstarts.size == 1:
break # can't shrink any farther
# Recommended ad-hoc iteration to favor fewer blocks above and beyond
# the value of p0:
p0 = 1. - (1. - p0)**(1. / (blockstarts.size - 1))
prev_blockstarts = blockstarts
assert blockstarts[0] == 0
nblocks = blockstarts.size
info = Holder ()
info.ncells = ncells
info.nblocks = nblocks
info.origp0 = origp0
info.finalp0 = p0
info.blockstarts = blockstarts
info.counts = np.empty (nblocks, dtype=np.int)
info.widths = np.empty (nblocks)
for iblk in range (nblocks):
cellstart = blockstarts[iblk]
if iblk == nblocks - 1:
cellend = ncells - 1
else:
cellend = blockstarts[iblk+1] - 1
info.widths[iblk] = widths[cellstart:cellend+1].sum ()
info.counts[iblk] = counts[cellstart:cellend+1].sum ()
info.rates = info.counts / info.widths
return info
def tt_bblock (tstarts, tstops, times, p0=0.05, intersect_with_bins=False):
"""Bayesian Blocks for time-tagged events. Arguments:
*tstarts*
Array of input bin start times.
*tstops*
Array of input bin stop times.
*times*
Array of event arrival times.
*p0* = 0.05
Probability of preferring solutions with additional bins.
*intersect_with_bins* = False
If true, intersect bblock bins with input bins; can result in more bins
than bblocks wants; they will have the same rate values.
Returns a Holder with:
*counts*
Number of events in each output block.
*finalp0*
Final value of p0, after iteration to minimize `nblocks`.
*ledges*
Times of left edges of output blocks.
*midpoints*
Times of midpoints of output blocks.
*nblocks*
Number of output blocks.
*ncells*
Number of input cells/bins.
*origp0*
Original value of p0.
*rates*
Event rate associated with each block.
*redges*
Times of right edges of output blocks.
*widths*
Width of each output block.
Bin start/stop times are best derived from a 1D Voronoi tesselation of the
event arrival times, with some kind of global observation start/stop time
setting the extreme edges. Or they can be set from "good time intervals"
if observations were toggled on or off as in an X-ray telescope.
If *intersect_with_bins* is True, the true Bayesian Blocks bins (BBBs) are
intersected with the "good time intervals" (GTIs) defined by the *tstarts*
and *tstops* variables. One GTI may contain multiple BBBs if the event
rate appears to change within the GTI, and one BBB may span multiple GTIs
if the event date does *not* appear to change between the GTIs. The
intersection will ensure that no BBB intervals cross the edge of a GTI. If
this would happen, the BBB is split into multiple, partially redundant
records. Each of these records will have the **same** value for the
*counts*, *rates*, and *widths* values. However, the *ledges*, *redges*,
and *midpoints* values will be recalculated. Note that in this mode, it is
not necessarily true that ``widths = ledges - redges`` as is usually the
case. When this flag is true, keep in mind that multiple bins are
therefore *not* necessarily independent statistical samples.
"""
tstarts = np.asarray (tstarts)
tstops = np.asarray (tstops)
times = np.asarray (times)
if tstarts.size != tstops.size:
raise ValueError ('must have same number of starts and stops')
ngti = tstarts.size
if ngti < 1:
raise ValueError ('must have at least one goodtime interval')
if np.any ((tstarts[1:] - tstarts[:-1]) <= 0):
raise ValueError ('tstarts must be ordered and distinct')
if np.any ((tstops[1:] - tstops[:-1]) <= 0):
raise ValueError ('tstops must be ordered and distinct')
if np.any (tstarts >= tstops):
raise ValueError ('tstarts must come before tstops')
if np.any ((times[1:] - times[:-1]) < 0):
raise ValueError ('times must be ordered')
if times.min () < tstarts[0]:
raise ValueError ('no times may be smaller than first tstart')
if times.max () > tstops[-1]:
raise ValueError ('no times may be larger than last tstop')
for i in range (1, ngti):
if np.where ((times > tstops[i-1]) & (times < tstarts[i]))[0].size:
raise ValueError ('no times may fall in goodtime gap #%d' % i)
if p0 < 0 or p0 >= 1.:
raise ValueError ('p0 must lie within [0, 1)')
utimes, uidxs = np.unique (times, return_index=True)
nunique = utimes.size
counts = np.empty (nunique)
counts[:-1] = uidxs[1:] - uidxs[:-1]
counts[-1] = times.size - uidxs[-1]
assert counts.sum () == times.size
# we grow these arrays with concats, which will perform badly with lots of
# GTIs. Not expected to be a big deal.
widths = np.empty (0)
ledges = np.empty (0)
redges = np.empty (0)
for i in range (ngti):
tstart, tstop = tstarts[i], tstops[i]
w = np.where ((utimes >= tstart) & (utimes <= tstop))[0]
if not w.size:
# No events during this goodtime! We have to insert a zero-count
# event block. This may break assumptions within bin_bblock()?
# j = idx of first event after this GTI
wafter = np.where (utimes > tstop)[0]
if wafter.size:
j = wafter[0]
else:
j = utimes.size
assert j == 0 or np.where (utimes < tstart)[0][-1] == j - 1
counts = np.concatenate ((counts[:j], [0], counts[j:]))
widths = np.concatenate ((widths, [tstop - tstart]))
ledges = np.concatenate ((ledges, [tstart]))
redges = np.concatenate ((redges, [tstop]))
else:
gtutimes = utimes[w]
midpoints = 0.5 * (gtutimes[1:] + gtutimes[:-1]) # size: n - 1
gtedges = np.concatenate (([tstart], midpoints, [tstop])) # size: n + 1
gtwidths = gtedges[1:] - gtedges[:-1] # size: n
assert gtwidths.sum () == tstop - tstart
widths = np.concatenate ((widths, gtwidths))
ledges = np.concatenate ((ledges, gtedges[:-1]))
redges = np.concatenate ((redges, gtedges[1:]))
assert counts.size == widths.size
info = bin_bblock (widths, counts, p0=p0)
info.ledges = ledges[info.blockstarts]
# The right edge of the i'th block is the right edge of its rightmost
# bin, which is the bin before the leftmost bin of the (i+1)'th block:
info.redges = np.concatenate ((redges[info.blockstarts[1:] - 1], [redges[-1]]))
info.midpoints = 0.5 * (info.ledges + info.redges)
del info.blockstarts
if intersect_with_bins:
# OK, we now need to intersect the bblock bins with the input bins.
# This can fracture one bblock bin into multiple ones but shouldn't
# make any of them disappear, since they're definitionally constrained
# to contain events.
#
# First: sorted list of all timestamps at which *something* changes:
# either a bblock edge, or a input bin edge. We drop the last entry,
# giving is a list of left edges of bins in which everything is the
# same.
all_times = set(tstarts)
all_times.update(tstops)
all_times.update(info.ledges)
all_times.update(info.redges)
all_times = np.array(sorted(all_times))[:-1]
# Now, construct a lookup table of which bblock number each of these
# bins corresponds to. More than one bin may have the same bblock
# number, if a GTI change slices a single bblock into more than one
# piece. We do this in a somewhat non-obvious way since we know that
# the bblocks completely cover the overall GTI span in order.
bblock_ids = np.zeros(all_times.size)
for i in range(1, info.nblocks):
bblock_ids[all_times >= info.ledges[i]] = i
# Now, a lookup table of which bins are within a good GTI span. Again,
# we know that all bins are either entirely in a good GTI or entirely
# outside, so the logic is simplified but not necessarily obvious.
good_timeslot = np.zeros(all_times.size, dtype=np.bool)
for t0, t1 in zip(tstarts, tstops):
ok = (all_times >= t0) & (all_times < t1)
good_timeslot[ok] = True
# Finally, look for contiguous spans that are in a good timeslot *and*
# have the same underlying bblock number. These are our intersected
# blocks.
old_bblock_ids = []
ledges = []
redges = []
cur_bblock_id = -1
for i in range(all_times.size):
if bblock_ids[i] != cur_bblock_id or not good_timeslot[i]:
if cur_bblock_id >= 0:
# Ending a previous span.
redges.append(all_times[i])
cur_bblock_id = -1
if good_timeslot[i]:
# Starting a new span.
ledges.append(all_times[i])
old_bblock_ids.append(bblock_ids[i])
cur_bblock_id = bblock_ids[i]
if cur_bblock_id >= 0:
# End the last span.
redges.append(tstops[-1])
# Finally, rewrite all of the data as planned.
old_bblock_ids = np.array(old_bblock_ids, dtype=np.int)
info.counts = info.counts[old_bblock_ids]
info.rates = info.rates[old_bblock_ids]
info.widths = info.widths[old_bblock_ids]
info.ledges = np.array(ledges)
info.redges = np.array(redges)
info.midpoints = 0.5 * (info.ledges + info.redges)
info.nblocks = info.ledges.size
return info
def bs_tt_bblock (times, tstarts, tstops, p0=0.05, nbootstrap=512):
"""Bayesian Blocks for time-tagged events with bootstrapping uncertainty
assessment. THE UNCERTAINTIES ARE NOT VERY GOOD! Arguments:
tstarts - Array of input bin start times.
tstops - Array of input bin stop times.
times - Array of event arrival times.
p0=0.05 - Probability of preferring solutions with additional bins.
nbootstrap=512 - Number of bootstrap runs to perform.
Returns a Holder with:
blockstarts - Start times of output blocks.
bsrates - Mean event rate in each bin from bootstrap analysis.
bsrstds - ~Uncertainty: stddev of event rate in each bin from bootstrap analysis.
counts - Number of events in each output block.
finalp0 - Final value of p0, after iteration to minimize `nblocks`.
ledges - Times of left edges of output blocks.
midpoints - Times of midpoints of output blocks.
nblocks - Number of output blocks.
ncells - Number of input cells/bins.
origp0 - Original value of p0.
rates - Event rate associated with each block.
redges - Times of right edges of output blocks.
widths - Width of each output block.
"""
times = np.asarray (times)
tstarts = np.asarray (tstarts)
tstops = np.asarray (tstops)
nevents = times.size
if nevents < 1:
raise ValueError ('must be given at least 1 event')
info = tt_bblock (tstarts, tstops, times, p0)
# Now bootstrap resample to assess uncertainties on the bin heights. This
# is the approach recommended by Scargle+.
bsrsums = np.zeros (info.nblocks)
bsrsumsqs = np.zeros (info.nblocks)
for _ in range (nbootstrap):
bstimes = times[np.random.randint (0, times.size, times.size)]
bstimes.sort ()
bsinfo = tt_bblock (tstarts, tstops, bstimes, p0)
blocknums = np.minimum (np.searchsorted (bsinfo.redges, info.midpoints),
bsinfo.nblocks - 1)
samprates = bsinfo.rates[blocknums]
bsrsums += samprates
bsrsumsqs += samprates**2
bsrmeans = bsrsums / nbootstrap
mask = bsrsumsqs / nbootstrap <= bsrmeans**2
bsrstds = np.sqrt (np.where (mask, 0, bsrsumsqs / nbootstrap - bsrmeans**2))
info.bsrates = bsrmeans
info.bsrstds = bsrstds
return info
| mit | -3,543,484,899,377,232,400 | 36.265766 | 89 | 0.620513 | false |
mganeva/mantid | scripts/Muon/GUI/Common/home_tab/home_tab_view.py | 1 | 1149 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui
class HomeTabView(QtGui.QWidget):
def __init__(self, parent=None,widget_list=None):
super(HomeTabView, self).__init__(parent)
self._widget_list = widget_list
self.splitter = None
self.vertical_layout = None
self.setup_interface()
def setup_interface(self):
self.setObjectName("HomeTab")
self.setWindowTitle("Home Tab")
self.resize(500, 100)
self.vertical_layout = QtGui.QVBoxLayout()
if self._widget_list:
for i, widget in enumerate(self._widget_list):
widget.setParent(self)
self.vertical_layout.addWidget(widget)
self.setLayout(self.vertical_layout)
# for docking
def getLayout(self):
return self.vertical_layout
| gpl-3.0 | 5,541,721,634,874,385,000 | 27.725 | 68 | 0.652742 | false |
snowflakedb/snowflake-connector-python | src/snowflake/connector/proxy.py | 1 | 1473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
import os
def set_proxies(proxy_host, proxy_port, proxy_user=None, proxy_password=None):
"""Sets proxy dict for requests."""
PREFIX_HTTP = "http://"
PREFIX_HTTPS = "https://"
proxies = None
if proxy_host and proxy_port:
if proxy_host.startswith(PREFIX_HTTP):
proxy_host = proxy_host[len(PREFIX_HTTP) :]
elif proxy_host.startswith(PREFIX_HTTPS):
proxy_host = proxy_host[len(PREFIX_HTTPS) :]
if proxy_user or proxy_password:
proxy_auth = "{proxy_user}:{proxy_password}@".format(
proxy_user=proxy_user if proxy_user is not None else "",
proxy_password=proxy_password if proxy_password is not None else "",
)
else:
proxy_auth = ""
proxies = {
"http": "http://{proxy_auth}{proxy_host}:{proxy_port}".format(
proxy_host=proxy_host,
proxy_port=str(proxy_port),
proxy_auth=proxy_auth,
),
"https": "http://{proxy_auth}{proxy_host}:{proxy_port}".format(
proxy_host=proxy_host,
proxy_port=str(proxy_port),
proxy_auth=proxy_auth,
),
}
os.environ["HTTP_PROXY"] = proxies["http"]
os.environ["HTTPS_PROXY"] = proxies["https"]
return proxies
| apache-2.0 | 845,093,474,728,301,600 | 34.926829 | 84 | 0.552614 | false |
smarterclayton/solum | solum/openstack/common/rpc/amqp.py | 1 | 23625 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from solum.openstack.common import excutils
from solum.openstack.common.gettextutils import _ # noqa
from solum.openstack.common import local
from solum.openstack.common import log as logging
from solum.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
ack_on_error=True):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name,
ack_on_error)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshhold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waiter(self, msg_id):
self._num_call_waiters -= 1
del self._call_waiters[msg_id]
def get_reply_q(self):
return self._reply_q
def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
"""
with ConnectionContext(conf, connection_pool) as conn:
if failure:
failure = rpc_common.serialize_remote_exception(failure,
log_failure)
msg = {'result': reply, 'failure': failure}
if ending:
msg['ending'] = True
_add_unique_id(msg)
# If a reply_q exists, add the msg_id to the reply and pass the
# reply_q to direct_send() to use it as the response queue.
# Otherwise use the msg_id for backward compatibilty.
if reply_q:
msg['_msg_id'] = msg_id
conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
else:
conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
reply, failure, ending, log_failure)
if ending:
self.msg_id = None
def unpack_context(conf, msg):
"""Unpack context from msg."""
context_dict = {}
for key in list(msg.keys()):
# NOTE(vish): Some versions of python don't like unicode keys
# in kwargs.
key = str(key)
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
return ctx
def pack_context(msg, context):
"""Pack context into msg.
Values for message keys need to be less than 255 chars, so we pull
context out into a bunch of separate keys. If we want to support
more arguments in rabbit messages, we may want to do the same
for args at some point.
"""
if isinstance(context, dict):
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.iteritems()])
else:
context_d = dict([('_context_%s' % key, value)
for (key, value) in context.to_dict().iteritems()])
msg.update(context_d)
class _MsgIdCache(object):
"""This class checks any duplicate messages."""
# NOTE: This value is considered can be a configuration item, but
# it is not necessary to change its value in most cases,
# so let this value as static for now.
DUP_MSG_CHECK_SIZE = 16
def __init__(self, **kwargs):
self.prev_msgids = collections.deque([],
maxlen=self.DUP_MSG_CHECK_SIZE)
def check_duplicate_message(self, message_data):
"""AMQP consumers may read same message twice when exceptions occur
before ack is returned. This method prevents doing it.
"""
if UNIQUE_ID in message_data:
msg_id = message_data[UNIQUE_ID]
if msg_id not in self.prev_msgids:
self.prev_msgids.append(msg_id)
else:
raise rpc_common.DuplicateMessageError(msg_id=msg_id)
def _add_unique_id(msg):
"""Add unique_id for checking duplicate messages."""
unique_id = uuid.uuid4().hex
msg.update({UNIQUE_ID: unique_id})
LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
class _ThreadPoolWithWait(object):
"""Base class for a delayed invocation manager.
Used by the Connection class to start up green threads
to handle incoming messages.
"""
def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
def wait(self):
"""Wait for all callback threads to exit."""
self.pool.waitall()
class CallbackWrapper(_ThreadPoolWithWait):
"""Wraps a straight callback.
Allows it to be invoked in a green thread.
"""
def __init__(self, conf, callback, connection_pool,
wait_for_consumers=False):
"""Initiates CallbackWrapper object.
:param conf: cfg.CONF instance
:param callback: a callable (probably a function)
:param connection_pool: connection pool as returned by
get_connection_pool()
:param wait_for_consumers: wait for all green threads to
complete and raise the last
caught exception, if any.
"""
super(CallbackWrapper, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.callback = callback
self.wait_for_consumers = wait_for_consumers
self.exc_info = None
def _wrap(self, message_data, **kwargs):
"""Wrap the callback invocation to catch exceptions.
"""
try:
self.callback(message_data, **kwargs)
except Exception:
self.exc_info = sys.exc_info()
def __call__(self, message_data):
self.exc_info = None
self.pool.spawn_n(self._wrap, message_data)
if self.wait_for_consumers:
self.pool.waitall()
if self.exc_info:
raise self.exc_info[1], None, self.exc_info[2]
class ProxyCallback(_ThreadPoolWithWait):
"""Calls methods on a proxy object based on method and args."""
def __init__(self, conf, proxy, connection_pool):
super(ProxyCallback, self).__init__(
conf=conf,
connection_pool=connection_pool,
)
self.proxy = proxy
self.msg_id_cache = _MsgIdCache()
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
Parses the message for validity and fires off a thread to call the
proxy object method.
Message data should be a dictionary with two keys:
method: string representing the method to call
args: dictionary of arg: value
Example: {'method': 'echo', 'args': {'value': 42}}
"""
# It is important to clear the context here, because at this point
# the previous context is stored in local.store.context
if hasattr(local.store, 'context'):
del local.store.context
rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
self.msg_id_cache.check_duplicate_message(message_data)
ctxt = unpack_context(self.conf, message_data)
method = message_data.get('method')
args = message_data.get('args', {})
version = message_data.get('version')
namespace = message_data.get('namespace')
if not method:
LOG.warn(_('no method for message: %s') % message_data)
ctxt.reply(_('No method for message: %s') % message_data,
connection_pool=self.connection_pool)
return
self.pool.spawn_n(self._process_data, ctxt, version, method,
namespace, args)
def _process_data(self, ctxt, version, method, namespace, args):
"""Process a message in a new thread.
If the proxy object we have has a dispatch method
(see rpc.dispatcher.RpcDispatcher), pass it the version,
method, and args and let it dispatch as appropriate. If not, use
the old behavior of magically calling the specified method on the
proxy we have here.
"""
ctxt.update_store()
try:
rval = self.proxy.dispatch(ctxt, version, method, namespace,
**args)
# Check if the result was a generator
if inspect.isgenerator(rval):
for x in rval:
ctxt.reply(x, None, connection_pool=self.connection_pool)
else:
ctxt.reply(rval, None, connection_pool=self.connection_pool)
# This final None tells multicall that it is done.
ctxt.reply(ending=True, connection_pool=self.connection_pool)
except rpc_common.ClientException as e:
LOG.debug(_('Expected exception during message handling (%s)') %
e._exc_info[1])
ctxt.reply(None, e._exc_info,
connection_pool=self.connection_pool,
log_failure=False)
except Exception:
# sys.exc_info() is deleted by LOG.exception().
exc_info = sys.exc_info()
LOG.error(_('Exception during message handling'),
exc_info=exc_info)
ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
class MulticallProxyWaiter(object):
def __init__(self, conf, msg_id, timeout, connection_pool):
self._msg_id = msg_id
self._timeout = timeout or conf.rpc_response_timeout
self._reply_proxy = connection_pool.reply_proxy
self._done = False
self._got_ending = False
self._conf = conf
self._dataqueue = queue.LightQueue()
# Add this caller to the reply proxy's call_waiters
self._reply_proxy.add_call_waiter(self, self._msg_id)
self.msg_id_cache = _MsgIdCache()
def put(self, data):
self._dataqueue.put(data)
def done(self):
if self._done:
return
self._done = True
# Remove this caller from reply proxy's call_waiters
self._reply_proxy.del_call_waiter(self._msg_id)
def _process_data(self, data):
result = None
self.msg_id_cache.check_duplicate_message(data)
if data['failure']:
failure = data['failure']
result = rpc_common.deserialize_remote_exception(self._conf,
failure)
elif data.get('ending', False):
self._got_ending = True
else:
result = data['result']
return result
def __iter__(self):
"""Return a result until we get a reply with an 'ending' flag."""
if self._done:
raise StopIteration
while True:
try:
data = self._dataqueue.get(timeout=self._timeout)
result = self._process_data(data)
except queue.Empty:
self.done()
raise rpc_common.Timeout()
except Exception:
with excutils.save_and_reraise_exception():
self.done()
if self._got_ending:
self.done()
raise StopIteration
if isinstance(result, Exception):
self.done()
raise result
yield result
def create_connection(conf, new, connection_pool):
"""Create a connection."""
return ConnectionContext(conf, connection_pool, pooled=not new)
_reply_proxy_create_sem = semaphore.Semaphore()
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
_add_unique_id(msg)
pack_context(msg, context)
with _reply_proxy_create_sem:
if not connection_pool.reply_proxy:
connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
def call(conf, context, topic, msg, timeout, connection_pool):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout, connection_pool)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a topic without waiting for a response."""
LOG.debug(_('Making asynchronous cast on %s...'), topic)
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast(conf, context, topic, msg, connection_pool):
"""Sends a message on a fanout exchange without waiting for a response."""
LOG.debug(_('Making asynchronous fanout cast...'))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
"""Sends a message on a topic to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.topic_send(topic, rpc_common.serialize_msg(msg))
def fanout_cast_to_server(conf, context, server_params, topic, msg,
connection_pool):
"""Sends a message on a fanout exchange to a specific server."""
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool, pooled=False,
server_params=server_params) as conn:
conn.fanout_send(topic, rpc_common.serialize_msg(msg))
def notify(conf, context, topic, msg, connection_pool, envelope):
"""Sends a notification event on a topic."""
LOG.debug(_('Sending %(event_type)s on %(topic)s'),
dict(event_type=msg.get('event_type'),
topic=topic))
_add_unique_id(msg)
pack_context(msg, context)
with ConnectionContext(conf, connection_pool) as conn:
if envelope:
msg = rpc_common.serialize_msg(msg)
conn.notify_send(topic, msg)
def cleanup(connection_pool):
if connection_pool:
connection_pool.empty()
def get_control_exchange(conf):
return conf.control_exchange
| apache-2.0 | -6,104,428,898,525,230,000 | 36.146226 | 79 | 0.603513 | false |
plusreed/foxpy | main3.py | 1 | 2910 | from discord.ext import commands
import discord
import datetime, re
import asyncio
import copy
import logging
import traceback
import sys
from collections import Counter
import config
description = """
I'm Fox, a multi-purpose and modular Discord bot.
"""
init_cogs = [
'plugins.admin.eval',
'plugins.admin.shutdown',
'plugins.core.math',
'plugins.core.ping',
'plugins.core.lastfm',
'plugins.music.voice',
]
dc_log = logging.getLogger('discord')
dc_log.setLevel(logging.DEBUG)
log = logging.getLogger()
log.setLevel(logging.INFO)
handler = logging.FileHandler(filename='fox.log', encoding='utf-8', mode='w')
log.addHandler(handler)
hattr = dict(hidden=True)
prefix = ['$', '^']
fox = commands.Bot(command_prefix=prefix, description=description, pm_help=None, help_attrs=hattr)
@fox.event
async def on_command_error(error, ctx):
if isinstance(error, commands.NoPrivateMessage):
await fox.send_message(ctx.message.author, "Sorry, you can't use this command in private messages.")
elif isinstance(error, commands.DisabledCommand):
await fox.send_message(ctx.message.author, 'Sorry, it looks like that command is disabled.')
elif isinstance(error, commands.CommandInvokeError):
print('In {0.command.qualified_name}:'.format(ctx))
traceback.print_tb(error.original.__traceback__)
print('{0.__class__.__name__}: {0}'.format(error.original))
@fox.event
async def on_ready():
print('Fox is now ready!')
print('Username: ' + fox.user.name)
print('ID: ' + fox.user.id)
print('------')
if not hasattr(fox, 'uptime'):
fox.uptime = datetime.datetime.utcnow()
@fox.event
async def on_resumed():
print("Fox has resumed.")
@fox.event
async def on_command(command, ctx):
fox.commands_used[command.name] += 1
message = ctx.message
if message.channel.is_private:
destination = 'Private Message'
else:
destination = '#{0.channel.name} ({0.server.name})'.format(message)
log.info('{0.timestamp}: {0.author.name} in {1}: {0.content}'.format(message, destination))
@fox.event
async def on_message(message):
if message.author.bot:
return
await fox.process_commands(message)
# @bot.command()
# async def ping():
# await bot.say("pong")
if __name__ == '__main__':
if any('debug' in arg.lower() for arg in sys.argv):
print("Fox is running in debug mode. The command prefix is now '^^'.")
fox.command_prefix = '^^'
fox.client_id = config.BOT_ID
fox.commands_used = Counter()
for plugin in init_cogs:
try:
            fox.load_extension(plugin)
except Exception as e:
print('Error: failed to load plugin {}\n{}: {}'.format(plugin, type(e).__name__, e))
fox.run(config.BOT_TOKEN)
handlers = log.handlers[:]
for hdlr in handlers:
hdlr.close()
log.removeHandler(hdlr)
| mit | -8,494,187,333,163,795,000 | 25.944444 | 108 | 0.657388 | false |
Sirs0ri/PersonalAssistant | samantha/plugins/schedule_plugin.py | 1 | 8649 | """This plugin triggers schedules events.
The different commands are triggered:
* every 10 seconds
* at the start of every minute
* ..hour
* ..day
* ..month
* ..year
All these events are triggered as soon as possible, i.e. 'Day' will be
triggered at 0:00, month on the 1st at 0:00, etc.
"""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import datetime
import logging
import random
import threading
import time
# related third party imports
# application specific imports
import samantha.context as context
from samantha.core import subscribe_to
from samantha.plugins.plugin import Plugin
from samantha.tools import eventbuilder
__version__ = "1.3.17"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
SUNRISE = datetime.datetime(1970, 1, 1)
SUNSET = datetime.datetime(1970, 1, 1)
PLUGIN = Plugin("Schedule", True, LOGGER, __file__)
def worker():
"""Check if events should be triggered, sleep 1sec, repeat."""
name = __name__ + ".Thread"
logger = logging.getLogger(name)
logger.debug("Started.")
def _check_daytime(_datetime_obj, _timelist):
if (SUNSET < SUNRISE < _datetime_obj or
SUNRISE < _datetime_obj < SUNSET or
_datetime_obj < SUNSET < SUNRISE):
# The sun has risen.
time_of_day = "day"
else:
# The sun hasn't risen yet.
time_of_day = "night"
if time_of_day == context.get_value("time.time_of_day", None):
logger.debug("It's still %stime.", time_of_day)
else:
logger.debug("It's now %stime.", time_of_day)
context.set_property("time.time_of_day", time_of_day)
keyword = "time.time_of_day.{}".format(time_of_day)
eventbuilder.eEvent(sender_id=name,
keyword=keyword,
data=_timelist).trigger()
# calculate time between now and sunrise
if SUNRISE < _datetime_obj:
# the sunrise is in the past
sunrise_pre_post = "post"
diff_sunrise = _datetime_obj - SUNRISE
else:
# the sunrise is in the future
sunrise_pre_post = "pre"
diff_sunrise = SUNRISE - _datetime_obj
if 0 < diff_sunrise.seconds % 300 < 59:
            # the difference between now and the sunrise is a multiple of 5
            # minutes (this check runs once a minute, so the condition becomes
            # true every 5th minute).
keyword_sunrise = "time.sunrise.{}.{}".format(
sunrise_pre_post,
diff_sunrise.seconds / 60)
LOGGER.warning("Triggering event '%s'!", keyword_sunrise)
eventbuilder.eEvent(sender_id=name,
keyword=keyword_sunrise,
data=_timelist).trigger()
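        # Worked example (comment added for clarity, not in the original): if
        # sunrise is at 06:30:30 and this check runs at 06:05:00, diff_sunrise
        # is 1530 s; 1530 % 300 == 30, which lies inside (0, 59), so an event
        # with a keyword like "time.sunrise.pre.<minutes>" is triggered. The
        # same pattern is applied to the sunset below.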
# calculate time between now and sunset
if SUNSET < _datetime_obj:
# the sunset is in the past
sunset_pre_post = "post"
diff_sunset = _datetime_obj - SUNSET
else:
# the sunset is in the future
sunset_pre_post = "pre"
diff_sunset = SUNSET - _datetime_obj
if 0 < diff_sunset.seconds % 300 < 59:
            # the difference between now and the sunset is a multiple of 5
            # minutes (this check runs once a minute, so the condition becomes
            # true every 5th minute).
keyword_sunset = "time.sunset.{}.{}".format(
sunset_pre_post,
diff_sunset.seconds / 60)
LOGGER.warning("Triggering event '%s'!", keyword_sunset)
eventbuilder.eEvent(sender_id=name,
keyword=keyword_sunset,
data=_timelist).trigger()
logger.debug("SUNRISE: %s, SUNSET: %s, NOW: %s",
SUNRISE, SUNSET, _datetime_obj)
def _trigger(keyword, data):
if "10s" in keyword:
ttl = 8
elif "10s" in keyword:
ttl = 55
elif "10s" in keyword:
ttl = 3300
else:
ttl = 0
eventbuilder.eEvent(sender_id=name,
keyword=keyword,
data=data,
ttl=ttl).trigger()
# Initialize the two random events.
# They'll be triggered randomly once an hour/once a day. These two counters
# count down the seconds until the next event. They'll be reset to a random
# value every hour (day) between 0 and the number of seconds in an hour/day
# The default values are 120secs for the hourly event and 180 for the daily
# so that the two events are being triggered relatively soon after starting
rnd_hourly_counter = 120
rnd_daily_counter = 180
while True:
datetime_obj = datetime.datetime.now()
timetuple = datetime_obj.timetuple()
"""
# value: time.struct_time(tm_year=2016, tm_mon=1, tm_mday=22,
# tm_hour=11, tm_min=26, tm_sec=13,
# tm_wday=4, tm_yday=22, tm_isdst=-1)
# ..[0]: tm_year = 2016
# ..[1]: tm_mon = 1
# ..[2]: tm_mday = 22
# ..[3]: tm_hour = 11
# ..[4]: tm_min = 26
# ..[5]: tm_sec = 13
# ..[6]: tm_wday = 4
# ..[7]: tm_yday = 22
# ..[8]: tm_isdst = -1
"""
timelist = list(timetuple)
if rnd_hourly_counter == 0:
_trigger(keyword="time.schedule.hourly_rnd", data=timelist)
if rnd_daily_counter == 0:
_trigger(keyword="time.schedule.daily_rnd", data=timelist)
rnd_hourly_counter -= 1
rnd_daily_counter -= 1
if timelist[5] in [0, 10, 20, 30, 40, 50]:
_trigger(keyword="time.schedule.10s", data=timelist)
if timelist[5] == 0:
# Seconds = 0 -> New Minute
_trigger(keyword="time.schedule.min", data=timelist)
# Check for a change in the time of day
_check_daytime(datetime_obj, timelist)
if timelist[4] == 0:
# Minutes = 0 -> New Hour
_trigger(keyword="time.schedule.hour", data=timelist)
rnd_hourly_counter = random.randint(0, 3599)
if timelist[3] == 0:
# Hours = 0 -> New Day
_trigger(keyword="time.schedule.day", data=timelist)
rnd_daily_counter = random.randint(0, 86399)
if timelist[2] == 1:
# Day of Month = 1 -> New Month
_trigger(keyword="time.schedule.mon",
data=timelist)
if timelist[1] == 1:
# Month = 1 -> New Year
_trigger(keyword="time.schedule.year",
data=timelist)
# sleep to take work from the CPU
time.sleep(1)
@subscribe_to("system.onstart")
def start_thread(key, data):
"""Set up the plugin by starting the worker-thread."""
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
return "Worker started successfully."
@subscribe_to("weather.sys.update")
def sun_times(key, data):
"""Update the times for sunset and -rise."""
global SUNRISE, SUNSET
result = ""
invalid = True
if "sunrise" in data:
invalid = False
sunrise = datetime.datetime.fromtimestamp(data["sunrise"])
if SUNRISE is not sunrise:
SUNRISE = sunrise
LOGGER.debug("Updated Sunrise to %s",
SUNRISE.strftime('%Y-%m-%d %H:%M:%S'))
result += "Sunrise updated successfully."
if "sunset" in data:
invalid = False
sunset = datetime.datetime.fromtimestamp(data["sunset"])
if SUNSET is not sunset:
SUNSET = sunset
LOGGER.debug("Updated Sunset to %s",
SUNSET.strftime('%Y-%m-%d %H:%M:%S'))
result += "Sunset updated successfully."
if invalid:
result = "Error: The data does not contain info about sunrise/-set."
if result == "":
result = "Sunrise/-set were already up to date."
return result
| mit | 360,726,340,552,907,840 | 36.441558 | 79 | 0.529194 | false |
RaphaelNajera/Sunlight_Sensor | documentation/CENG355 Solar Capstone/firmware/Solar_Capstone_PV_v4.py | 1 | 5201 | #Retrieving from PV1, PV2, PV4
#Working on retrieving from PV3
#Solar Capstone
#Johnson, Raphael & Adrian
from bs4 import BeautifulSoup
from datetime import datetime
import urllib.request
import threading #Loop
import time
from ctypes import c_short
from ctypes import c_byte
from ctypes import c_ubyte
from time import sleep
#Module for push data to firebase
import pyrebase
#Config for connecting to the Firebase
config = {
"apiKey": "AIzaSyB_inMZruQbJUzueOSRqf0-zwbYoUnZqDA",
"authDomain": "solar-capstone.firebaseapp.com",
"databaseURL": "https://solar-capstone.firebaseio.com/",
"storageBucket": "solar-capstone.appspot.com",
#"serviceAccount": "path/to/serviceAccountKey.json"
}
#====================================================================================================
#Send the data to firebase database every 30 minutes.
#PV1
def SendtoFirebasePV1(db, Date, Power, Dailyyield, Totalyield):
PV1 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV1_result = db.child("PV1").push(PV1)
    return
#PV2
def SendtoFirebasePV2(db, Date, Power, Dailyyield, Totalyield):
PV2 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV2_result = db.child("PV2").push(PV2)
    return
#PV3
#PV4
def SendtoFirebasePV4(db, Date, Power, Dailyyield, Totalyield):
PV4 = {"Date": Date, "Power": Power, "Daily_yield": Dailyyield, "Total_yield": Totalyield}
PV4_result = db.child("PV4").push(PV4)
    return
#====================================================================================================
def GetAuthorized(firebase):
auth = firebase.auth()
    return ''
#====================================================================================================
#This function execute every hour to retrieve data from all solar panels
def repeatEveryHourly():
firebase = pyrebase.initialize_app(config)
    #re-runs this function every 30 minutes (1800 seconds); adjust the timer value to change the polling interval
threading.Timer(1800.0, repeatEveryHourly).start()
#grabs the current date and time
currentTime = datetime.now()
print(currentTime.strftime("\n%Y/%m/%d %I:%M %p\n"))
date = currentTime.strftime("%Y/%m/%d %I:%M %p")
#requesting to open this html for reading
PV1 = urllib.request.urlopen("http://10.116.25.7/home.htm").read()
PV2 = urllib.request.urlopen("http://10.116.25.5/production?locale=en").read()
#PV3
PV4 = urllib.request.urlopen("http://10.116.25.6/home.htm").read()
#uses the BeautifulSoup function to process xml and html in Python.
PV1_data = BeautifulSoup(PV1,'lxml')
PV2_data = BeautifulSoup(PV2, 'lxml')
PV4_data = BeautifulSoup(PV4, 'lxml')
    #uses the find() function to locate the <table> element with the id "OvTbl"
#PV1
PV1_table = PV1_data.find('table', id="OvTbl")
PV1table_row = PV1_table.find_all('tr')
#PV2
PV2_table = PV2_data.find_all('table')
#PV4
PV4_table = PV4_data.find('table', id="OvTbl")
PV4table_row = PV4_table.find_all('tr')
#Global variables for string comparison
power = "Power:"
daily = "Daily yield:"
total = "Total yield:"
#PV2 global variables for string comparison
power_2 = "Currently"
daily_2 = "Today"
total_2 = "Since Installation"
#Variables for PV1
PV1_power = ""
PV1_daily = ""
PV1_total = ""
#Variables for PV2
PV2_daily = ""
PV2_power = ""
PV2_total = ""
#Variables for PV4
PV4_power = ""
PV4_daily = ""
PV4_total = ""
#Display the info
print("Solar Panel PV1")
for tr in PV1table_row:
td = tr.find_all('td')
row = [i.string for i in td]
print(row[0] + " " + row[1])
if power == row[0]:
PV1_power = row[1]
#print(PV1_power)
if daily == row[0]:
PV1_daily = row[1]
#print(PV1_daily)
if total == row[0]:
PV1_total = row[1]
#print(PV1_total)
print("\nSolar Panel PV2")
for tr in PV2_table:
td = tr.find_all('td')
row = [i.text for i in td]
#Testing
#print("\n Row0: "+row[0])
#print("\n Row1: "+row[1])
#print("\n Row2: "+row[2])
#print("\n Row3: "+row[3])
#print("\n Row4: "+row[4])
#print("\n Row5: "+row[5])
#print("\n Row6: "+row[6])
#print("\n Row7: "+row[7])
#print("\n Row8: "+row[8])
if power_2 == row[1]:
PV2_power = row[2]
print("Power:"+PV2_power)
if daily_2 == row[3]:
PV2_daily = row[4]
print("Daily yield: "+PV2_daily)
if total_2 == row[7]:
PV2_total = row[8]
print("Total yield:"+PV2_total)
print("\nSolar Panel PV4")
for tr in PV4table_row:
td = tr.find_all('td')
row = [i.text for i in td]
print(row[0] + " " + row[1])
if power == row[0]:
PV4_power = row[1]
#print(PV4_power)
if daily == row[0]:
PV4_daily = row[1]
#print(PV4_daily)
if total == row[0]:
PV4_total = row[1]
#print(PV4_total)
#Calls to push the data to the firebase
SendtoFirebasePV1( firebase.database(), date, PV1_power, PV1_daily, PV1_total)
SendtoFirebasePV2( firebase.database(), date, PV2_power, PV2_daily, PV2_total)
SendtoFirebasePV4( firebase.database(), date, PV4_power, PV4_daily, PV4_total)
#====================================================================================
#Main program
def main():
repeatEveryHourly()
return
if __name__ == "__main__":
main() | agpl-3.0 | -4,950,104,520,752,690,000 | 28.224719 | 101 | 0.614882 | false |
googlearchive/py-gfm | tests/test_spaced_link.py | 1 | 3601 | # Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import gfm
from test_case import TestCase
class TestMultilineLink(TestCase):
def setUp(self):
self.spaced_link = gfm.SpacedLinkExtension([])
def test_normal_link(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link](href)
""", [self.spaced_link])
def test_normal_reference(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link][id]
[id]: href
""", [self.spaced_link])
def test_normal_image_link(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """

""", [self.spaced_link])
def test_normal_image_reference(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt][id]
[id]: href
""", [self.spaced_link])
def test_spaced_link(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link] (href)
""", [self.spaced_link])
def test_spaced_reference(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link] [id]
[id]: href
""", [self.spaced_link])
def test_spaced_image_link(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt] (href)
""", [self.spaced_link])
def test_spaced_image_reference(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt] [id]
[id]: href
""", [self.spaced_link])
def test_multiline_link(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link]
(href)
""", [self.spaced_link])
def test_multiline_reference(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link]
[id]
[id]: href
""", [self.spaced_link])
def test_multiline_image_link(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt]
(href)
""", [self.spaced_link])
def test_multiline_image_reference(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt]
[id]
[id]: href
""", [self.spaced_link])
def test_multiline_and_spaced_link(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link]
(href)
""", [self.spaced_link])
def test_multiline_and_spaced_reference(self):
self.assert_renders("""
<p><a href="href">link</a></p>
""", """
[link]
[id]
[id]: href
""", [self.spaced_link])
def test_multiline_and_spaced_image_link(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt]
(href)
""", [self.spaced_link])
def test_multiline_and_spaced_image_reference(self):
self.assert_renders("""
<p><img alt="alt" src="href" /></p>
""", """
![alt]
[id]
[id]: href
""", [self.spaced_link])
| bsd-3-clause | -5,022,080,655,735,300,000 | 23.496599 | 76 | 0.464038 | false |
seleniumbase/SeleniumBase | seleniumbase/translate/chinese.py | 1 | 22162 | # Chinese / 中文 - Translations - Python 3 Only!
from seleniumbase import BaseCase
from seleniumbase import MasterQA
class 硒测试用例(BaseCase): # noqa
def __init__(self, *args, **kwargs):
super(硒测试用例, self).__init__(*args, **kwargs)
self._language = "Chinese"
def 开启(self, *args, **kwargs):
# open(url)
return self.open(*args, **kwargs)
def 开启网址(self, *args, **kwargs):
# open_url(url)
return self.open_url(*args, **kwargs)
def 单击(self, *args, **kwargs):
# click(selector)
return self.click(*args, **kwargs)
def 双击(self, *args, **kwargs):
# double_click(selector)
return self.double_click(*args, **kwargs)
def 慢单击(self, *args, **kwargs):
# slow_click(selector)
return self.slow_click(*args, **kwargs)
def 如果可见请单击(self, *args, **kwargs):
# click_if_visible(selector, by=By.CSS_SELECTOR)
return self.click_if_visible(*args, **kwargs)
def 单击链接文本(self, *args, **kwargs):
# click_link_text(link_text)
return self.click_link_text(*args, **kwargs)
def 更新文本(self, *args, **kwargs):
# update_text(selector, text)
return self.update_text(*args, **kwargs)
def 输入文本(self, *args, **kwargs):
# type(selector, text) # Same as update_text()
return self.type(*args, **kwargs)
def 添加文本(self, *args, **kwargs):
# add_text(selector, text)
return self.add_text(*args, **kwargs)
def 获取文本(self, *args, **kwargs):
# get_text(selector, text)
return self.get_text(*args, **kwargs)
def 断言文本(self, *args, **kwargs):
# assert_text(text, selector)
return self.assert_text(*args, **kwargs)
def 确切断言文本(self, *args, **kwargs):
# assert_exact_text(text, selector)
return self.assert_exact_text(*args, **kwargs)
def 断言链接文本(self, *args, **kwargs):
# assert_link_text(link_text)
return self.assert_link_text(*args, **kwargs)
def 断言元素(self, *args, **kwargs):
# assert_element(selector)
return self.assert_element(*args, **kwargs)
def 断言元素可见(self, *args, **kwargs):
# assert_element_visible(selector) # Same as self.assert_element()
return self.assert_element_visible(*args, **kwargs)
def 断言元素不可见(self, *args, **kwargs):
# assert_element_not_visible(selector)
return self.assert_element_not_visible(*args, **kwargs)
def 断言元素存在(self, *args, **kwargs):
# assert_element_present(selector)
return self.assert_element_present(*args, **kwargs)
def 断言元素不存在(self, *args, **kwargs):
# assert_element_absent(selector)
return self.assert_element_absent(*args, **kwargs)
def 断言属性(self, *args, **kwargs):
# assert_attribute(selector, attribute, value)
return self.assert_attribute(*args, **kwargs)
def 断言标题(self, *args, **kwargs):
# assert_title(title)
return self.assert_title(*args, **kwargs)
def 获取标题(self, *args, **kwargs):
# get_title()
return self.get_title(*args, **kwargs)
def 断言为真(self, *args, **kwargs):
# assert_true(expr)
return self.assert_true(*args, **kwargs)
def 断言为假(self, *args, **kwargs):
# assert_false(expr)
return self.assert_false(*args, **kwargs)
def 断言等于(self, *args, **kwargs):
# assert_equal(first, second)
return self.assert_equal(*args, **kwargs)
def 断言不等于(self, *args, **kwargs):
# assert_not_equal(first, second)
return self.assert_not_equal(*args, **kwargs)
def 刷新页面(self, *args, **kwargs):
# refresh_page()
return self.refresh_page(*args, **kwargs)
def 获取当前网址(self, *args, **kwargs):
# get_current_url()
return self.get_current_url(*args, **kwargs)
def 获取页面源代码(self, *args, **kwargs):
# get_page_source()
return self.get_page_source(*args, **kwargs)
def 回去(self, *args, **kwargs):
# go_back()
return self.go_back(*args, **kwargs)
def 向前(self, *args, **kwargs):
# go_forward()
return self.go_forward(*args, **kwargs)
def 文本是否显示(self, *args, **kwargs):
# is_text_visible(text, selector="html")
return self.is_text_visible(*args, **kwargs)
def 元素是否可见(self, *args, **kwargs):
# is_element_visible(selector)
return self.is_element_visible(*args, **kwargs)
def 元素是否启用(self, *args, **kwargs):
# is_element_enabled(selector)
return self.is_element_enabled(*args, **kwargs)
def 元素是否存在(self, *args, **kwargs):
# is_element_present(selector)
return self.is_element_present(*args, **kwargs)
def 等待文本(self, *args, **kwargs):
# wait_for_text(text, selector="html")
return self.wait_for_text(*args, **kwargs)
def 等待元素(self, *args, **kwargs):
# wait_for_element(selector)
return self.wait_for_element(*args, **kwargs)
def 等待元素可见(self, *args, **kwargs):
# wait_for_element_visible(selector) # Same as wait_for_element()
return self.wait_for_element_visible(*args, **kwargs)
def 等待元素不可见(self, *args, **kwargs):
# wait_for_element_not_visible(selector)
return self.wait_for_element_not_visible(*args, **kwargs)
def 等待元素存在(self, *args, **kwargs):
# wait_for_element_present(selector)
return self.wait_for_element_present(*args, **kwargs)
def 等待元素不存在(self, *args, **kwargs):
# wait_for_element_absent(selector)
return self.wait_for_element_absent(*args, **kwargs)
def 等待属性(self, *args, **kwargs):
# wait_for_attribute(selector, attribute, value)
return self.wait_for_attribute(*args, **kwargs)
def 睡(self, *args, **kwargs):
# sleep(seconds)
return self.sleep(*args, **kwargs)
def 等待(self, *args, **kwargs):
# wait(seconds) # Same as sleep(seconds)
return self.wait(*args, **kwargs)
def 提交(self, *args, **kwargs):
# submit(selector)
return self.submit(*args, **kwargs)
def 清除(self, *args, **kwargs):
# clear(selector)
return self.clear(*args, **kwargs)
def 专注于(self, *args, **kwargs):
# focus(selector)
return self.focus(*args, **kwargs)
def JS单击(self, *args, **kwargs):
# js_click(selector)
return self.js_click(*args, **kwargs)
def JS更新文本(self, *args, **kwargs):
# js_update_text(selector, text)
return self.js_update_text(*args, **kwargs)
def JS输入文本(self, *args, **kwargs):
# js_type(selector, text)
return self.js_type(*args, **kwargs)
def 检查HTML(self, *args, **kwargs):
# inspect_html()
return self.inspect_html(*args, **kwargs)
def 保存截图(self, *args, **kwargs):
# save_screenshot(name)
return self.save_screenshot(*args, **kwargs)
def 保存截图到日志(self, *args, **kwargs):
# save_screenshot_to_logs(name)
return self.save_screenshot_to_logs(*args, **kwargs)
def 选择文件(self, *args, **kwargs):
# choose_file(selector, file_path)
return self.choose_file(*args, **kwargs)
def 执行脚本(self, *args, **kwargs):
# execute_script(script)
return self.execute_script(*args, **kwargs)
def 安全执行脚本(self, *args, **kwargs):
# safe_execute_script(script)
return self.safe_execute_script(*args, **kwargs)
def 加载JQUERY(self, *args, **kwargs):
# activate_jquery()
return self.activate_jquery(*args, **kwargs)
def 阻止广告(self, *args, **kwargs):
# ad_block()
return self.ad_block(*args, **kwargs)
def 跳过(self, *args, **kwargs):
# skip(reason="")
return self.skip(*args, **kwargs)
def 检查断开的链接(self, *args, **kwargs):
# assert_no_404_errors()
return self.assert_no_404_errors(*args, **kwargs)
def 检查JS错误(self, *args, **kwargs):
# assert_no_js_errors()
return self.assert_no_js_errors(*args, **kwargs)
def 切换到帧(self, *args, **kwargs):
# switch_to_frame(frame)
return self.switch_to_frame(*args, **kwargs)
def 切换到默认内容(self, *args, **kwargs):
# switch_to_default_content()
return self.switch_to_default_content(*args, **kwargs)
def 打开新窗口(self, *args, **kwargs):
# open_new_window()
return self.open_new_window(*args, **kwargs)
def 切换到窗口(self, *args, **kwargs):
# switch_to_window(window)
return self.switch_to_window(*args, **kwargs)
def 切换到默认窗口(self, *args, **kwargs):
# switch_to_default_window()
return self.switch_to_default_window(*args, **kwargs)
def 切换到最新的窗口(self, *args, **kwargs):
# switch_to_newest_window()
return self.switch_to_newest_window(*args, **kwargs)
def 最大化窗口(self, *args, **kwargs):
# maximize_window()
return self.maximize_window(*args, **kwargs)
def 亮点(self, *args, **kwargs):
# highlight(selector)
return self.highlight(*args, **kwargs)
def 亮点单击(self, *args, **kwargs):
# highlight_click(selector)
return self.highlight_click(*args, **kwargs)
def 滚动到(self, *args, **kwargs):
# scroll_to(selector)
return self.scroll_to(*args, **kwargs)
def 滚动到顶部(self, *args, **kwargs):
# scroll_to_top()
return self.scroll_to_top(*args, **kwargs)
def 滚动到底部(self, *args, **kwargs):
# scroll_to_bottom()
return self.scroll_to_bottom(*args, **kwargs)
def 悬停并单击(self, *args, **kwargs):
# hover_and_click(hover_selector, click_selector)
return self.hover_and_click(*args, **kwargs)
def 是否被选中(self, *args, **kwargs):
# is_selected(selector)
return self.is_selected(*args, **kwargs)
def 按向上箭头(self, *args, **kwargs):
# press_up_arrow(selector="html", times=1)
return self.press_up_arrow(*args, **kwargs)
def 按向下箭头(self, *args, **kwargs):
# press_down_arrow(selector="html", times=1)
return self.press_down_arrow(*args, **kwargs)
def 按向左箭头(self, *args, **kwargs):
# press_left_arrow(selector="html", times=1)
return self.press_left_arrow(*args, **kwargs)
def 按向右箭头(self, *args, **kwargs):
# press_right_arrow(selector="html", times=1)
return self.press_right_arrow(*args, **kwargs)
def 单击可见元素(self, *args, **kwargs):
# click_visible_elements(selector)
return self.click_visible_elements(*args, **kwargs)
def 按文本选择选项(self, *args, **kwargs):
# select_option_by_text(dropdown_selector, option)
return self.select_option_by_text(*args, **kwargs)
def 按索引选择选项(self, *args, **kwargs):
# select_option_by_index(dropdown_selector, option)
return self.select_option_by_index(*args, **kwargs)
def 按值选择选项(self, *args, **kwargs):
# select_option_by_value(dropdown_selector, option)
return self.select_option_by_value(*args, **kwargs)
def 创建演示文稿(self, *args, **kwargs):
# create_presentation(name=None, theme="default", transition="default")
return self.create_presentation(*args, **kwargs)
def 添加幻灯片(self, *args, **kwargs):
# add_slide(content=None, image=None, code=None, iframe=None,
# content2=None, notes=None, transition=None, name=None)
return self.add_slide(*args, **kwargs)
def 保存演示文稿(self, *args, **kwargs):
# save_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.save_presentation(*args, **kwargs)
def 开始演示文稿(self, *args, **kwargs):
# begin_presentation(name=None, filename=None,
# show_notes=False, interval=0)
return self.begin_presentation(*args, **kwargs)
def 创建饼图(self, *args, **kwargs):
# create_pie_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_pie_chart(*args, **kwargs)
def 创建条形图(self, *args, **kwargs):
# create_bar_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_bar_chart(*args, **kwargs)
def 创建柱形图(self, *args, **kwargs):
# create_column_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, libs=True)
return self.create_column_chart(*args, **kwargs)
def 创建折线图(self, *args, **kwargs):
# create_line_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_line_chart(*args, **kwargs)
def 创建面积图(self, *args, **kwargs):
# create_area_chart(chart_name=None, title=None, subtitle=None,
# data_name=None, unit=None, zero=False, libs=True)
return self.create_area_chart(*args, **kwargs)
def 将系列添加到图表(self, *args, **kwargs):
# add_series_to_chart(data_name=None, chart_name=None)
return self.add_series_to_chart(*args, **kwargs)
def 添加数据点(self, *args, **kwargs):
# add_data_point(label, value, color=None, chart_name=None)
return self.add_data_point(*args, **kwargs)
def 保存图表(self, *args, **kwargs):
# save_chart(chart_name=None, filename=None)
return self.save_chart(*args, **kwargs)
def 显示图表(self, *args, **kwargs):
# display_chart(chart_name=None, filename=None, interval=0)
return self.display_chart(*args, **kwargs)
def 提取图表(self, *args, **kwargs):
# extract_chart(chart_name=None)
return self.extract_chart(*args, **kwargs)
def 创建游览(self, *args, **kwargs):
# create_tour(name=None, theme=None)
return self.create_tour(*args, **kwargs)
def 创建SHEPHERD游览(self, *args, **kwargs):
# create_shepherd_tour(name=None, theme=None)
return self.create_shepherd_tour(*args, **kwargs)
def 创建BOOTSTRAP游览(self, *args, **kwargs):
# create_bootstrap_tour(name=None, theme=None)
return self.create_bootstrap_tour(*args, **kwargs)
def 创建DRIVERJS游览(self, *args, **kwargs):
# create_driverjs_tour(name=None, theme=None)
return self.create_driverjs_tour(*args, **kwargs)
def 创建HOPSCOTCH游览(self, *args, **kwargs):
# create_hopscotch_tour(name=None, theme=None)
return self.create_hopscotch_tour(*args, **kwargs)
def 创建INTROJS游览(self, *args, **kwargs):
# create_introjs_tour(name=None, theme=None)
return self.create_introjs_tour(*args, **kwargs)
def 添加游览步骤(self, *args, **kwargs):
# add_tour_step(message, selector=None, name=None,
# title=None, theme=None, alignment=None)
return self.add_tour_step(*args, **kwargs)
def 播放游览(self, *args, **kwargs):
# play_tour(name=None)
return self.play_tour(*args, **kwargs)
def 导出游览(self, *args, **kwargs):
# export_tour(name=None, filename="my_tour.js", url=None)
return self.export_tour(*args, **kwargs)
def 获取PDF文本(self, *args, **kwargs):
# get_pdf_text(pdf, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=False, nav=False, override=False)
return self.get_pdf_text(*args, **kwargs)
def 断言PDF文本(self, *args, **kwargs):
# assert_pdf_text(pdf, text, page=None, maxpages=None, password=None,
# codec='utf-8', wrap=True, nav=False, override=False)
return self.assert_pdf_text(*args, **kwargs)
def 下载文件(self, *args, **kwargs):
# download_file(file)
return self.download_file(*args, **kwargs)
def 下载的文件是否存在(self, *args, **kwargs):
# is_downloaded_file_present(file)
return self.is_downloaded_file_present(*args, **kwargs)
def 获取下载的文件路径(self, *args, **kwargs):
# get_path_of_downloaded_file(file)
return self.get_path_of_downloaded_file(*args, **kwargs)
def 检查下载的文件(self, *args, **kwargs):
# assert_downloaded_file(file)
return self.assert_downloaded_file(*args, **kwargs)
def 删除下载的文件(self, *args, **kwargs):
# delete_downloaded_file(file)
return self.delete_downloaded_file(*args, **kwargs)
def 失败(self, *args, **kwargs):
# fail(msg=None) # Inherited from "unittest"
return self.fail(*args, **kwargs)
def 获取(self, *args, **kwargs):
# get(url) # Same as open(url)
return self.get(*args, **kwargs)
def 访问(self, *args, **kwargs):
# visit(url) # Same as open(url)
return self.visit(*args, **kwargs)
def 访问网址(self, *args, **kwargs):
# visit_url(url) # Same as open(url)
return self.visit_url(*args, **kwargs)
def 获取元素(self, *args, **kwargs):
# get_element(selector) # Element can be hidden
return self.get_element(*args, **kwargs)
def 查找元素(self, *args, **kwargs):
# find_element(selector) # Element must be visible
return self.find_element(*args, **kwargs)
def 删除第一个元素(self, *args, **kwargs):
# remove_element(selector)
return self.remove_element(*args, **kwargs)
def 删除所有元素(self, *args, **kwargs):
# remove_elements(selector)
return self.remove_elements(*args, **kwargs)
def 查找文本(self, *args, **kwargs):
# find_text(text, selector="html") # Same as wait_for_text
return self.find_text(*args, **kwargs)
def 设置文本(self, *args, **kwargs):
# set_text(selector, text)
return self.set_text(*args, **kwargs)
def 获取属性(self, *args, **kwargs):
# get_attribute(selector, attribute)
return self.get_attribute(*args, **kwargs)
def 设置属性(self, *args, **kwargs):
# set_attribute(selector, attribute, value)
return self.set_attribute(*args, **kwargs)
def 设置所有属性(self, *args, **kwargs):
# set_attributes(selector, attribute, value)
return self.set_attributes(*args, **kwargs)
def 写文本(self, *args, **kwargs):
# write(selector, text) # Same as update_text()
return self.write(*args, **kwargs)
def 设置消息主题(self, *args, **kwargs):
# set_messenger_theme(theme="default", location="default")
return self.set_messenger_theme(*args, **kwargs)
def 显示讯息(self, *args, **kwargs):
# post_message(message, duration=None, pause=True, style="info")
return self.post_message(*args, **kwargs)
def 打印(self, *args, **kwargs):
# _print(msg) # Same as Python print()
return self._print(*args, **kwargs)
def 推迟断言元素(self, *args, **kwargs):
# deferred_assert_element(selector)
return self.deferred_assert_element(*args, **kwargs)
def 推迟断言文本(self, *args, **kwargs):
# deferred_assert_text(text, selector="html")
return self.deferred_assert_text(*args, **kwargs)
def 处理推迟断言(self, *args, **kwargs):
# process_deferred_asserts(print_only=False)
return self.process_deferred_asserts(*args, **kwargs)
def 接受警报(self, *args, **kwargs):
# accept_alert(timeout=None)
return self.accept_alert(*args, **kwargs)
def 解除警报(self, *args, **kwargs):
# dismiss_alert(timeout=None)
return self.dismiss_alert(*args, **kwargs)
def 切换到警报(self, *args, **kwargs):
# switch_to_alert(timeout=None)
return self.switch_to_alert(*args, **kwargs)
def 拖放(self, *args, **kwargs):
# drag_and_drop(drag_selector, drop_selector)
return self.drag_and_drop(*args, **kwargs)
def 设置HTML(self, *args, **kwargs):
# set_content(html_string, new_page=False)
return self.set_content(*args, **kwargs)
def 加载HTML文件(self, *args, **kwargs):
# load_html_file(html_file, new_page=True)
return self.load_html_file(*args, **kwargs)
def 打开HTML文件(self, *args, **kwargs):
# open_html_file(html_file)
return self.open_html_file(*args, **kwargs)
def 删除所有COOKIE(self, *args, **kwargs):
# delete_all_cookies()
return self.delete_all_cookies(*args, **kwargs)
def 获取用户代理(self, *args, **kwargs):
# get_user_agent()
return self.get_user_agent(*args, **kwargs)
def 获取语言代码(self, *args, **kwargs):
# get_locale_code()
return self.get_locale_code(*args, **kwargs)
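# Hypothetical usage sketch (comment added; not part of this module; the test
# class, URL and title are assumptions): a test written against the translated
# API, run with the usual SeleniumBase pytest runner:
#
#   class 我的测试(硒测试用例):
#       def test_示例(self):
#           self.开启("https://example.com")
#           self.断言标题("Example Domain")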
class MasterQA_中文(MasterQA, 硒测试用例):
def 校验(self, *args, **kwargs):
# "Manual Check"
self.DEFAULT_VALIDATION_TITLE = "手动检查"
# "Does the page look good?"
self.DEFAULT_VALIDATION_MESSAGE = "页面是否看起来不错?"
# verify(QUESTION)
return self.verify(*args, **kwargs)
| mit | 8,168,400,214,733,496,000 | 33.495017 | 79 | 0.597419 | false |
bolkedebruin/airflow | tests/providers/microsoft/azure/operators/test_azure_container_instances.py | 1 | 9083 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from collections import namedtuple
import mock
from azure.mgmt.containerinstance.models import ContainerState, Event
from airflow.exceptions import AirflowException
from airflow.providers.microsoft.azure.operators.azure_container_instances import (
AzureContainerInstancesOperator,
)
def make_mock_cg(container_state, events=None):
"""
Make a mock Container Group as the underlying azure Models have read-only attributes
See https://docs.microsoft.com/en-us/rest/api/container-instances/containergroups
"""
events = events or []
instance_view_dict = {"current_state": container_state,
"events": events}
instance_view = namedtuple("InstanceView",
instance_view_dict.keys())(*instance_view_dict.values())
container_dict = {"instance_view": instance_view}
container = namedtuple("Container", container_dict.keys())(*container_dict.values())
container_g_dict = {"containers": [container]}
container_g = namedtuple("ContainerGroup",
container_g_dict.keys())(*container_g_dict.values())
return container_g
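# Illustrative note (comment added; not in the original tests): make_mock_cg
# builds a namedtuple chain mirroring the read-only azure model, so e.g.
#
#   cg = make_mock_cg(ContainerState(state='Running', exit_code=0,
#                                    detail_status='test'))
#   cg.containers[0].instance_view.current_state.state   # -> 'Running'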
class TestACIOperator(unittest.TestCase):
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
(called_rg, called_cn, called_cg), _ = \
aci_mock.return_value.create_or_update.call_args
self.assertEqual(called_rg, 'resource-group')
self.assertEqual(called_cn, 'container-name')
self.assertEqual(called_cg.location, 'region')
self.assertEqual(called_cg.image_registry_credentials, None)
self.assertEqual(called_cg.restart_policy, 'Never')
self.assertEqual(called_cg.os_type, 'Linux')
called_cg_container = called_cg.containers[0]
self.assertEqual(called_cg_container.name, 'container-name')
self.assertEqual(called_cg_container.image, 'container-image')
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_failures(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=1, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
with self.assertRaises(AirflowException):
aci.execute(None)
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_tags(self, aci_mock):
expected_c_state = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg = make_mock_cg(expected_c_state)
tags = {"testKey": "testValue"}
aci_mock.return_value.get_state.return_value = expected_cg
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task',
tags=tags)
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
(called_rg, called_cn, called_cg), _ = \
aci_mock.return_value.create_or_update.call_args
self.assertEqual(called_rg, 'resource-group')
self.assertEqual(called_cn, 'container-name')
self.assertEqual(called_cg.location, 'region')
self.assertEqual(called_cg.image_registry_credentials, None)
self.assertEqual(called_cg.restart_policy, 'Never')
self.assertEqual(called_cg.os_type, 'Linux')
self.assertEqual(called_cg.tags, tags)
called_cg_container = called_cg.containers[0]
self.assertEqual(called_cg_container.name, 'container-name')
self.assertEqual(called_cg_container.image, 'container-image')
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
@mock.patch("airflow.providers.microsoft.azure.operators."
"azure_container_instances.AzureContainerInstanceHook")
def test_execute_with_messages_logs(self, aci_mock):
events = [Event(message="test"), Event(message="messages")]
expected_c_state1 = ContainerState(state='Running', exit_code=0, detail_status='test')
expected_cg1 = make_mock_cg(expected_c_state1, events)
expected_c_state2 = ContainerState(state='Terminated', exit_code=0, detail_status='test')
expected_cg2 = make_mock_cg(expected_c_state2, events)
aci_mock.return_value.get_state.side_effect = [expected_cg1,
expected_cg2]
aci_mock.return_value.get_logs.return_value = ["test", "logs"]
aci_mock.return_value.exists.return_value = False
aci = AzureContainerInstancesOperator(ci_conn_id=None,
registry_conn_id=None,
resource_group='resource-group',
name='container-name',
image='container-image',
region='region',
task_id='task')
aci.execute(None)
self.assertEqual(aci_mock.return_value.create_or_update.call_count, 1)
self.assertEqual(aci_mock.return_value.get_state.call_count, 2)
self.assertEqual(aci_mock.return_value.get_logs.call_count, 2)
self.assertEqual(aci_mock.return_value.delete.call_count, 1)
def test_name_checker(self):
valid_names = ['test-dash', 'name-with-length---63' * 3]
invalid_names = ['test_underscore',
'name-with-length---84' * 4,
'name-ending-with-dash-',
'-name-starting-with-dash']
for name in invalid_names:
with self.assertRaises(AirflowException):
AzureContainerInstancesOperator._check_name(name)
for name in valid_names:
checked_name = AzureContainerInstancesOperator._check_name(name)
self.assertEqual(checked_name, name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,417,109,348,444,527,600 | 45.106599 | 97 | 0.594187 | false |
luci/luci-py | appengine/swarming/proto/config/realms_pb2.py | 2 | 4962 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: realms.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='realms.proto',
package='swarming.config',
syntax='proto3',
serialized_options=b'Z3go.chromium.org/luci/swarming/proto/config;configpb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0crealms.proto\x12\x0fswarming.config*\xda\x03\n\x0fRealmPermission\x12 \n\x1cREALM_PERMISSION_UNSPECIFIED\x10\x00\x12&\n\"REALM_PERMISSION_POOLS_CREATE_TASK\x10\x01\x12%\n!REALM_PERMISSION_POOLS_LIST_TASKS\x10\x04\x12&\n\"REALM_PERMISSION_POOLS_CANCEL_TASK\x10\x05\x12%\n!REALM_PERMISSION_POOLS_CREATE_BOT\x10\x06\x12$\n REALM_PERMISSION_POOLS_LIST_BOTS\x10\x07\x12(\n$REALM_PERMISSION_POOLS_TERMINATE_BOT\x10\x08\x12%\n!REALM_PERMISSION_POOLS_DELETE_BOT\x10\t\x12*\n&REALM_PERMISSION_TASKS_CREATE_IN_REALM\x10\x02\x12!\n\x1dREALM_PERMISSION_TASKS_ACT_AS\x10\x03\x12\x1e\n\x1aREALM_PERMISSION_TASKS_GET\x10\n\x12!\n\x1dREALM_PERMISSION_TASKS_CANCEL\x10\x0b\x42\x35Z3go.chromium.org/luci/swarming/proto/config;configpbb\x06proto3'
)
_REALMPERMISSION = _descriptor.EnumDescriptor(
name='RealmPermission',
full_name='swarming.config.RealmPermission',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_CREATE_TASK', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_LIST_TASKS', index=2, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_CANCEL_TASK', index=3, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_CREATE_BOT', index=4, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_LIST_BOTS', index=5, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_TERMINATE_BOT', index=6, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_POOLS_DELETE_BOT', index=7, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_TASKS_CREATE_IN_REALM', index=8, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_TASKS_ACT_AS', index=9, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_TASKS_GET', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REALM_PERMISSION_TASKS_CANCEL', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=34,
serialized_end=508,
)
_sym_db.RegisterEnumDescriptor(_REALMPERMISSION)
RealmPermission = enum_type_wrapper.EnumTypeWrapper(_REALMPERMISSION)
REALM_PERMISSION_UNSPECIFIED = 0
REALM_PERMISSION_POOLS_CREATE_TASK = 1
REALM_PERMISSION_POOLS_LIST_TASKS = 4
REALM_PERMISSION_POOLS_CANCEL_TASK = 5
REALM_PERMISSION_POOLS_CREATE_BOT = 6
REALM_PERMISSION_POOLS_LIST_BOTS = 7
REALM_PERMISSION_POOLS_TERMINATE_BOT = 8
REALM_PERMISSION_POOLS_DELETE_BOT = 9
REALM_PERMISSION_TASKS_CREATE_IN_REALM = 2
REALM_PERMISSION_TASKS_ACT_AS = 3
REALM_PERMISSION_TASKS_GET = 10
REALM_PERMISSION_TASKS_CANCEL = 11
DESCRIPTOR.enum_types_by_name['RealmPermission'] = _REALMPERMISSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 3,459,187,126,329,450,500 | 40.008264 | 752 | 0.737001 | false |
alfred82santa/telebot | aiotelebot/formatters.py | 1 | 2590 | from json import dumps
from aiohttp.hdrs import CONTENT_TYPE
from aiohttp.multipart import MultipartWriter
from aiohttp.payload import get_payload
from multidict import CIMultiDict
from dirty_models.fields import ArrayField, ModelField
from dirty_models.models import BaseModel
from dirty_models.utils import ModelFormatterIter, JSONEncoder, ListFormatterIter
from service_client.json import json_decoder
from .messages import FileModel, Response
class ContainsFileError(Exception):
pass
class TelegramModelFormatterIter(ModelFormatterIter):
def format_field(self, field, value):
if isinstance(field, ModelField):
if isinstance(value, FileModel):
return value
return dumps(value, cls=JSONEncoder)
elif isinstance(field, ArrayField):
return dumps(ListFormatterIter(obj=value,
field=value.get_field_type(),
parent_formatter=ModelFormatterIter(model=self.model)),
cls=JSONEncoder)
return super(TelegramModelFormatterIter, self).format_field(field, value)
class TelegramJsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, FileModel):
raise ContainsFileError()
elif isinstance(obj, BaseModel):
obj = TelegramModelFormatterIter(obj)
return super(TelegramJsonEncoder, self).default(obj)
def telegram_encoder(content, *args, **kwargs):
try:
return dumps(content, cls=TelegramJsonEncoder)
except ContainsFileError:
pass
formatter = TelegramModelFormatterIter(content)
kwargs['endpoint_desc']['stream_request'] = True
mp = MultipartWriter('form-data')
for field, value in formatter:
content_dispositon = {'name': field}
if isinstance(value, FileModel):
part = get_payload(value.stream, headers=CIMultiDict())
if value.name:
content_dispositon['filename'] = value.name
if value.mime_type:
part.headers[CONTENT_TYPE] = value.mime_type
else:
part = get_payload(str(value), headers=CIMultiDict())
part.set_content_disposition("form-data", **content_dispositon)
mp.append_payload(part)
try:
kwargs['request_params']['headers'].update(mp.headers)
except KeyError:
kwargs['request_params']['headers'] = mp.headers
return mp
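# Illustrative note (comment added; not from the original source): telegram_encoder
# first attempts a plain JSON dump; TelegramJsonEncoder raises ContainsFileError as
# soon as it meets a FileModel, and the encoder then falls back to building a
# multipart/form-data body so the file stream can be uploaded with the other fields.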
def telegram_decoder(content, *args, **kwargs):
return Response(json_decoder(content, *args, **kwargs))
| lgpl-3.0 | 2,767,400,065,749,186,000 | 30.975309 | 98 | 0.661004 | false |
ActiveState/code | recipes/Python/578102_Nautilus_script_push_files_S3/recipe-578102.py | 1 | 1426 | #!/usr/bin/env python
import mimetypes
import os
import sys
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
def get_s3_conn():
return S3Connection()
def get_bucket(conn, name):
return conn.get_bucket(name)
og = os.environ.get
bucket_name = og('NAUTILUS_BUCKET_NAME', 'media.foo.com')
bucket_prefix = og('NAUTILUS_BUCKET_PREFIX', 'scrapspace/files')
conn = get_s3_conn()
bucket = get_bucket(conn, bucket_name)
def get_ctype(f):
    return mimetypes.guess_type(f)[0] or "application/octet-stream"
def put_file(filename, keyname):
new_key = Key(bucket)
new_key.key = keyname
new_key.set_metadata('Content-Type', get_ctype(filename))
new_key.set_contents_from_filename(filename)
if __name__ == '__main__':
for name in sys.argv[1:]:
full = os.path.abspath(name)
if os.path.isdir(name):
parent_dir = os.path.dirname(full)
for base, directories, files in os.walk(full):
for filename in files:
full_path = os.path.join(base, filename)
rel_path = os.path.relpath(full_path, parent_dir)
keyname = os.path.join(bucket_prefix, rel_path)
put_file(full_path, keyname)
else:
filename = os.path.basename(name)
keyname = os.path.join(bucket_prefix, filename)
put_file(filename, keyname)
| mit | -6,771,188,464,086,638,000 | 28.708333 | 69 | 0.626928 | false |
wmvanvliet/psychic | psychic/scalpplot.py | 1 | 3391 | import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from matplotlib.path import Path
from matplotlib.patches import PathPatch, Circle
from . import positions
def plot_scalp(densities, sensors, sensor_locs=None,
plot_sensors=True, plot_contour=True, cmap=None, clim=None, smark='k.', linewidth=2, fontsize=8):
if sensor_locs is None:
sensor_locs = positions.POS_10_5
if cmap is None:
cmap = plt.get_cmap('RdBu_r')
# add densities
if clim is None:
cmax = np.max(np.abs(densities))
clim = [-cmax, cmax]
locs = [positions.project_scalp(*sensor_locs[lab]) for lab in sensors]
add_density(densities, locs, cmap=cmap, clim=clim, plot_contour=plot_contour)
# setup plot
MARGIN = 1.2
plt.xlim(-MARGIN, MARGIN)
plt.ylim(-MARGIN, MARGIN)
plt.box(False)
ax = plt.gca()
ax.set_aspect(1.2)
ax.yaxis.set_ticks([],[])
ax.xaxis.set_ticks([],[])
# add details
add_head(linewidth)
if plot_sensors:
add_sensors(sensors, locs, smark, fontsize)
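# Hypothetical usage sketch (comment added; not part of the original module;
# the label choice and random data are assumptions): plotting one density value
# per 10-5 electrode label:
#
#   if __name__ == '__main__':
#     labels = ['Fz', 'Cz', 'Pz', 'Oz']
#     plot_scalp(np.random.randn(len(labels)), labels)
#     plt.show()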
def add_head(linewidth=2):
'''Draw head outline'''
nose = [(Path.MOVETO, (-.1, 1.)), (Path.LINETO, (0, 1.1)),
(Path.LINETO, (.1, 1.))]
lear = [(Path.MOVETO, (-1, .134)), (Path.LINETO, (-1.04, 0.08)),
(Path.LINETO, (-1.08, -0.11)), (Path.LINETO, (-1.06, -0.16)),
(Path.LINETO, (-1.02, -0.15)), (Path.LINETO, (-1, -0.12))]
rear = [(c, (-px, py)) for (c, (px, py)) in lear]
# plot outline
ax = plt.gca()
ax.add_artist(plt.Circle((0, 0), 1, fill=False, linewidth=linewidth))
# add nose and ears
for p in [nose, lear, rear]:
code, verts = list(zip(*p))
ax.add_patch(PathPatch(Path(verts, code), fill=False, linewidth=linewidth))
def add_sensors(labels, locs, smark='k.', fontsize=8):
'''Adds sensor names and markers'''
for (label, (x, y)) in zip(labels, locs):
if len(labels) <= 16:
plt.text(x, y + .03, label, fontsize=fontsize, ha='center')
plt.plot(x, y, smark, ms=2.)
def add_density(dens, locs, cmap=plt.cm.jet, clim=None, plot_contour=True):
'''
  This function draws the densities at the projected sensor locations given in
  locs (one density value per location). The densities are inter/extrapolated
  on a grid slightly bigger than the head using scipy.interpolate.Rbf. The grid
  is drawn inside a circle using the colors provided by cmap and clim. Contours
  are drawn on top of this grid when plot_contour is set.
'''
RESOLUTION = 50
RADIUS = 1.2
xs, ys = list(zip(*locs))
extent = [-1.2, 1.2, -1.2, 1.2]
vmin, vmax = clim
# interpolate
  # TODO: replace with a Gaussian process interpolator. I don't trust SciPy's
  # interpolation functions (they wiggle and they segfault).
rbf = interpolate.Rbf(xs, ys, dens, function='linear')
xg = np.linspace(extent[0], extent[1], RESOLUTION)
yg = np.linspace(extent[2], extent[3], RESOLUTION)
xg, yg = np.meshgrid(xg, yg)
zg = rbf(xg, yg)
# draw contour
if plot_contour:
plt.contour(xg, yg, np.where(xg ** 2 + yg ** 2 <= RADIUS ** 2, zg, np.nan),
np.linspace(vmin, vmax, 13), colors='k', extent=extent, linewidths=.3)
# draw grid, needs to be last to enable plt.colormap() to work
im = plt.imshow(zg, origin='lower', extent=extent, vmin=vmin, vmax=vmax,
cmap=cmap)
# clip grid to circle
patch = Circle((0, 0), radius=RADIUS, facecolor='none', edgecolor='none')
plt.gca().add_patch(patch)
im.set_clip_path(patch)
| bsd-3-clause | 607,890,273,072,048,300 | 32.245098 | 99 | 0.654969 | false |
ShaguptaS/python | bigml/tests/create_evaluation_steps.py | 1 | 3119 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012, 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
from datetime import datetime, timedelta
from world import world
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_evaluation_steps import i_get_the_evaluation
#@step(r'I create an evaluation for the model with the dataset$')
def i_create_an_evaluation(step):
dataset = world.dataset.get('resource')
model = world.model.get('resource')
resource = world.api.create_evaluation(model, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.evaluation = resource['object']
world.evaluations.append(resource['resource'])
#@step(r'I create an evaluation for the ensemble with the dataset$')
def i_create_an_evaluation_ensemble(step):
dataset = world.dataset.get('resource')
ensemble = world.ensemble.get('resource')
resource = world.api.create_evaluation(ensemble, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.evaluation = resource['object']
world.evaluations.append(resource['resource'])
#@step(r'I wait until the evaluation status code is either (\d) or (-\d) less than (\d+)')
def wait_until_evaluation_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_evaluation(step, world.evaluation['resource'])
status = get_status(world.evaluation)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_evaluation(step, world.evaluation['resource'])
status = get_status(world.evaluation)
assert status['code'] == int(code1)
#@step(r'I wait until the evaluation is ready less than (\d+)')
def the_evaluation_is_finished_in_less_than(step, secs):
wait_until_evaluation_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'the measured "(.*)" is (\d+\.*\d*)')
def the_measured_measure_is_value(step, measure, value):
ev = world.evaluation['result']['model'][measure] + 0.0
assert ev == float(value), "The %s is: %s and %s is expected" % (
measure, ev, float(value))
#@step(r'the measured "(.*)" is greater than (\d+\.*\d*)')
def the_measured_measure_is_greater_value(step, measure, value):
assert world.evaluation['result']['model'][measure] + 0.0 > float(value)
| apache-2.0 | 8,521,414,331,171,372,000 | 38.987179 | 90 | 0.702469 | false |
miquelo/exectask | packages/exectask/context.py | 1 | 4804 | #
# This file is part of EXECTASK.
#
# EXECTASK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EXECTASK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EXECTASK. If not, see <http://www.gnu.org/licenses/>.
#
from exectask.expression import *
from exectask.merge import *
import json
import sys
class ExecuteTaskContext:
class NonePrinterFactory:
def printer(self, out):
return NonePrinter()
class NonePrinter:
def print(self, text, level=0, color=None, style=None):
pass
def __init__(self, actions={}, printer_fact=NonePrinterFactory()):
self.__actions = actions
self.__printer_fact = printer_fact
self.__variables_stack = [
{}
]
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
def __len__(self):
return self.__variables.__len__()
def __length_hint__(self):
return self.__variables.__length_hint__()
def __getitem__(self, key):
return self.__variables.__getitem__(key)
def __missing__(self):
self.__variables.__missing__()
def __setitem__(self, key, value):
self.__variables.__setitem__(key, value)
def __delitem__(self, key):
self.__variables.__delitem__(key)
def __iter__(self):
return self.__variables.__iter__()
def __reversed__(self):
return self.__variables.__reversed__()
def __contains__(self, item):
return self.__variables.__contains__(item)
def items(self):
return self.__variables.items()
def printer(self, out):
return self.__printer_fact.printer(out)
def execute_task(self, task, variables={}):
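        # Illustrative shape of a task dictionary accepted here (inferred from the
        # checks below; the action name "echo" and all values are placeholders):
        #   {
        #       "description": "Example task",
        #       "variables": {"greeting": "hello"},
        #       "actions": [
        #           {"name": "echo", "parameters": {"text": "hello"}}
        #       ]
        #   }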
# Check parameter types
if not isinstance(task, dict):
raise TypeError('\'task\' must be a dictionary')
if not isinstance(variables, dict):
raise TypeError('\'variables\' must be a dictionary')
# Gather top variables
top_vars = self.__variables_stack[-1]
try:
task_vars = task['variables']
if not isinstance(task_vars, dict):
raise TypeError('Task \'variables\' must be a dictionary')
merge_dict(top_vars, task_vars)
except KeyError:
pass
merge_dict(top_vars, variables)
# Update variables stack
self.__variables_stack.append(top_vars)
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
# Gather description and actions
task_desc = None
task_actions = []
for key, value in task.items():
if key == 'variables':
pass # Already gathered
elif key == 'description':
if not isinstance(value, str):
                    raise TypeError('Task \'description\' must be a string')
task_desc = expression_eval(value, self)
elif key == 'actions':
if not isinstance(value, list):
raise TypeError('Task \'actions\' must be a list')
task_actions = value
else:
raise TypeError('Unknown task field \'{}\''.format(key))
# Print task information
printer = self.__printer_fact.printer(sys.stdout)
if task_desc is not None:
printer.print('==> {}'.format(task_desc), 0, 'white', 'bright')
printer.print('Variables:', 1)
printer.print(json.dumps(top_vars, indent=4, sort_keys=True), 1)
printer.print('Actions:', 1)
printer.print(json.dumps(task_actions, indent=4, sort_keys=True), 1)
# Call task actions
for action in ExpressionList(task_actions, self):
self.call_action(action)
# Restore variables stack
self.__variables_stack.pop()
self.__variables = ExpressionDict(self.__variables_stack[-1], self)
def call_action(self, action):
# Check parameter types
if not isinstance(action, dict):
raise TypeError('\'action\' must be a dictionary')
# Gather name and parameters
name = None
parameters = {}
for key, value in action.items():
if key == 'name':
if not isinstance(value, str):
                    raise TypeError('Action \'name\' must be a string')
name = value
elif key == 'parameters':
if not isinstance(value, dict):
raise TypeError('Action \'parameters\' must be a '
'dictionary')
parameters = value
else:
raise TypeError('Unknown action field \'{}\''.format(key))
if name is None:
raise TypeError('Action \'name\' must be defined')
# Call action function
try:
fn = self.__actions[name]
except KeyError:
raise TypeError('Action \'{}\' was not found'.format(name))
action_locals = {
'fn': fn,
'context': self,
'parameters': parameters
}
eval('fn(context, parameters)', {}, action_locals)
| gpl-3.0 | -5,314,101,906,513,230,000 | 27.595238 | 70 | 0.667777 | false |
tadek-project/tadek-tutorial | examples/gucharmap/testcases/gucharmap/basic.py | 1 | 4000 | ################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to [email protected] ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to [email protected] ##
## ##
################################################################################
from tadek.engine.testdefs import *
from tadek.engine.searchers import *
from tadek.models.gucharmap import *
from tadek.core.constants import *
from tadek.teststeps.gucharmap.basic import *
caseEnterLetters = TestCase(
stepRunCharacterMap(),
stepLatin(),
stepEnterCharacters(chars="Azfla"),
description="Activate a sequence of letters and compare to the text field")
def closeCharacterMap(self, test, device):
if app.getImmediate(device):
app.menu.File.Close.click(device)
class ActivateCharactersWithKeyboardTestsSuite(TestSuite):
description = "Activate characters using keyboard"
caseActivateCharactersWithSpace = TestCase(
stepRunCharacterMap(),
stepLatin(),
stepActivateLettersWithKeyboard(letters="EbFFGF"),
description="Activate a sequence of letters using Space and compare to the text field")
caseActivateCharactersWithEnter = TestCase(
stepRunCharacterMap(),
stepLatin(),
stepActivateLettersWithKeyboard(letters="EbFFGF", activatingKey=KEY_SYMS["ENTER"]),
description="Activate a sequence of letters using Enter and compare to the text field")
| gpl-3.0 | 6,162,875,537,077,569,000 | 59.606061 | 95 | 0.48825 | false |
VanceKingSaxbeA/MarketsEngine | src/bloombergquote.py | 1 | 1536 | # Owner & Copyrights: Vance King Saxbe. A.
""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @[email protected]. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""
import urllib3
import string
from time import localtime, strftime
class bloombergquote:
def getquote(symbol):
url = "http://www.bloomberg.com/quote/"+symbol
http = urllib3.PoolManager()
r = http.request('GET', url)
r.release_conn()
f = r.data.decode("UTF-8")
a = f.split('span class="ticker_data">')
b = []
tstamp = strftime("%H:%M:%S", localtime())
contents = []
try:
b = a[1].split('</span>')
            # append whole fields; extend() on a string would add it character by character
            contents.append(symbol.replace(':',''))
            contents.append(strftime("%Y-%m-%d"))
            contents.append(tstamp)
            contents.append(b[0])
except IndexError:
print("Index error")
return contents
    # email to provide support at [email protected], [email protected], For donations please write to [email protected] | mit | -6,879,921,841,213,445,000 | 52 | 508 | 0.628255 | false |
alekseyig/fusion | lib/pylibcurl/multi.py | 1 | 2883 | #coding=utf8
import ctypes
import lib
import const
import prototype
from pylibcurl.base import Base
### classes
class Multi(Base):
_pointer_type = ctypes.POINTER(const.CURLM)
_lib_init_func = lib.curl_multi_init
_lib_cleanup_func = lib.curl_multi_cleanup
def __init__(self, **kwargs):
self._handles = set()
self._callbacks = {}
if kwargs:
self.setopt(**kwargs)
def __setattr__(self, name, value):
try:
self.setopt(**{name: value})
except ValueError:
object.__setattr__(self, name, value)
def _clear(self):
self._handles.clear()
self._callbacks.clear()
def add_handle(self, curl):
lib.curl_multi_add_handle(self._handle, curl._handle)
self._handles.add(curl)
def remove_handle(self, curl):
lib.curl_multi_remove_handle(self._handle, curl._handle)
self._handles.remove(curl)
def assign(self, socket, callback):
raise NotImplementedError
def fdset(self):
raise NotImplementedError
def perform(self):
running_handles = ctypes.c_int()
code = lib.curl_multi_perform(self._handle, ctypes.byref(running_handles))
return code, running_handles.value
def socket_action(self, socket, event):
running_handles = ctypes.c_int()
code = lib.curl_multi_socket_action(self._handle, socket, event, ctypes.byref(running_handles))
return code, running_handles.value
def info_read(self):
"""
return tuple(msg, number_in_queue)
or
return None
"""
return lib.curl_multi_info_read(self._handle)
def setopt(self, **kwargs):
"""
        c.pipelining = 1
or
c.setopt(pipelining=1)
or
c.setopt(pipelining=1, maxconnects=10)
"""
def setopt(name, value):
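            # Note: callable option values are wrapped with the matching ctypes
            # prototype and kept in self._callbacks, presumably so the wrapper
            # object stays referenced while libcurl may still invoke it.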
option_name = 'CURLMOPT_%s' % name.upper()
if name.islower() and hasattr(const, option_name):
option_value = getattr(const, option_name)
if hasattr(prototype, name):
if callable(value):
value = getattr(prototype, name)(value)
self._callbacks[name] = value
else:
self._callbacks[name] = None
lib.curl_multi_setopt(self._handle, option_value, value)
else:
raise ValueError('invalid option name "%s"' % name)
for k, v in kwargs.items():
setopt(k, v)
def strerror(self, errornum):
return lib.curl_multi_strerror(errornum)
def timeout(self):
time_out = ctypes.c_long()
lib.curl_multi_timeout(self._handle, ctypes.byref(time_out))
return time_out.value
| mit | -3,252,982,349,766,039,000 | 26.990291 | 103 | 0.556712 | false |
michalpravda/Anki_helpers | add-ons/export.py | 1 | 1192 | # import the main window object (mw) from ankiqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
import re
# We're going to add a menu item below. First we want to create a function to
# be called when the menu item is activated.
def testFunction():
ids = mw.col.findCards("deck:'slovicka nemecky'")
with open('d:\\exp.txt', 'w') as f:
output = set()
for id in ids:
card = mw.col.getCard(id)
note = card.note()
for (name, value) in note.items():
if (name == 'Word') or name == 'Text':
value = re.sub('{{c.::(.*?)}}', '\\1', value)
value = value.replace(' ', '').replace('<div>', '').replace('</div>', '')
output.add(value.encode('utf-8'))
lis = sorted(list(output))
for val in lis:
f.write(val + '\n')
        f.close()  # explicit close; the enclosing with-block would close the file anyway
# create a new menu item, "test"
action = QAction("test", mw)
# set it to call testFunction when it's clicked
mw.connect(action, SIGNAL("triggered()"), testFunction)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
| mit | -2,603,656,548,504,197,600 | 33.057143 | 96 | 0.598154 | false |
paulscherrerinstitute/pyscan | tests/helpers/scan_old.py | 1 | 38634 | from copy import deepcopy
from datetime import datetime
from time import sleep
import numpy as np
from tests.helpers.utils import TestPyScanDal
# This is just a dummy GUI class.
class DummyClass:
def __init__(self):
self.Progress = 1 # For Thomas!!
def showPanel(self, s):
pass
def emit(self, signal):
pass
class Scan(object):
def __init__(self, fromGUI=0):
self.epics_dal = None
self.fromGUI = fromGUI
self.outdict = None
self.n_validations = None
self.n_observables = None
self.n_readbacks = None
self.ProgDisp = DummyClass()
self.abortScan = 0
self.pauseScan = 0
def finalizeScan(self):
self.epics_dal.close_group('All')
if self.inlist[-1]['Monitor']:
self.epics_dal.close_group('Monitor')
self.outdict['ErrorMessage'] = 'Measurement finalized (finished/aborted) normally. ' \
'Need initialisation before next measurement.'
if self.fromGUI:
self.ProgDisp.showPanel(0)
def _add_group(self, dic, name, sources, result, close=True):
temp_handle = self.epics_dal.add_group(name, sources)
[output, summary, status] = self.epics_dal.get_group(temp_handle)
if summary != 1: # Something wrong. Try again.
[output, summary, status] = self.epics_dal.get_group(temp_handle)
if summary != 1:
for si in status:
if si != 1:
wch = sources[status.index(si)]
self.epics_dal.close_group(temp_handle)
raise ValueError('Something wrong in Epics channel: ' + wch)
if result:
dic[result] = output
if close:
self.epics_dal.close_group(temp_handle)
def initializeScan(self, inlist, dal):
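        """
        Validate and normalise the list of scan dictionaries, open the DAL/EPICS
        channel groups (including the common 'All' group of readbacks, validations
        and observables), pre-compute the expanded knob values and pre-allocate the
        output structure. Returns the output dictionary; if validation fails, only
        'ErrorMessage' is filled in.
        """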
self.epics_dal = dal or TestPyScanDal()
self.inlist = []
if not isinstance(inlist, list): # It is a simple SKS or MKS
inlist = [inlist]
try:
for index, dic in enumerate(inlist):
dic['ID'] = index # Just in case there are identical input dictionaries. (Normally, it may not happen.)
if index == len(inlist) - 1 and ('Waiting' not in dic.keys()):
raise ValueError('Waiting for the scan was not given.')
self._setup_knobs(index, dic)
self._setup_knob_scan_values(index, dic)
if index == len(inlist) - 1 and ('Observable' not in dic.keys()):
raise ValueError('The observable is not given.')
elif index == len(inlist) - 1:
if not isinstance(dic['Observable'], list):
dic['Observable'] = [dic['Observable']]
if index == len(inlist) - 1 and ('NumberOfMeasurements' not in dic.keys()):
dic['NumberOfMeasurements'] = 1
if 'PreAction' in dic.keys():
if not isinstance(dic['PreAction'], list):
                        raise ValueError('PreAction should be a list. Input dictionary ' + str(index) + '.')
for l in dic['PreAction']:
if not isinstance(l, list):
                            raise ValueError('Every PreAction should be a list. Input dictionary ' + str(index) + '.')
if len(l) != 5:
if not l[0] == 'SpecialAction':
raise ValueError('Every PreAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if 'PreActionWaiting' not in dic.keys():
dic['PreActionWaiting'] = 0.0
if not isinstance(dic['PreActionWaiting'], float) and not isinstance(dic['PreActionWaiting'], int):
                        raise ValueError('PreActionWaiting should be a float. Input dictionary ' + str(index) + '.')
if 'PreActionOrder' not in dic.keys():
dic['PreActionOrder'] = [0] * len(dic['PreAction'])
if not isinstance(dic['PreActionOrder'], list):
                        raise ValueError('PreActionOrder should be a list. Input dictionary ' + str(index) + '.')
else:
dic['PreAction'] = []
dic['PreActionWaiting'] = 0.0
dic['PreActionOrder'] = [0] * len(dic['PreAction'])
if 'In-loopPreAction' in dic.keys():
if not isinstance(dic['In-loopPreAction'], list):
                        raise ValueError('In-loopPreAction should be a list. Input dictionary ' + str(index) + '.')
for l in dic['In-loopPreAction']:
if not isinstance(l, list):
raise ValueError('Every In-loopPreAction should be a list. '
                                             'Input dictionary ' + str(index) + '.')
if len(l) != 5:
if not l[0] == 'SpecialAction':
raise ValueError('Every In-loopPreAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if 'In-loopPreActionWaiting' not in dic.keys():
dic['In-loopPreActionWaiting'] = 0.0
if not isinstance(dic['In-loopPreActionWaiting'], float) and not isinstance(
dic['In-loopPreActionWaiting'], int):
                        raise ValueError('In-loopPreActionWaiting should be a float. Input dictionary ' + str(index) + '.')
if 'In-loopPreActionOrder' not in dic.keys():
dic['In-loopPreActionOrder'] = [0] * len(dic['In-loopPreAction'])
if not isinstance(dic['In-loopPreActionOrder'], list):
                        raise ValueError('In-loopPreActionOrder should be a list. Input dictionary ' + str(index) + '.')
else:
dic['In-loopPreAction'] = []
dic['In-loopPreActionWaiting'] = 0.0
dic['In-loopPreActionOrder'] = [0] * len(dic['In-loopPreAction'])
if 'PostAction' in dic.keys():
if dic['PostAction'] == 'Restore':
PA = []
for i in range(0, len(dic['Knob'])):
k = dic['Knob'][i]
v = dic['KnobSaved'][i]
PA.append([k, k, v, 1.0, 10])
dic['PostAction'] = PA
elif not isinstance(dic['PostAction'], list):
                        raise ValueError('PostAction should be a list. Input dictionary ' + str(index) + '.')
Restore = 0
for i in range(0, len(dic['PostAction'])):
l = dic['PostAction'][i]
if l == 'Restore':
Restore = 1
PA = []
for j in range(0, len(dic['Knob'])):
k = dic['Knob'][j]
v = dic['KnobSaved'][j]
PA.append([k, k, v, 1.0, 10])
elif not isinstance(l, list):
                            raise ValueError('Every PostAction should be a list. Input dictionary ' + str(index) + '.')
elif len(l) != 5:
if not l[0] == 'SpecialAction':
raise ValueError('Every PostAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                                 'Input dictionary ' + str(index) + '.')
if Restore:
dic['PostAction'].remove('Restore')
dic['PostAction'] = dic['PostAction'] + PA
else:
dic['PostAction'] = []
if 'In-loopPostAction' in dic.keys():
if dic['In-loopPostAction'] == 'Restore':
PA = []
for i in range(0, len(dic['Knob'])):
k = dic['Knob'][i]
v = dic['KnobSaved'][i]
PA.append([k, k, v, 1.0, 10])
dic['In-loopPostAction'] = PA
elif not isinstance(dic['In-loopPostAction'], list):
                        raise ValueError('In-loopPostAction should be a list. Input dictionary ' + str(index) + '.')
Restore = 0
for i in range(0, len(dic['In-loopPostAction'])):
l = dic['In-loopPostAction'][i]
if l == 'Restore':
Restore = 1
PA = []
for j in range(0, len(dic['Knob'])):
k = dic['Knob'][j]
v = dic['KnobSaved'][j]
PA.append([k, k, v, 1.0, 10])
dic['In-loopPostAction'][i] = PA
elif not isinstance(l, list):
raise ValueError('Every In-loopPostAction should be a list. '
                                             'Input dictionary ' + str(index) + '.')
elif len(l) != 5:
raise ValueError('Every In-loopPostAction should be in a form of '
'[Ch-set, Ch-read, Value, Tolerance, Timeout]. '
                                             'Input dictionary ' + str(index) + '.')
if Restore:
dic['In-loopPostAction'].remove('Restore')
dic['In-loopPostAction'] = dic['In-loopPostAction'] + PA
else:
dic['In-loopPostAction'] = []
if 'Validation' in dic.keys():
if not isinstance(dic['Validation'], list):
                        raise ValueError('Validation should be a list of channels. Input dictionary ' + str(index) + '.')
else:
dic['Validation'] = []
self._setup_monitors(dic, index, inlist)
if 'Additive' not in dic.keys():
dic['Additive'] = 0
if index == len(inlist) - 1 and ('StepbackOnPause' not in dic.keys()):
dic['StepbackOnPause'] = 1
self.allch = []
self.n_readbacks = 0
for d in inlist:
self.allch.append(d['KnobReadback'])
self.n_readbacks += len(d['KnobReadback'])
self.allch.append(inlist[-1]['Validation'])
self.n_validations = len(inlist[-1]['Validation'])
self.allch.append(inlist[-1]['Observable'])
self.n_observables = len(inlist[-1]['Observable'])
self.allch = [item for sublist in self.allch for item in sublist] # Recursive in one line!
self._add_group(dic, 'All', self.allch, None, close=False)
self.Ntot = 1 # Total number of measurements
for dic in inlist:
if not dic['Series']:
self.Ntot = self.Ntot * dic['Nstep']
else:
self.Ntot = self.Ntot * sum(dic['Nstep'])
self.inlist = inlist
self.ProgDisp.Progress = 0
            # Preallocating the place for the output
self.outdict = {"ErrorMessage": None,
"KnobReadback": self.allocateOutput(),
"Validation": self.allocateOutput(),
"Observable": self.allocateOutput()}
except ValueError as e:
self.outdict = {"ErrorMessage": str(e)}
return self.outdict
def _setup_monitors(self, dic, index, inlist):
if index == len(inlist) - 1 and ('Monitor' in dic.keys()) and (dic['Monitor']):
if isinstance(dic['Monitor'], str):
dic['Monitor'] = [dic['Monitor']]
self._add_group(dic, 'Monitor', dic['Monitor'], None)
if 'MonitorValue' not in dic.keys():
[dic['MonitorValue'], summary, status] = self.epics_dal.get_group('Monitor')
elif not isinstance(dic['MonitorValue'], list):
dic['MonitorValue'] = [dic['MonitorValue']]
if len(dic['MonitorValue']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorValue does not match the length of Monitor.')
if 'MonitorTolerance' not in dic.keys():
dic['MonitorTolerance'] = []
[Value, summary, status] = self.epics_dal.get_group('Monitor')
for v in Value:
if isinstance(v, str):
dic['MonitorTolerance'].append(None)
elif v == 0:
dic['MonitorTolerance'].append(0.1)
else:
dic['MonitorTolerance'].append(
                            abs(v * 0.1)) # 10% of the current value will be the tolerance when not given
elif not isinstance(dic['MonitorTolerance'], list):
dic['MonitorTolerance'] = [dic['MonitorTolerance']]
if len(dic['MonitorTolerance']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorTolerance does not match the length of Monitor.')
if 'MonitorAction' not in dic.keys():
                raise ValueError('MonitorAction is not given though Monitor is given.')
if not isinstance(dic['MonitorAction'], list):
dic['MonitorAction'] = [dic['MonitorAction']]
for m in dic['MonitorAction']:
if m != 'Abort' and m != 'Wait' and m != 'WaitAndAbort':
                    raise ValueError('MonitorAction should be Wait, Abort, or WaitAndAbort.')
if 'MonitorTimeout' not in dic.keys():
dic['MonitorTimeout'] = [30.0] * len(dic['Monitor'])
elif not isinstance(dic['MonitorTimeout'], list):
                dic['MonitorTimeout'] = [dic['MonitorTimeout']]
            if len(dic['MonitorTimeout']) != len(dic['Monitor']):
                raise ValueError('The length of MonitorTimeout does not match the length of Monitor.')
for m in dic['MonitorTimeout']:
try:
float(m)
except:
raise ValueError('MonitorTimeout should be a list of float(or int).')
elif index == len(inlist) - 1:
dic['Monitor'] = []
dic['MonitorValue'] = []
dic['MonitorTolerance'] = []
dic['MonitorAction'] = []
dic['MonitorTimeout'] = []
def _setup_knob_scan_values(self, index, dic):
if 'Series' not in dic.keys():
dic['Series'] = 0
if not dic['Series']: # Setting up scan values for SKS and MKS
if 'ScanValues' not in dic.keys():
if 'ScanRange' not in dic.keys():
raise ValueError('Neither ScanRange nor ScanValues is given '
'in the input dictionary ' + str(index) + '.')
elif not isinstance(dic['ScanRange'], list):
raise ValueError('ScanRange is not given in the right format. '
'Input dictionary ' + str(index) + '.')
elif not isinstance(dic['ScanRange'][0], list):
dic['ScanRange'] = [dic['ScanRange']]
if ('Nstep' not in dic.keys()) and ('StepSize' not in dic.keys()):
raise ValueError('Neither Nstep nor StepSize is given.')
if 'Nstep' in dic.keys(): # StepSize is ignored when Nstep is given
if not isinstance(dic['Nstep'], int):
raise ValueError('Nstep should be an integer. Input dictionary ' + str(index) + '.')
ran = []
for r in dic['ScanRange']:
s = (r[1] - r[0]) / (dic['Nstep'] - 1)
f = np.arange(r[0], r[1], s)
f = np.append(f, np.array(r[1]))
ran.append(f.tolist())
dic['KnobExpanded'] = ran
else: # StepSize given
if len(dic['Knob']) > 1:
raise ValueError('Give Nstep instead of StepSize for MKS. '
'Input dictionary ' + str(index) + '.')
# StepSize is only valid for SKS
r = dic['ScanRange'][0]
s = dic['StepSize'][0]
f = np.arange(r[0], r[1], s)
f = np.append(f, np.array(r[1]))
dic['Nstep'] = len(f)
dic['KnobExpanded'] = [f.tolist()]
else:
# Scan values explicitly defined.
if not isinstance(dic['ScanValues'], list):
                    raise ValueError('ScanValues is not given in the right format. '
'Input dictionary ' + str(index) + '.')
if len(dic['ScanValues']) != len(dic['Knob']) and len(dic['Knob']) != 1:
                    raise ValueError('The length of ScanValues does not match the number of Knobs.')
if len(dic['Knob']) > 1:
minlen = 100000
for r in dic['ScanValues']:
if minlen > len(r):
minlen = len(r)
ran = []
for r in dic['ScanValues']:
ran.append(r[0:minlen]) # Cut at the length of the shortest list.
dic['KnobExpanded'] = ran
dic['Nstep'] = minlen
else:
dic['KnobExpanded'] = [dic['ScanValues']]
dic['Nstep'] = len(dic['ScanValues'])
else: # Setting up scan values for Series scan
if 'ScanValues' not in dic.keys():
raise ValueError('ScanValues should be given for Series '
'scan in the input dictionary ' + str(index) + '.')
if not isinstance(dic['ScanValues'], list):
raise ValueError('ScanValues should be given as a list (of lists) '
'for Series scan in the input dictionary ' + str(index) + '.')
if len(dic['Knob']) != len(dic['ScanValues']):
                raise ValueError('Scan values length does not match the '
'number of knobs in the input dictionary ' + str(index) + '.')
Nstep = []
for vl in dic['ScanValues']:
if not isinstance(vl, list):
raise ValueError('ScanValue element should be given as a list for '
'Series scan in the input dictionary ' + str(index) + '.')
Nstep.append(len(vl))
dic['Nstep'] = Nstep
def _setup_knobs(self, index, dic):
"""
Setup the values for moving knobs in the scan.
:param index: Index in the dictionary.
:param dic: The dictionary.
"""
if 'Knob' not in dic.keys():
raise ValueError('Knob for the scan was not given for the input dictionary' + str(index) + '.')
else:
if not isinstance(dic['Knob'], list):
dic['Knob'] = [dic['Knob']]
if 'KnobReadback' not in dic.keys():
dic['KnobReadback'] = dic['Knob']
if not isinstance(dic['KnobReadback'], list):
dic['KnobReadback'] = [dic['KnobReadback']]
if len(dic['KnobReadback']) != len(dic['Knob']):
                raise ValueError('The number of KnobReadback does not match the number of Knobs.')
if 'KnobTolerance' not in dic.keys():
dic['KnobTolerance'] = [1.0] * len(dic['Knob'])
if not isinstance(dic['KnobTolerance'], list):
dic['KnobTolerance'] = [dic['KnobTolerance']]
if len(dic['KnobTolerance']) != len(dic['Knob']):
                raise ValueError('The number of KnobTolerance does not match the number of Knobs.')
if 'KnobWaiting' not in dic.keys():
dic['KnobWaiting'] = [10.0] * len(dic['Knob'])
if not isinstance(dic['KnobWaiting'], list):
dic['KnobWaiting'] = [dic['KnobWaiting']]
if len(dic['KnobWaiting']) != len(dic['Knob']):
                raise ValueError('The number of KnobWaiting does not match the number of Knobs.')
if 'KnobWaitingExtra' not in dic.keys():
dic['KnobWaitingExtra'] = 0.0
else:
try:
dic['KnobWaitingExtra'] = float(dic['KnobWaitingExtra'])
except:
raise ValueError('KnobWaitingExtra is not a number in the input dictionary ' + str(index) + '.')
self._add_group(dic, str(index), dic['Knob'], 'KnobSaved')
def startMonitor(self, dic):
self.epics_dal.add_group("Monitor", dic["Monitor"])
# def cbMonitor(h):
# def matchValue(h):
# en = self.MonitorInfo[h][1]
# c = self.epics_dal.getPVCache(h)
# v = c.value[0]
# if v == '':
# # To comply with RF-READY-STATUS channle, where ENUM is empty...
# c = self.epics_dal.getPVCache(h, dt='int')
# v = c.value[0]
# if isinstance(self.MonitorInfo[h][2], list): # Monitor value is in list, i.e. several cases are okay
# if v in self.MonitorInfo[h][2]:
# print('value OK')
# return 1
# else:
# print('kkkkkkk', en, self.MonitorInfo[h][2], v)
# print('value NG')
# return 0
# elif isinstance(v, str):
# if v == self.MonitorInfo[h][2]:
# print('value OK')
# return 1
# else:
# print('nnnnn', en, self.MonitorInfo[h][2], v)
# print('value NG')
# return 0
#
# elif isinstance(v, int) or isinstance(v, float):
# if abs(v - self.MonitorInfo[h][2]) <= self.MonitorInfo[h][3]:
# return 1
# else:
# print('value NG')
# print(v, self.MonitorInfo[h][2], self.MonitorInfo[h][3])
# return 0
# else:
# 'Return value from getPVCache', v
#
# if matchValue(h):
# self.stopScan[self.MonitorInfo[h][0]] = 0
# else:
# self.stopScan[self.MonitorInfo[h][0]] = 1
#
# dic = self.inlist[-1]
# self.stopScan = [0] * len(dic['Monitor'])
# self.MonitorInfo = {}
#
# HandleList = self.epics_dal.getHandlesFromWithinGroup(self.MonitorHandle)
# # self.cafe.openPrepare()
# for i in range(0, len(HandleList)):
# h = HandleList[i]
# self.MonitorInfo[h] = [i, dic['Monitor'][i], dic['MonitorValue'][i], dic['MonitorTolerance'][i],
# dic['MonitorAction'][i], dic['MonitorTimeout']]
#
# self.epics_dal.openMonitorPrepare()
# m0 = self.epics_dal.groupMonitorStartWithCBList(self.MonitorHandle, cb=[cbMonitor] * len(dic['Monitor']))
#
# self.epics_dal.openMonitorNowAndWait(2)
def PreAction(self, dic, key='PreAction'):
order = np.array(dic[key + 'Order'])
maxo = order.max()
mino = order.min()
stat = 0
for i in range(mino, maxo + 1):
for j in range(0, len(order)):
od = order[j]
if i == od:
if dic[key][j][0].lower() == 'specialaction':
self.ObjectSA.SpecialAction(dic[key][j][1])
else:
chset = dic[key][j][0]
chread = dic[key][j][1]
val = dic[key][j][2]
tol = dic[key][j][3]
timeout = dic[key][j][4]
if chset.lower() == 'match':
# print('****************************----')
try:
status = self.epics_dal.match(val, chread, tol, timeout, 1)
# print('--------------', status)
except Exception as inst:
print('Exception in preAction')
print(inst)
stat = 1
else:
try:
status = self.epics_dal.set_and_match(chset, val, chread, tol, timeout, 0)
# print('===========', status)
except Exception as inst:
print('Exception in preAction')
print(inst)
stat = 1
sleep(dic[key + 'Waiting'])
return stat # Future development: Give it to output dictionary
def PostAction(self, dic, key='PostAction'):
for act in dic[key]:
if act[0] == 'SpecialAction':
self.ObjectSA.SpecialAction(act[1])
else:
chset = act[0]
chread = act[1]
val = act[2]
tol = act[3]
timeout = act[4]
try:
self.epics_dal.set_and_match(chset, val, chread, tol, timeout, 0)
except Exception as inst:
print(inst)
def CrossReference(self, Object):
self.ObjectSA = Object
def allocateOutput(self):
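        # Illustrative example of the allocated structure (placeholder sizes):
        # for two non-Series dimensions with Nstep = 2 (outer) and Nstep = 3 (inner)
        # this returns [[[], [], []], [[], [], []]], i.e. one initially empty slot per
        # scan point; a Series dimension instead adds one nesting level per knob,
        # indexed as [knob][step].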
root_list = []
for dimension in reversed(self.inlist):
n_steps = dimension['Nstep']
if dimension['Series']:
# For Series scan, each step of each knob represents another result.
current_dimension_list = []
for n_steps_in_knob in n_steps:
current_knob_list = []
for _ in range(n_steps_in_knob):
current_knob_list.append(deepcopy(root_list))
current_dimension_list.append(deepcopy(current_knob_list))
root_list = current_dimension_list
else:
# For line scan, each step represents another result.
current_dimension_list = []
for _ in range(n_steps):
current_dimension_list.append(deepcopy(root_list))
root_list = current_dimension_list
return root_list
def execute_scan(self):
self.Scan(self.outdict['KnobReadback'], self.outdict['Validation'], self.outdict['Observable'], 0)
def startScan(self):
if self.outdict['ErrorMessage']:
if 'After the last scan,' not in self.outdict['ErrorMessage']:
self.outdict[
'ErrorMessage'] = 'It seems that the initialization was not successful... No scan was performed.'
return self.outdict
self.outdict['TimeStampStart'] = datetime.now()
self.stopScan = []
self.abortScan = 0
self.pauseScan = 0
if self.inlist[-1]['Monitor']:
self.startMonitor(self.inlist[-1])
if self.fromGUI:
self.ProgDisp.showPanel(1)
self.ProgDisp.abortScan = 0
self.ProgDisp.emit("pb")
self.Ndone = 0
self.execute_scan()
if self.fromGUI:
self.ProgDisp.showPanel(0)
self.finalizeScan()
self.outdict['TimeStampEnd'] = datetime.now()
return self.outdict
def Scan(self, Rback, Valid, Obs, dic_index):
dic = self.inlist[dic_index]
# print('*****************', dic)
# Execute pre actions.
if len(dic['PreAction']):
self.PreAction(dic)
series_scan = True if dic['Series'] else False
last_pass = dic_index == len(self.inlist) - 1
# Knob, KnobReadback = Writer
# KnobExpanded = Vector scan.
if last_pass:
if series_scan:
self.last_series_scan(Obs, Rback, Valid, dic)
else:
self.last_line_scan(Obs, Rback, Valid, dic)
else:
if series_scan:
self.series_scan(Obs, Rback, Valid, dic_index)
else:
self.line_scan(Obs, Rback, Valid, dic_index)
# Execute post actions.
if len(dic['PostAction']):
self.PostAction(dic)
def post_measurment_actions(self, Obs, Rback, Valid, dic, step_index):
step_index = step_index + 1
self.Ndone = self.Ndone + 1
step_index = self.verify_and_stepback(step_index, Obs, Rback, Valid, dic)
self.update_progress()
return step_index
def update_progress(self):
self.ProgDisp.Progress = 100.0 * self.Ndone / self.Ntot
if self.fromGUI:
self.ProgDisp.emit("pb")
def verify_and_stepback(self, Iscan, Obs, Rback, Valid, dic):
Stepback = 0
count = [0] * len(self.stopScan)
k_stop = None
p_stop = None
while self.stopScan.count(1) + self.pauseScan: # Problem detected in the channel under monitoring
Stepback = 1
sleep(1.0)
for k in range(0, len(self.stopScan)):
if self.stopScan[k]:
k_stop = k
if dic['MonitorAction'][k] == 'Abort':
self.abortScan = 1
count[k] = count[k] + 1
else:
count[k] = 0
if dic['MonitorAction'][k] == 'WaitAndAbort' and count[k] > dic['MonitorTimeout'][k]:
self.abortScan = 1
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
# print('Monitor??')
# print(self.stopScan)
if self.pauseScan:
p_stop = 1
if k_stop and dic['MonitorAction'][k_stop] == 'WaitAndNoStepBack':
Stepback = 0
if p_stop and not dic['StepbackOnPause']:
Stepback = 0
if Stepback:
# print('Stepping back')
Iscan = Iscan - 1
self.Ndone = self.Ndone - 1
Rback[Iscan].pop()
Valid[Iscan].pop()
Obs[Iscan].pop()
if self.fromGUI and self.ProgDisp.abortScan:
self.abortScan = 1
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
if len(dic['In-loopPostAction']):
self.PostAction(dic, 'In-loopPostAction')
return Iscan
def pre_measurment_actions(self, dic):
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
if len(dic['In-loopPreAction']):
self.PreAction(dic, 'In-loopPreAction')
def measure_and_save(self, Iscan, Obs, Rback, Valid, dic, Kscan=None):
for j in range(dic['NumberOfMeasurements']):
[v, s, sl] = self.epics_dal.get_group('All')
if self.n_readbacks == 1:
rback_result = v[0]
else:
rback_result = v[0:self.n_readbacks]
if self.n_validations == 1:
valid_result = v[self.n_readbacks]
else:
valid_result = v[self.n_readbacks:self.n_readbacks + self.n_validations]
if self.n_observables == 1:
obs_result = v[-1]
else:
obs_result = v[self.n_readbacks + self.n_validations:
self.n_readbacks + self.n_validations + self.n_observables]
if dic['NumberOfMeasurements'] > 1:
if Kscan is not None:
Rback[Kscan][Iscan].append(rback_result)
Valid[Kscan][Iscan].append(valid_result)
Obs[Kscan][Iscan].append(obs_result)
else:
Rback[Iscan].append(rback_result)
Valid[Iscan].append(valid_result)
Obs[Iscan].append(obs_result)
else:
if Kscan is not None:
Rback[Kscan][Iscan] = rback_result
Valid[Kscan][Iscan] = valid_result
Obs[Kscan][Iscan] = obs_result
else:
Rback[Iscan] = rback_result
Valid[Iscan] = valid_result
Obs[Iscan] = obs_result
sleep(dic['Waiting'])
def line_scan(self, Obs, Rback, Valid, dic_index):
dic = self.inlist[dic_index]
for step_index in range(dic['Nstep']):
# print('Dict' + str(dic_index) + ' Loop' + str(step_index))
for knob_index in range(len(dic['Knob'])):
if dic['Additive']:
KV = np.array(dic['KnobExpanded'][knob_index]) + dic['KnobSaved'][knob_index]
else:
KV = dic['KnobExpanded'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV[step_index])
except Exception as inst:
                    print('Exception in line_scan')
print(inst)
# Delay between setting the position and reading the values.
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
self.Scan(Rback[step_index], Valid[step_index], Obs[step_index], dic_index + 1)
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
def set_knob_value(self, dic, knob_index, pv_value):
set_pv_name = dic['Knob'][knob_index]
readback_pv_name = dic['KnobReadback'][knob_index]
pv_tolerance = dic['KnobTolerance'][knob_index]
pv_wait_time = dic['KnobWaiting'][knob_index]
self.epics_dal.set_and_match(set_pv_name, pv_value, readback_pv_name, pv_tolerance, pv_wait_time, 0)
def last_line_scan(self, Obs, Rback, Valid, dic):
step_index = 0
while step_index < dic['Nstep']:
# print(step_index)
# set knob for this loop
for knob_index in range(len(dic['Knob'])): # Replace later with a group method, setAndMatchGroup?
if dic['Additive']:
KV = np.array(dic['KnobExpanded'][knob_index]) + dic['KnobSaved'][knob_index]
else:
KV = dic['KnobExpanded'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV[step_index])
except Exception as inst:
print('Exception in Scan loop')
print(inst)
self.pre_measurment_actions(dic)
self.measure_and_save(step_index, Obs, Rback, Valid, dic)
step_index = self.post_measurment_actions(Obs, Rback, Valid, dic, step_index)
def series_scan(self, Obs, Rback, Valid, dic_index):
dic = self.inlist[dic_index]
# For every PV.
for Kscan in range(0, len(dic['Knob'])):
# For the number of steps for this PV.
for step_index in range(dic['Nstep'][Kscan]):
# For every PV.
for knob_index in range(len(dic['Knob'])):
#
if knob_index == Kscan:
if dic['Additive']:
KV = dic['KnobSaved'] + dic['ScanValues'][knob_index][step_index]
else:
KV = dic['ScanValues'][knob_index][step_index]
else:
KV = dic['KnobSaved'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV)
except Exception as inst:
raise ValueError('Exception in series_scan', inst)
if dic['KnobWaitingExtra']:
sleep(dic['KnobWaitingExtra'])
self.Scan(Rback[Kscan][step_index], Valid[Kscan][step_index], Obs[Kscan][step_index], dic_index+1)
if self.abortScan:
if len(dic['PostAction']):
self.PostAction(dic)
raise Exception("Scan aborted")
def last_series_scan(self, Obs, Rback, Valid, dic):
Kscan = 0
while Kscan < len(dic['Knob']):
step_index = 0
while step_index < dic['Nstep'][Kscan]:
# print(Kscan, step_index)
# set knob for this loop
for knob_index in range(0, len(dic['Knob'])): # Replace later with a group method, setAndMatchGroup?
if knob_index == Kscan:
if dic['Additive']:
KV = dic['KnobSaved'][knob_index] + dic['ScanValues'][knob_index][step_index]
else:
KV = dic['ScanValues'][knob_index][step_index]
else:
KV = dic['KnobSaved'][knob_index]
try:
self.set_knob_value(dic, knob_index, KV)
except Exception as inst:
                        print('Exception in last_series_scan')
print(inst)
self.pre_measurment_actions(dic)
self.measure_and_save(step_index, Obs, Rback, Valid, dic, Kscan)
step_index = self.post_measurment_actions(Obs, Rback, Valid, dic, step_index)
Kscan = Kscan + 1
| gpl-3.0 | 2,175,520,377,062,554,000 | 41.689503 | 120 | 0.476523 | false |
ankonzoid/Deep-Reinforcement-Learning-Tutorials | advanced_ML/model_tree/src/ModelTree.py | 1 | 13608 | """
ModelTree.py (author: Anson Wong / git: ankonzoid)
"""
import numpy as np
from copy import deepcopy
from graphviz import Digraph
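# Illustrative usage sketch (names are placeholders; it assumes a regressor object
# exposing fit/predict/loss/get_params, which is the interface ModelTree relies on):
#
#   model_tree = ModelTree(my_model, max_depth=4, min_samples_leaf=10,
#                          search_type="greedy")
#   model_tree.fit(X_train, y_train, verbose=True)
#   y_pred = model_tree.predict(X_test)
#   model_tree.export_graphviz("output/model_tree", feature_names, export_png=True)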
class ModelTree(object):
def __init__(self, model, max_depth=5, min_samples_leaf=10,
search_type="greedy", n_search_grid=100):
self.model = model
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.search_type = search_type
self.n_search_grid = n_search_grid
self.tree = None
def get_params(self, deep=True):
return {
"model": self.model.get_params() if deep else self.model,
"max_depth": self.max_depth,
"min_samples_leaf": self.min_samples_leaf,
"search_type": self.search_type,
"n_search_grid": self.n_search_grid,
}
def set_params(self, **params):
for param, value in params.items():
setattr(self, param, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return "{}({})".format(class_name, ', '.join([ "{}={}".format(k,v) for k, v in self.get_params(deep=False).items() ]))
# ======================
# Fit
# ======================
def fit(self, X, y, verbose=False):
# Settings
model = self.model
min_samples_leaf = self.min_samples_leaf
max_depth = self.max_depth
search_type = self.search_type
n_search_grid = self.n_search_grid
if verbose:
print(" max_depth={}, min_samples_leaf={}, search_type={}...".format(max_depth, min_samples_leaf, search_type))
def _build_tree(X, y):
global index_node_global
def _create_node(X, y, depth, container):
loss_node, model_node = _fit_model(X, y, model)
node = {"name": "node",
"index": container["index_node_global"],
"loss": loss_node,
"model": model_node,
"data": (X, y),
"n_samples": len(X),
"j_feature": None,
"threshold": None,
"children": {"left": None, "right": None},
"depth": depth}
container["index_node_global"] += 1
return node
# Recursively split node + traverse node until a terminal node is reached
def _split_traverse_node(node, container):
# Perform split and collect result
result = _splitter(node, model,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
search_type=search_type,
n_search_grid=n_search_grid)
# Return terminal node if split is not advised
if not result["did_split"]:
if verbose:
depth_spacing_str = " ".join([" "] * node["depth"])
print(" {}*leaf {} @ depth {}: loss={:.6f}, N={}".format(depth_spacing_str, node["index"], node["depth"], node["loss"], result["N"]))
return
# Update node information based on splitting result
node["j_feature"] = result["j_feature"]
node["threshold"] = result["threshold"]
del node["data"] # delete node stored data
# Extract splitting results
(X_left, y_left), (X_right, y_right) = result["data"]
model_left, model_right = result["models"]
# Report created node to user
if verbose:
depth_spacing_str = " ".join([" "] * node["depth"])
print(" {}node {} @ depth {}: loss={:.6f}, j_feature={}, threshold={:.6f}, N=({},{})".format(depth_spacing_str, node["index"], node["depth"], node["loss"], node["j_feature"], node["threshold"], len(X_left), len(X_right)))
# Create children nodes
node["children"]["left"] = _create_node(X_left, y_left, node["depth"]+1, container)
node["children"]["right"] = _create_node(X_right, y_right, node["depth"]+1, container)
node["children"]["left"]["model"] = model_left
node["children"]["right"]["model"] = model_right
# Split nodes
_split_traverse_node(node["children"]["left"], container)
_split_traverse_node(node["children"]["right"], container)
            container = {"index_node_global": 0} # mutable container
root = _create_node(X, y, 0, container) # depth 0 root node
_split_traverse_node(root, container) # split and traverse root node
return root
# Construct tree
self.tree = _build_tree(X, y)
# ======================
# Predict
# ======================
def predict(self, X):
assert self.tree is not None
def _predict(node, x):
no_children = node["children"]["left"] is None and \
node["children"]["right"] is None
if no_children:
y_pred_x = node["model"].predict([x])[0]
return y_pred_x
else:
if x[node["j_feature"]] <= node["threshold"]: # x[j] < threshold
return _predict(node["children"]["left"], x)
else: # x[j] > threshold
return _predict(node["children"]["right"], x)
y_pred = np.array([_predict(self.tree, x) for x in X])
return y_pred
# ======================
# Explain
# ======================
def explain(self, X, header):
assert self.tree is not None
def _explain(node, x, explanation):
no_children = node["children"]["left"] is None and \
node["children"]["right"] is None
if no_children:
return explanation
else:
if x[node["j_feature"]] <= node["threshold"]: # x[j] < threshold
explanation.append("{} = {:.6f} <= {:.6f}".format(header[node["j_feature"]], x[node["j_feature"]], node["threshold"]))
return _explain(node["children"]["left"], x, explanation)
else: # x[j] > threshold
explanation.append("{} = {:.6f} > {:.6f}".format(header[node["j_feature"]], x[node["j_feature"]], node["threshold"]))
return _explain(node["children"]["right"], x, explanation)
explanations = [_explain(self.tree, x, []) for x in X]
return explanations
# ======================
# Loss
# ======================
def loss(self, X, y, y_pred):
loss = self.model.loss(X, y, y_pred)
return loss
# ======================
# Tree diagram
# ======================
def export_graphviz(self, output_filename, feature_names,
export_png=True, export_pdf=False):
"""
Assumes node structure of:
node["name"]
node["n_samples"]
node["children"]["left"]
node["children"]["right"]
node["j_feature"]
node["threshold"]
node["loss"]
"""
g = Digraph('g', node_attr={'shape': 'record', 'height': '.1'})
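        # Illustrative node (placeholder values), as produced by _create_node/_splitter;
        # split nodes have "data" removed and both children set:
        #   {"name": "node", "index": 3, "loss": 0.42, "model": <fitted model>,
        #    "n_samples": 120, "j_feature": 2, "threshold": 0.57,
        #    "children": {"left": {...}, "right": {...}}, "depth": 1}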
def build_graphviz_recurse(node, parent_node_index=0, parent_depth=0, edge_label=""):
# Empty node
if node is None:
return
# Create node
node_index = node["index"]
if node["children"]["left"] is None and node["children"]["right"] is None:
threshold_str = ""
else:
threshold_str = "{} <= {:.1f}\\n".format(feature_names[node['j_feature']], node["threshold"])
label_str = "{} n_samples = {}\\n loss = {:.6f}".format(threshold_str, node["n_samples"], node["loss"])
# Create node
nodeshape = "rectangle"
bordercolor = "black"
fillcolor = "white"
fontcolor = "black"
g.attr('node', label=label_str, shape=nodeshape)
g.node('node{}'.format(node_index),
color=bordercolor, style="filled",
fillcolor=fillcolor, fontcolor=fontcolor)
# Create edge
if parent_depth > 0:
g.edge('node{}'.format(parent_node_index),
'node{}'.format(node_index), label=edge_label)
# Traverse child or append leaf value
build_graphviz_recurse(node["children"]["left"],
parent_node_index=node_index,
parent_depth=parent_depth + 1,
edge_label="")
build_graphviz_recurse(node["children"]["right"],
parent_node_index=node_index,
parent_depth=parent_depth + 1,
edge_label="")
# Build graph
build_graphviz_recurse(self.tree,
parent_node_index=0,
parent_depth=0,
edge_label="")
# Export pdf
if export_pdf:
print("Saving model tree diagram to '{}.pdf'...".format(output_filename))
g.format = "pdf"
g.render(filename=output_filename, view=False, cleanup=True)
# Export png
if export_png:
print("Saving model tree diagram to '{}.png'...".format(output_filename))
g.format = "png"
g.render(filename=output_filename, view=False, cleanup=True)
# ***********************************
#
# Side functions
#
# ***********************************
def _splitter(node, model,
max_depth=5, min_samples_leaf=10,
search_type="greedy", n_search_grid=100):
# Extract data
X, y = node["data"]
depth = node["depth"]
N, d = X.shape
# Find feature splits that might improve loss
did_split = False
loss_best = node["loss"]
data_best = None
models_best = None
j_feature_best = None
threshold_best = None
# Perform threshold split search only if node has not hit max depth
if (depth >= 0) and (depth < max_depth):
for j_feature in range(d):
# If using adaptive search type, decide on one to use
search_type_use = search_type
if search_type == "adaptive":
if N > n_search_grid:
search_type_use = "grid"
else:
search_type_use = "greedy"
# Use decided search type and generate threshold search list (j_feature)
threshold_search = []
if search_type_use == "greedy":
for i in range(N):
threshold_search.append(X[i, j_feature])
elif search_type_use == "grid":
x_min, x_max = np.min(X[:, j_feature]), np.max(X[:, j_feature])
dx = (x_max - x_min) / n_search_grid
for i in range(n_search_grid+1):
threshold_search.append(x_min + i*dx)
else:
exit("err: invalid search_type = {} given!".format(search_type))
# Perform threshold split search on j_feature
for threshold in threshold_search:
# Split data based on threshold
(X_left, y_left), (X_right, y_right) = _split_data(j_feature, threshold, X, y)
N_left, N_right = len(X_left), len(X_right)
# Splitting conditions
split_conditions = [N_left >= min_samples_leaf,
N_right >= min_samples_leaf]
# Do not attempt to split if split conditions not satisfied
if not all(split_conditions):
continue
# Compute weight loss function
loss_left, model_left = _fit_model(X_left, y_left, model)
loss_right, model_right = _fit_model(X_right, y_right, model)
loss_split = (N_left*loss_left + N_right*loss_right) / N
# Update best parameters if loss is lower
if loss_split < loss_best:
did_split = True
loss_best = loss_split
models_best = [model_left, model_right]
data_best = [(X_left, y_left), (X_right, y_right)]
j_feature_best = j_feature
threshold_best = threshold
# Return the best result
result = {"did_split": did_split,
"loss": loss_best,
"models": models_best,
"data": data_best,
"j_feature": j_feature_best,
"threshold": threshold_best,
"N": N}
return result
def _fit_model(X, y, model):
model_copy = deepcopy(model) # must deepcopy the model!
model_copy.fit(X, y)
y_pred = model_copy.predict(X)
loss = model_copy.loss(X, y, y_pred)
assert loss >= 0.0
return loss, model_copy
def _split_data(j_feature, threshold, X, y):
idx_left = np.where(X[:, j_feature] <= threshold)[0]
idx_right = np.delete(np.arange(0, len(X)), idx_left)
assert len(idx_left) + len(idx_right) == len(X)
return (X[idx_left], y[idx_left]), (X[idx_right], y[idx_right])
| mit | 7,199,203,135,097,981,000 | 37.769231 | 241 | 0.486846 | false |
globaltoken/globaltoken | test/functional/mempool_packages.py | 1 | 10556 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]]
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
tx1_id, _ = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
| mit | -6,945,249,846,931,241,000 | 42.619835 | 154 | 0.622395 | false |
sevenian3/ChromaStarPy | KappasRaylGas.py | 1 | 10384 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 17:18:39 2017
@author: ishort
"""
import math
import Useful
import PartitionFn
import ToolBox
#import numpy
#/* Rayleigh scattering opacity routines taken from Moog (moogjul2014/, MOOGJUL2014.tar)
#Chris Sneden (University of Texas at Austin) and collaborators
#http://www.as.utexas.edu/~chris/moog.html
#//From Moog source file Opacscat.f
#*/
"""
#JB#
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
"""
masterTemp=[130,500,3000,8000,10000]
#JB#
def masterRayl(numDeps, numLams, temp, lambdaScale, stagePops, gsName, gsFirstMol, molPops):
""" /*c******************************************************************************
c The subroutines needed to calculate the opacities from scattering by
c H I, H2, He I, are in this file. These are from ATLAS9.
c******************************************************************************
*/"""
#//System.out.println("masterRayl called...");
#//From Moog source file Opacitymetals.f
#// From how values such as aC1[] are used in Moog file Opacit.f to compute the total opacity
#// and then the optical depth scale, I infer that they are extinction coefficients
#// in cm^-1
#//
#// There does not seem to be any correction for stimulated emission
logE = math.log10(math.e)
masterRScat = [ [ 0.0 for i in range(numDeps) ] for j in range(numLams) ]
logUH1 = [0.0 for i in range(5)]
logUHe1 = [0.0 for i in range(5)]
logStatWH1 = 0.0
logStatWHe1 = 0.0
theta = 1.0
species = ""
logGroundPopsH1 = [0.0 for i in range(numDeps)]
logGroundPopsHe1 = [0.0 for i in range(numDeps)]
logH2Pops = [0.0 for i in range(numDeps)]
#//
#// H I: Z=1 --> iZ=0:
sigH1 = [0.0 for i in range(numDeps)]
#// He I: Z=2 --> iZ=1:
sigHe1 = [0.0 for i in range(numDeps)]
species = "HI"
logUH1 = PartitionFn.getPartFn2(species)
species = "HeI"
logUHe1 = PartitionFn.getPartFn2(species)
sigH2 = [0.0 for i in range(numDeps)]
#Find index of H2 in molPops array
for iH2 in range(len(gsName)):
if (gsName[iH2].strip() == "H2"):
break;
#print("iH2 ", iH2, " iH2-gsFirstMol ", (iH2-gsFirstMol))
#//System.out.println("iD PopsH1 PopsHe1");
for iD in range(numDeps):
#//neutral stage
#//Assumes ground state stat weight, g_1, is 1.0
#theta = 5040.0 / temp[0][iD]
#// U[0]: theta = 1.0, U[1]: theta = 0.5
"""
if (theta <= 0.5):
logStatWH1 = logUH1[1]
logStatWHe1 = logUHe1[1]
elif ( (theta < 1.0) and (theta > 0.5) ):
logStatWH1 = ( (theta-0.5) * logUH1[0] ) + ( (1.0-theta) * logUH1[1] )
logStatWHe1 = ( (theta-0.5) * logUHe1[0] ) + ( (1.0-theta) * logUHe1[1] )
#//divide by common factor of interpolation interval of 0.5 = (1.0 - 0.5):
logStatWH1 = 2.0 * logStatWH1
logStatWHe1 = 2.0 * logStatWHe1
else:
logStatWH1 = logUH1[0]
logStatWHe1 = logUHe1[0]
"""
thisTemp = temp[0][iD];
#JB#
logWH1Fit = ToolBox.cubicFit(masterTemp,logUH1)
logStatWH1 = ToolBox.valueFromFit(logWH1Fit,thisTemp)
logWHe1Fit = ToolBox.cubicFit(masterTemp,logUHe1)
logStatWHe1 = ToolBox.valueFromFit(logWHe1Fit,thisTemp)
#logStatWH1Fun = spline(masterTemp,logUH1)
#logStatWH1=logStatWH1Fun(thisTemp)
#logStatWHe1Fun = spline(masterTemp,logUHe1)
#logStatWHe1=logStatWHe1Fun(thisTemp)
#JB#
#// NEW Interpolation with temperature for new partition function: lburns
thisTemp = temp[0][iD];
if (thisTemp <= 130.0):
logStatWH1 = logUH1[0]
logStatWHe1 = logUHe1[0]
if (thisTemp >= 10000.0):
logStatWH1 = logUH1[4]
logStatWHe1 = logUHe1[4]
"""
elif (thisTemp > 130 and thisTemp <= 500):
logStatWH1 = logUH1[1] * (thisTemp - 130)/(500 - 130) \
+ logUH1[0] * (500 - thisTemp)/(500 - 130)
logStatWHe1 = logUHe1[1] * (thisTemp - 130)/(500 - 130) \
+ logUHe1[0] * (500 - thisTemp)/(500 - 130)
elif (thisTemp > 500 and thisTemp <= 3000):
logStatWH1 = logUH1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUH1[1] * (3000 - thisTemp)/(3000 - 500)
logStatWHe1 = logUHe1[2] * (thisTemp - 500)/(3000 - 500) \
+ logUHe1[1] * (3000 - thisTemp)/(3000 - 500)
elif (thisTemp > 3000 and thisTemp <= 8000):
logStatWH1 = logUH1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUH1[2] * (8000 - thisTemp)/(8000 - 3000)
logStatWHe1 = logUHe1[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUHe1[2] * (8000 - thisTemp)/(8000 - 3000)
elif (thisTemp > 8000 and thisTemp < 10000):
logStatWH1 = logUH1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUH1[3] * (10000 - thisTemp)/(10000 - 8000)
logStatWHe1 = logUHe1[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUHe1[3] * (10000 - thisTemp)/(10000 - 8000)
else:
#// for temperatures of greater than or equal to 10000K lburns
logStatWH1 = logUH1[4]
logStatWHe1 = logUHe1[4]
"""
logGroundPopsH1[iD] = stagePops[0][0][iD] - logStatWH1
logGroundPopsHe1[iD] = stagePops[1][0][iD] - logStatWHe1
logH2Pops[iD] = molPops[iH2-gsFirstMol][iD]
#print("iD " , iD , " logH2 " , logH2Pops[iD])
#// if (iD%10 == 1){
#// System.out.format("%03d, %21.15f, %21.15f %n",
#// iD, logE*logGroundPopsH1[iD], logE*logGroundPopsHe1[iD]);
#// }
kapRScat = 0.0
#//System.out.println("iD iL lambda sigH1 sigHe1 ");
for iL in range(numLams):
#//
for i in range(numDeps):
sigH1[i] = 0.0
sigHe1[i] = 0.0
sigH2[i] = 0.0
#//System.out.println("Calling opacH1 from masterMetal...");
sigH1 = opacHscat(numDeps, temp, lambdaScale[iL], logGroundPopsH1)
sigHe1 = opacHescat(numDeps, temp, lambdaScale[iL], logGroundPopsHe1)
sigH2 = opacH2scat(numDeps, temp, lambdaScale[iL], logH2Pops)
for iD in range(numDeps):
kapRScat = sigH1[iD] + sigHe1[iD] + sigH2[iD]
masterRScat[iL][iD] = math.log(kapRScat)
#if ( (iD%10 == 0) and (iL%10 == 0) ):
# print("iD ", iD, " iL ", iL, " lambda ", lambdaScale[iL], math.log10(sigH1[iD]), math.log10(sigHe1[iD]), math.log10(sigH2[iD]) )
#} //iD
#} //iL
return masterRScat
#} //end method masterRayl
def opacHscat(numDeps, temp, lambda2, logGroundPops):
"""//c******************************************************************************
//c This routine computes H I Rayleigh scattering opacities.
//c******************************************************************************"""
#//System.out.println("opacHscat called");
sigH = [0.0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigH[i] = 0.0
freq = Useful.c() / lambda2
#// include 'Atmos.com'
#// include 'Kappa.com'
#// include 'Linex.com'
wavetemp = 2.997925e18 / min(freq, 2.463e15)
ww = math.pow(wavetemp, 2)
sig = ( 5.799e-13 + (1.422e-6/ww) + (2.784/(ww*ww)) ) / (ww*ww)
for i in range(numDeps):
sigH[i] = sig * 2.0 * math.exp(logGroundPops[i])
return sigH
#} //end method opacHscat
def opacHescat(numDeps, temp, lambda2, logGroundPops):
"""//c******************************************************************************
//c This routine computes He I Rayleigh scattering opacities.
//c******************************************************************************"""
#//System.out.println("opacHescat called");
sigHe = [0.0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigHe[i] = 0.0
freq = Useful.c() / lambda2
#// include 'Atmos.com'
#// include 'Kappa.com'
#// include 'Linex.com'
wavetemp = 2.997925e18 / min(freq, 5.15e15)
ww = math.pow(wavetemp, 2)
sig = (5.484e-14/ww/ww) * math.pow( ( 1.0 + ((2.44e5 + (5.94e10/(ww-2.90e5)))/ww) ), 2 )
for i in range(numDeps):
sigHe[i] = sig * math.exp(logGroundPops[i])
return sigHe
#} //end method opacHescat
def opacH2scat(numDeps, temp, lambda2, molPops):
sigH2 = [0.0e0 for i in range(numDeps)]
#//cross-section is zero below threshold, so initialize:
for i in range(numDeps):
sigH2[i] = 0.0
freq = Useful.c() / lambda2;
"""
//c******************************************************************************
//c This routine computes H2 I Rayleigh scattering opacities.
//c******************************************************************************
// include 'Atmos.com'
// include 'Kappa.com'
// include 'Linex.com'
"""
wavetemp = 2.997925e18 / min(freq, 2.463e15)
ww = wavetemp**2
sig = ( 8.14e-13 + (1.28e-6/ww) + (1.61/(ww*ww)) ) / (ww*ww)
#print("freq ", freq, " wavetemp ", wavetemp, " ww ", ww, " sig ", sig)
for i in range(numDeps):
sigH2[i] = sig * math.exp(molPops[i])
#print("i " , i , " molPops " , molPops[i] , " sigH2 " , sigH2[i])
return sigH2
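if __name__ == "__main__":
    # Usage sketch (not from the original source): evaluate the H I Rayleigh
    # scattering extinction at a single wavelength for two depth points.
    # The numbers below are illustrative assumptions only; lambda is assumed
    # to be in cm and the populations are natural logs of number densities,
    # as used by masterRayl() above.  Running this requires the ChromaStarPy
    # helper modules (Useful, PartitionFn, ToolBox) to be importable.
    demoNumDeps = 2
    demoTemp = [[5000.0, 5500.0], [math.log(5000.0), math.log(5500.0)]]
    demoLambda = 500.0e-7  # 500 nm expressed in cm
    demoLogPops = [math.log(1.0e16), math.log(1.0e15)]
    demoSigH = opacHscat(demoNumDeps, demoTemp, demoLambda, demoLogPops)
    print("H I Rayleigh extinction per depth: ", demoSigH)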
| mit | 1,023,656,047,883,678,600 | 33.2 | 149 | 0.510401 | false |
ac-seguridad/ac-seguridad | project/manejador/cliente_entrada.py | 1 | 2574 |
# This file is in charge of receiving the plate that was read and deciding
# whether to let a vehicle through or not, depending on its configuration.
# It also checks whether the plate is registered in the system and, if so,
# looks up the user associated with the vehicle.
# This file basically handles the alerts generated in the system.
# from ac_seguridad.models import *
import requests
from mysocket import MySocket
import socket
import pdb
# Constants.
NUM_PUERTA = 5
RIF = "12345"
HOST = "localhost"
PORT = 8000
#1234 restricted access
#0000 unrestricted access
#pdb.set_trace()
# Functions
def leer_placa():
placa = input("Placa: ")
return placa
# Program starts here.
# ref: https://docs.python.org/3/howto/sockets.html
# Create a socket as a client.
print("Creando socket")
# socket_cliente = MySocket()
# socket_cliente.connect(host=HOST, port=PORT)
print("Socket conectado.")
# Send the first message:
# Structure of the first message:
# * RIF: filled in
# * ticket: None.
# * placa: filled in.
# * tipo: filled in ('placa_leida_entrada' in this script)
# * puerta: filled in.
# * lectura_automatica: filled in, its possible values are:
#       True: reading performed automatically
#       False: reading performed manually
#       None: not applicable (e.g. server-to-client messages)
# * registrado: whether the ticket is registered; for an entry it is None
print("Preparando mensaje")
mensaje = dict()
mensaje['estacionamiento'] = RIF
mensaje['ticket'] = None
mensaje['placa'] = leer_placa()
mensaje['puerta'] = NUM_PUERTA
mensaje['tipo'] = 'placa_leida_entrada'
mensaje['lectura_automatica']= True
mensaje['registrado']=None
print("Enviando mensaje: {}".format(mensaje))
# socket_cliente.sendall_json(mensaje)
# socket_cliente.mysend("Hola, este es el mensaje\0".encode(encoding="utf-8", errors="strict"))
url = "http://{}:{}/manejador/manejar_mensaje/".format(HOST,PORT)
data_mensaje = mensaje
respuesta_request = requests.post(url, data=data_mensaje)
respuesta = respuesta_request.json()
print("Mensaje enviado")
print("Recibiendo respuesta")
# respuesta = socket_cliente.receive()
pdb.set_trace()
print("Respuesta recibida: {}".format(respuesta))
if (respuesta['tipo'] == "OK_entrada_estacionamiento"):
print("Luz verde.")
elif (respuesta['tipo'] == "NO_entrada_estacionamiento"):
print("Luz roja.")
elif (respuesta['tipo'] == "NO_carro_dentro"):
print("Luz roja.")
else:
print("Respuesta no válida")
# socket_cliente.sock.shutdown(socket.SHUT_WR)
# socket_cliente.sock.close()
| apache-2.0 | 5,597,889,476,890,702,000 | 28.45977 | 95 | 0.712446 | false |
open-synergy/opnsynid-stock-logistics-warehouse | stock_other_receipt_operation/tests/test_warehouse.py | 1 | 2104 | # -*- coding: utf-8 -*-
# © 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from .base import BaseOtherReceiptOperation
import itertools
class TestWarehouse(BaseOtherReceiptOperation):
def test_warehouse_create(self):
reception_steps = [
"one_step",
"two_steps",
"three_steps",
"transit_one_step",
"transit_two_steps",
"transit_three_steps",
]
delivery_steps = [
"ship_only",
"pick_ship",
"pick_pack_ship",
"ship_transit",
"pick_ship_transit",
"pick_pack_ship_transit",
]
num = 1
for combination in itertools.product(reception_steps, delivery_steps):
self.create_wh({
"name": "X WH %s" % str(num),
"code": "X%s" % str(num),
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
num += 1
def test_warehouse_edit(self):
reception_steps = [
"one_step",
"two_steps",
"three_steps",
"transit_one_step",
"transit_two_steps",
"transit_three_steps",
]
delivery_steps = [
"ship_only",
"pick_ship",
"pick_pack_ship",
"ship_transit",
"pick_ship_transit",
"pick_pack_ship_transit",
]
num = 1
for combination in itertools.product(reception_steps, delivery_steps):
if num == 1:
wh = self.create_wh({
"name": "X WH %s" % str(num),
"code": "X%s" % str(num),
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
else:
self.edit_wh(wh, {
"reception_steps": combination[0],
"delivery_steps": combination[1],
})
num += 1
| agpl-3.0 | -107,100,302,950,765,000 | 29.478261 | 78 | 0.456015 | false |
amerlyq/piony | piony/gstate.py | 1 | 3033 | from PyQt5.QtCore import QObject, pyqtSignal # , QRect, QPoint
# from PyQt5.QtWidgets import qApp
from piony import logger
from piony.config import ymlparser as yml
from piony.config.argparser import ArgParser
from piony.config.budparser import BudParser, BudError
from piony.config.keyparser import KeymapParser
class GState(QObject):
invalidated = pyqtSignal(dict)
def __init__(self, argv):
super().__init__()
logger.info('%s init', self.__class__.__qualname__)
self.active_window = '%1'
self.cfg = None
self.bud = None
self.now = None # Instant states like current visibility, etc
self.kmp = None
yml.init()
self._psArg = ArgParser()
self.update(argv)
def update(self, argv):
kgs = self.parse(argv)
# chg_gs = self.compare(kgs)
# if chg_gs:
# self.invalidated.emit(self.get_gs(), chg_gs)
logger.info('GState updated')
self.invalidated.emit(kgs)
def _set_args_from_command_line(self, cfg, args):
ar = [(k, v) for k, v in vars(args).items() if v]
for section, opts in cfg.items():
for k, v in ar:
if k in opts:
cfg[section][k] = str(v)
def parse(self, argv): # NEED: RFC
args = self._psArg.parse(argv[1:])
self._psArg.apply(args) # Set gvars
cfg = yml.parse(yml.G_CONFIG_PATH)
self.sty = yml.parse(yml.G_STYLE_PATH)
kp = KeymapParser()
self.kmp = kp.convert()
if args.kill:
print("kill:")
self.quit.emit()
self._psArg.apply(args) # Set gvars
self._set_args_from_command_line(cfg, args)
entries = args.buds if args.buds else str(cfg['Bud']['default'])
Bud_Ps = BudParser()
try:
bud = Bud_Ps.parse(entries)
except BudError as e:
print("Error:", e)
if not self.bud: # NOTE: work must go on if client args are bad?
# qApp.quit() # don't work until main loop
raise e
# TODO: Make 'bReload' as tuple to distinguish necessary refreshes.
bReload = {}
bReload['toggle'] = bool(0 == len(argv))
bReload['Window'] = bool(self.cfg and cfg['Window'] != self.cfg['Window'])
self.cfg = cfg
self.bud = bud
# TODO: ret whole new current state?
return bReload
def compare(self, kgs): # WARNING: broken
""" Used as separate function because of embedded file paths in arg """
# Compose dict of current GState variables
# curr_gs = self.get_gs()
# Detected changes in global state
chg_gs = [('cfg', 'Window'), 'bud']
# TODO: recursive diff cgs/kgs and inserting 1 in changed keys/branches
return chg_gs
# TODO: replace with namedtuple (use it to emit)
def get_gs(self):
return {k: v for k, v in self.__dict__.items()
if not k.startswith('__') and not callable(k)}
| gpl-3.0 | 6,719,100,791,870,633,000 | 33.465909 | 82 | 0.5727 | false |
Quantia-Analytics/AzureML-Regression-Example | Python files/transform.py | 1 | 2490 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 12:49:06 2015
@author: Steve Elston
"""
## The main function with a single argument, a Pandas data frame
## from the first input port of the Execute Python Script module.
def azureml_main(BikeShare):
import pandas as pd
from sklearn import preprocessing
import utilities as ut
import numpy as np
import os
## If not in the Azure environment, read the data from a csv
## file for testing purposes.
Azure = False
if(Azure == False):
pathName = "C:/Users/Steve/GIT/Quantia-Analytics/AzureML-Regression-Example/Python files"
fileName = "BikeSharing.csv"
filePath = os.path.join(pathName, fileName)
BikeShare = pd.read_csv(filePath)
## Drop the columns we do not need
BikeShare = BikeShare.drop(['instant',
'instant',
'atemp',
'casual',
'registered'], 1)
## Normalize the numeric columns
scale_cols = ['temp', 'hum', 'windspeed']
arry = BikeShare[scale_cols].as_matrix()
BikeShare[scale_cols] = preprocessing.scale(arry)
## Create a new column to indicate if the day is a working day or not.
work_day = BikeShare['workingday'].as_matrix()
holiday = BikeShare['holiday'].as_matrix()
BikeShare['isWorking'] = np.where(np.logical_and(work_day == 1, holiday == 0), 1, 0)
## Compute a new column with the count of months from
## the start of the series which can be used to model
## trend
BikeShare['monthCount'] = ut.mnth_cnt(BikeShare)
    ## Shift the order of the hour variable so that it is smoothly
    ## "humped" over 24 hours.
hr = BikeShare.hr.as_matrix()
BikeShare['xformHr'] = np.where(hr > 4, hr - 5, hr + 19)
## Add a variable with unique values for time of day for working
## and non-working days.
isWorking = BikeShare['isWorking'].as_matrix()
BikeShare['xformWorkHr'] = np.where(isWorking,
BikeShare.xformHr,
BikeShare.xformHr + 24.0)
BikeShare['dayCount'] = pd.Series(range(BikeShare.shape[0]))/24
    return BikeShare
| gpl-2.0 | -3,416,187,222,252,025,300 | 37.921875 | 97 | 0.562651 | false |
nbsdx/abac | examples/python_tests/acme_rockets_rt0/query.py | 1 | 3122 | #!/usr/bin/env python
"""
Run the queries described in README
cmd: env keystore=`pwd` ./query.py
"""
import os
import ABAC
ctxt = ABAC.Context()
# Keystore is the directory containing the principal credentials.
# Load existing principals and/or policy credentials
if (os.environ.has_key("keystore")) :
keystore=os.environ["keystore"]
ctxt.load_directory(keystore)
else:
print("keystore is not set...")
exit(1)
# retrieve principals' keyid value from local credential files
acmeID=ABAC.ID("Acme_ID.pem");
acmeID.load_privkey("Acme_private.pem");
acme=acmeID.keyid()
coyoteID=ABAC.ID("Coyote_ID.pem");
coyoteID.load_privkey("Coyote_private.pem");
coyote=coyoteID.keyid()
bigbirdID=ABAC.ID("Bigbird_ID.pem");
bigbirdID.load_privkey("Bigbird_private.pem");
bigbird=bigbirdID.keyid()
##########################################################################
# dump the loaded attribute policies
#
print "\n...policy attribute set..."
credentials = ctxt.credentials()
for credential in credentials:
print "context: %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# is coyote a preferred_customer of Acme ?
# role=[keyid:Acme].role:preferred_customer
# p =[keyid:coyote]
print "\n===good============ Acme.preferred_customer <- Coyote"
(success, credentials) = ctxt.query("%s.preferred_customer" % acme, coyote)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# can coyote buy rockets from Acme ?
# role=[keyid:Acme].role:buy_rockets
# p =[keyid:coyote]
print "\n===good============ Acme.buy_rockets <- Coyote"
(success, credentials) = ctxt.query("%s.buy_rockets" % acme, coyote)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# is Acme a friend of coyote ?
# role=[keyid:Coyote].role:friend
# p=[keyid:Acme]
print "\n===bad=============== Coyote.friend <- Acme"
(success, credentials) = ctxt.query("%s.friend" % coyote, acme)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
##########################################################################
# using complex role to ask a question.. expecting to fail
# role=[keyid:Acme].role:buy_rockets
# p=[keyid:Acme].role:preferred_customer
print "\n===bad?=============== Acme.buy_rockets <- Acme.preferred_customer"
(success, credentials) = ctxt.query("%s.buy_rockets" % acme, "%s.preferred_customer" % acme)
if success:
print "success!"
else:
print "failure!"
for credential in credentials:
print "credential %s <- %s" % (credential.head().string(), credential.tail().string())
| mit | -4,170,137,190,318,609,000 | 30.535354 | 92 | 0.601858 | false |
shantanu561993/FAndR | docs/conf.py | 1 | 8415 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FAR documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import FAR
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Find and Replace'
copyright = u'2015, Shantanu Khandelwal'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = FAR.__version__
# The full version, including alpha/beta/rc tags.
release = FAR.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FARdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'FAR.tex',
u'Find and Replace Documentation',
u'Shantanu Khandelwal', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'FAR',
u'Find and Replace Documentation',
[u'Shantanu Khandelwal'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FAR',
u'Find and Replace Documentation',
u'Shantanu Khandelwal',
'FAR',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | 2,237,559,652,515,695,600 | 29.6 | 76 | 0.7041 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_service_association_links_operations.py | 1 | 4964 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations:
"""ServiceAssociationLinksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
virtual_network_name: str,
subnet_name: str,
**kwargs
) -> "_models.ServiceAssociationLinksListResult":
"""Gets a list of service association links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceAssociationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ServiceAssociationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceAssociationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'} # type: ignore
| mit | 7,083,261,679,586,513,000 | 47.194175 | 223 | 0.675463 | false |
volodymyrss/3ML | threeML/plugins/spectrum/binned_spectrum_set.py | 1 | 3209 | import numpy as np
from threeML.utils.time_interval import TimeIntervalSet
from threeML.plugins.spectrum.binned_spectrum import BinnedSpectrum
class BinnedSpectrumSet(object):
def __init__(self, binned_spectrum_list, reference_time=0.0, time_intervals=None):
"""
a set of binned spectra with optional time intervals
:param binned_spectrum_list: lit of binned spectal
:param reference_time: reference time for time intervals
:param time_intervals: optional timeinterval set
"""
self._binned_spectrum_list = binned_spectrum_list # type: list(BinnedSpectrum)
self._reference_time = reference_time
# normalize the time intervals if there are any
if time_intervals is not None:
self._time_intervals = time_intervals - reference_time # type: TimeIntervalSet
assert len(time_intervals) == len(
                binned_spectrum_list), 'time intervals must be the same length as binned spectra'
else:
self._time_intervals = None
@property
def reference_time(self):
return self._reference_time
def __getitem__(self, item):
return self._binned_spectrum_list[item]
def __len__(self):
return len(self._binned_spectrum_list)
def time_to_index(self, time):
"""
get the index of the input time
:param time: time to search for
:return: integer
"""
assert self._time_intervals is not None, 'This spectrum set has no time intervals'
return self._time_intervals.containing_bin(time)
def sort(self):
"""
sort the bin spectra in place according to time
:return:
"""
assert self._time_intervals is not None, 'must have time intervals to do sorting'
# get the sorting index
idx = self._time_intervals.argsort()
# reorder the spectra
        self._binned_spectrum_list = [self._binned_spectrum_list[i] for i in idx]
# sort the time intervals in place
self._time_intervals.sort()
@property
def quality_per_bin(self):
return np.array([spectrum.quality for spectrum in self._binned_spectrum_list])
@property
def n_channels(self):
return self.counts_per_bin.shape[1]
@property
def counts_per_bin(self):
return np.array([spectrum.counts for spectrum in self._binned_spectrum_list])
@property
def count_errors_per_bin(self):
return np.array([spectrum.count_errors for spectrum in self._binned_spectrum_list])
@property
def rates_per_bin(self):
return np.array([spectrum.rates for spectrum in self._binned_spectrum_list])
@property
def rate_errors_per_bin(self):
return np.array([spectrum.rate_errors for spectrum in self._binned_spectrum_list])
@property
def sys_errors_per_bin(self):
return np.array([spectrum.sys_errors for spectrum in self._binned_spectrum_list])
@property
def exposure_per_bin(self):
return np.array([spectrum.exposure for spectrum in self._binned_spectrum_list])
@property
def time_intervals(self):
return self._time_intervals
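if __name__ == "__main__":
    # Usage sketch (not part of the original module): only the constructor
    # signature defined above is exercised here.  Real use passes a list of
    # BinnedSpectrum objects and, optionally, a TimeIntervalSet aligned with
    # them instead of the empty list used for illustration.
    empty_set = BinnedSpectrumSet([], reference_time=0.0)
    print(len(empty_set))            # 0
    print(empty_set.reference_time)  # 0.0
    print(empty_set.time_intervals)  # None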
| bsd-3-clause | -6,567,661,062,073,182,000 | 25.303279 | 96 | 0.647554 | false |
lowks/SDST | seqtools/demultiplexer.py | 1 | 7927 | import argparse
import subprocess
import os
import collections
import pylev
from seqtools.utils import revcomp,fileOpen
from seqtools.fastq import Fastq
def isIndexRevComp(indexfile,indexes,n=500000):
"""Determine if the indexes are reverse complemented or not
:param indexfile: filename of the Fastq index file
:param indexes: list or tuple of index strings
:param n: integer number of reads to sample
"""
print("HERE")
ifile = Fastq(indexfile)
ilength=len(indexes[0])
print(ilength)
indexreads = collections.defaultdict(int)
for i in range(n):
indexreads[ifile.next().sequence[:ilength]]+=1
counts = {'normal':0,
'revcomp':0}
for k,v in list(indexreads.items()):
print(k,v)
for i in indexes:
if(pylev.levenshtein(k,i)<=1):
counts['normal']+=v
continue
if(pylev.levenshtein(k,revcomp(i))<=1):
counts['revcomp']+=v
if(counts['revcomp']>counts['normal']):
print('using revcomp')
else:
print('NOT revcomp')
return(counts['revcomp']>counts['normal'])
def demultiplex(readfile,
indexfile,
indexes,
readfile2=None,
indexfile2=None):
"""Demultiplex from separate FASTQ files.
All FASTQ files can be gzipped (with suffix .gz).
:param readfile: The filename of the first fastq file
:param indexfile: The filename of the first index fastq file
:param indexes: An iterable of indexes. If dual-barcoding is used, the indexes should be comma-separated strings, one string for each barcode pair.
:param indexfile2: The filename of the second index fastq file. If this parameter is included, then the indexes parameter should be a set of comma-separated pairs of indexes.
:param readfile2: The filename of the second fastq file [optional]
"""
# single readfile, single indexfile
if(readfile2 is None) and (indexfile2 is None):
rfile1 = Fastq(readfile)
(rpath,rname) = os.path.split(readfile)
ifile = Fastq(indexfile)
indexRevComp = isIndexRevComp(indexfile,indexes)
        ofile1 = {}
        indexlen = len(indexes[0])
        existingIndexes = []
for i in indexes:
ofname1 = os.path.join(rpath,i + "_" + rname)
if(not os.path.exists(ofname1)):
ofile1[i]=fileOpen(os.path.join(rpath,i + "_" + rname),'w')
else:
print(ofname1," already exists, skipping")
existingIndexes.append(i)
for i in existingIndexes:
indexes.remove(i)
if(len(indexes)==0):
exit(0)
for (r1,i) in zip(rfile1,ifile):
try:
if indexRevComp:
i2 = revcomp(i.sequence[:indexlen])
ofile1[i2].write(str(r1))
else:
i2 = i.sequence[:indexlen]
ofile1[i2].write(str(r1))
except KeyError:
pass
rfile1.close()
ifile.close()
for ofile in list(ofile1.values()):
ofile.close()
## for i in indexes:
## os.rename(os.path.join(rpath,'tmp.' + i + "_" + rname),
## os.path.join(rpath,i + "_" + rname))
# two readfiles, single indexfile
if(readfile2 is not None) and (indexfile2 is None):
print("here1")
rfile1 = Fastq(readfile)
rfile2 = Fastq(readfile2)
(rpath,rname) = os.path.split(readfile)
(rpath2,rname2) = os.path.split(readfile2)
ifile = Fastq(indexfile)
indexRevComp = isIndexRevComp(indexfile,indexes)
ofile1 = {}
ofile2 = {}
existingIndexes = []
for i in indexes:
ofname1 = os.path.join(rpath,i + "_" + rname)
ofname2 = os.path.join(rpath2,i + "_" + rname2)
if(os.path.exists(ofname1) and os.path.exists(ofname2)):
print(ofname1,ofname2, " already exist, skipping")
existingIndexes.append(i)
else:
ofile1[i]=fileOpen(os.path.join(rpath,i + "_" + rname),'w')
ofile2[i]=fileOpen(os.path.join(rpath2,i + "_" + rname2),'w')
for i in existingIndexes:
indexes.remove(i)
if(len(indexes)==0):
exit(0)
indexlen = len(indexes[0])
for (r1,r2,i) in zip(rfile1,rfile2,ifile):
try:
if indexRevComp:
i2 = revcomp(i.sequence[:indexlen])
ofile1[i2].write(str(r1))
ofile2[i2].write(str(r2))
else:
i2 = i.sequence[:indexlen]
ofile1[i2].write(str(r1))
ofile2[i2].write(str(r2))
except KeyError:
pass
rfile1.close()
rfile2.close()
ifile.close()
for ofile in list(ofile1.values()):
ofile.close()
for ofile in list(ofile2.values()):
ofile.close()
## for i in indexes:
## print os.path.join(rpath,'tmp.' + i + "_" + rname),os.path.join(rpath,i + "_"+rname)
## os.rename(os.path.join(rpath,'tmp.' + i + "_" + rname),
## os.path.join(rpath,i + "_"+rname))
## os.rename(os.path.join(rpath2,'tmp.' + i +"_"+ rname2),
## os.path.join(rpath2,i +"_"+ rname2))
# two readfiles, two indexfiles
if(readfile2 is not None) and (indexfile2 is not None):
rfile1 = Fastq(readfile)
rfile2 = Fastq(readfile2)
(rpath,rname) = os.path.split(readfile)
(rpath2,rname2) = os.path.split(readfile2)
ifile = Fastq(indexfile)
ifile2 = Fastq(indexfile2)
indexes = [tuple(x.split(',')) for x in indexes]
indexRevComp = isIndexRevComp(indexfile,[i[0] for i in indexes])
ofile1 = {}
ofile2 = {}
existingIndexes = []
for j in indexes:
i = ''.join(j)
ofname1 = os.path.join(rpath,i + "_" + rname)
ofname2 = os.path.join(rpath2,i + "_" + rname2)
if(os.path.exists(ofname1) and os.path.exists(ofname2)):
print(ofname1,ofname2, " already exist, skipping")
existingIndexes.append(i)
else:
ofile1[i]=fileOpen(ofname1,'w')
ofile2[i]=fileOpen(ofname2,'w')
for i in existingIndexes:
indexes.remove(i)
if(len(indexes)==0):
exit(0)
indexlen = len(indexes[0][0])
for (r1,r2,i,i2) in zip(rfile1,rfile2,ifile,ifile2):
try:
if indexRevComp:
ir = revcomp(i.sequence[:indexlen])
ir2 = revcomp(i2.sequence[:indexlen])
istr = ir+ir2
ofile1[istr].write(str(r1))
ofile2[istr].write(str(r2))
else:
ir = i.sequence[:indexlen]
ir2 = i2.sequence[:indexlen]
istr = ir+ir2
ofile1[istr].write(str(r1))
ofile2[istr].write(str(r2))
except KeyError:
pass
rfile1.close()
rfile2.close()
ifile.close()
ifile2.close()
for ofile in list(ofile1.values()):
ofile.close()
for ofile in list(ofile2.values()):
ofile.close()
## for i in indexes:
## ofname1 = os.path.join(rpath,''.join(i) + "_" + rname)
## ofname2 = os.path.join(rpath2,''.join(i) + "_" + rname2)
## os.rename(os.path.join(rpath,'tmp.' + ofname1),
## os.path.join(rpath,ofname1))
## os.rename(os.path.join(rpath2,'tmp.'+ofname2),
## os.path.join(rpath2,ofname2))
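if __name__ == "__main__":
    # Usage sketch with placeholder file names (assumptions, not files shipped
    # with seqtools): demultiplex a paired-end run on a single 8 bp index read.
    # For dual-indexed runs, pass comma-separated index pairs in ``indexes``
    # and supply ``indexfile2`` as well.
    demultiplex("sample_R1.fastq.gz",
                "sample_I1.fastq.gz",
                ["ACGTACGT", "TGCATGCA"],
                readfile2="sample_R2.fastq.gz")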
| mit | 2,461,416,735,547,084,000 | 37.110577 | 181 | 0.526303 | false |
ragavvenkatesan/Convolutional-Neural-Networks | yann/core/conv.py | 1 | 10586 | """
``yann.core.conv.py`` is one file that contains all the convolution operators.
It contains two functions for performing either 2d convolution (``conv2d``) or 3d convolution
(``conv3d``).
These functions shall be called by every convolution layer from ``yann.layers.py``
TODO:
* Add 3D convolution support from theano.
* Add Masked convolution support.
"""
from theano.tensor.nnet import conv2d
from theano.tensor.nnet.abstract_conv import conv2d_grad_wrt_inputs as deconv2d
from theano.tensor.nnet.abstract_conv import get_conv_output_shape as conv_shape
class convolver_2d(object):
"""
Class that performs convolution
This class basically performs convolution. These ouputs can be probed using the
convolution layer if needed. This keeps things simple.
Args:
        input: This variable should either be a ``theano.tensor4`` (``theano.matrix``
            reshaped also works) variable or an output from a previous layer which is
a ``theano.tensor4`` convolved with a ``theano.shared``. The input should
be of shape ``(batchsize, channels, height, width)``. For those who have
tried ``pylearn2`` or such, this is called bc01 format.
        filters: This variable should be ``theano.shared`` variables of filter weights; it
            could even be a filter bank. ``filters`` should be of shape ``(nchannels,
            nkerns, filter_height, filter_width)``. ``nchannels`` is the number of input \
            channels and ``nkerns`` is the number of kernels or output channels.
subsample: Stride Tuple of ``(int, int)``.
        filter_shape: This variable should be a tuple or an array:
            ``[nkerns, nchannels, filter_height, filter_width]``
        image_shape: This variable should be a tuple or an array:
``[batchsize, channels, height, width]``
``image_shape[1]`` must be equal to ``filter_shape[1]``
border_mode: The input to this can be either ``'same'`` or other theano defaults
Notes:
* ``conv2d.out`` output, Output that could be provided as
output to the next layer or to other convolutional layer options.
          The size of the output depends on border mode and subsample
operation performed.
* ``conv2d.out_shp``: (``int``, ``int``), A tuple (height, width) of all feature maps
The options for ``border_mode`` input which at the moment of writing this doc are
* ``'valid'`` - apply filter wherever it completely overlaps with the
input. Generates output of shape ``input shape - filter shape + 1``
* ``'full'``- apply filter wherever it partly overlaps with the input.
Generates output of shape ``input shape + filter shape - 1``
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``<int>``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(<int1>, <int2>)``: pad input with a symmetric border of ``int1``
rows and ``int2`` columns, then perform a valid convolution.
Refer to `theano documentation's convolution page
<http://deeplearning.net/software/theano/library/tensor/nnet/conv.html>`_
for more details on this.
Basically cuDNN is used for ``same`` because at the moment of writing
    this function, ``theano.conv2d`` doesn't support ``same`` convolutions
on the GPU. For everything else, ``theano`` default will be used.
TODO:
Implement ``border_mode = 'same'`` for libgpuarray backend. As of now only supports
CUDA backend.
        Need to do something about this. With V0.10 of theano, I cannot use cuda.dnn for
same convolution.
"""
def __init__ ( self,
input,
filters,
subsample,
filter_shape,
image_shape,
border_mode = 'valid',
verbose = 1
):
if not image_shape[1] == filter_shape[1]:
raise Exception ("input_shape[1] and filter_shape[1] must match")
if verbose >=3 :
print "... creating convolution operator"
_,_,_out_height,_out_width = conv_shape (image_shape = image_shape,
kernel_shape = filter_shape,
border_mode = border_mode,
subsample = subsample)
self.out = conv2d (
input = input,
filters = filters,
input_shape = image_shape,
filter_shape = filter_shape,
subsample = subsample,
border_mode = border_mode,
)
self.out_shp = (_out_height, _out_width)
class deconvolver_2d(object):
"""
class that performs deconvolution
    This class basically performs transposed convolution (deconvolution).
Args:
        input: This variable should either be a ``theano.tensor4`` (``theano.matrix``
            reshaped also works) variable or an output from a previous layer which is
a ``theano.tensor4`` convolved with a ``theano.shared``. The input should
be of shape ``(batchsize, channels, height, width)``. For those who have
tried ``pylearn2`` or such, this is called bc01 format.
        filters: This variable should be ``theano.shared`` variables of filter weights; it
            could even be a filter bank. ``filters`` should be of shape ``(nchannels,
            nkerns, filter_height, filter_width)``. ``nchannels`` is the number of input \
            channels and ``nkerns`` is the number of kernels or output channels.
subsample: Stride Tuple of ``(int, int)``.
        filter_shape: This variable should be a tuple or an array:
            ``[nkerns, nchannels, filter_height, filter_width]``
        image_shape: This variable should be a tuple or an array:
``[batchsize, channels, height, width]``
``image_shape[1]`` must be equal to ``filter_shape[1]``
        output_shape: Requested size of the output image. This variable should be a tuple.
border_mode: The input to this can be either ``'same'`` or other theano defaults
Notes:
* ``conv2d.out`` output, Output that could be provided as
output to the next layer or to other convolutional layer options.
The size of the outut depends on border mode and subsample
operation performed.
* ``conv2d.out_shp``: (``int``, ``int``), A tuple (height, width) of all feature maps
The options for ``border_mode`` input which at the moment of writing this doc are
* ``'valid'`` - apply filter wherever it completely overlaps with the
input. Generates output of shape ``input shape - filter shape + 1``
* ``'full'``- apply filter wherever it partly overlaps with the input.
Generates output of shape ``input shape + filter shape - 1``
* ``'half'``: pad input with a symmetric border of ``filter rows // 2``
rows and ``filter columns // 2`` columns, then perform a valid
convolution. For filters with an odd number of rows and columns, this
leads to the output shape being equal to the input shape.
* ``<int>``: pad input with a symmetric border of zeros of the given
width, then perform a valid convolution.
* ``(<int1>, <int2>)``: pad input with a symmetric border of ``int1``
rows and ``int2`` columns, then perform a valid convolution.
Refer to `theano documentation's convolution page
<http://deeplearning.net/software/theano/library/tensor/nnet/conv.html>`_
for more details on this.
Basically cuDNN is used for ``same`` because at the moment of writing
    this function, ``theano.conv2d`` doesn't support ``same`` convolutions
on the GPU. For everything else, ``theano`` default will be used.
TODO:
Implement ``border_mode = 'same'`` and full for libgpuarray backend. As of now only supports
CUDA backend.
        Need to do something about this. With V0.10 of theano, I cannot use cuda.dnn for
same convolution.
Right now deconvolution works only with ``border_mode = 'valid'``
"""
def __init__ ( self,
input,
filters,
subsample,
filter_shape,
image_shape,
output_shape,
border_mode = 'valid',
verbose = 1
):
# if not image_shape[1] == filter_shape[1]:
# raise Exception ("input_shape[1] and filter_shape[1] must match")
if verbose >=3 :
print "... creating deconvolution operator"
# Transpose the convoltuion
# self.filters.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1]
_out_height = output_shape[2]
_out_width = output_shape[3]
self.out = deconv2d (
output_grad = input,
filters = filters,
input_shape = output_shape,
#filter_shape = filter_shape,
border_mode = border_mode,
subsample = subsample,
)
self.out_shp = (_out_height, _out_width)
# Check by using the reverse on a convolution shape, the actual size.
_,_,_in_height,_in_width = conv_shape (image_shape = output_shape,
kernel_shape = filter_shape,
border_mode = border_mode,
subsample = subsample)
if not _in_height == image_shape [2] and _in_width == image_shape [3]:
            raise Exception (" This dimensionality of the output image cannot be achieved.")
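if __name__ == '__main__':  # pragma: no cover
    # Usage sketch (not part of the original module): convolve a batch of four
    # single-channel 28x28 images with ten 5x5 kernels.  Shapes and the random
    # data are illustrative assumptions; theano and numpy must be installed.
    import numpy
    import theano
    import theano.tensor as T

    images = T.tensor4('images')
    kernels = theano.shared(
        numpy.random.randn(10, 1, 5, 5).astype(theano.config.floatX))
    conv = convolver_2d(input=images,
                        filters=kernels,
                        subsample=(1, 1),
                        filter_shape=(10, 1, 5, 5),
                        image_shape=(4, 1, 28, 28),
                        border_mode='valid')
    convolve = theano.function([images], conv.out)
    data = numpy.random.randn(4, 1, 28, 28).astype(theano.config.floatX)
    print (convolve(data).shape, conv.out_shp)  # (4, 10, 24, 24) and (24, 24)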
if __name__ == '__main__': #pragma: no cover
    pass
| mit | 6,412,593,143,335,912,000 | 44.051064 | 100 | 0.573871 | false |
moberweger/deep-prior-pp | src/trainer/optimizer.py | 1 | 4689 | """Basis for different optimization algorithms.
Optimizer provides interface for creating the update rules for gradient based optimization.
It includes SGD, NAG, RMSProp, etc.
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <[email protected]>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import theano
import theano.tensor as T
import numpy
__author__ = "Markus Oberweger <[email protected]>"
__copyright__ = "Copyright 2015, ICG, Graz University of Technology, Austria"
__credits__ = ["Paul Wohlhart", "Markus Oberweger"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "[email protected]"
__status__ = "Development"
class Optimizer(object):
"""
Class with different optimizers of the loss function
"""
def __init__(self, grads, params):
"""
Initialize object
:param grads: gradients of the loss function
:param params: model parameters that should be updated
"""
self.grads = grads
self.params = params
self.updates = []
self.shared = []
if len(grads) != len(params):
print "Warning: Size of gradients ({}) does not fit size of parameters ({})!".format(len(grads), len(params))
def ADAM(self, learning_rate=0.0002, beta1=0.9, beta2=0.999, epsilon=1e-8, gamma=1-1e-8):
"""
Adam update rule by Kingma and Ba, ICLR 2015, version 2 (with momentum decay).
:param learning_rate: alpha in the paper, the step size
:param beta1: exponential decay rate of the 1st moment estimate
:param beta2: exponential decay rate of the 2nd moment estimate
:param epsilon: small epsilon to prevent divide-by-0 errors
:param gamma: exponential increase rate of beta1
:return: updates
"""
t = theano.shared(numpy.cast[theano.config.floatX](1.0)) # timestep, for bias correction
beta1_t = beta1*gamma**(t-1.) # decay the first moment running average coefficient
for param_i, grad_i in zip(self.params, self.grads):
mparam_i = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 1st moment
self.shared.append(mparam_i)
vparam_i = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 2nd moment
self.shared.append(vparam_i)
m = beta1_t * mparam_i + (1. - beta1_t) * grad_i # new value for 1st moment estimate
v = beta2 * vparam_i + (1. - beta2) * T.sqr(grad_i) # new value for 2nd moment estimate
m_unbiased = m / (1. - beta1**t) # bias corrected 1st moment estimate
v_unbiased = v / (1. - beta2**t) # bias corrected 2nd moment estimate
w = param_i - (learning_rate * m_unbiased) / (T.sqrt(v_unbiased) + epsilon) # new parameter values
self.updates.append((mparam_i, m))
self.updates.append((vparam_i, v))
self.updates.append((param_i, w))
self.updates.append((t, t + 1.))
return self.updates
def RMSProp(self, learning_rate=0.01, decay=0.9, epsilon=1.0 / 100.):
"""
RMSProp of Tieleman et al.
:param learning_rate: learning rate
:param decay: decay rate of gradient history
:param epsilon: gradient clip
:return: update
"""
for param_i, grad_i in zip(self.params, self.grads):
# Accumulate gradient
msg = theano.shared(numpy.zeros(param_i.get_value().shape, dtype=theano.config.floatX))
self.shared.append(msg)
new_mean_squared_grad = (decay * msg + (1 - decay) * T.sqr(grad_i))
# Compute update
rms_grad_t = T.sqrt(new_mean_squared_grad)
rms_grad_t = T.maximum(rms_grad_t, epsilon)
delta_x_t = -learning_rate * grad_i / rms_grad_t
# Apply update
self.updates.append((param_i, param_i + delta_x_t))
self.updates.append((msg, new_mean_squared_grad))
return self.updates
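if __name__ == "__main__":
    # Usage sketch (not part of the original module): minimise a toy quadratic
    # with the ADAM rule defined above.  The learning rate and iteration count
    # are illustrative assumptions.
    w = theano.shared(numpy.zeros(3, dtype=theano.config.floatX), name='w')
    loss = T.sum(T.sqr(w - 1.))
    grads = T.grad(loss, [w])
    opt = Optimizer(grads, [w])
    step = theano.function([], loss, updates=opt.ADAM(learning_rate=0.1))
    for _ in range(200):
        step()
    print w.get_value()  # approaches [ 1.  1.  1.]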
| gpl-3.0 | -5,467,339,482,598,829,000 | 39.076923 | 121 | 0.641288 | false |
synw/django-mqueue | mqueue/signals.py | 1 | 2131 | from __future__ import print_function
from mqueue.models import MEvent
from mqueue.utils import get_user, get_url, get_admin_url, get_object_name
def mmessage_create(sender, instance, created, **kwargs):
if created is True:
# try to get the user
user = get_user(instance)
# try to get the object name
obj_name = get_object_name(instance, user)
# try to get the admin url
admin_url = get_admin_url(instance)
event_class = instance.__class__.__name__ + ' created'
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
url=get_url(instance),
admin_url=admin_url,
event_class=event_class,
)
return
def mmessage_delete(sender, instance, **kwargs):
# try to get the user
user = get_user(instance)
# try to get the object name
obj_name = get_object_name(instance, user)
event_class = instance.__class__.__name__ + ' deleted'
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
event_class=event_class,
)
return
def mmessage_save(sender, instance, created, **kwargs):
if created is False:
# try to get the user
user = get_user(instance)
if 'name' not in kwargs.keys():
# try to get the object name
obj_name = get_object_name(instance, user)
else:
            obj_name = kwargs['name']
# try to get the admin url
admin_url = get_admin_url(instance)
event_str = ' edited'
if created:
event_str = ' created'
event_class = instance.__class__.__name__ + event_str
# create event
MEvent.objects.create(
model=instance.__class__,
name=obj_name,
obj_pk=instance.pk,
user=user,
url=get_url(instance),
admin_url=admin_url,
event_class=event_class,
)
return
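# --- Hedged usage sketch (not part of the original module) ---
# These handlers are meant to be connected to Django model signals by the
# project using django-mqueue, e.g.:
#
#     from django.db.models.signals import post_save, post_delete
#     post_save.connect(mmessage_save, sender=MyModel)      # MyModel is a placeholder
#     post_delete.connect(mmessage_delete, sender=MyModel)
#
# mmessage_create can be connected instead of mmessage_save when only creation
# events should be tracked.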
| mit | 5,622,654,903,332,074,000 | 29.442857 | 74 | 0.56077 | false |
rodgzilla/fractal_GAN | src/mandel_test.py | 1 | 5720 | import sys
import pygame
from pygame import gfxdraw
from pygame import Color
import cmath
import random
pygame.init()
# The small size will help for future generation.
unit = 75
width = 3 * unit
height = 2 * unit
# nb_sections represents the number of slices per axis we are going
# to consider for the zoom. The number of sections considered will be
# nb_sections * nb_sections
nb_sections = 10
section_width = int(width / nb_sections)
section_height = int(height / nb_sections)
# We select the region to zoom on from amongst the top_select brightest sections
top_select = 20
def convert_pixel_complex(x, y, re_min, re_max, im_min, im_max):
"""
Converts pixel coordinates to complex plane coordinates. The re and
im arguments indicates the part of the complex plane represented by the window.
"""
re = x * (re_max - re_min) / width + re_min
im = y * (im_max - im_min) / height + im_min
return complex(re, im)
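# Illustrative check (not in the original script): with the default window of
# [-2, 1] x [-1, 1], pixel (0, 0) maps to complex(-2, -1) and pixel
# (width, height) maps to complex(1, 1), so the screen spans the classic
# Mandelbrot viewport.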
def draw_mandel(window, sequence, max_iter, re_min = -2, re_max = 1, im_min = -1, im_max = 1):
"""
Computes the mandelbrot set on a given part of the complex plane.
"""
screen_array = [[0] * height for _ in range(width)]
# For every pixel of the screen
for x in range(width):
for y in range(height):
# Compute the associated complex number
c = convert_pixel_complex(x, y, re_min, re_max, im_min, im_max)
# Then, compute max_iter element of sequence function with
# c as initial value
z = c
for i in range(max_iter):
z = sequence(z, c)
# If we detect that the sequence diverges
if (z.real * z.real + z.imag * z.imag) > 4:
# We draw a pixel which intensity corresponds to
# the number of iterations we ran before detecting
# the divergence.
color_ratio = int((i * 255.) / max_iter)
gfxdraw.pixel(window, x, y, Color(color_ratio, color_ratio, color_ratio, 255))
screen_array[x][y] = color_ratio
break
else:
# If we did not detect a divergence in max_iter steps,
# we consider that the sequence does not diverge and
# draw a black pixel.
gfxdraw.pixel(window, x, y, Color(0,0,0,255))
pygame.display.flip()
return screen_array
def sec_number_to_indices(w_sec, h_sec):
"""
Converts sections indices into window coordinates.
"""
x_min = w_sec * section_width
x_max = (w_sec + 1) * section_width
y_min = h_sec * section_height
y_max = (h_sec + 1) * section_height
return x_min, x_max, y_min, y_max
def section_intensity(screen_array, weight, w_sec, h_sec):
"""
Computes the weighted average of the pixel intensity after
computing the Mandelbrot set.
"""
x_min, x_max, y_min, y_max = sec_number_to_indices(w_sec, h_sec)
s = sum((weight(screen_array[x][y]) for x in range(x_min, x_max) for y in range(y_min, y_max)))
norm = section_width * section_height
return s / norm
def sections_to_intensities(screen_array, weight = lambda x: x):
"""
Creates a dictionary which associates sections indices to their
weighted average pixel intensity.
"""
sec_to_int = {}
for w_sec in range(nb_sections):
for h_sec in range(nb_sections):
sec_to_int[(w_sec, h_sec)] = section_intensity(screen_array, weight, w_sec, h_sec)
return sec_to_int
def sort_section_intensities(sec_to_int):
"""
Sorts the sections indices according to their intensities in
decreasing order.
"""
return sorted(sec_to_int.keys(), key = sec_to_int.get, reverse = True)
def generate_fractal_sequence(window, sequence = lambda z, c: z**2 + c, seq_len = 8, top_select = 5):
"""
    Generates a sequence of zooms on the Mandelbrot set. seq_len
    pictures will be generated and the zoom will choose amongst the
    top_select most intense sections.
"""
tl = complex(-2, 1) # top left complex number
br = complex(1, -1) # bottom right complex number
for i in range(seq_len):
min_re, max_re = tl.real, br.real
min_im, max_im = br.imag, tl.imag
# Experimental formula to have greater max_iter when we zoom
max_iter = 50 + i ** 3 * 16
print('iteration', i + 1)
print('min_re, max_re = ', min_re, ',', max_re)
print('min_im, max_im = ', min_im, ',', max_im)
print('max_iter', max_iter)
# Draw the fractal in the window, divide the result in
# sections and compute their intensities. Chose one of the
# most intense section and update the top left and bottom
# right complex numbers to zoom on this section.
screen_array = draw_mandel(window, sequence, max_iter, min_re, max_re, min_im, max_im)
sec_to_int = sections_to_intensities(screen_array)
w_sec_max, h_sec_max = random.choice(sort_section_intensities(sec_to_int)[:top_select])
x_min, x_max, y_min, y_max = sec_number_to_indices(w_sec_max, h_sec_max)
tl = convert_pixel_complex(x_min, y_min, min_re, max_re, min_im, max_im)
br = convert_pixel_complex(x_max, y_max, min_re, max_re, min_im, max_im)
if __name__ == '__main__':
window = pygame.display.set_mode((width, height))
generate_fractal_sequence(window, seq_len = 6)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
| gpl-3.0 | 5,040,484,455,637,613,000 | 37.648649 | 109 | 0.597203 | false |
hsoft/pdfmasher | ebooks/compression/palmdoc.py | 1 | 1892 | # Copyright 2008, Kovid Goyal <kovid at kovidgoyal.net>
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
from struct import pack
def compress_doc(data):
out = bytearray()
i = 0
ldata = len(data)
while i < ldata:
if i > 10 and (ldata - i) > 10:
chunk = b''
match = -1
for j in range(10, 2, -1):
chunk = data[i:i+j]
try:
match = data.rindex(chunk, 0, i)
except ValueError:
continue
if (i - match) <= 2047:
break
match = -1
if match >= 0:
n = len(chunk)
m = i - match
code = 0x8000 + ((m << 3) & 0x3ff8) + (n - 3)
out += pack(b'>H', code)
i += n
continue
och = data[i]
ch = bytes([och])
i += 1
if ch == b' ' and (i + 1) < ldata:
onch = data[i]
if onch >= 0x40 and onch < 0x80:
out += pack(b'>B', onch ^ 0x80)
i += 1
continue
if och == 0 or (och > 8 and och < 0x80):
out += ch
else:
j = i
binseq = [ch]
while j < ldata and len(binseq) < 8:
och = data[j]
ch = bytes([och])
if och == 0 or (och > 8 and och < 0x80):
break
binseq.append(ch)
j += 1
out += pack(b'>B', len(binseq))
out += b''.join(binseq)
i += len(binseq) - 1
return bytes(out)
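if __name__ == '__main__':
    # Illustrative self-test added for clarity; the sample data is hypothetical
    # and not part of the original module.
    sample = b'hello hello hello world ' * 4
    packed = compress_doc(sample)
    print('%d bytes -> %d bytes' % (len(sample), len(packed)))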
| gpl-3.0 | -5,296,342,882,972,843,000 | 31.067797 | 91 | 0.426004 | false |
markalansmith/draftmim | spider/nbadraft_net/nbadraft_net/spiders/nbadraft.py | 1 | 2465 | # -*- coding: utf-8 -*-
import scrapy
import urlparse
from urllib2 import quote
from nbadraft_net import PlayerItem
class NbadraftSpider(scrapy.Spider):
name = "nbadraft"
allowed_domains = ["nbadraft.net"]
start_urls = (
'http://www.nbadraft.net/2016mock_draft',
)
def parse(self, response):
selector = scrapy.Selector(response)
updated_on = selector.xpath('//p[@class="updated"]/text()').extract()[0]
mock_draft_one = selector.xpath('//div[@id="nba_consensus_mock1"]')
mock_draft_two = selector.xpath('//div[@id="nba_consensus_mock2"]')
for mock_draft in [mock_draft_one, mock_draft_two]:
player_rows = mock_draft.xpath('.//table/tbody/tr')
for player_row in player_rows:
player_item = PlayerItem()
player_info = player_row.xpath('./td/text()').extract()
player_name = player_row.xpath('./td/a/text()').extract()[0]
player_page = player_row.xpath('./td/a/@href').extract()[-1]
player_page_url = urlparse.urljoin(response.url, player_page.strip())
player_page_request = scrapy.Request(player_page_url, callback=self.parse_player_page_request)
player_mock_draft_pos = int(player_info[0])
player_height = player_info[2]
player_weight = int(player_info[3])
player_position = player_info[4]
player_school = player_info[5]
player_class = player_info[6]
player_item['name'] = player_name
self.logger.info("PlayerInfo: %s, Player Name: %s, Player Page: %s" % (str(player_info), player_name, str(player_page_request),))
yield player_page_request
def parse_player_page_request(self, response):
selector = scrapy.Selector(response)
player_stats = selector.xpath('//div[@id="nba_player_stats"]')
player_img_src = player_stats.xpath('./img/@src').extract()
player_attribute_scores = selector.xpath('//p[@class="nba_player_attrib_score"]/text()').extract()
player_overall_score = selector.xpath('//p[@class="whitebox"]/text()').extract()
player_notes = selector.xpath('//div[@id="nbap_content_bottom"]/p/text()').extract()
player_videos = selector.xpath('//div[@id="nbap_content_bottom"]/p/iframe/@src').extract()
return
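# --- Hedged usage note (not part of the original spider) ---
# From the scrapy project directory this spider would normally be run with:
#
#     scrapy crawl nbadraft
#
# parse_player_page_request currently extracts the player page fields into
# local variables without yielding an item, so callers should not expect
# items from that callback yet.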
| apache-2.0 | -2,331,472,478,925,752,000 | 41.5 | 145 | 0.586207 | false |
ngageoint/scale | scale/recipe/configuration/json/recipe_config_v6.py | 1 | 8823 | """Manages the v6 recipe configuration schema"""
from __future__ import unicode_literals
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from job.configuration.mount import HostMountConfig, VolumeMountConfig
from job.execution.configuration.volume import HOST_TYPE, VOLUME_TYPE
from recipe.configuration.configuration import DEFAULT_PRIORITY, RecipeConfiguration
from recipe.configuration.exceptions import InvalidRecipeConfiguration
SCHEMA_VERSION = '7'
SCHEMA_VERSIONS = ['6', '7']
RECIPE_CONFIG_SCHEMA = {
'type': 'object',
'required': ['output_workspaces'],
'additionalProperties': False,
'properties': {
'version': {
'description': 'Version of the recipe configuration schema',
'type': 'string',
},
'mounts': {
'description': 'Defines volumes to use for the jobs\' mounts',
'type': 'object',
'additionalProperties': {
'$ref': '#/definitions/mount'
},
},
'output_workspaces': {
'description': 'Defines workspaces to use for the jobs\' output files',
'type': 'object',
'required': ['default', 'outputs'],
'additionalProperties': False,
'properties': {
'default': {
'description': 'Defines the jobs\' default output workspace',
'type': 'string',
},
'outputs': {
'description': 'Defines a workspace for each given output name',
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
},
'priority': {
'description': 'Defines the jobs\' priority',
'type': 'integer',
'minimum': 1,
},
'settings': {
'description': 'Defines values to use for the jobs\' settings',
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
'definitions': {
'mount': {
'oneOf': [{
'type': 'object',
'description': 'A configuration for a host mount',
'required': ['type', 'host_path'],
'additionalProperties': False,
'properties': {
'type': {
'enum': ['host'],
},
'host_path': {
'type': 'string',
},
},
}, {
'type': 'object',
'description': 'A configuration for a volume mount',
'required': ['type', 'driver', 'driver_opts'],
'additionalProperties': False,
'properties': {
'type': {
'enum': ['volume'],
},
'driver': {
'type': 'string',
},
'driver_opts': {
'type': 'object',
'additionalProperties': {
'type': 'string',
},
},
},
}],
},
},
}
def convert_config_to_v6_json(config):
"""Returns the v6 recipe configuration JSON for the given configuration
:param config: The recipe configuration
:type config: :class:`recipe.configuration.configuration.RecipeConfiguration`
:returns: The v6 recipe configuration JSON
:rtype: :class:`recipe.configuration.json.recipe_config_v6.RecipeConfigurationV6`
"""
mounts_dict = {}
for mount_config in config.mounts.values():
if mount_config.mount_type == HOST_TYPE:
mounts_dict[mount_config.name] = {'type': 'host', 'host_path': mount_config.host_path}
elif mount_config.mount_type == VOLUME_TYPE:
vol_dict = {'type': 'volume', 'driver_opts': mount_config.driver_opts}
if mount_config.driver:
vol_dict['driver'] = mount_config.driver
mounts_dict[mount_config.name] = vol_dict
workspace_dict = {'outputs': config.output_workspaces}
if config.default_output_workspace:
workspace_dict['default'] = config.default_output_workspace
config_dict = {'version': SCHEMA_VERSION, 'mounts': mounts_dict, 'output_workspaces': workspace_dict,
'priority': config.priority, 'settings': config.settings}
return RecipeConfigurationV6(config=config_dict, do_validate=False)
class RecipeConfigurationV6(object):
"""Represents a v6 recipe configuration JSON"""
def __init__(self, config=None, existing=None, do_validate=False):
"""Creates a v6 job configuration JSON object from the given dictionary
:param config: The recipe configuration JSON dict
:type config: dict
:param existing: Existing RecipeConfiguration to use for default values for unspecified fields
:type existing: RecipeConfigurationV6
:param do_validate: Whether to perform validation on the JSON schema
:type do_validate: bool
:raises :class:`recipe.configuration.exceptions.InvalidRecipeConfiguration`: If the given configuration is invalid
"""
if not config:
config = {}
self._config = config
self._existing_config = None
if existing:
self._existing_config = existing._config
if 'version' not in self._config:
self._config['version'] = SCHEMA_VERSION
if self._config['version'] not in SCHEMA_VERSIONS:
msg = '%s is an unsupported version number'
raise InvalidRecipeConfiguration('INVALID_VERSION', msg % self._config['version'])
self._populate_default_values()
try:
if do_validate:
validate(self._config, RECIPE_CONFIG_SCHEMA)
except ValidationError as ex:
raise InvalidRecipeConfiguration('INVALID_CONFIGURATION', 'Invalid configuration: %s' % unicode(ex))
def get_configuration(self):
"""Returns the recipe configuration represented by this JSON
:returns: The recipe configuration
:rtype: :class:`recipe.configuration.configuration.RecipeConfiguration`:
"""
config = RecipeConfiguration()
for name, mount_dict in self._config['mounts'].items():
if mount_dict['type'] == 'host':
config.add_mount(HostMountConfig(name, mount_dict['host_path']))
elif mount_dict['type'] == 'volume':
config.add_mount(VolumeMountConfig(name, mount_dict['driver'], mount_dict['driver_opts']))
default_workspace = self._config['output_workspaces']['default']
if default_workspace:
config.default_output_workspace = default_workspace
for output, workspace in self._config['output_workspaces']['outputs'].items():
config.add_output_workspace(output, workspace)
config.priority = self._config['priority']
for name, value in self._config['settings'].items():
config.add_setting(name, value)
return config
def get_dict(self):
"""Returns the internal dictionary
:returns: The internal dictionary
:rtype: dict
"""
return self._config
def _populate_default_values(self):
"""Populates any missing required values with defaults
"""
if 'mounts' not in self._config:
self._config['mounts'] = self._existing_config['mounts'] if self._existing_config else {}
for mount_dict in self._config['mounts'].values():
if mount_dict['type'] == 'volume':
if 'driver' not in mount_dict:
mount_dict['driver'] = ''
if 'driver_opts' not in mount_dict:
mount_dict['driver_opts'] = {}
if 'output_workspaces' not in self._config:
self._config['output_workspaces'] = self._existing_config['output_workspaces'] if self._existing_config else {}
if 'default' not in self._config['output_workspaces']:
self._config['output_workspaces']['default'] = ''
if 'outputs' not in self._config['output_workspaces']:
self._config['output_workspaces']['outputs'] = {}
if 'priority' not in self._config:
self._config['priority'] = self._existing_config['priority'] if self._existing_config else DEFAULT_PRIORITY
if 'settings' not in self._config:
self._config['settings'] = self._existing_config['settings'] if self._existing_config else {}
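# --- Hedged usage sketch (not part of the original module) ---
# A minimal configuration dict can be validated and converted like this
# (the workspace name below is a placeholder):
#
#     config_dict = {'output_workspaces': {'default': 'ws_main', 'outputs': {}}}
#     config = RecipeConfigurationV6(config=config_dict, do_validate=True).get_configuration()
#     round_trip = convert_config_to_v6_json(config).get_dict()
#
# Missing fields (mounts, priority, settings) are filled with defaults by
# _populate_default_values before validation.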
| apache-2.0 | 5,617,838,189,864,306,000 | 37.030172 | 123 | 0.557067 | false |
ekansa/open-context-py | opencontext_py/apps/ldata/arachne/api.py | 1 | 5134 | import json
import requests
from urllib.parse import urlparse, parse_qs
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.libs.generalapi import GeneralAPI
class ArachneAPI():
""" Interacts with Arachne """
ARACHNE_SEARCH = 'arachne.dainst.org/search'
DEFAULT_API_BASE_URL = 'https://arachne.dainst.org/data/search'
DEFAULT_HTML_BASE_URL = 'https://arachne.dainst.org/search'
DEFAULT_IMAGE_BASE_URL = 'https://arachne.dainst.org/data/image/height/'
DEFAULT_ENTITY_BASE_URL = 'https://arachne.dainst.org/entity/'
DEFAULT_IMAGE_HEIGHT = 120
def __init__(self):
self.arachne_json_r = False
self.arachne_json_url = False
self.arachne_html_url = False
self.filter_by_images = True
self.image_height = self.DEFAULT_IMAGE_HEIGHT
self.result_count = False
self.results = False
def get_keyword_results(self, keyword):
""" sends JSON request, makes list of oc_object entities if
search finds entities
"""
self.get_keyword_search_json(keyword)
self.get_result_metadata()
self.generate_results_list()
return self.results
def get_results_from_search_url(self, search_url):
""" parses a search URL, then makes a lost of
oc_object entities if search finds entities
"""
self.get_json_from_search_url(search_url)
self.get_result_metadata()
self.generate_results_list()
return self.results
def get_result_metadata(self):
""" gets metadata about the search result """
if self.arachne_json_r is not False:
if 'size' in self.arachne_json_r:
self.result_count = self.arachne_json_r['size']
def generate_results_list(self):
""" makes a list of results with full URLs """
if self.arachne_json_r is not False:
if 'entities' in self.arachne_json_r:
self.results = []
for entity in self.arachne_json_r['entities']:
oc_obj = LastUpdatedOrderedDict()
oc_obj['id'] = self.generate_entity_url(entity['entityId'])
oc_obj['slug'] = oc_obj['id']
if 'title' in entity:
oc_obj['label'] = entity['title']
elif 'subtitle' in entity:
oc_obj['label'] = entity['subtitle']
else:
oc_obj['label'] = '[No Arachne Label]'
oc_obj['oc-gen:thumbnail-uri'] = self.generate_thumbnail_image_src(entity['thumbnailId'])
oc_obj['type'] = 'oc-gen:image'
self.results.append(oc_obj)
def generate_entity_url(self, entity_id):
"""
makes a URL for the entity
"""
url = self.DEFAULT_ENTITY_BASE_URL + str(entity_id)
return url
def generate_thumbnail_image_src(self, thumb_id):
"""
makes a URL for the thumbnail image bitmap file
"""
url = self.DEFAULT_IMAGE_BASE_URL + str(thumb_id)
url += '?height=' + str(self.image_height)
return url
def get_json_from_search_url(self, search_url):
""" gets json data from Arachne by first parsing
a search url and then converting that into a
keyword search
"""
self.arachne_html_url = search_url
payload = parse_qs(urlparse(search_url).query)
print('payload: ' + str(payload))
json_r = self.get_arachne_json(payload)
return json_r
def get_keyword_search_json(self, keyword):
"""
gets json data from Arachne in response to a keyword search
"""
payload = {}
payload['q'] = keyword
json_r = self.get_arachne_json(payload)
return json_r
def get_arachne_json(self, payload):
"""
executes a search for json data from arachne
"""
if isinstance(payload, dict):
if self.filter_by_images:
payload['fq'] = 'facet_image:"ja"'
url = self.DEFAULT_API_BASE_URL
try:
gapi = GeneralAPI()
r = requests.get(url,
params=payload,
timeout=240,
headers=gapi.client_headers)
print('r url: ' + r.url)
self.set_arachne_search_urls(r.url)
r.raise_for_status()
json_r = r.json()
            except Exception:
json_r = False
else:
json_r = False
self.arachne_json_r = json_r
return json_r
def set_arachne_search_urls(self, arachne_json_url):
""" Sets URLs for Arachne searches, JSON + HTML """
self.arachne_json_url = arachne_json_url
if not isinstance(self.arachne_html_url, str):
self.arachne_html_url = arachne_json_url.replace(self.DEFAULT_API_BASE_URL,
self.DEFAULT_HTML_BASE_URL)
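# --- Hedged usage sketch (not part of the original module) ---
# Typical calls (they hit the live Arachne API, so results depend on the service):
#
#     api = ArachneAPI()
#     objects = api.get_keyword_results('kouros')
#     same = api.get_results_from_search_url('https://arachne.dainst.org/search?q=kouros')
#
# Both return a list of dicts with 'id', 'label' and 'oc-gen:thumbnail-uri'
# keys, or False when the request fails or returns no entities.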
| gpl-3.0 | 7,220,749,312,685,747,000 | 37.313433 | 109 | 0.553954 | false |
tensorflow/graphics | tensorflow_graphics/image/transformer.py | 1 | 7541 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This module implements image transformation functionalities."""
import enum
from typing import Optional
from six.moves import range
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from tensorflow_graphics.geometry.representation import grid
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
class ResamplingType(enum.Enum):
NEAREST = 0
BILINEAR = 1
class BorderType(enum.Enum):
ZERO = 0
DUPLICATE = 1
class PixelType(enum.Enum):
INTEGER = 0
HALF_INTEGER = 1
def sample(image: type_alias.TensorLike,
warp: type_alias.TensorLike,
resampling_type: ResamplingType = ResamplingType.BILINEAR,
border_type: BorderType = BorderType.ZERO,
pixel_type: PixelType = PixelType.HALF_INTEGER,
name: Optional[str] = "sample") -> tf.Tensor:
"""Samples an image at user defined coordinates.
Note:
The warp maps target to source. In the following, A1 to An are optional
batch dimensions.
Args:
image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
`H_i` the height of the image, `W_i` the width of the image, and `C` the
number of channels of the image.
warp: A tensor of shape `[B, A_1, ..., A_n, 2]` containing the x and y
coordinates at which sampling will be performed. The last dimension must
be 2, representing the (x, y) coordinate where x is the index for width
and y is the index for height.
resampling_type: Resampling mode. Supported values are
`ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
border_type: Border mode. Supported values are `BorderType.ZERO` and
`BorderType.DUPLICATE`.
pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
`PixelType.HALF_INTEGER`.
name: A name for this op. Defaults to "sample".
Returns:
Tensor of sampled values from `image`. The output tensor shape
is `[B, A_1, ..., A_n, C]`.
Raises:
ValueError: If `image` has rank != 4. If `warp` has rank < 2 or its last
dimension is not 2. If `image` and `warp` batch dimension does not match.
"""
with tf.name_scope(name):
image = tf.convert_to_tensor(value=image, name="image")
warp = tf.convert_to_tensor(value=warp, name="warp")
shape.check_static(image, tensor_name="image", has_rank=4)
shape.check_static(
warp,
tensor_name="warp",
has_rank_greater_than=1,
has_dim_equals=(-1, 2))
shape.compare_batch_dimensions(
tensors=(image, warp), last_axes=0, broadcast_compatible=False)
if pixel_type == PixelType.HALF_INTEGER:
warp -= 0.5
if resampling_type == ResamplingType.NEAREST:
warp = tf.math.round(warp)
if border_type == BorderType.DUPLICATE:
image_size = tf.cast(tf.shape(input=image)[1:3], dtype=warp.dtype)
height, width = tf.unstack(image_size, axis=-1)
warp_x, warp_y = tf.unstack(warp, axis=-1)
warp_x = tf.clip_by_value(warp_x, 0.0, width - 1.0)
warp_y = tf.clip_by_value(warp_y, 0.0, height - 1.0)
warp = tf.stack((warp_x, warp_y), axis=-1)
return tfa_image.resampler(image, warp)
def perspective_transform(
image: type_alias.TensorLike,
transform_matrix: type_alias.TensorLike,
output_shape: Optional[type_alias.TensorLike] = None,
resampling_type: ResamplingType = ResamplingType.BILINEAR,
border_type: BorderType = BorderType.ZERO,
pixel_type: PixelType = PixelType.HALF_INTEGER,
name: Optional[str] = "perspective_transform",
) -> tf.Tensor:
"""Applies a projective transformation to an image.
The projective transformation is represented by a 3 x 3 matrix
[[a0, a1, a2], [b0, b1, b2], [c0, c1, c2]], mapping a point `[x, y]` to a
transformed point
`[x', y'] = [(a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k]`, where
`k = c0 x + c1 y + c2`.
Note:
The transformation matrix maps target to source by transforming output
points to input points.
Args:
image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
`H_i` the height of the image, `W_i` the width of the image, and `C` the
number of channels of the image.
transform_matrix: A tensor of shape `[B, 3, 3]` containing projective
transform matrices. The transformation maps target to source by
transforming output points to input points.
    output_shape: The height `H_o` and width `W_o` output dimensions after the
transform. If None, output is the same size as input image.
resampling_type: Resampling mode. Supported values are
`ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
border_type: Border mode. Supported values are `BorderType.ZERO` and
`BorderType.DUPLICATE`.
pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
`PixelType.HALF_INTEGER`.
name: A name for this op. Defaults to "perspective_transform".
Returns:
A tensor of shape `[B, H_o, W_o, C]` containing transformed images.
Raises:
ValueError: If `image` has rank != 4. If `transform_matrix` has rank < 3 or
its last two dimensions are not 3. If `image` and `transform_matrix` batch
dimension does not match.
"""
with tf.name_scope(name):
image = tf.convert_to_tensor(value=image, name="image")
transform_matrix = tf.convert_to_tensor(
value=transform_matrix, name="transform_matrix")
output_shape = tf.shape(
input=image)[-3:-1] if output_shape is None else tf.convert_to_tensor(
value=output_shape, name="output_shape")
shape.check_static(image, tensor_name="image", has_rank=4)
shape.check_static(
transform_matrix,
tensor_name="transform_matrix",
has_rank=3,
has_dim_equals=((-1, 3), (-2, 3)))
shape.compare_batch_dimensions(
tensors=(image, transform_matrix),
last_axes=0,
broadcast_compatible=False)
dtype = image.dtype
zero = tf.cast(0.0, dtype)
height, width = tf.unstack(output_shape, axis=-1)
warp = grid.generate(
starts=(zero, zero),
stops=(tf.cast(width, dtype) - 1.0, tf.cast(height, dtype) - 1.0),
nums=(width, height))
warp = tf.transpose(a=warp, perm=[1, 0, 2])
if pixel_type == PixelType.HALF_INTEGER:
warp += 0.5
padding = [[0, 0] for _ in range(warp.shape.ndims)]
padding[-1][-1] = 1
warp = tf.pad(
tensor=warp, paddings=padding, mode="CONSTANT", constant_values=1.0)
warp = warp[..., tf.newaxis]
transform_matrix = transform_matrix[:, tf.newaxis, tf.newaxis, ...]
warp = tf.linalg.matmul(transform_matrix, warp)
warp = warp[..., 0:2, 0] / warp[..., 2, :]
return sample(image, warp, resampling_type, border_type, pixel_type)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
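# --- Hedged usage sketch (not part of the original module) ---
#
#     image = tf.zeros((1, 8, 8, 3))
#     identity = tf.eye(3, batch_shape=[1])
#     warped = perspective_transform(image, identity)   # same shape as `image`
#
# A non-identity matrix warps the output grid back into the source image, and
# `sample` can be called directly with a custom warp tensor of shape [1, H, W, 2].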
| apache-2.0 | -7,227,354,629,905,629,000 | 36.331683 | 79 | 0.670335 | false |
buscarini/meta | templates/model/platforms/sql/Platform.py | 1 | 1689 | import sys
import os
import json
from meta.MetaProcessor import MetaProcessor
class Platform(MetaProcessor):
"""docstring for Platform"""
def preprocess_property(self,property,hash,hashes):
"""docstring for preprocess_property"""
property['_camelcase_'] = self.stringUtils.camelcase(str(property['name']))
property['_capitalized_'] = self.stringUtils.capitalize(str(property['name']))
if 'default' in property:
property['default'] = self.globalPlatform.platformValueForValue(property['default'])
type = property['type']
property['type_' + type] = True
platformType = self.globalPlatform.platformTypeForType(type)
if platformType!=None:
property['type'] = platformType
else:
print("Error: unknown property type: " + type)
sys.exit()
def preprocess(self,hash,hashes):
if hash!=None and 'properties' in hash:
i=0
properties = hash['properties']
for property in properties:
self.preprocess_property(property,hash,hashes)
i=i+1
self.preprocessList(properties)
if hash!=None and 'primaryKeys' in hash:
self.preprocessList(hash['primaryKeys'])
def finalFileName(self,fileName,hash):
"""docstring for finalFileName"""
entityName = None
if hash!=None and 'entityName' in hash:
entityName = hash['entityName']
if (entityName):
fileName = fileName.replace("entity",entityName)
return fileName | mit | -3,116,892,086,885,498,400 | 32.8 | 96 | 0.58733 | false |
chrsbats/kvstore | kvstore/fs.py | 1 | 2352 | from __future__ import absolute_import
import traceback
import os, errno
import shutil
from .signal import interrupt_protect
class FileSystemAdapter(object):
def __init__(self, path, **kwargs):
# expand ~ or we'll end up creating a /~ directory
# abspath doesn't do this for us
self.path = os.path.abspath(os.path.expanduser(path))
self.make_sure_path_exists(self.path)
def make_sure_path_exists(self, key):
try:
os.makedirs(key)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def key_path(self, key):
        if key.startswith('/'):
key = key[1:]
return os.path.join(self.path, key)
def get(self, key):
full_path = self.key_path(key)
try:
with open(full_path,'r') as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise KeyError('{}: {}'.format(key,str(e)))
raise
@interrupt_protect
def put(self, key, data, **kwargs):
full_path = self.key_path(key)
directory = os.path.dirname(full_path)
self.make_sure_path_exists(directory)
with open(full_path,'w') as f:
f.write(data)
def delete(self, key):
full_path = self.key_path(key)
try:
os.remove(full_path)
except OSError:
# doesn't exist
pass
def exists(self, key):
full_path = self.key_path(key)
if os.path.isfile(full_path):
try:
with open(full_path,'r') as f:
return True
except IOError:
return False
else:
return False
def list(self, key='/'):
full_path = self.key_path(key)
for directory, subdirs, files in os.walk(full_path):
for file in files:
if file[0] == '.':
continue
path = os.path.join(directory, file)
# remove our directory
path = path.split(self.path)[1]
yield path
def drop_all(self):
# delete the directory and then recreate it
shutil.rmtree(self.path, ignore_errors=True)
self.make_sure_path_exists(self.path)
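# --- Hedged usage sketch (not part of the original module) ---
#
#     store = FileSystemAdapter('~/kvstore-data')    # path is a placeholder
#     store.put('/users/alice', '{"name": "alice"}')
#     print(store.get('/users/alice'))
#     print(list(store.list()))
#     store.delete('/users/alice')
#
# Keys are mapped to files under the adapter's root directory; list() yields
# key paths relative to that root.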
| mit | 6,117,030,195,128,649,000 | 28.772152 | 61 | 0.528912 | false |
UncleBarney/ochothon | images/portal/resources/toolset/toolset/commands/grep.py | 1 | 2581 | #
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
from toolset.io import fire, run
from toolset.tool import Template
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
def go():
class _Tool(Template):
help = \
'''
Displays high-level information for the specified cluster(s).
'''
tag = 'grep'
def customize(self, parser):
parser.add_argument('clusters', type=str, nargs='+', help='1+ clusters (can be a glob pattern, e.g foo*)')
parser.add_argument('-j', '--json', action='store_true', help='switch for json output')
def body(self, args, proxy):
outs = {}
for token in args.clusters:
def _query(zk):
replies = fire(zk, token, 'info')
return len(replies), [[key, '|', hints['ip'], '|', hints['node'], '|', hints['process'], '|', hints['state']]
for key, (_, hints, code) in sorted(replies.items()) if code == 200]
total, js = run(proxy, _query)
outs.update({item[0]: {'ip': item[2], 'node': item[4], 'process': item[6], 'state': item[8]} for item in js})
if js and not args.json:
#
# - justify & format the whole thing in a nice set of columns
#
pct = (len(js) * 100) / total
logger.info('<%s> -> %d%% replies (%d pods total) ->\n' % (token, pct, len(js)))
rows = [['pod', '|', 'pod IP', '|', 'node', '|', 'process', '|', 'state'], ['', '|', '', '|', '', '|', '', '|', '']] + js
widths = [max(map(len, col)) for col in zip(*rows)]
for row in rows:
logger.info(' '.join((val.ljust(width) for val, width in zip(row, widths))))
if args.json:
logger.info(json.dumps(outs))
return _Tool()
| apache-2.0 | 3,284,121,562,353,902,000 | 34.356164 | 141 | 0.532352 | false |
windworship/kmeansds | clustered_ds.py | 1 | 17562 | # -*- coding: utf-8 -*-
#
# Author: huang
#
'''
The implementation of the framework of combining kmeans with distant supervision
'''
import argparse
import logging
import time
import random
import collections
from sklearn.cluster import MiniBatchKMeans, Birch
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics.pairwise import euclidean_distances
NEG_RATIO = 0.05 # the ratio of subsample negatives
SUBSAMPLE = True # Subsample the cluster or not
class MentionDatum(object):
'''
The Class of Mention in the Datum
'''
ENTITY = {} # First entity of the entity pair
TYPE = {} # Type of first entity
NE = {} # Type of second entity
SLOT = {} # Second entity of the entity pair
RELATION = {} # Belonged Relation in DS
FEATURE = {}
FEATURE_APPEARENCE = []
# Class variable of the counts of values
entity_number = 0
type_number = 0
ne_number = 0
slot_number = 0
relation_number = 0
feature_number = 0
# Initialization for @property
_entity_id = None
_entity_type = None
_ne_type = None
_slot_value = None
_relation = []
_features = []
def __init__(self, args):
self.entity_id = args[0]
self.entity_type = args[1]
self.ne_type = args[2]
self.slot_value = args[3]
self.relation = args[4]
self.features = args[5:]
self.relabel_relation = []
@property
def entity_id(self):
return self._entity_id
@property
def entity_type(self):
return self._entity_type
@property
def ne_type(self):
return self._ne_type
@property
def slot_value(self):
return self._slot_value
@property
def relation(self):
return self._relation
@property
def features(self):
return self._features
@entity_id.setter
def entity_id(self, value):
if value not in MentionDatum.ENTITY:
MentionDatum.ENTITY[value] = self.entity_number
MentionDatum.entity_number += 1
self._entity_id = MentionDatum.ENTITY.get(value)
@entity_type.setter
def entity_type(self, value):
if value not in MentionDatum.TYPE:
MentionDatum.TYPE[value] = self.type_number
MentionDatum.type_number += 1
self._entity_type = MentionDatum.TYPE.get(value)
@ne_type.setter
def ne_type(self, value):
if value not in MentionDatum.NE:
MentionDatum.NE[value] = self.ne_number
MentionDatum.ne_number += 1
self._ne_type = MentionDatum.NE.get(value)
@slot_value.setter
def slot_value(self, value):
if value not in MentionDatum.SLOT:
MentionDatum.SLOT[value] = self.slot_number
MentionDatum.slot_number += 1
self._slot_value = MentionDatum.SLOT.get(value)
@relation.setter
def relation(self, value):
value = value.split('|')
reform_relation = []
for rel in value:
if rel not in MentionDatum.RELATION:
MentionDatum.RELATION[rel] = self.relation_number
MentionDatum.relation_number += 1
reform_relation.append(MentionDatum.RELATION.get(rel))
self._relation = reform_relation
@features.setter
def features(self, value):
reform_feature = []
for feature in value:
if feature not in MentionDatum.FEATURE:
MentionDatum.FEATURE[feature] = self.feature_number
MentionDatum.feature_number += 1
MentionDatum.FEATURE_APPEARENCE.append(0)
feature_index = MentionDatum.FEATURE.get(feature)
MentionDatum.FEATURE_APPEARENCE[feature_index] += 1
reform_feature.append(feature_index)
self._features = reform_feature
def __str__(self):
relation = self.relation if not self.relabel_relation else self.relabel_relation
mention_str =\
(
'{0} {1} {2} {3} {4} {5}'
).format(
MentionDatum.ENTITY.get(self.entity_id),
MentionDatum.TYPE.get(self.entity_type),
MentionDatum.NE.get(self.ne_type),
MentionDatum.SLOT.get(self.slot_value),
'|'.join([MentionDatum.RELATION.get(rel) for rel in relation]),
' '.join([MentionDatum.FEATURE.get(fea) for fea in self.features]),
)
return mention_str
@classmethod
def shrink_features(cls, threshold=5):
'''
        Shrink the features whose appearance count is less than the given threshold.
'''
shrinked_index = 0
shrinked_feature = {}
cls.FEATURE_INDEX = {} # Regenerate index for shrinked feature space
for fea, index in cls.FEATURE.iteritems():
if cls.FEATURE_APPEARENCE[index] >= threshold:
shrinked_feature[fea] = index
cls.FEATURE_INDEX[index] = shrinked_index
shrinked_index += 1
shrinked_feature_number = cls.feature_number - shrinked_index
cls.feature_number = shrinked_index
cls.FEATURE_APPEARENCE = None
logging.info('[OK]...Feature Shrinking')
logging.info('---# of shrinked Features: {0}'.format(shrinked_feature_number))
def _feature_vector_generation(self):
'''
Generate the feature vector in the shrinked feature space.
'''
return dict(
[
(str(MentionDatum.FEATURE_INDEX[index]), 1)
for index in self.features
if index in MentionDatum.FEATURE_INDEX
]
)
@classmethod
def regenerate_feature(cls, mentions):
'''
Generate feature vectors for all relation mentions
'''
return [mention._feature_vector_generation() for mention in mentions]
@classmethod
def transpose_values(cls):
'''
Transpose all value dicts for the generation of datum files.
'''
cls.ENTITY = dict(
zip(cls.ENTITY.values(), cls.ENTITY.keys())
)
cls.TYPE = dict(zip(cls.TYPE.values(), cls.TYPE.keys()))
cls.NE = dict(zip(cls.NE.values(), cls.NE.keys()))
cls.SLOT = dict(zip(cls.SLOT.values(), cls.SLOT.keys()))
cls.RELATION = dict(
zip(cls.RELATION.values(), cls.RELATION.keys())
)
cls.FEATURE = dict(
zip(cls.FEATURE.values(), cls.FEATURE.keys())
)
def _subsample_negatives(mention):
'''
    Decide whether to keep a mention: purely negative ('_NR') mentions are kept
    with probability NEG_RATIO, everything else is always kept.
:type mention: MentionDatum
:rtype boolean
'''
nr = MentionDatum.RELATION.get('_NR', None)
if nr is not None\
and [nr] == mention.relation\
and random.uniform(0, 1) > NEG_RATIO:
return False
return True
def _read_datum_file(file_path):
'''
Load the datum from the datum file
:type file_path: basestring
:type neg_ratio: double in [0,1]
:rtype List[MentionDatum]
'''
mentions = []
with open(file_path) as f:
for line in f:
mention = MentionDatum(line.split())
if not _subsample_negatives(mention):
continue
mentions.append(mention)
logging.debug(
'---[OK]...Datum File {0} Loaded | {1} Mentions Loaded'.format(
file_path,
len(mentions),
)
)
return mentions
def datums_read(directory, number=88):
'''
Load datums from NUMBER of datum files in the DIRECTORY
:type directory: basestring
:type number: int in [0, # of datum file in the DIRECTORY]
:rtype List[MentionDatum]
'''
def _generate_file_path(index, generate_mode='{0}/kb_part-00{1:0>2d}.datums'):
'''
Generate the file path in the directory
'''
return generate_mode.format(directory, index)
start = time.clock()
loaded_mentions = []
for datum_number in xrange(number):
loaded_mentions += _read_datum_file(_generate_file_path(datum_number+1))
time_cost = time.clock() - start
logging.info(
(
'[OK]...All Datums Loaded\n'
'---Cost Time: {0} | Average Per File: {1}\n'
'---# of Loaded Mentions: {2}\n'
'---# of Loaded Entities: {3}\n'
'---# of Loaded Entity Types: {4}\n'
'---# of Loaded NE Types: {5}\n'
'---# of Loaded Slots: {6}\n'
'---# of Loaded Relations: {7}\n'
'---# of Loaded Features: {8}\n'
).format(
time_cost,
time_cost/number,
len(loaded_mentions),
MentionDatum.entity_number,
MentionDatum.type_number,
MentionDatum.ne_number,
MentionDatum.slot_number,
MentionDatum.relation_number,
MentionDatum.feature_number,
)
)
return loaded_mentions
def _generate_feature_space(mentions):
'''
    Generate the feature space.
---------------------------------
:type mentions: List[MentionDatum]
:rtype: numpy.ndarray
'''
start = time.clock()
# Shrink the features
MentionDatum.shrink_features(threshold=5)
# Regenerate feature vectors
feature_space = MentionDatum.regenerate_feature(mentions)
# Generate feature space
feature_space =\
FeatureHasher(
n_features=MentionDatum.feature_number
).transform(feature_space)
time_cost = time.clock() - start
logging.info('[OK]...Generate Feature Space in {0}s'.format(time_cost))
return feature_space
def _minibatchkmeans(feature_space, cluster_number):
'''
Use MinibatchKkmeans to divide the feature_space into cluster_number bags.
-------------------------------------------------------------------------
:type feature_space: numpy.ndarray
:type cluster_number: int
:rtype: numpy.ndarray[n_mentions,] labels of the mentions
'''
start = time.clock()
model =\
MiniBatchKMeans(
n_clusters=cluster_number,
n_init=22,
batch_size=5700
)
predicts = model.fit_predict(feature_space)
logging.info('[OK]...Kmeans Clustering | Cost {0}s'.format(time.clock()-start))
return predicts
def _predict_to_cluster(predicts, mentions):
'''
Transform predicts to clusters.
-------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[[int,]]
'''
    # use max label + 1 so that sparse labels (empty clusters) cannot cause an IndexError
    cluster_number = max(predicts) + 1
    clusters = [[] for size in xrange(cluster_number)]
for index, predict in enumerate(predicts):
clusters[predict]+=mentions[index].relation
logging.info('------[OK]...Labels Transform To Clusters')
return clusters
def _assign_cluster_relation(predicts, mentions):
'''
Assign each cluster the most similar relation according to the assumption.
--------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type mentions: List[MentionDatum]
:rtype: List[(int, double)]
'''
start = time.clock()
relation_for_clusters = []
# Predicts -> clusters
clusters = _predict_to_cluster(predicts, mentions)
for cluster in clusters:
relation_counter = collections.Counter(cluster)
logging.info('---Cluster assign: {0}'.format(relation_counter))
assign_relation = relation_counter.most_common(1)[0]
relation_for_clusters.append(
(
assign_relation[0],
(assign_relation[1]+0.0)/len(cluster),
)
)
time_cost = time.clock() - start
logging.info('---[OK]...Assign cluster relations cost of {0}'.format(time_cost))
return relation_for_clusters
def _subsample_mention(predicts, clusters, mentions):
'''
Subsample mentions in a cluster based on the probability of the relation.
-------------------------------------------------------------------------
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
start = time.clock()
subsample_number = 0
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
if not SUBSAMPLE or random.random() < probability:
mentions[index].relabel_relation.append(relation)
subsample_number += 1
time_cost = time.clock() - start
logging.info('---[OK]...Subsample mentions cost of {0}'.format(time_cost))
logging.info('------# of subsamples: {0}'.format(subsample_number))
def kmeans_predict(mentions, cluster_number=100):
'''
The framework predicts labels of mentions as following:
1. Generate the feature space
2. Kmeans divides the feature space into k clusters
3. Reassign each cluster a relation based on DS
4. Subsample mentions in the cluster to be labeled with corresponding relation
NOTE: Usually k is much higher than the # of known relations.
---------------------------------------------------
    :type mentions: List[MentionDatum]
:type cluster_number:int
:rtype None
'''
start = time.clock()
feature_space = _generate_feature_space(mentions)
predicts = _minibatchkmeans(feature_space, cluster_number)
relation_for_clusters = _assign_cluster_relation(predicts, mentions)
_generate_cluster(predicts, relation_for_clusters, mentions)
_subsample_mention(predicts, relation_for_clusters, mentions)
logging.info('[OK]...Framework | Cost {0}s'.format(time.clock()-start))
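# --- Hedged usage sketch (mirrors the __main__ block below, with placeholder paths) ---
#
#     mentions = datums_read('data/datums', number=10)
#     kmeans_predict(mentions, cluster_number=100)
#     regenerate_datums(mentions, 'output/datums')
#
# datums_read expects files named kb_part-00NN.datums (NN = 01..number) inside
# the given directory.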
def regenerate_datums(mentions, filepath):
'''
Regenerate datums with the new relation
-------------------------------------------------
:type mentions: List[MentionDatum]
:type filepath: basestring
:rtype: None
'''
start = time.clock()
file_number = len(mentions) / 90000 + 1
negative_number = 0
nr = MentionDatum.RELATION.get('_NR')
#transpose values
MentionDatum.transpose_values()
for index in xrange(file_number):
with open(filepath + '/{0:0>2d}.datums'.format(index), 'w') as f:
for mention in mentions[index*90000:(index+1)*90000]:
if nr in mention.relabel_relation:
negative_number += 1
f.write(str(mention))
f.write('\n')
logging.debug('---[OK]...Generate {0:0>2d}.datums'.format(index))
spend = time.clock() - start
logging.info('[OK]...Generate {0} Datums File'.format(file_number))
logging.info('[OK]...Negative number: {0}'.format(negative_number))
logging.info('---Cost time: {0} | Average per file: {1}'.format(spend, spend/file_number))
def _generate_cluster(predicts, clusters, mentions):
'''
Generate clusters from predicts.
=======================================
:type predicts: numpy.ndarray[n_samples,]
:type clusters: List[(int, double)]
:type mentions: List[MentionDatum]
:rtype: None
'''
entity_index = dict(
zip(MentionDatum.ENTITY.values(), MentionDatum.ENTITY.keys())
)
slot_index = dict(
zip(MentionDatum.SLOT.values(), MentionDatum.SLOT.keys())
)
relation_index = dict(
zip(MentionDatum.RELATION.values(), MentionDatum.RELATION.keys())
)
    cluster_results = [[] for index in xrange(len(clusters))]
for index, predict in enumerate(predicts):
relation, probability = clusters[predict]
cluster_results[predict].append(
(
entity_index[mentions[index].entity_id],
slot_index[mentions[index].slot_value],
relation_index[mentions[index].relation[0]],
relation_index[relation],
)
)
for index, cluster_result in enumerate(cluster_results):
with open('result/'+str(index), 'w') as f:
f.write('\n'.join([str(result) for result in cluster_result]))
with open('result/index', 'w') as f:
f.write('\n'.join([str(index) for index in sorted(enumerate(clusters), key=lambda x:x[1][1])]))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.info('METHOD START')
parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int)
parser.add_argument('-r', type=float)
parser.add_argument('-o', type=str)
parser.add_argument('-d', type=str)
parser.add_argument('-s', type=bool)
parser.add_argument('-f', type=int)
args = parser.parse_args()
start = time.clock()
cluster_number = args.n
NEG_RATIO = (args.r + 0.0) / 100
SUBSAMPLE = True if args.s else False
logging.info('CLUSTER NUMBER:{0}'.format(cluster_number))
logging.info('NEG_RATIO:{0}'.format(NEG_RATIO))
logging.info('OUTPUT_DIR:{0}'.format(args.o))
logging.info('DATA_DIR:{0}'.format(args.d))
logging.info('SUBSAMPLE:{0}'.format(SUBSAMPLE))
mentions = datums_read(args.d, number=args.f)
kmeans_predict(mentions, cluster_number)
regenerate_datums(
mentions,
args.o,
)
logging.info('Method End With {0}s'.format(time.clock()-start))
| mit | -1,580,487,062,954,818,800 | 31.643123 | 103 | 0.581597 | false |
SwordYork/sequencing | sequencing_np/nn/rnn_cells/rnn.py | 1 | 2729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Sword York
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
from abc import ABCMeta, abstractmethod
from ..base import Layer
from ... import np, TIME_MAJOR
class RNN(Layer, metaclass=ABCMeta):
def __init__(self, init_state, param_keys, activation=None,
base_name=None, name=None, *args, **kwargs):
"""
numpy rnn cell.
It only used for inferring, not training, thus we don't need initialization
in this implementation.
The weights and other things are passed by params.
:param init_state: initial states of RNN, [B, H] or tuple([B, H], ...)
:param param_keys: name of params, such as kernel and bias
:param activation: activation function
:param base_name: name of parent Layer
:param name: name of this Layer
"""
super(RNN, self).__init__(param_keys, base_name, name, **kwargs)
# get state size
if type(init_state) != type(np.empty([])):
self.init_state = tuple(init_state)
self.hidden_units = tuple(init_state)[0].shape[1]
else:
self.init_state = init_state
self.hidden_units = init_state.shape[1]
self.time_major = TIME_MAJOR
self.activation = activation or np.tanh
def encode(self, inputs, sequence_length=None, reverse=False):
"""
Encode multi-step inputs.
:param inputs: if time_major [T, B, ...] else [B, T, ...]
:param sequence_length: length of the sequence [B]
:param reverse: used in bidirectional RNN
:return: lstm outputs
"""
if not self.time_major:
inputs = np.transpose(inputs, (1, 0, 2))
steps = inputs.shape[0]
outputs = np.zeros(inputs.shape[:-1] + (self.hidden_units,),
inputs.dtype)
state = self.init_state
iter_range = reversed(range(steps)) if reverse else range(steps)
for idx in iter_range:
# rnn step
curr_input = inputs[idx, :, :]
mask = idx < sequence_length if sequence_length is not None else None
outputs[idx, :, :], state = self.step(state, curr_input, mask)
if not self.time_major:
outputs = np.transpose(outputs, (1, 0, 2))
return outputs, state
@abstractmethod
def step(self, prev_states, input_, mask=None):
"""
run rnn for one step
:param prev_states: [B, ...]
:param input_: [B, ...]
:param mask: mask the terminated sequence in the batch
:return: output, state
"""
raise NotImplementedError
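# --- Hedged sketch of a concrete subclass (illustrative only) ---
#
#     class VanillaRNN(RNN):
#         def step(self, prev_states, input_, mask=None):
#             # W_xh, W_hh and b would come from the params passed to the layer
#             h = self.activation(np.dot(input_, W_xh) + np.dot(prev_states, W_hh) + b)
#             if mask is not None:
#                 h = np.where(mask[:, None], h, prev_states)
#             return h, h
#
# The concrete cells in this package follow this pattern, reading their
# weights from the `params` passed to the layer.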
| mit | -3,731,958,042,420,923,400 | 33.544304 | 83 | 0.5797 | false |
olemb/mido | tests/test_syx.py | 1 | 1299 | from pytest import raises
from mido.messages import Message
from mido.syx import read_syx_file, write_syx_file
def test_read(tmpdir):
path = tmpdir.join("test.syx").strpath
msg = Message('sysex', data=(1, 2, 3))
with open(path, 'wb') as outfile:
outfile.write(msg.bin())
assert read_syx_file(path) == [msg]
with open(path, 'wt') as outfile:
outfile.write(msg.hex())
assert read_syx_file(path) == [msg]
with open(path, 'wt') as outfile:
outfile.write('NOT HEX')
with raises(ValueError):
read_syx_file(path)
def test_handle_any_whitespace(tmpdir):
path = tmpdir.join("test.syx").strpath
with open(path, 'wt') as outfile:
outfile.write('F0 01 02 \t F7\n F0 03 04 F7\n')
assert read_syx_file(path) == [Message('sysex', data=[1, 2]),
Message('sysex', data=[3, 4])]
def test_write(tmpdir):
# p = tmpdir.mkdir("sub").join("hello.txt")
path = tmpdir.join("test.syx").strpath
msg = Message('sysex', data=(1, 2, 3))
write_syx_file(path, [msg])
with open(path, 'rb') as infile:
assert infile.read() == msg.bin()
write_syx_file(path, [msg], plaintext=True)
with open(path, 'rt') as infile:
assert infile.read().strip() == msg.hex()
| mit | 8,884,796,596,589,155,000 | 27.23913 | 65 | 0.595843 | false |
leanix/leanix-sdk-python | src/leanix/models/ProjectHasResource.py | 1 | 1781 | #!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 LeanIX GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
class ProjectHasResource:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
self.swaggerTypes = {
'ID': 'str',
'projectID': 'str',
'resourceID': 'str',
'comment': 'str',
'projectImpactID': 'str'
}
self.ID = None # str
self.projectID = None # str
self.resourceID = None # str
self.comment = None # str
self.projectImpactID = None # str
| mit | 5,855,163,189,732,606,000 | 37.717391 | 105 | 0.705783 | false |
stormi/tsunami | src/primaires/vehicule/vecteur.py | 1 | 8948 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 DAVY Guillaume
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Vecteur, détaillée plus bas."""
from math import sqrt, cos, sin, radians, degrees, atan, pi
from abstraits.obase import *
from primaires.salle.coordonnees import Coordonnees
# Constants
NPRECISION = 5
class Vecteur(BaseObj):
"""Classe représentant un vecteur en trois dimensions.
Elle gère les opérations usuelles dessus, ainsi que leur rotation
autour d'un axe du repère.
"""
def __init__(self, x=0, y=0, z=0, parent=None):
"""Constructeur du vecteur"""
BaseObj.__init__(self)
self.parent = parent
self._x = x
self._y = y
self._z = z
self._construire()
def __getnewargs__(self):
return ()
def __str__(self):
"""Affiche le vecteur plus proprement"""
return "({}, {}, {})".format(self.x, self.y, self.z)
def __repr__(self):
"""Affichage des coordonnées dans un cas de debug"""
return "Vecteur(x={}, y={}, z={})".format(self.x, self.y, self.z)
@property
def coordonnees(self):
return Coordonnees(self.x, self.y, self.z)
@property
def tuple(self):
"""Retourne le tuple (x, y, z)"""
return (self.x, self.y, self.z)
@property
def norme(self):
return sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
@property
def direction(self):
"""Retourne un angle en degré représentant la direction.
0 => est
45 => sud-est
90 => sud
135 => sud-ouest
180 => ouest
225 => nord-ouest
270 => nord
315 => nord-est
"""
return -self.argument() % 360
@property
def inclinaison(self):
"""Retourne l'angle d'inclinaison en degré."""
x, y, z = self.x, self.y, self.z
n = sqrt(x ** 2 + y ** 2)
if n == 0:
if z == 0:
return 0
else:
return 90
return degrees(atan(z/n))
@property
def nom_direction(self):
"""Retourne le nom de la direction.
0 => "est"
45 => "sud-est"
...
"""
direction = self.direction
if direction < 22.5:
return "est"
elif direction < 67.5:
return "sud-est"
elif direction < 112.5:
return "sud"
elif direction < 157.5:
return "sud-ouest"
elif direction < 202.5:
return "ouest"
elif direction < 247.5:
return "nord-ouest"
elif direction < 292.5:
return "nord"
elif direction < 337.5:
return "nord-est"
else:
return "est"
def _get_x(self):
return self._x
def _set_x(self, x):
self._x = round(x, NPRECISION)
x = property(_get_x, _set_x)
def _get_y(self):
return self._y
def _set_y(self, y):
self._y = round(y, NPRECISION)
y = property(_get_y, _set_y)
def _get_z(self):
return self._z
def _set_z(self, z):
self._z = round(z, NPRECISION)
z = property(_get_z, _set_z)
def copier(self):
"""Retourne une copie de self"""
return Vecteur(self.x, self.y, self.z, self.parent)
def est_nul(self, arrondi=3):
"""Retourne True si le vcteur est considéré comme nul."""
x = round(self._x, arrondi)
y = round(self._y, arrondi)
z = round(self._z, arrondi)
return x == 0 and y == 0 and z == 0
def tourner_autour_x(self, angle):
"""Tourne autour de l'âxe X.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * 1 + y * 0 + z * 0
self.y = x * 0 + y * cos(r) - z * sin(r)
self.z = x * 0 + y * sin(r) + z * cos(r)
return self
def tourner_autour_y(self, angle):
"""Tourne autour de l'âxe Y.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * cos(r) - y * 0 + z * sin(r)
self.y = x * 0 + y * 1 + z * 0
self.z = x * sin(r) + y * 0 + z * cos(r)
return self
def tourner_autour_z(self, angle):
"""Tourne autour de l'âxe Z.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
self.x = x * cos(r) - -1 * y * sin(r) + z * 0
self.y = -1 * x * sin(r) + y * cos(r) + z * 0
self.z = x * 0 + y * 0 + z * 1
return self
def incliner(self, angle):
"""Incline le véhicule.
L'angle doit être en degré.
"""
r = radians(angle)
x, y, z = self.x, self.y, self.z
n = sqrt(x * x + y * y)
if n == 0:
if z == 0 or sin(r) == 0 or (x == 0 and y == 0):
self.x = 0
self.y = 0
self.z = z * cos(r)
else:
raise ValueError("impossible d'incliner un vecteur vertical")
else:
self.x = x * cos(r) - z * x * sin(r) / n
self.y = y * cos(r) - z * y * sin(r) / n
self.z = z * cos(r) + sin(r) * n
return self
def argument(self):
x, y = self.x, self.y
if x > 0:
return degrees(atan(y / x)) % 360
elif x < 0:
return (180 + degrees(atan(y / x))) % 360
elif y > 0:
return 90
elif y < 0:
return -90
else:
return 0
def normalise(self):
norme = self.norme
if norme == 0:
raise ValueError("impossible de normaliser nul")
return Vecteur(self.x / norme, self.y / norme, self.z / norme)
def orienter(self, angle):
"""Oriente le vecteur horizontalement.
L'angle doit être indiqué en degré.
A la différence de tourner_autour_z, l'angle précisé est absolu.
Après l'appelle à la méthode vecteur.orienter(180) par exemple,
vecteur.direction doit être 180.
"""
direction = self.direction
angle -= direction
self.tourner_autour_z(angle)
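    # Added sketch (not part of the original module) of the absolute angle:
    #   v = Vecteur(1, 0, 0)   # v.direction == 0
    #   v.orienter(90)         # v becomes roughly (0, -1, 0)
    #   v.direction            # now 90, whatever the previous heading was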
    # Mathematical special methods
def __neg__(self):
"""Retourne le vecteur négatif."""
return Vecteur(-self.x, -self.y, -self.z)
def __add__(self, autre):
"""Additionne deux vecteurs."""
return Vecteur(self.x + autre.x, self.y + autre.y, self.z + autre.z)
def __sub__(self, autre):
"""Soustrait deux vecteurs."""
return Vecteur(self.x - autre.x, self.y - autre.y, self.z - autre.z)
def __mul__(self, valeur):
"""Multiplie le vecteur par un nombre."""
return Vecteur(self.x * valeur, self.y * valeur, self.z * valeur)
def __rmul__(self, valeur):
"""Multiplie le vecteur par un nombre."""
return Vecteur(self.x * valeur, self.y * valeur, self.z * valeur)
def __eq__(self, autre):
return self.x == autre.x and self.y == autre.y and self.z == autre.z
def __hash__(self):
return hash(self.tuple)
# Module-level functions (to be used for optimization)
def get_direction(vecteur):
"""Retourne la direction en degrés du vecteur."""
argument = (-vecteur.argument) % (pi * 2)
return degrees(argument) % 360
| bsd-3-clause | 3,360,314,228,164,489,000 | 28.892617 | 79 | 0.554558 | false |
musically-ut/python-glob2 | setup.py | 2 | 1227 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Figure out the version
import re
here = os.path.dirname(os.path.abspath(__file__))
version_re = re.compile(
r'__version__ = (\(.*?\))')
fp = open(os.path.join(here, 'glob2', '__init__.py'))
version = None
for line in fp:
match = version_re.search(line)
if match:
version = eval(match.group(1))
break
else:
raise Exception("Cannot find version in __init__.py")
fp.close()
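# Illustrative note (assumed file contents, not taken from this repository):
# a line in glob2/__init__.py such as
#     __version__ = (0, 4, 1)
# matches version_re above, so ``version`` becomes the tuple (0, 4, 1) and the
# setup() call below registers the distribution version as "0.4.1".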
setup(
name = 'glob2',
version = ".".join(map(str, version)),
description = 'Version of the glob module that can capture patterns '+
'and supports recursive wildcards',
author = 'Michael Elsdoerfer',
author_email = '[email protected]',
license='BSD',
url = 'http://github.com/miracle2k/python-glob2/',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
],
packages = find_packages()
)
| bsd-2-clause | -5,408,287,253,185,717,000 | 28.214286 | 74 | 0.611247 | false |
Xevib/osmbot | bot/typeemoji.py | 1 | 7711 | """
Dict with the emojis for OSM types (tag key:value pairs)
"""
typeemoji = {
'aerialway:cable_car': '\xF0\x9F\x9A\xA1',
'aerialway:station': '\xF0\x9F\x9A\xA1',
'aeroway:aerodrome': '\xE2\x9C\x88',
'aeroway:terminal': '\xE2\x9C\x88',
'amenity:ambulance_station': '\xF0\x9F\x9A\x91',
'amenity:atm': '\xF0\x9F\x92\xB3',
'amenity:bank': '\xF0\x9F\x92\xB0',
'amenity:bar': '\xF0\x9F\x8D\xB8',
'amenity:biergarten': '\xF0\x9F\x8D\xBA',
'amenity:brothel': '\xF0\x9F\x91\xAF',
'amenity:cafe': '\xE2\x98\x95',
'amenity:casino': '\xE2\x99\xA0',
'amenity:cinema': '\xF0\x9F\x8E\xAC',
'amenity:college': '\xF0\x9F\x8E\x93',
'amenity:crematorium': '\xE2\x9A\xB1',
'amenity:drinking_water': '\xF0\x9F\x9A\xB0',
'amenity:fast_food': '\xF0\x9F\x8D\x94',
'amenity:fire_station': '\xF0\x9F\x9A\x92',
'amenity:fountain': '\xE2\x9B\xB2',
'amenity:fuel': '\xE2\x9B\xBD',
'amenity:hospital': '\xF0\x9F\x8F\xA5',
'amenity:hotel': '\xF0\x9F\x8F\xA8',
'amenity:ice_cream': '\xF0\x9F\x8D\xA6',
'amenity:kindergarten': '\xF0\x9F\x91\xB6',
'amenity:karaoke_box': '\xF0\x9F\x8E\xA4',
'amenity:library': '\xF0\x9F\x93\x96',
'amenity:love_hotel': '\xF0\x9F\x8F\xA9',
'amenity:place_of_worship': '\xF0\x9F\x9B\x90',
'amenity:pharmacy': '\xF0\x9F\x92\x8A',
'amenity:police': '\xF0\x9F\x9A\x93',
'amenity:pub': '\xF0\x9F\x8D\xBA',
'amenity:recycling': '\xE2\x99\xBB',
'amenity:restaurant': '\xF0\x9F\x8D\xB4',
'amenity:sauna': '\xE2\x99\xA8',
'amenity:school': '\xF0\x9F\x8E\x92',
'amenity:stripclub': '\xF0\x9F\x91\xAF',
'amenity:studio': '\xF0\x9F\x8E\x99',
'amenity:swimming_pool': '\xF0\x9F\x8F\x8A',
'amenity:taxi': '\xF0\x9F\x9A\x95',
'amenity:telephone': '\xF0\x9F\x93\x9E',
'amenity:theatre': '\xF0\x9F\x8E\xAD',
'amenity:toilets': '\xF0\x9F\x9A\xBB',
'amenity:university': '\xF0\x9F\x8E\x93',
'building:church': '\xE2\x9B\xAA',
'building:mosque': '\xF0\x9F\x95\x8C',
'building:synagogue': '\xF0\x9F\x95\x8D',
'building:stadium': '\xF0\x9F\x8F\x9F',
'building:temple': '\xF0\x9F\x8F\x9B',
'building:train_station': '\xF0\x9F\x9A\x89',
'craft:beekeeper': '\xF0\x9F\x90\x9D',
'cuisine:pasta': '\xF0\x9F\x8D\x9D',
'cuisine:pizza': '\xF0\x9F\x8D\x95',
'cuisine:sushi': '\xF0\x9F\x8D\xA3',
'emergency:ambulance_station': '\xF0\x9F\x9A\x91',
'emergency:defibrillator': '\xF0\x9F\x92\x94',
'emergency:phone': '\xF0\x9F\x86\x98',
'emergency:assembly_point':'\xF0\x9F\x8E\xAF',
'highway:bridleway': '\xE3\x80\xB0 \xF0\x9F\x90\x8E',
'highway:bus_stop': '\xF0\x9F\x9A\x8C',
'highway:construction': '\xE3\x80\xB0 \xF0\x9F\x9A\xA7',
'highway:cycleway': '\xE3\x80\xB0 \xF0\x9F\x9A\xB4',
'highway:footway': '\xE3\x80\xB0 \xF0\x9F\x9A\xB6',
'highway:living_street': '\xE3\x80\xB0 \xF0\x9F\x8F\xA0',
'highway:motorway': '\xE3\x80\xB0 \xF0\x9F\x9A\x97',
'highway:path': '\xE3\x80\xB0 \xF0\x9F\x9A\xB6',
'highway:pedestrian': '\xE3\x80\xB0 \xF0\x9F\x8F\xA0',
'highway:primary': '\xE3\x80\xB0 \xF0\x9F\x9A\x9B',
'highway:raceway': '\xE3\x80\xB0 \xF0\x9F\x8F\x81',
'highway:residential': '\xE3\x80\xB0 \xF0\x9F\x8F\xA0',
'highway:road': '\xE3\x80\xB0 \xE2\x9D\x93',
'highway:secondary': '\xE3\x80\xB0 \xF0\x9F\x9A\x9B',
'highway:tertiary': '\xE3\x80\xB0 \xF0\x9F\x9A\x9B',
'highway:track': '\xE3\x80\xB0 \xF0\x9F\x9A\x9C',
'highway:trunk': '\xE3\x80\xB0 \xF0\x9F\x9A\x97',
'highway:unclassified': '\xE3\x80\xB0 \xE2\x9D\x93',
'historic:castle': '\xF0\x9F\x8F\xB0',
'historic:monument': '\xF0\x9F\x97\xBD',
'landuse:cemetery': '\xE2\x9A\xB0',
'landuse:plant_nursery': '\xF0\x9F\x8C\xB1',
'leisure:bowling_alley': '\xF0\x9F\x8E\xB3',
'leisure:golf_course': '\xE2\x9B\xB3',
'leisure:swimming_pool': '\xF0\x9F\x8F\x8A',
'man_made:works': '\xF0\x9F\x8F\xAD',
'natural:peak': '\xF0\x9F\x97\xBB',
'natural:volcano': '\xF0\x9F\x8C\x8B',
'place:city': '\xF0\x9F\x8C\x86',
'place:ocean': '\xF0\x9F\x8C\x8A',
'place:sea': '\xF0\x9F\x8C\x8A',
'place:town': '\xF0\x9F\x8F\x98',
'place:village': '\xF0\x9F\x8F\x98',
'railway:station': '\xF0\x9F\x9A\x89',
'railway:subway': '\xF0\x9F\x9A\x87',
'railway:subway_entrance': '\xF0\x9F\x9A\x87',
'railway:tram': '\xF0\x9F\x9A\x83',
'route:piste': '\xF0\x9F\x8E\xBF',
'route:subway': '\xF0\x9F\x9A\x87',
'shop:art': '\xF0\x9F\x8E\xA8',
'shop:bag': '\xF0\x9F\x91\x9C',
'shop:bakery': '\xF0\x9F\x8D\x9E',
'shop:baby_goods': '\xF0\x9F\x8D\xBC',
'shop:books': '\xF0\x9F\x93\x9A',
'shop:butcher': '\xF0\x9F\x8D\x97',
'shop:cheese': '\xF0\x9F\xA7\x80',
'shop:chocolate': '\xF0\x9F\x8D\xAB',
'shop:clothes': '\xF0\x9F\x91\x97',
'shop:coffee': '\xE2\x98\x95',
'shop:computer': '\xF0\x9F\x92\xBB',
'shop:confectionary': '\xF0\x9F\x8D\xB0',
'shop:cosmetics': '\xF0\x9F\x92\x85',
'shop:doityourself': '\xF0\x9F\x94\xA7',
'shop:electronics': '\xF0\x9F\x93\xBA',
'shop:erotic': '\xF0\x9F\x92\x8B',
'shop:garden_centre': '\xF0\x9F\x8C\xB1',
'shop:gift': '\xF0\x9F\x8E\x81',
'shop:fishing': '\xF0\x9F\x8E\xA3',
'shop:florist': '\xF0\x9F\x92\x90',
'shop:greengrocer': '\xF0\x9F\x8D\x89',
'shop:hairdresser': '\xF0\x9F\x92\x87',
'shop:hifi': '\xF0\x9F\x94\x8A',
'shop:ice_cream': '\xF0\x9F\x8D\xA6',
'shop:jewelry': '\xF0\x9F\x92\x8D',
'shop:locksmith': '\xF0\x9F\x94\x91',
'shop:mobile_phone': '\xF0\x9F\x93\xB1',
'shop:music': '\xF0\x9F\x92\xBF',
'shop:musical_instrument': '\xF0\x9F\x8E\xB8',
'shop:newsagent': '\xF0\x9F\x93\xB0',
'shop:optician': '\xF0\x9F\x91\x93',
'shop:pastry': '\xF0\x9F\x8D\xAA',
'shop:photo': '\xF0\x9F\x93\xB7',
'shop:seafood': '\xF0\x9F\x90\x9F',
'shop:shoes': '\xF0\x9F\x91\x9E',
'shop:sports': '\xE2\x9A\xBD',
'shop:swimming_pool': '\xF0\x9F\x8F\x8A',
'shop:ticket': '\xF0\x9F\x8E\xAB',
'shop:tobacco': '\xF0\x9F\x9A\xAC',
'shop:video': '\xF0\x9F\x93\xBC',
'shop:video_games': '\xF0\x9F\x8E\xAE',
'shop:watches': '\xE2\x8C\x9A',
'shop:wine': '\xF0\x9F\x8D\xB7',
'sport:american_football': '\xF0\x9F\x8F\x88',
'sport:9pin': '\xF0\x9F\x8E\xB3',
'sport:10pin': '\xF0\x9F\x8E\xB3',
'sport:archery': '\xF0\x9F\x8F\xB9',
'sport:badminton': '\xF0\x9F\x8F\xB8',
'sport:baseball': '\xE2\x9A\xBE',
'sport:basketball': '\xF0\x9F\x8F\x80',
'sport:billiards': '\xF0\x9F\x8E\xB1',
'sport:cricket': '\xF0\x9F\x8F\x8F',
'sport:cycling': '\xF0\x9F\x9A\xB4',
'sport:darts': '\xF0\x9F\x8E\xAF',
'sport:equestrian': '\xF0\x9F\x8F\x87',
'sport:field_hockey': '\xF0\x9F\x8F\x91',
'sport:golf': '\xF0\x9F\x8F\x8C',
'sport:gymnastics': '\xF0\x9F\x8F\x8B',
'sport:horse_racing': '\xF0\x9F\x8F\x87',
'sport:ice_hockey': '\xF0\x9F\x8F\x92',
'sport:ice_skating': '\xE2\x9B\xB8',
'sport:rugby_league': '\xF0\x9F\x8F\x89',
'sport:rugby_union': '\xF0\x9F\x8F\x89',
'sport:sailing': '\xE2\x9B\xB5',
'sport:soccer': '\xE2\x9A\xBD',
'sport:surfing': '\xF0\x9F\x8F\x84',
'sport:table_tennis': '\xF0\x9F\x8F\x93',
'sport:tennis': '\xF0\x9F\x8E\xBE',
'sport:volleyball': '\xF0\x9F\x8F\x90',
'studio:audio': '\xF0\x9F\x8E\xB9',
'studio:radio': '\xF0\x9F\x93\xBB',
'studio:television': '\xF0\x9F\x93\xBA',
'studio:video': '\xF0\x9F\x8E\xA5',
'tourism:aquarium': '\xF0\x9F\x90\xA0',
'tourism:camp_site': '\xE2\x9B\xBA',
'tourism:hotel': '\xF0\x9F\x8F\xA8',
'tourism:information': '\xE2\x84\xB9',
'tourism:zoo': '\xF0\x9F\x90\x8A',
'vending:cigarettes': '\xF0\x9F\x9A\xAC'
}
| gpl-3.0 | -3,471,203,922,891,204,600 | 41.60221 | 61 | 0.6007 | false |
sammyshj/nyx | nyx/panel/header.py | 1 | 16295 | # Copyright 2009-2016, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Top panel for every page, containing basic system and tor related information.
This expands the information it presents to two columns if there's room
available.
"""
import os
import time
import stem
import stem.control
import stem.util.proc
import stem.util.str_tools
import stem.util.system
import nyx.controller
import nyx.curses
import nyx.panel
import nyx.popups
import nyx.tracker
from stem.util import conf, log
from nyx import msg, tor_controller
from nyx.curses import RED, GREEN, YELLOW, CYAN, WHITE, BOLD, HIGHLIGHT
MIN_DUAL_COL_WIDTH = 141 # minimum width where we'll show two columns
SHOW_FD_THRESHOLD = 60 # show file descriptor usage if usage is over this percentage
UPDATE_RATE = 5 # rate in seconds at which we refresh
CONFIG = conf.config_dict('nyx', {
'attr.flag_colors': {},
'attr.version_status_colors': {},
'tor.chroot': '',
})
class HeaderPanel(nyx.panel.DaemonPanel):
"""
Top area containing tor settings and system information.
"""
def __init__(self):
nyx.panel.DaemonPanel.__init__(self, UPDATE_RATE)
self._vals = Sampling.create()
self._last_width = nyx.curses.screen_size().width
self._reported_inactive = False
self._message = None
self._message_attr = []
tor_controller().add_status_listener(self.reset_listener)
def show_message(self, message = None, *attr, **kwargs):
"""
Sets the message displayed at the bottom of the header. If not called with
anything it clears the override.
:param str message: message to be displayed
:param list attr: text attributes to apply
:param int max_wait: seconds to wait for user input, no limit if **None**
:returns: :class:`~nyx.curses.KeyInput` user pressed if provided a
**max_wait**, **None** otherwise or if prompt was canceled
"""
self._message = message
self._message_attr = attr
self.redraw()
if 'max_wait' in kwargs:
user_input = nyx.curses.key_input(kwargs['max_wait'])
self.show_message() # clear override
return user_input
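  # Illustrative usage sketch (added; values are hypothetical):
  #
  #   key = header_panel.show_message('Press any key...', HIGHLIGHT, max_wait = 5)
  #
  # draws a highlighted prompt, waits up to five seconds via
  # nyx.curses.key_input(), clears the override, and returns that key input.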
def is_wide(self):
"""
True if we should show two columns of information, False otherwise.
"""
return self._last_width >= MIN_DUAL_COL_WIDTH
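  # For example (illustrative): on a 150 column terminal is_wide() is True
  # (150 >= MIN_DUAL_COL_WIDTH of 141), so get_height() below caps the relay
  # layout at 5 rows rather than the 7 rows used by the single column layout.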
def get_height(self):
"""
Provides the height of the content, which is dynamically determined by the
panel's maximum width.
"""
max_height = nyx.panel.DaemonPanel.get_height(self)
if self._vals.is_relay:
return min(max_height, 5 if self.is_wide() else 7)
else:
return min(max_height, 4 if self.is_wide() else 5)
def send_newnym(self):
"""
    Requests a new identity and provides a visual cue.
"""
controller = tor_controller()
if not controller.is_newnym_available():
return
controller.signal(stem.Signal.NEWNYM)
# If we're wide then the newnym label in this panel will give an
# indication that the signal was sent. Otherwise use a msg.
if not self.is_wide():
self.show_message('Requesting a new identity', HIGHLIGHT, max_wait = 1)
def key_handlers(self):
def _reconnect():
if self._vals.is_connected:
return
controller = tor_controller()
self.show_message('Reconnecting...', HIGHLIGHT)
try:
try:
controller.reconnect(chroot_path = CONFIG['tor.chroot'])
except stem.connection.MissingPassword:
password = nyx.controller.input_prompt('Controller Password: ')
if password:
controller.authenticate(password)
log.notice("Reconnected to Tor's control port")
self.show_message('Tor reconnected', HIGHLIGHT, max_wait = 1)
except Exception as exc:
self.show_message('Unable to reconnect (%s)' % exc, HIGHLIGHT, max_wait = 3)
controller.close()
return (
nyx.panel.KeyHandler('n', action = self.send_newnym),
nyx.panel.KeyHandler('r', action = _reconnect),
)
def _draw(self, subwindow):
vals = self._vals # local reference to avoid concurrency concerns
self._last_width = subwindow.width
is_wide = self.is_wide()
# space available for content
nyx_controller = nyx.controller.get_controller()
left_width = max(subwindow.width / 2, 77) if is_wide else subwindow.width
right_width = subwindow.width - left_width
pause_time = nyx_controller.get_pause_time() if nyx_controller.is_paused() else None
_draw_platform_section(subwindow, 0, 0, left_width, vals)
if vals.is_connected:
_draw_ports_section(subwindow, 0, 1, left_width, vals)
else:
_draw_disconnected(subwindow, 0, 1, vals.last_heartbeat)
if is_wide:
_draw_resource_usage(subwindow, left_width, 0, right_width, vals, pause_time)
if vals.is_relay:
_draw_fingerprint_and_fd_usage(subwindow, left_width, 1, right_width, vals)
_draw_flags(subwindow, 0, 2, vals.flags)
_draw_exit_policy(subwindow, left_width, 2, vals.exit_policy)
elif vals.is_connected:
_draw_newnym_option(subwindow, left_width, 1, vals.newnym_wait)
else:
_draw_resource_usage(subwindow, 0, 2, left_width, vals, pause_time)
if vals.is_relay:
_draw_fingerprint_and_fd_usage(subwindow, 0, 3, left_width, vals)
_draw_flags(subwindow, 0, 4, vals.flags)
_draw_status(subwindow, 0, self.get_height() - 1, nyx_controller.is_paused(), self._message, *self._message_attr)
def reset_listener(self, controller, event_type, _):
self._update()
if event_type == stem.control.State.CLOSED:
log.notice('Tor control port closed')
def _update(self):
self._vals = Sampling.create(self._vals)
if self._vals.fd_used and self._vals.fd_limit != -1:
fd_percent = 100 * self._vals.fd_used / self._vals.fd_limit
if fd_percent >= 90:
log_msg = msg('panel.header.fd_used_at_ninety_percent', percentage = fd_percent)
log.log_once('fd_used_at_ninety_percent', log.WARN, log_msg)
log.DEDUPLICATION_MESSAGE_IDS.add('fd_used_at_sixty_percent')
elif fd_percent >= 60:
log_msg = msg('panel.header.fd_used_at_sixty_percent', percentage = fd_percent)
log.log_once('fd_used_at_sixty_percent', log.NOTICE, log_msg)
if self._vals.is_connected:
if not self._reported_inactive and (time.time() - self._vals.last_heartbeat) >= 10:
self._reported_inactive = True
log.notice('Relay unresponsive (last heartbeat: %s)' % time.ctime(self._vals.last_heartbeat))
elif self._reported_inactive and (time.time() - self._vals.last_heartbeat) < 10:
self._reported_inactive = False
log.notice('Relay resumed')
self.redraw()
class Sampling(object):
def __init__(self, **attr):
self._attr = attr
for key, value in attr.items():
setattr(self, key, value)
@staticmethod
def create(last_sampling = None):
controller = tor_controller()
retrieved = time.time()
pid = controller.get_pid('')
tor_resources = nyx.tracker.get_resource_tracker().get_value()
nyx_total_cpu_time = sum(os.times()[:3], stem.util.system.SYSTEM_CALL_TIME)
or_listeners = controller.get_listeners(stem.control.Listener.OR, [])
control_listeners = controller.get_listeners(stem.control.Listener.CONTROL, [])
if controller.get_conf('HashedControlPassword', None):
auth_type = 'password'
elif controller.get_conf('CookieAuthentication', None) == '1':
auth_type = 'cookie'
else:
auth_type = 'open'
try:
fd_used = stem.util.proc.file_descriptors_used(pid)
except IOError:
fd_used = None
if last_sampling:
nyx_cpu_delta = nyx_total_cpu_time - last_sampling.nyx_total_cpu_time
nyx_time_delta = retrieved - last_sampling.retrieved
nyx_cpu = nyx_cpu_delta / nyx_time_delta
else:
nyx_cpu = 0.0
attr = {
'retrieved': retrieved,
'is_connected': controller.is_alive(),
'connection_time': controller.connection_time(),
'last_heartbeat': controller.get_latest_heartbeat(),
'fingerprint': controller.get_info('fingerprint', 'Unknown'),
'nickname': controller.get_conf('Nickname', ''),
'newnym_wait': controller.get_newnym_wait(),
'exit_policy': controller.get_exit_policy(None),
'flags': getattr(controller.get_network_status(default = None), 'flags', []),
'version': str(controller.get_version('Unknown')).split()[0],
'version_status': controller.get_info('status/version/current', 'Unknown'),
'address': or_listeners[0][0] if (or_listeners and or_listeners[0][0] != '0.0.0.0') else controller.get_info('address', 'Unknown'),
'or_port': or_listeners[0][1] if or_listeners else '',
'dir_port': controller.get_conf('DirPort', '0'),
'control_port': str(control_listeners[0][1]) if control_listeners else None,
'socket_path': controller.get_conf('ControlSocket', None),
'is_relay': bool(or_listeners),
'auth_type': auth_type,
'pid': pid,
'start_time': stem.util.system.start_time(pid),
'fd_limit': int(controller.get_info('process/descriptor-limit', '-1')),
'fd_used': fd_used,
'nyx_total_cpu_time': nyx_total_cpu_time,
'tor_cpu': '%0.1f' % (100 * tor_resources.cpu_sample),
'nyx_cpu': '%0.1f' % (nyx_cpu),
'memory': stem.util.str_tools.size_label(tor_resources.memory_bytes) if tor_resources.memory_bytes > 0 else 0,
'memory_percent': '%0.1f' % (100 * tor_resources.memory_percent),
'hostname': os.uname()[1],
'platform': '%s %s' % (os.uname()[0], os.uname()[2]), # [platform name] [version]
}
return Sampling(**attr)
def format(self, message, crop_width = None):
formatted_msg = message.format(**self._attr)
if crop_width is not None:
formatted_msg = stem.util.str_tools.crop(formatted_msg, crop_width)
return formatted_msg
def _draw_platform_section(subwindow, x, y, width, vals):
"""
Section providing the user's hostname, platform, and version information...
nyx - odin (Linux 3.5.0-52-generic) Tor 0.2.5.1-alpha-dev (unrecommended)
|------ platform (40 characters) ------| |----------- tor version -----------|
"""
initial_x, space_left = x, min(width, 40)
x = subwindow.addstr(x, y, vals.format('nyx - {hostname}', space_left))
space_left -= x - initial_x
if space_left >= 10:
subwindow.addstr(x, y, ' (%s)' % vals.format('{platform}', space_left - 3))
x, space_left = initial_x + 43, width - 43
if vals.version != 'Unknown' and space_left >= 10:
x = subwindow.addstr(x, y, vals.format('Tor {version}', space_left))
space_left -= x - 43 - initial_x
if space_left >= 7 + len(vals.version_status):
version_color = CONFIG['attr.version_status_colors'].get(vals.version_status, WHITE)
x = subwindow.addstr(x, y, ' (')
x = subwindow.addstr(x, y, vals.version_status, version_color)
subwindow.addstr(x, y, ')')
def _draw_ports_section(subwindow, x, y, width, vals):
"""
Section providing our nickname, address, and port information...
Unnamed - 0.0.0.0:7000, Control Port (cookie): 9051
"""
if not vals.is_relay:
x = subwindow.addstr(x, y, 'Relaying Disabled', CYAN)
else:
x = subwindow.addstr(x, y, vals.format('{nickname} - {address}:{or_port}'))
if vals.dir_port != '0':
x = subwindow.addstr(x, y, vals.format(', Dir Port: {dir_port}'))
if vals.control_port:
if width >= x + 19 + len(vals.control_port) + len(vals.auth_type):
auth_color = RED if vals.auth_type == 'open' else GREEN
x = subwindow.addstr(x, y, ', Control Port (')
x = subwindow.addstr(x, y, vals.auth_type, auth_color)
subwindow.addstr(x, y, vals.format('): {control_port}'))
else:
subwindow.addstr(x, y, vals.format(', Control Port: {control_port}'))
elif vals.socket_path:
subwindow.addstr(x, y, vals.format(', Control Socket: {socket_path}'))
def _draw_disconnected(subwindow, x, y, last_heartbeat):
"""
Message indicating that tor is disconnected...
Tor Disconnected (15:21 07/13/2014, press r to reconnect)
"""
x = subwindow.addstr(x, y, 'Tor Disconnected', RED, BOLD)
last_heartbeat_str = time.strftime('%H:%M %m/%d/%Y', time.localtime(last_heartbeat))
subwindow.addstr(x, y, ' (%s, press r to reconnect)' % last_heartbeat_str)
def _draw_resource_usage(subwindow, x, y, width, vals, pause_time):
"""
System resource usage of the tor process...
cpu: 0.0% tor, 1.0% nyx mem: 0 (0.0%) pid: 16329 uptime: 12-20:42:07
"""
if vals.start_time:
if not vals.is_connected:
now = vals.connection_time
elif pause_time:
now = pause_time
else:
now = time.time()
uptime = stem.util.str_tools.short_time_label(now - vals.start_time)
else:
uptime = ''
sys_fields = (
(0, vals.format('cpu: {tor_cpu}% tor, {nyx_cpu}% nyx')),
(27, vals.format('mem: {memory} ({memory_percent}%)')),
(47, vals.format('pid: {pid}')),
(59, 'uptime: %s' % uptime),
)
for (start, label) in sys_fields:
if width >= start + len(label):
subwindow.addstr(x + start, y, label)
else:
break
def _draw_fingerprint_and_fd_usage(subwindow, x, y, width, vals):
"""
Presents our fingerprint, and our file descriptor usage if we're running
out...
fingerprint: 1A94D1A794FCB2F8B6CBC179EF8FDD4008A98D3B, file desc: 900 / 1000 (90%)
"""
initial_x, space_left = x, width
x = subwindow.addstr(x, y, vals.format('fingerprint: {fingerprint}', width))
space_left -= x - initial_x
if space_left >= 30 and vals.fd_used and vals.fd_limit != -1:
fd_percent = 100 * vals.fd_used / vals.fd_limit
if fd_percent >= SHOW_FD_THRESHOLD:
if fd_percent >= 95:
percentage_format = (RED, BOLD)
elif fd_percent >= 90:
percentage_format = (RED,)
elif fd_percent >= 60:
percentage_format = (YELLOW,)
else:
percentage_format = ()
x = subwindow.addstr(x, y, ', file descriptors' if space_left >= 37 else ', file desc')
x = subwindow.addstr(x, y, vals.format(': {fd_used} / {fd_limit} ('))
x = subwindow.addstr(x, y, '%i%%' % fd_percent, *percentage_format)
subwindow.addstr(x, y, ')')
def _draw_flags(subwindow, x, y, flags):
"""
Presents flags held by our relay...
flags: Running, Valid
"""
x = subwindow.addstr(x, y, 'flags: ')
if flags:
for i, flag in enumerate(flags):
flag_color = CONFIG['attr.flag_colors'].get(flag, WHITE)
x = subwindow.addstr(x, y, flag, flag_color, BOLD)
if i < len(flags) - 1:
x = subwindow.addstr(x, y, ', ')
else:
subwindow.addstr(x, y, 'none', CYAN, BOLD)
def _draw_exit_policy(subwindow, x, y, exit_policy):
"""
Presents our exit policy...
exit policy: reject *:*
"""
x = subwindow.addstr(x, y, 'exit policy: ')
if not exit_policy:
return
rules = list(exit_policy.strip_private().strip_default())
for i, rule in enumerate(rules):
policy_color = GREEN if rule.is_accept else RED
x = subwindow.addstr(x, y, str(rule), policy_color, BOLD)
if i < len(rules) - 1:
x = subwindow.addstr(x, y, ', ')
if exit_policy.has_default():
if rules:
x = subwindow.addstr(x, y, ', ')
subwindow.addstr(x, y, '<default>', CYAN, BOLD)
def _draw_newnym_option(subwindow, x, y, newnym_wait):
"""
  Provide a notice for requesting a new identity, and the time until it's next
available if in the process of building circuits.
"""
if newnym_wait == 0:
subwindow.addstr(x, y, "press 'n' for a new identity")
else:
plural = 's' if newnym_wait > 1 else ''
subwindow.addstr(x, y, 'building circuits, available again in %i second%s' % (newnym_wait, plural))
def _draw_status(subwindow, x, y, is_paused, message, *attr):
"""
Provides general usage information or a custom message.
"""
if message:
subwindow.addstr(x, y, message, *attr)
elif not is_paused:
controller = nyx.controller.get_controller()
subwindow.addstr(x, y, 'page %i / %i - m: menu, p: pause, h: page help, q: quit' % (controller.get_page() + 1, controller.get_page_count()))
else:
subwindow.addstr(x, y, 'Paused', HIGHLIGHT)
| gpl-3.0 | -6,505,644,122,368,500,000 | 31.076772 | 144 | 0.639828 | false |
Diti24/python-ivi | ivi/agilent/agilentDSO90404A.py | 1 | 1686 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSO90404A(agilent90000):
"Agilent Infiniium DSO90404A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO90404A')
super(agilentDSO90404A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 4e9
self._init_channels()
| mit | 5,013,336,484,230,546,000 | 37.318182 | 86 | 0.736655 | false |
michaelpantic/tolScreenCleaner | outputAdapterODMax.py | 1 | 4963 | import csv
import numpy
import pandas
import os
import matplotlib
matplotlib.use('PDF')
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
aggregatedData = []
def createOutputFile(folderName):
return folderName
def outputPlateData(plate, folderName):
aggregatedData.extend([[plate.Name]+t.ToTable() for t in plate.DataArray]);
return 0
def finish(folderName):
# read data
dataFrame = pandas.DataFrame(aggregatedData, columns=['Plate','Coordinate','Medium','Strain']+list(range(1,49)))
dataFrame.set_index(['Strain']);
# separate by blank /nonblank
dataFrameNonBlanks = dataFrame.loc[(dataFrame['Strain'] != 'BLANK')]
dataFrameNonBlanks.set_index(['Plate','Medium','Strain'])
dataFrameBlanks = dataFrame.loc[dataFrame['Strain'] == 'BLANK']
dataFrameBlanks.set_index(['Plate','Medium']);
dataFrameBlanks[["Medium"]+(list(range(1,49)))].groupby(['Medium']).aggregate(['mean','std']).to_csv(os.path.join(folderName, "output_blanks.csv"), sep = '\t');
#correct for blank by medium
corrected = dataFrameNonBlanks.apply(correctForBlank,axis=1,args=[dataFrameBlanks])
#select only non-mcpsm for next step
dataFrameMedia = corrected.loc[corrected['Medium'] == 'MCPSM']
dataFrameMedia.set_index(['Plate','Medium','Strain'])
#correct for medium (positive test)
# corrected = corrected.loc[corrected['Medium'] != 'MCPSM']
corrected = corrected.apply(correctForMedium,axis=1,args=[dataFrameMedia])
corrected.to_csv(os.path.join(folderName, "output_table.csv"),sep='\t')
# generate aggregated by experiment file
aggregated = corrected.groupby(['Medium','Strain']).aggregate(['mean'])
aggregated.to_csv(os.path.join(folderName, "output_table_corr_by_strain_medium.csv"), sep='\t')
#generate count file
count = corrected[['Medium','Strain','Plate','Coordinate']].groupby(['Medium','Strain']).aggregate(['count']);
count.to_csv(os.path.join(folderName,"output_count.csv"), sep='\t');
# generate global result file
correctedColumns = list(map(lambda x:"Cor_"+str(x), list(range(1,49))))
posTestColumns = list(map(lambda x:"PosTest_"+str(x), list(range(1,49))))
corrected["MaxCorOD"] = corrected[correctedColumns].max(axis=1);
corrected["MaxPosTestOD"] = corrected[posTestColumns].max(axis=1);
corrected[["Medium","MaxCorOD","MaxPosTestOD"]].groupby(["Medium"]).aggregate(['mean','std']).to_csv(os.path.join(folderName,"output_conclusion_by_medium.csv"), sep='\t');
corrected[["Medium","Strain","MaxCorOD","MaxPosTestOD"]].groupby(["Strain","Medium"]).aggregate(['mean','std']).to_csv(os.path.join(folderName,"output_conclusion_by_strain_medium.csv"), sep='\t');
corrected[["Strain","MaxCorOD","MaxPosTestOD"]].groupby(["Strain"]).aggregate(['mean','std']).to_csv(os.path.join(folderName, "output_conclusion_by_strain.csv"), sep='\t');
plotData(aggregated, folderName)
#grouped = dataFrameNonBlanks[['Strain','Medium','ODMax']].groupby(['Strain','Medium']).aggregate(['max','min','mean','std','count'])
return 0
def correctForBlank(row, dataFrameBlanks):
medium = row['Medium']
#get corresponding blanks
df=dataFrameBlanks.loc[(dataFrameBlanks['Medium']==medium)]
blankMean=df[list(range(0,52))].mean()
numBlanks = len(df.index)
if numBlanks == 0:
print("ERROR NO BLANKS FOUND FOR "+medium)
#attach corrected data!
row['NumberBlanks'] = len(df.index);
for x in range(1,49):
row['Blank_'+str(x)] = blankMean[x]
for x in range(1,49):
row['Cor_'+str(x)] = row[x]-blankMean[x]
return row;
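# Illustrative example (hypothetical numbers): if a well on medium "MCPSM"
# reads OD 0.52 at timepoint 5 and the mean of the MCPSM blanks at that
# timepoint is 0.08, correctForBlank() stores Cor_5 = 0.52 - 0.08 = 0.44 on
# that row, keeps the blank mean in Blank_5 and the blank count in NumberBlanks.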
def correctForMedium(row, dataFrameMedia):
strain = row['Strain']
plate = row['Plate']
medium = "MCPSM"
df=dataFrameMedia.loc[(dataFrameMedia['Strain']==strain) & (dataFrameMedia['Medium']==medium)]
correctedColumns = list(map(lambda x:"Cor_"+str(x), list(range(1,49))))
mediaMean=df[["Plate","Strain"]+correctedColumns].mean()
numPosTests = len(df.index)
if numPosTests == 0:
print("ERROR NO POSMEDIA FOUND FOR "+plate+"/"+strain)
row['NumberPosTests'] = numPosTests
for x in range(1,49):
if(numPosTests == 0):
row['PosTest_'+str(x)] = 0
else:
row['PosTest_'+str(x)] = mediaMean['Cor_'+str(x)]
return row
def plotData(df, folderName):
df=df.reset_index()
correctedColumns = list(map(lambda x:"Cor_"+str(x), list(range(1,49))))
allMedia = df["Medium"].unique();
for medium in allMedia:
# one dataFrame per Medium with only the corrected timeseries
dfMedium = df[correctedColumns].loc[(df['Medium'] == medium)];
dfMediumT = dfMedium.T.rename(columns=lambda x:df["Strain"][x]);
# get list of end OD
#dfEndOD = df[["Strain","Cor_48"]].sort("Cor_48");
#print(dfEndOD)
#generate colormap
#numEntries = len(dfMedium.index)
#colorMap = ListedColormap(["red"]+ ["gray"]*(numEntries-1),"test");
ax = dfMediumT.plot(legend = "Strain", title = "OD in "+medium,colormap="gist_ncar")
ax.set_xlabel("Time 0-48h")
ax.set_ylabel("OD")
plt.savefig(os.path.join(folderName,medium+".pdf"))
return 0
| gpl-2.0 | -5,382,308,567,606,533,000 | 29.262195 | 197 | 0.695547 | false |
nens/dpnetcdf | dpnetcdf/migrations/0050_auto__del_field_datasource_imported.py | 1 | 5366 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Datasource.imported'
db.delete_column('dpnetcdf_datasource', 'imported')
def backwards(self, orm):
# Adding field 'Datasource.imported'
db.add_column('dpnetcdf_datasource', 'imported',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
models = {
'dpnetcdf.datasource': {
'Meta': {'object_name': 'Datasource'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapDataset']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shape_file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.ShapeFile']", 'null': 'True'}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.Variable']", 'null': 'True'})
},
'dpnetcdf.maplayer': {
'Meta': {'object_name': 'MapLayer'},
'datasources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Datasource']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'styles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Style']", 'symmetrical': 'False', 'blank': 'True'})
},
'dpnetcdf.opendapcatalog': {
'Meta': {'object_name': 'OpendapCatalog'},
'base_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'catalog_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'http_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opendap_url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'service_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dpnetcdf.opendapdataset': {
'Meta': {'object_name': 'OpendapDataset'},
'calculation_facility': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapSubcatalog']"}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'program': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'scenario': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'strategy': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'time_zero': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'variables': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dpnetcdf.Variable']", 'symmetrical': 'False'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'dpnetcdf.opendapsubcatalog': {
'Meta': {'object_name': 'OpendapSubcatalog'},
'catalog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dpnetcdf.OpendapCatalog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'dpnetcdf.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'dpnetcdf.style': {
'Meta': {'object_name': 'Style'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'xml': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dpnetcdf.variable': {
'Meta': {'object_name': 'Variable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['dpnetcdf'] | gpl-3.0 | -3,347,639,049,884,668,400 | 59.988636 | 162 | 0.547521 | false |
multidadosti-erp/multidadosti-addons | project_default_stages/tests/test_project_project.py | 1 | 2128 | from odoo.tests.common import TransactionCase
class TestProjectProject(TransactionCase):
def setUp(self):
super(TestProjectProject, self).setUp()
self.project_values = {
'name': 'Projeto Teste',
'label_tasks': 'Tasks',
'partner_id': self.env.ref('base.res_partner_4').id,
'bring_default_task_type': True,
}
self.project_stage_0 = self.env.ref('project.project_stage_data_0')
self.project_stage_2 = self.env.ref('project.project_stage_data_2')
def test_create_with_default_task(self):
        # Set case_default to False on every existing task stage
for task_type in self.env['project.task.type'].search([]):
task_type.case_default = False
        # Mark the New and Advanced stages as default project stages, so
        # they get added when a project is created with the "bring default
        # task types" checkbox enabled
self.project_stage_0.case_default = True
self.project_stage_2.case_default = True
        # Specify that the project must use default stages
self.project_values['bring_default_task_type'] = True
project = self.env['project.project'].create(self.project_values)
        # Check that the project's stages are exactly the two stages we
        # flagged as default
self.assertEqual(len(project.type_ids), 2)
self.assertIn(self.project_stage_0.id, project.type_ids.ids)
self.assertIn(self.project_stage_2.id, project.type_ids.ids)
    def test_create_without_default_task(self):
        # Specify that the project must NOT use default stages
self.project_values['bring_default_task_type'] = False
project = self.env['project.project'].create(self.project_values)
        # Check that no default task stages were copied onto the project
self.assertEqual(len(project.type_ids), 0)
self.assertNotIn(self.project_stage_0.id, project.type_ids.ids)
self.assertNotIn(self.project_stage_2.id, project.type_ids.ids)
| agpl-3.0 | -4,126,335,958,288,204,000 | 37.690909 | 75 | 0.663534 | false |
antonxy/audiosync | gui.py | 1 | 5008 | import tkinter as tk
from tkinter.ttk import Progressbar
from tkinter import filedialog, messagebox
import os
import errno
from threading import Thread
import program_logic
def get_paths_in_dir(directory):
filenames = os.listdir(directory)
return [os.path.abspath(os.path.join(directory, name)) for name in filenames]
class MainFrame(tk.Frame):
def __init__(self, parent):
super(MainFrame, self).__init__(parent)
self.parent = parent
self.dir_var = tk.StringVar()
self.audio_progress_var = tk.IntVar()
self.video_progress_var = tk.IntVar()
self.fps_var = tk.StringVar()
self.audio_shift_var = tk.StringVar()
self.start_button = None
self.generate_ui()
self.center_window()
def generate_ui(self):
self.parent.title('audiosync')
dir_frame = tk.Frame(self)
dir_frame.pack(fill=tk.X)
tk.Label(dir_frame, text='Directory:').pack(side=tk.LEFT)
tk.Entry(dir_frame, textvariable=self.dir_var).pack(fill=tk.X, expand=1, side=tk.LEFT)
tk.Button(dir_frame, text='Select', command=self.select_dir).pack(side=tk.LEFT)
tk.Button(dir_frame, text='Create Structure', command=self.create_directory_structure).pack(side=tk.RIGHT)
fps_frame = tk.Frame(self)
fps_frame.pack()
tk.Label(fps_frame, text='FPS:').pack(side=tk.LEFT)
tk.Entry(fps_frame, textvariable=self.fps_var).pack(side=tk.LEFT)
audio_shift_frame = tk.Frame(self)
audio_shift_frame.pack()
tk.Label(audio_shift_frame, text='Shift Audio forward').pack(side=tk.LEFT)
tk.Entry(audio_shift_frame, textvariable=self.audio_shift_var).pack(side=tk.LEFT)
tk.Label(audio_shift_frame, text='frames').pack(side=tk.LEFT)
cmd_frame = tk.Frame(self)
cmd_frame.pack(fill=tk.X)
self.start_button = tk.Button(cmd_frame, text='Start', command=self.execute)
self.start_button.pack()
Progressbar(self, variable=self.video_progress_var).pack(fill=tk.X)
Progressbar(self, variable=self.audio_progress_var).pack(fill=tk.X)
self.pack(fill=tk.BOTH, expand=1)
def center_window(self):
w = 500
h = 140
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def select_dir(self):
dir_path = filedialog.askdirectory()
if dir_path != '':
self.dir_var.set(dir_path)
def create_directory_structure(self):
dir_path = self.dir_var.get()
if dir_path != '':
dir_names = ['video', 'audio', 'edl']
for dir_name in dir_names:
new_dir_path = os.path.join(dir_path, dir_name)
try:
os.makedirs(new_dir_path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def execute(self):
directory = self.dir_var.get()
if directory is '':
messagebox.showerror(title='audiosync', message='No directory selected')
return
try:
fps = float(self.fps_var.get())
except ValueError:
messagebox.showerror(title='audiosync', message='FPS has to be decimal number')
return
try:
audio_shift = int(self.audio_shift_var.get())
except ValueError:
messagebox.showerror(title='audiosync', message='Audio shift has to be integer')
return
thread = Thread(target=self.thread_target,
args=(self.audio_progress_var, self.video_progress_var, self.start_button, fps, directory, audio_shift))
thread.start()
self.start_button.config(state='disabled')
@staticmethod
def thread_target(audio_progress_var, video_progress_var, start_button, fps, directory, audio_shift):
video_ret = analyse_directory(os.path.join(directory, 'video'), video_progress_var)
audio_ret = analyse_directory(os.path.join(directory, 'audio'), audio_progress_var)
program_logic.rename_files(audio_ret, 'a')
program_logic.rename_files(video_ret, 'v')
program_logic.generate_edls(video_ret, audio_ret, fps, os.path.join(directory, 'edl'), audio_shift)
audio_progress_var.set(0)
video_progress_var.set(0)
start_button.config(state='normal')
def analyse_directory(directory, progress_var):
ret_list = []
files = os.listdir(directory)
for n, filename in enumerate(files):
path = os.path.abspath(os.path.join(directory, filename))
result = program_logic.analyse_file(path)
if result is not None:
ret_list.append(result)
progress_var.set(int((n + 1) / len(files) * 100))
return ret_list
if __name__ == '__main__':
root = tk.Tk()
app = MainFrame(root)
root.mainloop() | mit | 6,802,534,413,358,462,000 | 33.784722 | 128 | 0.609425 | false |
felipenaselva/felipe.repository | plugin.program.Build.Tools/resources/libs/skinSwitch.py | 1 | 9353 | ################################################################################
# Copyright (C) 2015 OpenELEQ #
# #
# This Program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2, or (at your option) #
# any later version. #
# #
# This Program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with XBMC; see the file COPYING. If not, write to #
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #
# http://www.gnu.org/copyleft/gpl.html #
################################################################################
import os, re,glob, shutil, time, xbmc, xbmcaddon, thread, wizard as wiz, uservar
try:
import json as simplejson
except:
import simplejson
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
COLOR1 = uservar.COLOR1
COLOR2 = uservar.COLOR2
HOME = xbmc.translatePath('special://home/')
ADDONS = os.path.join(HOME, 'addons')
#DIALOG = xbmcgui.Dialog()
def getOld(old):
try:
old = '"%s"' % old
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (old)
response = xbmc.executeJSONRPC(query)
response = simplejson.loads(response)
if response.has_key('result'):
if response['result'].has_key('value'):
return response ['result']['value']
except:
pass
return None
def setNew(new, value):
try:
new = '"%s"' % new
value = '"%s"' % value
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = xbmc.executeJSONRPC(query)
except:
pass
return None
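# Added note: setNew('lookandfeel.skin', 'skin.confluence'), as used by
# swapSkins() below, sends Kodi the JSON-RPC request
#   {"jsonrpc":"2.0", "method":"Settings.SetSettingValue",
#    "params":{"setting":"lookandfeel.skin","value":"skin.confluence"}, "id":1}
# via xbmc.executeJSONRPC().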
def swapSkins(skin):
if skin == 'skin.confluence':
HOME = xbmc.translatePath('special://home/')
skinfold = os.path.join(HOME, 'userdata', 'addon_data', 'skin.confluence')
settings = os.path.join(skinfold, 'settings.xml')
if not os.path.exists(settings):
string = '<settings>\n <setting id="FirstTimeRun" type="bool">true</setting>\n</settings>'
os.makedirs(skinfold)
f = open(settings, 'w'); f.write(string); f.close()
else: xbmcaddon.Addon(id='skin.confluence').setSetting('FirstTimeRun', 'true')
old = 'lookandfeel.skin'
value = skin
current = getOld(old)
new = old
setNew(new, value)
# if not xbmc.getCondVisibility(Skin.HasSetting(FirstTimeRun)):
# while xbmc.getCondVisibility('Window.IsVisible(1112)'):
# xbmc.executebuiltin('SendClick(100)')
def swapUS():
new = '"addons.unknownsources"'
value = 'true'
query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":%s}, "id":1}' % (new)
response = xbmc.executeJSONRPC(query)
wiz.log("Unknown Sources Get Settings: %s" % str(response), xbmc.LOGDEBUG)
if 'false' in response:
thread.start_new_thread(dialogWatch, ())
xbmc.sleep(200)
query = '{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":%s,"value":%s}, "id":1}' % (new, value)
response = xbmc.executeJSONRPC(query)
wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, ADDONTITLE), '[COLOR %s]Unknown Sources:[/COLOR] [COLOR %s]Enabled[/COLOR]' % (COLOR1, COLOR2))
wiz.log("Unknown Sources Set Settings: %s" % str(response), xbmc.LOGDEBUG)
def dialogWatch():
x = 0
while not xbmc.getCondVisibility("Window.isVisible(yesnodialog)") and x < 100:
x += 1
xbmc.sleep(100)
if xbmc.getCondVisibility("Window.isVisible(yesnodialog)"):
xbmc.executebuiltin('SendClick(11)')
########################################################################################
#######################################Still Needs Work#########################################
########################################################################################
#def popUPmenu():
# fold = glob.glob(os.path.join(ADDONS, 'skin*'))
# addonnames = []; addonids = []; addonfolds = []
# for folder in sorted(fold, key = lambda x: x):
# xml = os.path.join(folder, 'addon.xml')
# if os.path.exists(xml):
# foldername = os.path.split(folder[:-1])[1]
# f = open(xml)
# a = f.read()
# f.close()
# getid = parseDOM(a, 'addon', ret='id')
# getname = parseDOM(a, 'addon', ret='name')
# addid = foldername if len(getid) == 0 else getid[0]
# title = foldername if len(getname) == 0 else getname[0]
# temp = title.replace('[', '<').replace(']', '>')
# temp = re.sub('<[^<]+?>', '', temp)
# addonnames.append(temp)
# addonids.append(addid)
# addonfolds.append(foldername)
# #currskin = ["Current Skin -- %s" % currSkin()] + addonids
# select = DIALOG.select("Select the Skin you want to swap with.", addonids#currskin )
# if select == -1: return
# elif select == 1: addonids[select]
# swapSkins(addonids)
def parseDOM(html, name=u"", attrs={}, ret=False):
# Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
if isinstance(html, str):
try:
html = [html.decode("utf-8")]
except:
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
return u""
if not name.strip():
return u""
ret_lst = []
for item in html:
temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
for match in temp_item:
item = item.replace(match, match.replace("\n", " "))
lst = []
for key in attrs:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
if len(lst2) == 0 and attrs[key].find(" ") == -1:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)
if len(lst) == 0:
lst = lst2
lst2 = []
else:
test = range(len(lst))
test.reverse()
for i in test:
if not lst[i] in lst2:
del(lst[i])
if len(lst) == 0 and attrs == {}:
lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
if len(lst) == 0:
lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)
if isinstance(ret, str):
lst2 = []
for match in lst:
attr_lst = re.compile('<' + name + '.*?' + ret + '=([\'"].[^>]*?[\'"])>', re.M | re.S).findall(match)
if len(attr_lst) == 0:
attr_lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
for tmp in attr_lst:
cont_char = tmp[0]
if cont_char in "'\"":
if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]
if tmp.rfind(cont_char, 1) > -1:
tmp = tmp[1:tmp.rfind(cont_char)]
else:
if tmp.find(" ") > 0:
tmp = tmp[:tmp.find(" ")]
elif tmp.find("/") > 0:
tmp = tmp[:tmp.find("/")]
elif tmp.find(">") > 0:
tmp = tmp[:tmp.find(">")]
lst2.append(tmp.strip())
lst = lst2
else:
lst2 = []
for match in lst:
endstr = u"</" + name
start = item.find(match)
end = item.find(endstr, start)
pos = item.find("<" + name, start + 1 )
while pos < end and pos != -1:
tend = item.find(endstr, end + len(endstr))
if tend != -1:
end = tend
pos = item.find("<" + name, pos + 1)
if start == -1 and end == -1:
temp = u""
elif start > -1 and end > -1:
temp = item[start + len(match):end]
elif end > -1:
temp = item[:end]
elif start > -1:
temp = item[start + len(match):]
if ret:
endstr = item[end:item.find(">", item.find(endstr)) + 1]
temp = match + temp + endstr
item = item[item.find(temp, item.find(match)) + len(temp):]
lst2.append(temp)
lst = lst2
ret_lst += lst
return ret_lst | gpl-2.0 | -3,929,239,323,546,178,600 | 40.207048 | 145 | 0.481664 | false |
Brett55/moto | tests/test_iam/test_iam.py | 1 | 24765 | from __future__ import unicode_literals
import base64
import boto
import boto3
import sure # noqa
from boto.exception import BotoServerError
from botocore.exceptions import ClientError
from moto import mock_iam, mock_iam_deprecated
from moto.iam.models import aws_managed_policies
from nose.tools import assert_raises, assert_equals
from nose.tools import raises
from tests.helpers import requires_boto_gte
@mock_iam_deprecated()
def test_get_all_server_certs():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
certs = conn.get_all_server_certs()['list_server_certificates_response'][
'list_server_certificates_result']['server_certificate_metadata_list']
certs.should.have.length_of(1)
cert1 = certs[0]
cert1.server_certificate_name.should.equal("certname")
cert1.arn.should.equal(
"arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam_deprecated()
def test_get_server_cert_doesnt_exist():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.get_server_certificate("NonExistant")
@mock_iam_deprecated()
def test_get_server_cert():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
cert = conn.get_server_certificate("certname")
cert.server_certificate_name.should.equal("certname")
cert.arn.should.equal(
"arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam_deprecated()
def test_upload_server_cert():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
cert = conn.get_server_certificate("certname")
cert.server_certificate_name.should.equal("certname")
cert.arn.should.equal(
"arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam_deprecated()
def test_delete_server_cert():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
conn.get_server_certificate("certname")
conn.delete_server_cert("certname")
with assert_raises(BotoServerError):
conn.get_server_certificate("certname")
with assert_raises(BotoServerError):
conn.delete_server_cert("certname")
@mock_iam_deprecated()
@raises(BotoServerError)
def test_get_role__should_throw__when_role_does_not_exist():
conn = boto.connect_iam()
conn.get_role('unexisting_role')
@mock_iam_deprecated()
@raises(BotoServerError)
def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist():
conn = boto.connect_iam()
conn.get_instance_profile('unexisting_instance_profile')
@mock_iam_deprecated()
def test_create_role_and_instance_profile():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role(
"my-role", assume_role_policy_document="some policy", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
role = conn.get_role("my-role")
role.path.should.equal("my-path")
role.assume_role_policy_document.should.equal("some policy")
profile = conn.get_instance_profile("my-profile")
profile.path.should.equal("my-path")
role_from_profile = list(profile.roles.values())[0]
role_from_profile['role_id'].should.equal(role.role_id)
role_from_profile['role_name'].should.equal("my-role")
conn.list_roles().roles[0].role_name.should.equal('my-role')
@mock_iam_deprecated()
def test_remove_role_from_instance_profile():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role(
"my-role", assume_role_policy_document="some policy", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
profile = conn.get_instance_profile("my-profile")
role_from_profile = list(profile.roles.values())[0]
role_from_profile['role_name'].should.equal("my-role")
conn.remove_role_from_instance_profile("my-profile", "my-role")
profile = conn.get_instance_profile("my-profile")
dict(profile.roles).should.be.empty
@mock_iam()
def test_get_login_profile():
conn = boto3.client('iam', region_name='us-east-1')
conn.create_user(UserName='my-user')
conn.create_login_profile(UserName='my-user', Password='my-pass')
response = conn.get_login_profile(UserName='my-user')
response['LoginProfile']['UserName'].should.equal('my-user')
@mock_iam()
def test_update_login_profile():
conn = boto3.client('iam', region_name='us-east-1')
conn.create_user(UserName='my-user')
conn.create_login_profile(UserName='my-user', Password='my-pass')
response = conn.get_login_profile(UserName='my-user')
response['LoginProfile'].get('PasswordResetRequired').should.equal(None)
conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True)
response = conn.get_login_profile(UserName='my-user')
response['LoginProfile'].get('PasswordResetRequired').should.equal(True)
@mock_iam()
def test_delete_role():
conn = boto3.client('iam', region_name='us-east-1')
with assert_raises(ClientError):
conn.delete_role(RoleName="my-role")
conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
role = conn.get_role(RoleName="my-role")
role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role')
conn.delete_role(RoleName="my-role")
with assert_raises(ClientError):
conn.get_role(RoleName="my-role")
@mock_iam_deprecated()
def test_list_instance_profiles():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role("my-role", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
profiles = conn.list_instance_profiles().instance_profiles
len(profiles).should.equal(1)
profiles[0].instance_profile_name.should.equal("my-profile")
profiles[0].roles.role_name.should.equal("my-role")
@mock_iam_deprecated()
def test_list_instance_profiles_for_role():
conn = boto.connect_iam()
conn.create_role(role_name="my-role",
assume_role_policy_document="some policy", path="my-path")
conn.create_role(role_name="my-role2",
assume_role_policy_document="some policy2", path="my-path2")
profile_name_list = ['my-profile', 'my-profile2']
profile_path_list = ['my-path', 'my-path2']
for profile_count in range(0, 2):
conn.create_instance_profile(
profile_name_list[profile_count], path=profile_path_list[profile_count])
for profile_count in range(0, 2):
conn.add_role_to_instance_profile(
profile_name_list[profile_count], "my-role")
profile_dump = conn.list_instance_profiles_for_role(role_name="my-role")
profile_list = profile_dump['list_instance_profiles_for_role_response'][
'list_instance_profiles_for_role_result']['instance_profiles']
for profile_count in range(0, len(profile_list)):
profile_name_list.remove(profile_list[profile_count][
"instance_profile_name"])
profile_path_list.remove(profile_list[profile_count]["path"])
profile_list[profile_count]["roles"]["member"][
"role_name"].should.equal("my-role")
len(profile_name_list).should.equal(0)
len(profile_path_list).should.equal(0)
profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2")
profile_list = profile_dump2['list_instance_profiles_for_role_response'][
'list_instance_profiles_for_role_result']['instance_profiles']
len(profile_list).should.equal(0)
@mock_iam_deprecated()
def test_list_role_policies():
conn = boto.connect_iam()
conn.create_role("my-role")
conn.put_role_policy("my-role", "test policy", "my policy")
role = conn.list_role_policies("my-role")
role.policy_names.should.have.length_of(1)
role.policy_names[0].should.equal("test policy")
conn.put_role_policy("my-role", "test policy 2", "another policy")
role = conn.list_role_policies("my-role")
role.policy_names.should.have.length_of(2)
conn.delete_role_policy("my-role", "test policy")
role = conn.list_role_policies("my-role")
role.policy_names.should.have.length_of(1)
role.policy_names[0].should.equal("test policy 2")
with assert_raises(BotoServerError):
conn.delete_role_policy("my-role", "test policy")
@mock_iam_deprecated()
def test_put_role_policy():
conn = boto.connect_iam()
conn.create_role(
"my-role", assume_role_policy_document="some policy", path="my-path")
conn.put_role_policy("my-role", "test policy", "my policy")
policy = conn.get_role_policy(
"my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name']
policy.should.equal("test policy")
@mock_iam_deprecated()
def test_update_assume_role_policy():
conn = boto.connect_iam()
role = conn.create_role("my-role")
conn.update_assume_role_policy(role.role_name, "my-policy")
role = conn.get_role("my-role")
role.assume_role_policy_document.should.equal("my-policy")
@mock_iam
def test_create_policy():
conn = boto3.client('iam', region_name='us-east-1')
response = conn.create_policy(
PolicyName="TestCreatePolicy",
PolicyDocument='{"some":"policy"}')
response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy")
@mock_iam
def test_create_policy_versions():
conn = boto3.client('iam', region_name='us-east-1')
with assert_raises(ClientError):
conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion",
PolicyDocument='{"some":"policy"}')
conn.create_policy(
PolicyName="TestCreatePolicyVersion",
PolicyDocument='{"some":"policy"}')
version = conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion",
PolicyDocument='{"some":"policy"}')
version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})
@mock_iam
def test_get_policy_version():
conn = boto3.client('iam', region_name='us-east-1')
conn.create_policy(
PolicyName="TestGetPolicyVersion",
PolicyDocument='{"some":"policy"}')
version = conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
PolicyDocument='{"some":"policy"}')
with assert_raises(ClientError):
conn.get_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
VersionId='v2-does-not-exist')
retrieved = conn.get_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
VersionId=version.get('PolicyVersion').get('VersionId'))
retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})
@mock_iam
def test_list_policy_versions():
conn = boto3.client('iam', region_name='us-east-1')
with assert_raises(ClientError):
versions = conn.list_policy_versions(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
conn.create_policy(
PolicyName="TestListPolicyVersions",
PolicyDocument='{"some":"policy"}')
conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
PolicyDocument='{"first":"policy"}')
conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
PolicyDocument='{"second":"policy"}')
versions = conn.list_policy_versions(
PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
versions.get('Versions')[0].get('Document').should.equal({'first': 'policy'})
versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'})
@mock_iam
def test_delete_policy_version():
conn = boto3.client('iam', region_name='us-east-1')
conn.create_policy(
PolicyName="TestDeletePolicyVersion",
PolicyDocument='{"some":"policy"}')
conn.create_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
PolicyDocument='{"first":"policy"}')
with assert_raises(ClientError):
conn.delete_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
VersionId='v2-nope-this-does-not-exist')
conn.delete_policy_version(
PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
VersionId='v1')
versions = conn.list_policy_versions(
PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion")
len(versions.get('Versions')).should.equal(0)
@mock_iam_deprecated()
def test_create_user():
conn = boto.connect_iam()
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.create_user('my-user')
@mock_iam_deprecated()
def test_get_user():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.get_user('my-user')
conn.create_user('my-user')
conn.get_user('my-user')
@mock_iam_deprecated()
def test_get_current_user():
"""If no user is specific, IAM returns the current user"""
conn = boto.connect_iam()
user = conn.get_user()['get_user_response']['get_user_result']['user']
user['user_name'].should.equal('default_user')
@mock_iam()
def test_list_users():
path_prefix = '/'
max_items = 10
conn = boto3.client('iam', region_name='us-east-1')
conn.create_user(UserName='my-user')
response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items)
user = response['Users'][0]
user['UserName'].should.equal('my-user')
user['Path'].should.equal('/')
user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user')
@mock_iam()
def test_user_policies():
policy_name = 'UserManagedPolicy'
policy_document = "{'mypolicy': 'test'}"
user_name = 'my-user'
conn = boto3.client('iam', region_name='us-east-1')
conn.create_user(UserName=user_name)
conn.put_user_policy(
UserName=user_name,
PolicyName=policy_name,
PolicyDocument=policy_document
)
policy_doc = conn.get_user_policy(
UserName=user_name,
PolicyName=policy_name
)
test = policy_document in policy_doc['PolicyDocument']
test.should.equal(True)
policies = conn.list_user_policies(UserName=user_name)
len(policies['PolicyNames']).should.equal(1)
policies['PolicyNames'][0].should.equal(policy_name)
conn.delete_user_policy(
UserName=user_name,
PolicyName=policy_name
)
policies = conn.list_user_policies(UserName=user_name)
len(policies['PolicyNames']).should.equal(0)
@mock_iam_deprecated()
def test_create_login_profile():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.create_login_profile('my-user', 'my-pass')
conn.create_user('my-user')
conn.create_login_profile('my-user', 'my-pass')
with assert_raises(BotoServerError):
conn.create_login_profile('my-user', 'my-pass')
@mock_iam_deprecated()
def test_delete_login_profile():
conn = boto.connect_iam()
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.delete_login_profile('my-user')
conn.create_login_profile('my-user', 'my-pass')
conn.delete_login_profile('my-user')
@mock_iam_deprecated()
def test_create_access_key():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.create_access_key('my-user')
conn.create_user('my-user')
conn.create_access_key('my-user')
@mock_iam_deprecated()
def test_get_all_access_keys():
"""If no access keys exist there should be none in the response,
if an access key is present it should have the correct fields present"""
conn = boto.connect_iam()
conn.create_user('my-user')
response = conn.get_all_access_keys('my-user')
assert_equals(
response['list_access_keys_response'][
'list_access_keys_result']['access_key_metadata'],
[]
)
conn.create_access_key('my-user')
response = conn.get_all_access_keys('my-user')
assert_equals(
sorted(response['list_access_keys_response'][
'list_access_keys_result']['access_key_metadata'][0].keys()),
sorted(['status', 'create_date', 'user_name', 'access_key_id'])
)
@mock_iam_deprecated()
def test_delete_access_key():
conn = boto.connect_iam()
conn.create_user('my-user')
access_key_id = conn.create_access_key('my-user')['create_access_key_response'][
'create_access_key_result']['access_key']['access_key_id']
conn.delete_access_key(access_key_id, 'my-user')
@mock_iam()
def test_mfa_devices():
# Test enable device
conn = boto3.client('iam', region_name='us-east-1')
conn.create_user(UserName='my-user')
conn.enable_mfa_device(
UserName='my-user',
SerialNumber='123456789',
AuthenticationCode1='234567',
AuthenticationCode2='987654'
)
# Test list mfa devices
response = conn.list_mfa_devices(UserName='my-user')
device = response['MFADevices'][0]
device['SerialNumber'].should.equal('123456789')
# Test deactivate mfa device
conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789')
response = conn.list_mfa_devices(UserName='my-user')
len(response['MFADevices']).should.equal(0)
@mock_iam_deprecated()
def test_delete_user():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.delete_user('my-user')
conn.create_user('my-user')
conn.delete_user('my-user')
@mock_iam_deprecated()
def test_generate_credential_report():
conn = boto.connect_iam()
result = conn.generate_credential_report()
result['generate_credential_report_response'][
'generate_credential_report_result']['state'].should.equal('STARTED')
result = conn.generate_credential_report()
result['generate_credential_report_response'][
'generate_credential_report_result']['state'].should.equal('COMPLETE')
@mock_iam_deprecated()
def test_get_credential_report():
conn = boto.connect_iam()
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.get_credential_report()
result = conn.generate_credential_report()
while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE':
result = conn.generate_credential_report()
result = conn.get_credential_report()
report = base64.b64decode(result['get_credential_report_response'][
'get_credential_report_result']['content'].encode('ascii')).decode('ascii')
report.should.match(r'.*my-user.*')
@requires_boto_gte('2.39')
@mock_iam_deprecated()
def test_managed_policy():
conn = boto.connect_iam()
conn.create_policy(policy_name='UserManagedPolicy',
policy_document={'mypolicy': 'test'},
path='/mypolicy/',
description='my user managed policy')
marker = 0
aws_policies = []
while marker is not None:
response = conn.list_policies(scope='AWS', marker=marker)[
'list_policies_response']['list_policies_result']
for policy in response['policies']:
aws_policies.append(policy)
marker = response.get('marker')
set(p.name for p in aws_managed_policies).should.equal(
set(p['policy_name'] for p in aws_policies))
user_policies = conn.list_policies(scope='Local')['list_policies_response'][
'list_policies_result']['policies']
set(['UserManagedPolicy']).should.equal(
set(p['policy_name'] for p in user_policies))
marker = 0
all_policies = []
while marker is not None:
response = conn.list_policies(marker=marker)[
'list_policies_response']['list_policies_result']
for policy in response['policies']:
all_policies.append(policy)
marker = response.get('marker')
set(p['policy_name'] for p in aws_policies +
user_policies).should.equal(set(p['policy_name'] for p in all_policies))
role_name = 'my-role'
conn.create_role(role_name, assume_role_policy_document={
'policy': 'test'}, path="my-path")
for policy_name in ['AmazonElasticMapReduceRole',
'AmazonElasticMapReduceforEC2Role']:
policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name
conn.attach_role_policy(policy_arn, role_name)
rows = conn.list_policies(only_attached=True)['list_policies_response'][
'list_policies_result']['policies']
rows.should.have.length_of(2)
for x in rows:
int(x['attachment_count']).should.be.greater_than(0)
    # boto has not implemented this endpoint, but it is accessible this way
resp = conn.get_response('ListAttachedRolePolicies',
{'RoleName': role_name},
list_marker='AttachedPolicies')
resp['list_attached_role_policies_response']['list_attached_role_policies_result'][
'attached_policies'].should.have.length_of(2)
conn.detach_role_policy(
"arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole",
role_name)
rows = conn.list_policies(only_attached=True)['list_policies_response'][
'list_policies_result']['policies']
rows.should.have.length_of(1)
for x in rows:
int(x['attachment_count']).should.be.greater_than(0)
    # boto has not implemented this endpoint, but it is accessible this way
resp = conn.get_response('ListAttachedRolePolicies',
{'RoleName': role_name},
list_marker='AttachedPolicies')
resp['list_attached_role_policies_response']['list_attached_role_policies_result'][
'attached_policies'].should.have.length_of(1)
with assert_raises(BotoServerError):
conn.detach_role_policy(
"arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole",
role_name)
with assert_raises(BotoServerError):
conn.detach_role_policy(
"arn:aws:iam::aws:policy/Nonexistent", role_name)
@mock_iam
def test_boto3_create_login_profile():
conn = boto3.client('iam', region_name='us-east-1')
with assert_raises(ClientError):
conn.create_login_profile(UserName='my-user', Password='Password')
conn.create_user(UserName='my-user')
conn.create_login_profile(UserName='my-user', Password='Password')
with assert_raises(ClientError):
conn.create_login_profile(UserName='my-user', Password='Password')
@mock_iam()
def test_attach_detach_user_policy():
iam = boto3.resource('iam', region_name='us-east-1')
client = boto3.client('iam', region_name='us-east-1')
user = iam.create_user(UserName='test-user')
policy_name = 'UserAttachedPolicy'
policy = iam.create_policy(PolicyName=policy_name,
PolicyDocument='{"mypolicy": "test"}',
Path='/mypolicy/',
Description='my user attached policy')
client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn)
resp = client.list_attached_user_policies(UserName=user.name)
resp['AttachedPolicies'].should.have.length_of(1)
attached_policy = resp['AttachedPolicies'][0]
attached_policy['PolicyArn'].should.equal(policy.arn)
attached_policy['PolicyName'].should.equal(policy_name)
client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn)
resp = client.list_attached_user_policies(UserName=user.name)
resp['AttachedPolicies'].should.have.length_of(0)
@mock_iam
def test_update_access_key():
iam = boto3.resource('iam', region_name='us-east-1')
client = iam.meta.client
username = 'test-user'
iam.create_user(UserName=username)
with assert_raises(ClientError):
client.update_access_key(UserName=username,
AccessKeyId='non-existent-key',
Status='Inactive')
key = client.create_access_key(UserName=username)['AccessKey']
client.update_access_key(UserName=username,
AccessKeyId=key['AccessKeyId'],
Status='Inactive')
resp = client.list_access_keys(UserName=username)
resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive')
| apache-2.0 | 4,759,687,329,647,315,000 | 35.419118 | 116 | 0.665415 | false |
DLR-SC/prov-db-connector | provdbconnector/utils/converter.py | 1 | 3299 | from functools import reduce
from io import BufferedReader
from provdbconnector.exceptions.utils import ParseException, NoDocumentException
import six
from prov.model import ProvDocument
import logging
log = logging.getLogger(__name__)
def form_string(content):
"""
Take a string or BufferedReader as argument and transform the string into a ProvDocument
    :param content: Takes a string or BufferedReader
:return: ProvDocument
"""
if isinstance(content, ProvDocument):
return content
elif isinstance(content, BufferedReader):
content = reduce(lambda total, a: total + a, content.readlines())
if type(content) is six.binary_type:
content_str = content[0:15].decode()
if content_str.find("{") > -1:
return ProvDocument.deserialize(content=content, format='json')
if content_str.find('<?xml') > -1:
return ProvDocument.deserialize(content=content, format='xml')
elif content_str.find('document') > -1:
return ProvDocument.deserialize(content=content, format='provn')
raise ParseException("Unsupported input type {}".format(type(content)))
def to_json(document=None):
"""
Try to convert a ProvDocument into the json representation
:param document:
:type document: prov.model.ProvDocument
:return: Json string of the document
:rtype: str
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='json')
def from_json(json=None):
"""
Try to convert a json string into a document
:param json: The json str
:type json: str
:return: Prov Document
:rtype: prov.model.ProvDocument
:raise: NoDocumentException
"""
if json is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=json, format='json')
def to_provn(document=None):
"""
Try to convert a document into a provn representation
:param document: Prov document to convert
:type document: prov.model.ProvDocument
:return: The prov-n str
:rtype: str
:raise: NoDocumentException
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='provn')
def from_provn(provn_str=None):
"""
Try to convert a provn string into a ProvDocument
:param provn_str: The string to convert
:type provn_str: str
:return: The Prov document
:rtype: ProvDocument
:raises: NoDocumentException
"""
if provn_str is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=provn_str, format='provn')
def to_xml(document=None):
"""
Try to convert a document into an xml string
:param document: The ProvDocument to convert
    :type document: ProvDocument
:return: The xml string
:rtype: str
"""
if document is None:
raise NoDocumentException()
return document.serialize(format='xml')
def from_xml(xml_str=None):
"""
Try to convert a xml string into a ProvDocument
:param xml_str: The xml string
:type xml_str: str
:return: The Prov document
:rtype: ProvDocument
"""
if xml_str is None:
raise NoDocumentException()
return ProvDocument.deserialize(source=xml_str, format='xml')
| apache-2.0 | -8,106,690,724,174,002,000 | 26.491667 | 92 | 0.678387 | false |
fangxingli/hue | desktop/libs/indexer/src/indexer/smart_indexer_tests.py | 1 | 9419 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import logging
from nose.tools import assert_equal, assert_true
from nose.plugins.skip import SkipTest
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_to_group
from hadoop.pseudo_hdfs4 import is_live_cluster, shared_cluster
from indexer.file_format import ApacheCombinedFormat, RubyLogFormat, HueLogFormat
from indexer.fields import Field
from indexer.controller import CollectionManagerController
from indexer.operations import get_operator
from indexer.smart_indexer import Indexer
LOG = logging.getLogger(__name__)
def _test_fixed_type_format_generate_morphline(format_):
indexer = Indexer("test")
format_instance = format_()
morphline = indexer.generate_morphline_config("test_collection", {
"columns": [field.to_dict() for field in format_instance.fields],
"format": format_instance.get_format()
})
assert_true(isinstance(morphline, basestring))
def _test_generate_field_operation_morphline(operation_format):
fields = TestIndexer.simpleCSVFields[:]
fields[0]['operations'].append(operation_format)
indexer = Indexer("test")
morphline =indexer.generate_morphline_config("test_collection", {
"columns": fields,
"format": TestIndexer.simpleCSVFormat
})
assert_true(isinstance(morphline, basestring))
class TestIndexer():
simpleCSVString = """id,Rating,Location,Name,Time
1,5,San Francisco,Good Restaurant,8:30pm
2,4,San Mateo,Cafe,11:30am
3,3,Berkeley,Sauls,2:30pm
"""
simpleCSVFields = [
{
"name": "id",
"type": "long",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Rating",
"type": "long",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Location",
"type": "string",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Name",
"type": "string",
"operations": [],
"keep": True,
"required": False
},
{
"name": "Time",
"type": "string",
"operations": [],
"keep": True,
"required": False
}
]
simpleCSVFormat = {
'type': 'csv',
'fieldSeparator': ',',
'recordSeparator': '\n',
'hasHeader': True,
'quoteChar': '"'
}
def setUp(self):
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "indexer")
add_to_group("test")
def test_guess_csv_format(self):
stream = StringIO.StringIO(TestIndexer.simpleCSVString)
indexer = Indexer("test")
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
fields = indexer.guess_field_types({"file":{"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
# test format
expected_format = self.simpleCSVFormat
assert_equal(expected_format, guessed_format)
# test fields
expected_fields = self.simpleCSVFields
for expected, actual in zip(expected_fields, fields):
for key in ("name", "type"):
assert_equal(expected[key], actual[key])
def test_guess_format_invalid_csv_format(self):
indexer = Indexer("test")
stream = StringIO.StringIO(TestIndexer.simpleCSVString)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["fieldSeparator"] = "invalid separator"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
stream.seek(0)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["recordSeparator"] = "invalid separator"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
stream.seek(0)
guessed_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
guessed_format["quoteChar"] = "invalid quoteChar"
fields = indexer.guess_field_types({"file": {"stream": stream, "name": "test.csv"}, "format": guessed_format})['columns']
assert_equal(fields, [])
def test_generate_csv_morphline(self):
indexer = Indexer("test")
morphline =indexer.generate_morphline_config("test_collection", {
"columns": self.simpleCSVFields,
"format": self.simpleCSVFormat
})
assert_true(isinstance(morphline, basestring))
def test_generate_apache_combined_morphline(self):
_test_fixed_type_format_generate_morphline(ApacheCombinedFormat)
def test_generate_ruby_logs_morphline(self):
_test_fixed_type_format_generate_morphline(RubyLogFormat)
def test_generate_hue_log_morphline(self):
_test_fixed_type_format_generate_morphline(HueLogFormat)
def test_generate_split_operation_morphline(self):
split_dict = get_operator('split').get_default_operation()
split_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(split_dict)
def test_generate_extract_uri_components_operation_morphline(self):
extract_uri_dict = get_operator('extract_uri_components').get_default_operation()
extract_uri_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(extract_uri_dict)
def test_generate_grok_operation_morphline(self):
grok_dict = get_operator('grok').get_default_operation()
grok_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(grok_dict)
def test_generate_convert_date_morphline(self):
convert_date_dict = get_operator('convert_date').get_default_operation()
_test_generate_field_operation_morphline(convert_date_dict)
def test_generate_geo_ip_morphline(self):
geo_ip_dict = get_operator('geo_ip').get_default_operation()
geo_ip_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
_test_generate_field_operation_morphline(geo_ip_dict)
def test_generate_translate_morphline(self):
translate_dict = get_operator('translate').get_default_operation()
translate_dict['fields'] = [
Field("test_field_1", "string").to_dict(),
Field("test_field_2", "string").to_dict()
]
translate_dict['settings']['mapping'].append({"key":"key","value":"value"})
_test_generate_field_operation_morphline(translate_dict)
def test_generate_find_replace_morphline(self):
find_replace_dict = get_operator('find_replace').get_default_operation()
_test_generate_field_operation_morphline(find_replace_dict)
def test_end_to_end(self):
if not is_live_cluster():
raise SkipTest()
cluster = shared_cluster()
fs = cluster.fs
collection_name = "test_collection"
indexer = Indexer("test", fs=fs, jt=cluster.jt)
input_loc = "/tmp/test.csv"
# upload the test file to hdfs
fs.create(input_loc, data=TestIndexer.simpleCSVString, overwrite=True)
# open a filestream for the file on hdfs
stream = fs.open(input_loc)
# guess the format of the file
file_type_format = indexer.guess_format({'file': {"stream": stream, "name": "test.csv"}})
field_types = indexer.guess_field_types({"file":{"stream": stream, "name": "test.csv"}, "format": file_type_format})
format_ = field_types.copy()
format_['format'] = file_type_format
# find a field name available to use for the record's uuid
unique_field = indexer.get_unique_field(format_)
is_unique_generated = indexer.is_unique_generated(format_)
# generate morphline
morphline = indexer.generate_morphline_config(collection_name, format_, unique_field)
schema_fields = indexer.get_kept_field_list(format_['columns'])
if is_unique_generated:
schema_fields += [{"name": unique_field, "type": "string"}]
# create the collection from the specified fields
collection_manager = CollectionManagerController("test")
if collection_manager.collection_exists(collection_name):
collection_manager.delete_collection(collection_name, None)
collection_manager.create_collection(collection_name, schema_fields, unique_key_field=unique_field)
# index the file
indexer.run_morphline(collection_name, morphline, input_loc)
| apache-2.0 | -8,800,027,289,137,888,000 | 31.818815 | 125 | 0.675656 | false |
softelnet/sponge | sponge-examples-projects/sponge-examples-project-user-management/sponge/remote_api_security.py | 1 | 1141 | """
Sponge Knowledge Base
Remote API security
"""
from org.openksavi.sponge.remoteapi.server.security import User
# Simple access configuration: role -> knowledge base names regexps.
ROLES_TO_KB = { "admin":[".*"], "anonymous":["public"], "standard":["public", "account", "service"]}
class RemoteApiCanUseKnowledgeBase(Action):
def onCall(self, userContext, kbName):
return remoteApiServer.canAccessResource(ROLES_TO_KB, userContext, kbName)
def onStartup():
# Setup users. To hash a password use: echo -n username-password | shasum -a 512 | awk '{ print $1 }'
# Note that the username must be lower case.
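    # For example (illustrative only, using the documented scheme above with the
    # sample user "[email protected]" and the sample password "password"):
    #
    #     echo -n "[email protected]" | shasum -a 512 | awk '{ print $1 }'
    #
    # The resulting hex digest is what is stored as the second User(...) argument below.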
securityService = remoteApiServer.service.securityService
# Sample users have password: password
securityService.addUser(User("[email protected]", "25205e6ea66af5e682493b3ed7435e446742fbdba6cce5bd92a599df7668417d01305e8fcdf7a924861adfc3815824b8de8b595ac4ad64881957df20dc58cf2e", ["admin"]))
securityService.addUser(User("[email protected]", "eaa4e94ea0817fd4395a042236212957c5ecb764c3a3f7768b1d28f58a54a3253075cca242d8a472e4ab8231c7bc4ae76cec6392d3235cc38e93194a69e557c8", ["standard"]))
| apache-2.0 | 6,848,831,108,894,808,000 | 46.541667 | 202 | 0.777388 | false |
miracle2k/localtodo | localtodo.py | 1 | 5751 | #!/usr/bin/env python
import os
import sys
import glob
from os import path
import json
import docopt # http://pypi.python.org/pypi/docopt/
docstring = """
Manage LOCAL_TODO files.
In the default invocation, will create or move an existing LOCAL_TODO
file to a shared folder, then create a link to it in its original
place.
Usage:
localtodo.py [--to <path>] [-s <name>] [<name>]
localtodo.py --delete <name>
Options & Arguments:
-h, --help Show this screen.
<name> The project name. If not given, the name of the
containing folder will be used.
--to <path> Where to create the file. The first time you use
the script you will have to specify this. It will
subsequently be saved in a ~/.localtodo file.
-s <name>, --sub <name>
Create a sub-TODO file. The name will be appended
as a suffix, like this: "LOCAL_TODO.docs".
The default invocation will link all such existing
TODO files into the local directory.
--delete <name>
Delete an existing LOCAL_TODO file for the given
project name.
"""
CONFIG_FILE = '~/.localtodo'
TODOFILE_NAME = 'LOCAL_TODO'
SUBFILE_NAME = 'LOCAL_TODO.%s'
class ExitCodeError(Exception):
def __init__(self, code):
self.code = code
def main(argv):
args = docopt.docopt(docstring, argv[1:])
# Open config file
config = {}
if path.exists(path.expanduser(CONFIG_FILE)):
with open(path.expanduser(CONFIG_FILE), 'r') as f:
config = json.load(f)
# Determine target directory
target_directory = args['--to'] or config.get('directory', None)
if not target_directory:
print 'You need to use --to at least once to tell me where '\
'to store the todo files.'
return 1
else:
config['directory'] = target_directory
with open(path.expanduser(CONFIG_FILE), 'w') as f:
json.dump(config, f)
# Implement --delete mode
if args['--delete']:
target_file = path.join(target_directory, args['--delete'])
if args['--sub']:
target_file = "%s.%s" % (target_file, args['--sub'])
if not path.exists(target_file):
print 'Error: No such file: %s' % target_file
return 2
os.unlink(target_file)
return
# Normal sync mode
project = args['<name>']
subfile = args['--sub']
if not project:
project = path.basename(path.abspath(os.curdir))
env = {
'target_directory': target_directory,
'established_links': []
}
try:
if subfile:
sync_todo(env, project, subfile)
else:
# Sync the main file
sync_todo(env, project)
# Find all existing sub-todo files, both in remote
# and in local dir, sync them all.
for sub in findsubs(path.join(target_directory,
"%s.*" % project)):
sync_todo(env, project, sub)
for sub in findsubs(SUBFILE_NAME % '*'):
sync_todo(env, project, sub)
except ExitCodeError, e:
return e.code
# Print summery of state
print
print "I have established the following links for you:"
for source, target in set(env['established_links']):
print ' %s --> %s' % (source, target)
def findsubs(expr):
"""Helper that extracts sub-todo suffixes from a list of
glob results."""
for filename in glob.glob(expr):
basename, subname = path.splitext(path.basename(filename))
if subname:
yield subname[1:] # remove the dot
def sync_todo(env, project, subfile=None):
"""Ensure that the todo file identified by ``project`` and
    ``subfile`` is set up correctly (exists in local dir, links to
target dir).
If not the case, try to make it so.
"""
target_directory = env['target_directory']
# Determine target file
target_file = path.join(target_directory, project)
if subfile:
target_file = "%s.%s" % (target_file, subfile)
# Determine source file
if subfile:
source_file = path.join(os.curdir, SUBFILE_NAME % subfile)
else:
source_file = path.join(os.curdir, TODOFILE_NAME)
# See what we have to do
if path.exists(source_file) and path.exists(target_file):
if path.realpath(source_file) == target_file:
env['established_links'].append((source_file, target_file))
return
print '%s exists, but so does %s\nMaybe you want to call with '\
'"--delete %s" to delete the target.' % (
target_file, source_file, project)
raise ExitCodeError(2)
# If there is a local file, move it to the target
if path.exists(source_file) and not path.islink(source_file):
print 'Moving %s to %s' % (source_file, target_file)
os.rename(source_file, target_file)
elif not path.exists(target_file):
print 'Creating new empty file %s' % (target_file,)
with open(target_file, 'w'):
pass
else:
print 'Found existing file %s' % (target_file,)
# Create the link
#
# If the current file is already a link, simply replace it.
if path.islink(source_file):
os.unlink(source_file)
print "Replacing existing link %s with new target" % source_file
# To use the relative path: path.relpath(target_file, path.dirname(source_file))
os.symlink(path.abspath(target_file), source_file)
env['established_links'].append((source_file, target_file))
def run():
sys.exit(main(sys.argv) or 0)
if __name__ == '__main__':
run()
| bsd-2-clause | -2,664,509,939,556,680,700 | 30.773481 | 84 | 0.597114 | false |
authman/Python201609 | Nguyen_Ken/Assignments/Pylot assignments/time_display/routes.py | 1 | 1776 | """
Routes Configuration File
Put Routing rules here
"""
from system.core.router import routes
"""
This is where you define routes
Start by defining the default controller
Pylot will look for the index method in the default controller to handle the base route
Pylot will also automatically generate routes that resemble: '/controller/method/parameters'
    For example, if you had a products controller with an add method that took one parameter
    named id, the automatically generated url would be '/products/add/<id>'
The automatically generated routes respond to all of the http verbs (GET, POST, PUT, PATCH, DELETE)
"""
routes['default_controller'] = 'Times'
"""
You can add routes and specify their handlers as follows:
routes['VERB']['/URL/GOES/HERE'] = 'Controller#method'
Note the '#' symbol to specify the controller method to use.
Note the preceding slash in the url.
Note that the http verb must be specified in ALL CAPS.
If the http verb is not provided pylot will assume that you want the 'GET' verb.
You can also use route parameters by using the angled brackets like so:
routes['PUT']['/users/<int:id>'] = 'users#update'
Note that the parameter can have a specified type (int, string, float, path).
If the type is not specified it will default to string
Here is an example of the restful routes for users:
routes['GET']['/users'] = 'users#index'
routes['GET']['/users/new'] = 'users#new'
routes['POST']['/users'] = 'users#create'
routes['GET']['/users/<int:id>'] = 'users#show'
    routes['GET']['/users/<int:id>/edit'] = 'users#edit'
routes['PATCH']['/users/<int:id>'] = 'users#update'
routes['DELETE']['/users/<int:id>'] = 'users#destroy'
"""
| mit | -7,898,817,410,008,705,000 | 37.608696 | 103 | 0.684122 | false |
oVirt/ovirt-setup-lib | tests/commons.py | 1 | 1744 | #
# ovirt-setup-lib -- ovirt setup library
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import unittest2
class BaseTestCase(unittest2.TestCase):
_patchers = {}
def mock_base(self):
if 'Base' not in self._patchers:
self._patchers['Base'] = 'otopi.base.Base'
def mock_plugin(self):
import otopi.plugin # imported here to make mock happy
assert otopi.plugin # assertion here to make pyflakes happy
if 'Plugin' not in self._patchers:
self.mock_base()
self._patchers['Plugin'] = 'otopi.plugin.PluginBase'
def mock_context(self):
import otopi.context # imported here to make mock happy
assert otopi.context # assertion here to make pyflakes happy
if 'Context' not in self._patchers:
self.mock_base()
self._patchers['Context'] = 'otopi.context.Context'
def mock_otopi(self):
self.mock_plugin()
self.mock_context()
def apply_patch(self):
for cls_name in self._patchers:
patcher = mock.patch(self._patchers[cls_name])
setattr(self, cls_name, patcher.start())
self.addCleanup(patcher.stop)
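# Illustrative use of BaseTestCase in a concrete test module (a sketch only;
# PluginTest and its test method are hypothetical):
#
#     class PluginTest(BaseTestCase):
#         def setUp(self):
#             super(PluginTest, self).setUp()
#             self.mock_otopi()      # registers patchers for Base, Plugin and Context
#             self.apply_patch()     # starts them; self.Plugin / self.Context are now mocks
#
#         def test_something(self):
#             self.Plugin.assert_not_called()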
| apache-2.0 | -5,747,025,881,998,846,000 | 31.90566 | 74 | 0.661697 | false |
XiaodunServerGroup/medicalmooc | cms/djangoapps/contentstore/views/course.py | 1 | 55485 | # -*- coding: utf-8 -*-
#coding=utf-8
import Queue
import sys,os
from envs.common import PROJECT_ROOT
import xlrd
from student.views import do_institution_import_teacher_create_account,do_institution_import_student_create_account
import random
reload(sys)
sys.setdefaultencoding('utf8')
"""
Views related to operations on course objects
"""
import json
import random
import string # pylint: disable=W0402
import re
import bson
import socket
import urllib2
from Crypto.Cipher import DES
import base64
import hashlib
import analytics.basic
from datetime import *
from django.utils import timezone
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, Group
from django_future.csrf import ensure_csrf_cookie
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect, HttpResponse, Http404
from util.json_request import JsonResponse
from edxmako.shortcuts import render_to_response
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import (
ItemNotFoundError, InvalidLocationError)
from xmodule.modulestore import Location
from xmodule.fields import Date
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update, get_course_update_items
from contentstore.utils import (
get_lms_link_for_item, add_extra_panel_tab, remove_extra_panel_tab,
get_modulestore)
from contentstore.utils import send_mail_update
from models.settings.course_details import CourseDetails, CourseSettingsEncoder
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from util.json_request import expect_json
from .access import has_course_access
from .tabs import initialize_course_tabs
from .component import (
OPEN_ENDED_COMPONENT_TYPES, NOTE_COMPONENT_TYPES,
ADVANCED_COMPONENT_POLICY_KEY)
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from student.models import CourseEnrollment,UserProfile,UploadFileForm
from xmodule.html_module import AboutDescriptor
from xmodule.modulestore.locator import BlockUsageLocator, CourseLocator
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from contentstore import utils
from student.roles import CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff
from student import auth
from microsite_configuration import microsite
__all__ = ['course_info_handler', 'course_handler', 'course_info_update_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'calendar_settings_handler',
'calendar_common',
'calendar_common_addevent',
'calendar_common_deleteevent',
'calendar_common_updateevent',
'calendar_settings_getevents',
'textbooks_list_handler',
'textbooks_detail_handler', 'course_audit_api', 'institution_upload_teacher', 'remove_institute_teacher', 'teacher_intro_edit', 'import_student']
WENJUAN_STATUS = {
"0": "未发布",
"1": "收集中",
"2": "已结束",
"3": "暂停中",
"4": "状态未明",
"-1": "已删除",
}
def _get_locator_and_course(package_id, branch, version_guid, block_id, user, depth=0):
"""
Internal method used to calculate and return the locator and course module
for the view functions in this file.
"""
locator = BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block_id)
if not has_course_access(user, locator):
raise PermissionDenied()
course_location = loc_mapper().translate_locator_to_location(locator)
course_module = modulestore().get_item(course_location, depth=depth)
return locator, course_module
def _get_institute(curr_user):
course_org = ""
u = UserProfile.objects.get(user_id=curr_user.id)
if u.profile_role == 'in':
course_org = u.name
elif u.profile_role == 'th' and u.institute:
course_org = UserProfile.objects.get(user_id=u.institute).name
print course_org.encode('utf-8')
return course_org
# pylint: disable=unused-argument
@login_required
def course_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
The restful handler for course specific requests.
It provides the course tree with the necessary information for identifying and labeling the parts. The root
will typically be a 'course' object but may not be especially as we support modules.
GET
html: return course listing page if not given a course id
html: return html page overview for the given course if given a course id
json: return json representing the course branch's index entry as well as dag w/ all of the children
replaced w/json docs where each doc has {'_id': , 'display_name': , 'children': }
POST
json: create a course, return resulting json
descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
index entry.
PUT
json: update this course (index entry not xblock) such as repointing head, changing display name, org,
package_id, prettyid. Return same json as above.
DELETE
json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
"""
response_format = request.REQUEST.get('format', 'html')
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
return JsonResponse(_course_json(request, package_id, branch, version_guid, block))
elif request.method == 'POST': # not sure if this is only post. If one will have ids, it goes after access
return create_new_course(request)
elif not has_course_access(
request.user,
BlockUsageLocator(package_id=package_id, branch=branch, version_guid=version_guid, block_id=block)
):
raise PermissionDenied()
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'DELETE':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
elif request.method == 'GET': # assume html
if package_id is None:
return course_listing(request)
else:
return course_index(request, package_id, branch, version_guid, block)
else:
return HttpResponseNotFound()
@login_required
def _course_json(request, package_id, branch, version_guid, block):
"""
Returns a JSON overview of a course
"""
__, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user, depth=None
)
return _xmodule_json(course, course.location.course_id)
def _xmodule_json(xmodule, course_id):
"""
Returns a JSON overview of an XModule
"""
locator = loc_mapper().translate_location(
course_id, xmodule.location, published=False, add_entry_if_missing=True
)
is_container = xmodule.has_children
result = {
'display_name': xmodule.display_name,
'id': unicode(locator),
'category': xmodule.category,
'is_draft': getattr(xmodule, 'is_draft', False),
'is_container': is_container,
}
if is_container:
result['children'] = [_xmodule_json(child, course_id) for child in xmodule.get_children()]
return result
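# For reference, _xmodule_json produces nested dicts of the following shape
# (values are illustrative):
#
#     {"display_name": "Demo Course", "id": "<locator>", "category": "course",
#      "is_draft": False, "is_container": True,
#      "children": [{"display_name": "Week 1", "id": "<locator>", "category": "chapter",
#                    "is_draft": False, "is_container": True, "children": [...]}]}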
def _accessible_courses_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
courses = modulestore('direct').get_courses()
# filter out courses that we don't have access too
def course_filter(course):
"""
Get courses to which this user has access
"""
if GlobalStaff().has_user(request.user):
return course.location.course != 'templates'
return (has_course_access(request.user, course.location)
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
and course.location.course != 'templates'
)
courses = filter(course_filter, courses)
return courses
# pylint: disable=invalid-name
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
courses_list = []
course_ids = set()
user_staff_group_names = request.user.groups.filter(
Q(name__startswith='instructor_') | Q(name__startswith='staff_')
).values_list('name', flat=True)
# we can only get course_ids from role names with the new format (instructor_org/number/run or
# instructor_org.number.run but not instructor_number).
for user_staff_group_name in user_staff_group_names:
# to avoid duplication try to convert all course_id's to format with dots e.g. "edx.course.run"
if user_staff_group_name.startswith("instructor_"):
# strip starting text "instructor_"
course_id = user_staff_group_name[11:]
else:
# strip starting text "staff_"
course_id = user_staff_group_name[6:]
course_ids.add(course_id.replace('/', '.').lower())
for course_id in course_ids:
# get course_location with lowercase id
course_location = loc_mapper().translate_locator_to_location(
CourseLocator(package_id=course_id), get_course=True, lower_only=True
)
if course_location is None:
raise ItemNotFoundError(course_id)
course = modulestore('direct').get_course(course_location.course_id)
courses_list.append(course)
return courses_list
@login_required
@ensure_csrf_cookie
def course_listing(request):
"""
List all courses available to the logged in user
Try to get all courses by first reversing django groups and fallback to old method if it fails
Note: overhead of pymongo reads will increase if getting courses from django groups fails
"""
if GlobalStaff().has_user(request.user):
# user has global access so no need to get courses from django groups
courses = _accessible_courses_list(request)
else:
try:
courses = _accessible_courses_list_from_groups(request)
except ItemNotFoundError:
# user have some old groups or there was some error getting courses from django groups
# so fallback to iterating through all courses
courses = _accessible_courses_list(request)
# update location entry in "loc_mapper" for user courses (add keys 'lower_id' and 'lower_course_id')
for course in courses:
loc_mapper().create_map_entry(course.location)
def format_course_for_view(course):
"""
return tuple of the data which the view requires for each course
"""
# published = false b/c studio manipulates draft versions not b/c the course isn't pub'd
course_loc = loc_mapper().translate_location(
course.location.course_id, course.location, published=False, add_entry_if_missing=True
)
return (
course.display_name,
# note, couldn't get django reverse to work; so, wrote workaround
course_loc.url_reverse('course/', ''),
get_lms_link_for_item(course.location),
course.display_org_with_default,
course.display_number_with_default,
course.location.name
)
# questionnaire
# KEY: site(required) user(required) ctime(required format yyyy-mm-dd HH:MM) email(required) mobile
qparams = {
"site": '99999', # TODO: settings.WENJUANSITEID
"user": request.user.username,
"ctime": datetime.now().strftime("%Y-%m-%d %H:%M"),
"email": request.user.email
}
def sorted_url(params_hash={}):
pkeys = params_hash.keys()
pkeys.sort()
demd5_str = hashlib.md5("".join([params_hash[k] for k in pkeys]) + settings.WENJUAN_SECKEY).hexdigest()
return ("&".join(["".join([k, '=', v]) for k, v in params_hash.iteritems()]), demd5_str)
# demd5_qparams_str = hashlib.md5("".join([qparams[k] for k in qparams_keys]) + "9d15a674a6e621058f1ea9171413b7c0").hexdigest()
wenjuan_domain = settings.WENJUAN_DOMAIN
wenjuan_loginapi = "{}/openapi/login?{}&md5={}".format(wenjuan_domain, *sorted_url(qparams))
# get questionnaire list
qlist = []
try:
list_url = "{}/openapi/proj_list?{}&md5={}".format(wenjuan_domain, *sorted_url(qparams))
timeout = 10
socket.setdefaulttimeout(timeout)
req = urllib2.Request(list_url.replace(' ', '%20'))
# {"status": 1, "respondent_count": 0, "proj_id": "AzaYja", "ctime": "2014-08-08 15:23", "title": "测试问卷", "type": "survey"}
for wj in json.load(urllib2.urlopen(req)):
"""
list structure
[
title,
status,
reponse,
create time,
q url
result url
]
"""
qlist.append([
wj.get('title', "未知"),
WENJUAN_STATUS[str(wj.get('status', 4))],
wj.get('respondent_count', 0),
wj.get('ctime', ''),
"{}/s/{}".format(wenjuan_domain, wj.get('proj_id', '')),
"{}/openapi/basic_chart/?{}&md5={}".format(wenjuan_domain, *sorted_url({"site": '99999', "user": request.user.username,"proj_id": wj.get("proj_id", "")}))
])
except:
print "=====error===== " * 5
curr_user = User.objects.get(username=request.user)
course_org = _get_institute(curr_user)
profile = UserProfile.objects.get(user_id=curr_user.id)
# get institute teacher user
userprofile_list = UserProfile.objects.all()
user_institute_teacher_list = []
for ul in userprofile_list:
if ul.institute == str(profile.user_id) and ul.profile_role == 'th':
u = User.objects.get(id=ul.user_id)
content = {
'id': int(u.id),
'username': u.username.encode('utf8'),
'email': u.email.encode('utf8'),
'name': ul.name.encode('utf8')
}
user_institute_teacher_list.append(content)
# import student
user_student_list = []
for sl in userprofile_list:
if sl.institute == str(profile.user_id) and sl.profile_role == 'st':
s = User.objects.get(id=sl.user_id)
student_context = {
'id': int(s.id),
'username': s.username.encode('utf8'),
'email': s.email.encode('utf8'),
'name': sl.name.encode('utf8')
}
user_student_list.append(student_context)
return render_to_response('index.html', {
'courses': [format_course_for_view(c) for c in courses if not isinstance(c, ErrorDescriptor)],
'user': request.user,
'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
'course_org': course_org,
'wenjuan_link': wenjuan_loginapi,
'qlist': qlist,
'profile': profile,
'user_institute_teacher_list': user_institute_teacher_list,
'user_student_list': user_student_list
})
@login_required
@ensure_csrf_cookie
def course_index(request, package_id, branch, version_guid, block):
"""
Display an editable course overview.
org, course, name: Attributes of the Location for the item to edit
"""
locator, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user, depth=3
)
lms_link = get_lms_link_for_item(course.location)
sections = course.get_children()
return render_to_response('overview.html', {
'context_course': course,
'lms_link': lms_link,
'sections': sections,
'course_graders': json.dumps(
CourseGradingModel.fetch(locator).graders
),
'parent_locator': locator,
'new_section_category': 'chapter',
'new_subsection_category': 'sequential',
'new_unit_category': 'vertical',
'category': 'vertical'
})
@expect_json
def create_new_course(request):
"""
Create a new course.
Returns the URL for the course overview page.
"""
if not auth.has_access(request.user, CourseCreatorRole()):
raise PermissionDenied()
org = request.json.get('org')
number = request.json.get('number')
display_name = request.json.get('display_name')
course_category = request.json.get('course_category')
course_level = request.json.get('course_level')
course_price = request.json.get('course_price')
run = request.json.get('run')
try:
dest_location = Location(u'i4x', org, number, u'course', run)
except InvalidLocationError as error:
return JsonResponse({
"ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(
name=display_name, err=error.message)})
# see if the course already exists
existing_course = None
try:
existing_course = modulestore('direct').get_item(dest_location)
except ItemNotFoundError:
pass
if existing_course is not None:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization, course number, and course run. Please '
'change either organization or course number to be '
'unique.'
),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'
),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'
),
})
# dhm: this query breaks the abstraction, but I'll fix it when I do my suspended refactoring of this
# file for new locators. get_items should accept a query rather than requiring it be a legal location
course_search_location = bson.son.SON({
'_id.tag': 'i4x',
# cannot pass regex to Location constructor; thus this hack
# pylint: disable=E1101
'_id.org': re.compile(u'^{}$'.format(dest_location.org), re.IGNORECASE | re.UNICODE),
# pylint: disable=E1101
'_id.course': re.compile(u'^{}$'.format(dest_location.course), re.IGNORECASE | re.UNICODE),
'_id.category': 'course',
})
courses = modulestore().collection.find(course_search_location, fields=('_id'))
if courses.count() > 0:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization and course number. Please '
'change at least one field to be unique.'),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
})
# instantiate the CourseDescriptor and then persist it
# note: no system to pass
if display_name is None and course_category is None and course_level is None:
metadata = {}
else:
metadata = {'display_name': display_name, 'course_category': course_category, 'course_level': course_level, 'course_price': course_price}
modulestore('direct').create_and_save_xmodule(
dest_location,
metadata=metadata
)
new_course = modulestore('direct').get_item(dest_location)
# clone a default 'about' overview module as well
dest_about_location = dest_location.replace(
category='about',
name='overview'
)
overview_template = AboutDescriptor.get_template('overview.yaml')
modulestore('direct').create_and_save_xmodule(
dest_about_location,
system=new_course.system,
definition_data=overview_template.get('data')
)
initialize_course_tabs(new_course, request.user)
new_location = loc_mapper().translate_location(new_course.location.course_id, new_course.location, False, True)
# can't use auth.add_users here b/c it requires request.user to already have Instructor perms in this course
# however, we can assume that b/c this user had authority to create the course, the user can add themselves
CourseInstructorRole(new_location).add_users(request.user)
auth.add_users(request.user, CourseStaffRole(new_location), request.user)
# seed the forums
seed_permissions_roles(new_course.location.course_id)
# auto-enroll the course creator in the course so that "View Live" will
# work.
CourseEnrollment.enroll(request.user, new_course.location.course_id)
_users_assign_default_role(new_course.location)
    # Begin: set default Advanced Settings for the new course (showanswer, course_audit).
    # Other Advanced Settings keys can be added to data_json below as well.
course_location = loc_mapper().translate_locator_to_location(new_location)
course_module = get_modulestore(course_location).get_item(course_location)
data_json = {
"showanswer": "always",
"course_audit": "1"
}
CourseMetadata.update_from_json(course_module, data_json, True, request.user)
# end
return JsonResponse({'url': new_location.url_reverse("course/", "")})
def _users_assign_default_role(course_location):
"""
Assign 'Student' role to all previous users (if any) for this course
"""
enrollments = CourseEnrollment.objects.filter(course_id=course_location.course_id)
for enrollment in enrollments:
assign_default_role(course_location.course_id, enrollment.user)
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
GET
html: return html for editing the course info handouts and updates.
"""
__, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
handouts_old_location = course_module.location.replace(category='course_info', name='handouts')
handouts_locator = loc_mapper().translate_location(
course_module.location.course_id, handouts_old_location, False, True
)
update_location = course_module.location.replace(category='course_info', name='updates')
update_locator = loc_mapper().translate_location(
course_module.location.course_id, update_location, False, True
)
return render_to_response(
'course_info.html',
{
'context_course': course_module,
'updates_url': update_locator.url_reverse('course_info_update/'),
'handouts_locator': handouts_locator,
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.location) + '/'
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None,
provided_id=None):
"""
    RESTful CRUD operations on course_info updates.
    provided_id should be None if it's new (create) and an index otherwise.
GET
json: return the course info update models
POST
json: create an update
PUT or DELETE
json: change an existing update
"""
if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
return HttpResponseBadRequest("Only supports json requests")
course_location = loc_mapper().translate_locator_to_location(
CourseLocator(package_id=package_id), get_course=True
)
updates_location = course_location.replace(category='course_info', name=block)
print request.path
print 'course_location: ' , course_location
print 'updates_location:', updates_location
if provided_id == '':
provided_id = None
# check that logged in user has permissions to this item (GET shouldn't require this level?)
if not has_course_access(request.user, updates_location):
raise PermissionDenied()
if request.method == 'GET':
course_updates = get_course_updates(updates_location, provided_id)
if isinstance(course_updates, dict) and course_updates.get('error'):
return JsonResponse(get_course_updates(updates_location, provided_id), course_updates.get('status', 400))
else:
return JsonResponse(get_course_updates(updates_location, provided_id))
elif request.method == 'DELETE':
try:
return JsonResponse(delete_course_update(updates_location, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to delete",
content_type="text/plain"
)
# can be either and sometimes django is rewriting one to the other:
elif request.method in ('POST', 'PUT'):
if request.json.get('is_send_mail','false')=='true':
notice_course_update_to_student(request.json,course_location, package_id)
try:
return JsonResponse(update_course_updates(updates_location, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to save",
content_type="text/plain"
)
def notice_course_update_to_student(json,course_location,package_id):
    # send a notification email to every enrolled student
queue = Queue.Queue()
course_module = modulestore().get_item(course_location, depth=0)
sub = "课程 [" + course_module.display_name_with_default + '] 更新提醒'
try:
update_content = json['content']
update_content = "<p>感谢您参加人卫慕课"+course_module.display_name_with_default.encode("utf-8")+"课程,目前该门课程有新内容更新,具体如下:</p><p>"+"\n\n"+update_content+"\n\n"+"</p><p>为了保证您的学习进度,请尽快开始学习,"+course_module.display_name_with_default.encode("utf-8")+"课程团队竭诚为您服务。<br/>祝您学习愉快!<br/>"+course_module.display_name_with_default.encode("utf-8")+"课程团队</p>"
student_email_list = analytics.basic.enrolled_students_features(package_id.replace(".", "/"), ['email'])
print student_email_list
student_data_email_list = []
for i in student_email_list:
queue.put(i.values()[0])
for k in range(2):
threadname = 'Thread' + str(k)
send_mail_update(threadname, queue, update_content, sub)
print 'success'
# queue.join()
except:
        print 'failure'
        raise
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
Course settings for dates and about pages
GET
html: get the page
json: get the CourseDetails model
PUT
json: update the Course and About xblocks through the CourseDetails model
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
upload_asset_url = locator.url_reverse('assets/')
# see if the ORG of this course can be attributed to a 'Microsite'. In that case, the
# course about page should be editable in Studio
about_page_editable = not microsite.get_value_for_org(
course_module.location.org,
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
return render_to_response('settings.html', {
'context_course': course_module,
'course_locator': locator,
'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_module.location),
'course_image_url': utils.course_image_url(course_module),
'details_url': locator.url_reverse('/settings/details/'),
'about_page_editable': about_page_editable,
'short_description_editable': short_description_editable,
'upload_asset_url': upload_asset_url
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(
CourseDetails.fetch(locator),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else: # post or put, doesn't matter.
return JsonResponse(
CourseDetails.update_from_json(locator, request.json, request.user),
encoder=CourseSettingsEncoder
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None, grader_index=None):
"""
Course Grading policy configuration
GET
html: get the page
json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
json w/ grader_index: get the specific grader
PUT
json no grader_index: update the Course through the CourseGrading model
json w/ grader_index: create or update the specific grader (create if index out of range)
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
course_details = CourseGradingModel.fetch(locator)
return render_to_response('settings_graders.html', {
'context_course': course_module,
'course_locator': locator,
'course_details': json.dumps(course_details, cls=CourseSettingsEncoder),
'grading_url': locator.url_reverse('/settings/grading/'),
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
if grader_index is None:
return JsonResponse(
CourseGradingModel.fetch(locator),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(CourseGradingModel.fetch_grader(locator, grader_index))
elif request.method in ('POST', 'PUT'): # post or put, doesn't matter.
# None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
if grader_index is None:
return JsonResponse(
CourseGradingModel.update_from_json(locator, request.json, request.user),
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(
CourseGradingModel.update_grader_from_json(locator, request.json, request.user)
)
elif request.method == "DELETE" and grader_index is not None:
CourseGradingModel.delete_grader(locator, grader_index, request.user)
return JsonResponse()
# pylint: disable=invalid-name
def _config_course_advanced_components(request, course_module):
"""
Check to see if the user instantiated any advanced components. This
is a hack that does the following :
1) adds/removes the open ended panel tab to a course automatically
if the user has indicated that they want to edit the
combinedopendended or peergrading module
2) adds/removes the notes panel tab to a course automatically if
the user has indicated that they want the notes module enabled in
their course
"""
# TODO refactor the above into distinct advanced policy settings
filter_tabs = True # Exceptional conditions will pull this to False
if ADVANCED_COMPONENT_POLICY_KEY in request.json: # Maps tab types to components
tab_component_map = {
'open_ended': OPEN_ENDED_COMPONENT_TYPES,
'notes': NOTE_COMPONENT_TYPES,
}
# Check to see if the user instantiated any notes or open ended
# components
for tab_type in tab_component_map.keys():
component_types = tab_component_map.get(tab_type)
found_ac_type = False
for ac_type in component_types:
if ac_type in request.json[ADVANCED_COMPONENT_POLICY_KEY]:
# Add tab to the course if needed
changed, new_tabs = add_extra_panel_tab(tab_type, course_module)
# If a tab has been added to the course, then send the
# metadata along to CourseMetadata.update_from_json
if changed:
course_module.tabs = new_tabs
request.json.update({'tabs': new_tabs})
# Indicate that tabs should not be filtered out of
# the metadata
filter_tabs = False # Set this flag to avoid the tab removal code below.
found_ac_type = True #break
# If we did not find a module type in the advanced settings,
# we may need to remove the tab from the course.
if not found_ac_type: # Remove tab from the course if needed
changed, new_tabs = remove_extra_panel_tab(tab_type, course_module)
if changed:
course_module.tabs = new_tabs
request.json.update({'tabs':new_tabs})
# Indicate that tabs should *not* be filtered out of
# the metadata
filter_tabs = False
return filter_tabs
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common(request, course_id):
"""
    Render the shared course calendar page.

    GET
        html: return the course calendar page
"""
return render_to_response('calendar_common.html', { })
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_settings_handler(request, package_id=None, branch=None, version_guid=None, block=None, tag=None):
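    # Course calendar settings page: a text/html GET renders settings_calendar.html;
    # JSON requests read or update CourseMetadata (mirroring advanced_settings_handler).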
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_calendar.html', {
'package_id': package_id,
'context_course': course_module,
'advanced_dict': json.dumps(CourseMetadata.fetch(course_module)),
'advanced_settings_url': locator.url_reverse('settings/calendar')
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
# Whether or not to filter the tabs key out of the settings metadata
filter_tabs = _config_course_advanced_components(request, course_module)
try:
return JsonResponse(CourseMetadata.update_from_json(
course_module,
request.json,
filter_tabs=filter_tabs,
user=request.user,
))
except (TypeError, ValueError) as err:
return HttpResponseBadRequest(
"Incorrect setting format. {}".format(err),
content_type="text/plain"
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_addevent(request,course_id):
return JsonResponse(modulestore("course_calendar").save_event(course_id,{"title":request.GET.get("title"),"start":request.GET.get("start"),"end":request.GET.get("end")}))
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_delevent(request,course_id):
print request.GET.get("title")
print request.GET.get("start")
print request.GET.get("end")
return modulestore("course_calendar")._get_cals()
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_updateevent(request,course_id):
event_id = request.GET.get("id")
start = request.GET.get("start")
end = request.GET.get("end")
title = request.GET.get("title")
modulestore("course_calendar").update_event(course_id,event_id,{"title":title, "start": start, "end": end})
return JsonResponse({"success":1})
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_settings_getevents(request, course_id):
events_json = []
for event in modulestore("course_calendar").range_events(course_id,request.GET.get("start"),request.GET.get("end")):
events_json.append({"id":event["id"],"title":event["calendar"]["title"],"start":event["calendar"]["start"],"end":event["calendar"]["end"]})
return JsonResponse(events_json)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def calendar_common_deleteevent(request, course_id):
events_json = []
modulestore("course_calendar").delete_event(request.GET.get("delete_id"))
return JsonResponse({"success":1})
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, package_id=None, branch=None, version_guid=None, block=None, tag=None):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts. The dict can include a "unsetKeys" entry which is a list
of keys whose values to unset: i.e., revert to default
"""
locator, course_module = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_advanced.html', {
'context_course': course_module,
'advanced_dict': json.dumps(CourseMetadata.fetch(course_module)),
'advanced_settings_url': locator.url_reverse('settings/advanced')
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
# Whether or not to filter the tabs key out of the settings metadata
filter_tabs = _config_course_advanced_components(request, course_module)
try:
return JsonResponse(CourseMetadata.update_from_json(
course_module,
request.json,
filter_tabs=filter_tabs,
user=request.user,
))
except (TypeError, ValueError) as err:
return HttpResponseBadRequest(
"Incorrect setting format. {}".format(err),
content_type="text/plain"
)
class TextbookValidationError(Exception):
"An error thrown when a textbook input is invalid"
pass
def validate_textbooks_json(text):
"""
    Validate the given text as representing a list of PDF textbooks
"""
try:
textbooks = json.loads(text)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbooks, (list, tuple)):
raise TextbookValidationError("must be JSON list")
for textbook in textbooks:
validate_textbook_json(textbook)
# check specified IDs for uniqueness
all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
unique_ids = set(all_ids)
if len(all_ids) > len(unique_ids):
raise TextbookValidationError("IDs must be unique")
return textbooks
def validate_textbook_json(textbook):
"""
    Validate the given textbook (a dict or JSON string) as representing a single PDF textbook
"""
if isinstance(textbook, basestring):
try:
textbook = json.loads(textbook)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbook, dict):
raise TextbookValidationError("must be JSON object")
if not textbook.get("tab_title"):
raise TextbookValidationError("must have tab_title")
tid = unicode(textbook.get("id", ""))
if tid and not tid[0].isdigit():
raise TextbookValidationError("textbook ID must start with a digit")
return textbook
def assign_textbook_id(textbook, used_ids=()):
"""
Return an ID that can be assigned to a textbook
and doesn't match the used_ids
"""
tid = Location.clean(textbook["tab_title"])
if not tid[0].isdigit():
# stick a random digit in front
tid = random.choice(string.digits) + tid
while tid in used_ids:
# add a random ASCII character to the end
tid = tid + random.choice(string.ascii_lowercase)
return tid
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
A RESTful handler for textbook collections.
GET
html: return textbook list page (Backbone application)
json: return JSON representation of all textbooks in this course
POST
json: create a new textbook for this course
PUT
json: overwrite all textbooks in the course with the given list
"""
locator, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
store = get_modulestore(course.location)
if not "application/json" in request.META.get('HTTP_ACCEPT', 'text/html'):
# return HTML page
upload_asset_url = locator.url_reverse('assets/', '')
textbook_url = locator.url_reverse('/textbooks')
return render_to_response('textbooks.html', {
'context_course': course,
'textbooks': course.pdf_textbooks,
'upload_asset_url': upload_asset_url,
'textbook_url': textbook_url,
})
# from here on down, we know the client has requested JSON
if request.method == 'GET':
return JsonResponse(course.pdf_textbooks)
elif request.method == 'PUT':
try:
textbooks = validate_textbooks_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
tids = set(t["id"] for t in textbooks if "id" in t)
for textbook in textbooks:
if not "id" in textbook:
tid = assign_textbook_id(textbook, tids)
textbook["id"] = tid
tids.add(tid)
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append({"type": "pdf_textbooks"})
course.pdf_textbooks = textbooks
store.update_item(course, request.user.id)
return JsonResponse(course.pdf_textbooks)
elif request.method == 'POST':
# create a new textbook for the course
try:
textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if not textbook.get("id"):
tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
textbook["id"] = assign_textbook_id(textbook, tids)
existing = course.pdf_textbooks
existing.append(textbook)
course.pdf_textbooks = existing
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
tabs = course.tabs
tabs.append({"type": "pdf_textbooks"})
course.tabs = tabs
store.update_item(course, request.user.id)
resp = JsonResponse(textbook, status=201)
resp["Location"] = locator.url_reverse('textbooks', textbook["id"]).encode("utf-8")
return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, tid, tag=None, package_id=None, branch=None, version_guid=None, block=None):
"""
JSON API endpoint for manipulating a textbook via its internal ID.
Used by the Backbone application.
GET
json: return JSON representation of textbook
POST or PUT
json: update textbook based on provided information
DELETE
json: remove textbook
"""
__, course = _get_locator_and_course(
package_id, branch, version_guid, block, request.user
)
store = get_modulestore(course.location)
matching_id = [tb for tb in course.pdf_textbooks
if unicode(tb.get("id")) == unicode(tid)]
if matching_id:
textbook = matching_id[0]
else:
textbook = None
if request.method == 'GET':
if not textbook:
return JsonResponse(status=404)
return JsonResponse(textbook)
elif request.method in ('POST', 'PUT'): # can be either and sometimes
# django is rewriting one to the other
try:
new_textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
new_textbook["id"] = tid
if textbook:
i = course.pdf_textbooks.index(textbook)
new_textbooks = course.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course.pdf_textbooks[i + 1:])
course.pdf_textbooks = new_textbooks
else:
course.pdf_textbooks.append(new_textbook)
store.update_item(course, request.user.id)
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
if not textbook:
return JsonResponse(status=404)
i = course.pdf_textbooks.index(textbook)
new_textbooks = course.pdf_textbooks[0:i]
new_textbooks.extend(course.pdf_textbooks[i + 1:])
course.pdf_textbooks = new_textbooks
store.update_item(course, request.user.id)
return JsonResponse()
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
if user.is_staff:
course_creator_status = 'granted'
elif settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
@csrf_exempt
def course_audit_api(request, course_id, operation):
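    # Behaviour summary: only POST is accepted; operation 'pass' sets the
    # course_audit metadata field to 1, 'offline' sets it to 0, and any other
    # operation returns {"success": False}.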
re_json = {"success": False}
request_method = request.method
if request_method != "POST":
return JsonResponse(re_json)
    # get course location and module information
try:
course_location_info = course_id.split('.')
locator = BlockUsageLocator(package_id=course_id, branch='draft', version_guid=None, block_id=course_location_info[-1])
course_location = loc_mapper().translate_locator_to_location(locator)
course_module = get_modulestore(course_location).get_item(course_location)
instructors = CourseInstructorRole(locator).users_with_role()
if len(instructors) <= 0:
return JsonResponse(re_json)
user = instructors[0]
meta_json = {}
if operation == "pass":
meta_json["course_audit"] = 1
elif operation == "offline":
meta_json["course_audit"] = 0
else:
return JsonResponse(re_json)
re_json["success"] = True
CourseMetadata.update_from_json(course_module, meta_json, True, user)
return JsonResponse(re_json)
except:
return JsonResponse(re_json)
@csrf_exempt
def institution_upload_teacher(request):
messg=''
if request.method == 'POST':
use_id = request.GET['id']
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filename = form.cleaned_data['file']
filename_suffix = filename.name.split('.')[-1]
if filename_suffix == 'xls' or filename_suffix == 'xlsx':
f = handle_uploaded_file(filename)
os.chmod(f, 0o777)
xls_insert_into_db(request, f, use_id)
messg = '教师导入成功'
else:
messg = '上传文件要为excel格式'
else:
form = UploadFileForm()
return JsonResponse({'messg': messg})
def handle_uploaded_file(f):
f_path = PROJECT_ROOT + '/static/upload/'+f.name.encode('utf8')
with open(f_path.encode('utf8'), 'wb+') as info:
for chunk in f.chunks():
info.write(chunk)
return f_path.encode('utf8')
def xls_insert_into_db(request, xlsfile, instutition_id):
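    # Expected worksheet layout, inferred from the cell reads below: row 0 is a
    # header row; for every following row, column 0 = email, column 1 = password,
    # column 2 = username, column 3 = full name.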
wb = xlrd.open_workbook(xlsfile)
sh = wb.sheet_by_index(0)
rows = sh.nrows
def as_display_string(cell):
if cell.ctype in (2,3):
cell_value = int(cell.value)
else:
cell_value = cell.value
return str(cell_value).strip()
for i in range(1, rows):
username = sh.cell(i, 2).value
email = sh.cell(i, 0).value
password = as_display_string(sh.cell(i, 1))
name = sh.cell(i, 3).value
post_vars = {
'username': username,
'email': email,
'password': password,
'name': name
}
do_institution_import_teacher_create_account(post_vars, instutition_id)
return HttpResponseRedirect('/course')
def remove_institute_teacher(request):
institute_id = request.GET['id']
profile_user = UserProfile.objects.get(user_id=institute_id)
profile_user.institute = None
profile_user.save()
return JsonResponse('/course')
@login_required
@ensure_csrf_cookie
def teacher_intro_edit(request, id):
if request.user.id !=int(id):
raise Http404
if request.method == 'POST':
picurl = request.POST.get('picurl', '').strip()
shortbio = request.POST.get('shortbio', '')
profile = UserProfile.objects.get(user_id=id)
if picurl:
if not picurl.startswith('http://'):
picurl = 'http://' + picurl
profile.picurl = picurl
profile.shortbio = shortbio
profile.save()
else:
profile = UserProfile.objects.get(user_id=id)
if not profile.shortbio:
profile.shortbio = ""
return render_to_response('teacher_intro_edit.html', {'profile':profile})
# import_student
@csrf_exempt
def import_student(request):
messg=''
if request.method == 'POST':
use_id = request.GET['id']
print use_id
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
filename = form.cleaned_data['file']
filename_suffix = filename.name.split('.')[-1]
if filename_suffix == 'xls' or filename_suffix == 'xlsx':
f = handle_uploaded_file(filename)
os.chmod(f, 0o777)
xls_student_insert_into_db(request, f, use_id)
messg = '学生导入成功'
else:
messg = '导入文件要为excel格式'
else:
form = UploadFileForm()
return JsonResponse({'messg': messg})
def xls_student_insert_into_db(request, xlsfile, instutition_id):
wb = xlrd.open_workbook(xlsfile)
sh = wb.sheet_by_index(0)
rows = sh.nrows
def as_display_string(cell):
if cell.ctype in (2,3):
cell_value = int(cell.value)
else:
cell_value = cell.value
return str(cell_value).strip()
for i in range(1, rows):
username = sh.cell(i, 2).value
email = sh.cell(i, 0).value
password = as_display_string(sh.cell(i, 1))
name = sh.cell(i, 3).value
post_vars = {
'username': username,
'email': email,
'password': password,
'name': name
}
if len(User.objects.filter(username=post_vars['username'])) > 0:
post_vars['username'] = post_vars['username'] + str(random.randint(0,10000))
do_institution_import_student_create_account(post_vars, instutition_id)
return HttpResponseRedirect('/course')
| agpl-3.0 | 6,436,884,706,447,725,000 | 38.430714 | 338 | 0.633716 | false |
escapewindow/signingscript | src/signingscript/vendored/mozbuild/mozbuild/artifacts.py | 1 | 51263 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
'''
Fetch build artifacts from a Firefox tree.
This provides an (at-the-moment special purpose) interface to download Android
artifacts from Mozilla's Task Cluster.
This module performs the following steps:
* find a candidate hg parent revision. At one time we used the local pushlog,
which required the mozext hg extension. This isn't feasible with git, and it
is only mildly less efficient to not use the pushlog, so we don't use it even
when querying hg.
* map the candidate parent to candidate Task Cluster tasks and artifact
locations. Pushlog entries might not correspond to tasks (yet), and those
tasks might not produce the desired class of artifacts.
* fetch fresh Task Cluster artifacts and purge old artifacts, using a simple
Least Recently Used cache.
* post-process fresh artifacts, to speed future installation. In particular,
extract relevant files from Mac OS X DMG files into a friendly archive format
so we don't have to mount DMG files frequently.
This module requires certain modules be importable from the ambient Python
environment. |mach artifact| ensures these modules are available, but other
consumers will need to arrange this themselves.
'''
from __future__ import absolute_import, print_function, unicode_literals
import collections
import functools
import glob
import logging
import operator
import os
import pickle
import re
import requests
import shutil
import stat
import subprocess
import tarfile
import tempfile
import urlparse
import zipfile
import pylru
from taskgraph.util.taskcluster import (
find_task_id,
get_artifact_url,
list_artifacts,
)
from mozbuild.artifact_cache import ArtifactCache
from mozbuild.artifact_builds import JOB_CHOICES
from mozbuild.util import (
ensureParentDir,
FileAvoidWrite,
mkdir,
)
import mozinstall
from mozpack.files import (
JarFinder,
TarFinder,
)
from mozpack.mozjar import (
JarReader,
JarWriter,
)
from mozpack.packager.unpack import UnpackFinder
import mozpack.path as mozpath
# Number of candidate pushheads to cache per parent changeset.
NUM_PUSHHEADS_TO_QUERY_PER_PARENT = 50
# Number of parent changesets to consider as possible pushheads.
# There isn't really such a thing as a reasonable default here, because we don't
# know how many pushheads we'll need to look at to find a build with our artifacts,
# and we don't know how many changesets will be in each push. For now we assume
# we'll find a build in the last 50 pushes, assuming each push contains 10 changesets.
NUM_REVISIONS_TO_QUERY = 500
MAX_CACHED_TASKS = 400 # Number of pushheads to cache Task Cluster task data for.
# Downloaded artifacts are cached, and a subset of their contents extracted for
# easy installation. This is most noticeable on Mac OS X: since mounting and
# copying from DMG files is very slow, we extract the desired binaries to a
# separate archive for fast re-installation.
PROCESSED_SUFFIX = '.processed.jar'
class ArtifactJob(object):
trust_domain = 'gecko'
candidate_trees = [
'mozilla-central',
'integration/mozilla-inbound',
'releases/mozilla-beta',
]
try_tree = 'try'
# These are a subset of TEST_HARNESS_BINS in testing/mochitest/Makefile.in.
# Each item is a pair of (pattern, (src_prefix, dest_prefix), where src_prefix
# is the prefix of the pattern relevant to its location in the archive, and
# dest_prefix is the prefix to be added that will yield the final path relative
# to dist/.
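    # For example, 'bin/certutil' matches ('bin/certutil', ('bin', 'bin')): the 'bin'
    # source prefix is stripped and the 'bin' destination prefix re-added, so the
    # file lands at dist/bin/certutil in the processed archive.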
test_artifact_patterns = {
('bin/BadCertServer', ('bin', 'bin')),
('bin/GenerateOCSPResponse', ('bin', 'bin')),
('bin/OCSPStaplingServer', ('bin', 'bin')),
('bin/SymantecSanctionsServer', ('bin', 'bin')),
('bin/certutil', ('bin', 'bin')),
('bin/fileid', ('bin', 'bin')),
('bin/geckodriver', ('bin', 'bin')),
('bin/pk12util', ('bin', 'bin')),
('bin/screentopng', ('bin', 'bin')),
('bin/ssltunnel', ('bin', 'bin')),
('bin/xpcshell', ('bin', 'bin')),
('bin/plugins/gmp-*/*/*', ('bin/plugins', 'bin')),
('bin/plugins/*', ('bin/plugins', 'plugins')),
('bin/components/*.xpt', ('bin/components', 'bin/components')),
}
# We can tell our input is a test archive by this suffix, which happens to
# be the same across platforms.
_test_zip_archive_suffix = '.common.tests.zip'
_test_tar_archive_suffix = '.common.tests.tar.gz'
def __init__(self, log=None,
download_tests=True,
download_symbols=False,
download_host_bins=False,
substs=None):
self._package_re = re.compile(self.package_re)
self._tests_re = None
if download_tests:
self._tests_re = re.compile(r'public/build/target\.common\.tests\.(zip|tar\.gz)')
self._host_bins_re = None
if download_host_bins:
self._host_bins_re = re.compile(r'public/build/host/bin/(mar|mbsdiff)(.exe)?')
self._log = log
self._substs = substs
self._symbols_archive_suffix = None
if download_symbols == 'full':
self._symbols_archive_suffix = 'crashreporter-symbols-full.zip'
elif download_symbols:
self._symbols_archive_suffix = 'crashreporter-symbols.zip'
def log(self, *args, **kwargs):
if self._log:
self._log(*args, **kwargs)
def find_candidate_artifacts(self, artifacts):
# TODO: Handle multiple artifacts, taking the latest one.
tests_artifact = None
for artifact in artifacts:
name = artifact['name']
if self._package_re and self._package_re.match(name):
yield name
elif self._host_bins_re and self._host_bins_re.match(name):
yield name
elif self._tests_re and self._tests_re.match(name):
tests_artifact = name
yield name
elif self._symbols_archive_suffix and name.endswith(self._symbols_archive_suffix):
yield name
else:
self.log(logging.INFO, 'artifact',
{'name': name},
'Not yielding artifact named {name} as a candidate artifact')
if self._tests_re and not tests_artifact:
raise ValueError('Expected tests archive matching "{re}", but '
'found none!'.format(re=self._tests_re))
def process_artifact(self, filename, processed_filename):
if filename.endswith(ArtifactJob._test_zip_archive_suffix) and self._tests_re:
return self.process_tests_zip_artifact(filename, processed_filename)
if filename.endswith(ArtifactJob._test_tar_archive_suffix) and self._tests_re:
return self.process_tests_tar_artifact(filename, processed_filename)
if self._symbols_archive_suffix and filename.endswith(self._symbols_archive_suffix):
return self.process_symbols_archive(filename, processed_filename)
if self._host_bins_re:
# Turn 'HASH-mar.exe' into 'mar.exe'. `filename` is a path on disk
# without the full path to the artifact, so we must reconstruct
# that path here.
orig_basename = os.path.basename(filename).split('-', 1)[1]
if self._host_bins_re.match('public/build/host/bin/{}'.format(orig_basename)):
return self.process_host_bin(filename, processed_filename)
return self.process_package_artifact(filename, processed_filename)
def process_package_artifact(self, filename, processed_filename):
raise NotImplementedError("Subclasses must specialize process_package_artifact!")
def process_tests_zip_artifact(self, filename, processed_filename):
from mozbuild.action.test_archive import OBJDIR_TEST_FILES
added_entry = False
with JarWriter(file=processed_filename, compress_level=5) as writer:
reader = JarReader(filename)
for filename, entry in reader.entries.iteritems():
for pattern, (src_prefix, dest_prefix) in self.test_artifact_patterns:
if not mozpath.match(filename, pattern):
continue
destpath = mozpath.relpath(filename, src_prefix)
destpath = mozpath.join(dest_prefix, destpath)
self.log(logging.INFO, 'artifact',
{'destpath': destpath},
'Adding {destpath} to processed archive')
mode = entry['external_attr'] >> 16
writer.add(destpath.encode('utf-8'), reader[filename], mode=mode)
added_entry = True
break
for files_entry in OBJDIR_TEST_FILES.values():
origin_pattern = files_entry['pattern']
leaf_filename = filename
if 'dest' in files_entry:
dest = files_entry['dest']
origin_pattern = mozpath.join(dest, origin_pattern)
leaf_filename = filename[len(dest) + 1:]
if mozpath.match(filename, origin_pattern):
destpath = mozpath.join('..', files_entry['base'], leaf_filename)
mode = entry['external_attr'] >> 16
writer.add(destpath.encode('utf-8'), reader[filename], mode=mode)
if not added_entry:
raise ValueError('Archive format changed! No pattern from "{patterns}"'
'matched an archive path.'.format(
patterns=LinuxArtifactJob.test_artifact_patterns))
def process_tests_tar_artifact(self, filename, processed_filename):
from mozbuild.action.test_archive import OBJDIR_TEST_FILES
added_entry = False
with JarWriter(file=processed_filename, compress_level=5) as writer:
with tarfile.open(filename) as reader:
for filename, entry in TarFinder(filename, reader):
for pattern, (src_prefix, dest_prefix) in self.test_artifact_patterns:
if not mozpath.match(filename, pattern):
continue
destpath = mozpath.relpath(filename, src_prefix)
destpath = mozpath.join(dest_prefix, destpath)
self.log(logging.INFO, 'artifact',
{'destpath': destpath},
'Adding {destpath} to processed archive')
mode = entry.mode
writer.add(destpath.encode('utf-8'), entry.open(), mode=mode)
added_entry = True
break
for files_entry in OBJDIR_TEST_FILES.values():
origin_pattern = files_entry['pattern']
leaf_filename = filename
if 'dest' in files_entry:
dest = files_entry['dest']
origin_pattern = mozpath.join(dest, origin_pattern)
leaf_filename = filename[len(dest) + 1:]
if mozpath.match(filename, origin_pattern):
destpath = mozpath.join('..', files_entry['base'], leaf_filename)
mode = entry.mode
writer.add(destpath.encode('utf-8'), entry.open(), mode=mode)
if not added_entry:
raise ValueError('Archive format changed! No pattern from "{patterns}"'
'matched an archive path.'.format(
patterns=LinuxArtifactJob.test_artifact_patterns))
def process_symbols_archive(self, filename, processed_filename, skip_compressed=False):
with JarWriter(file=processed_filename, compress_level=5) as writer:
reader = JarReader(filename)
for filename in reader.entries:
if skip_compressed and filename.endswith('.gz'):
self.log(logging.INFO, 'artifact',
{'filename': filename},
'Skipping compressed ELF debug symbol file {filename}')
continue
destpath = mozpath.join('crashreporter-symbols', filename)
self.log(logging.INFO, 'artifact',
{'destpath': destpath},
'Adding {destpath} to processed archive')
writer.add(destpath.encode('utf-8'), reader[filename])
def process_host_bin(self, filename, processed_filename):
with JarWriter(file=processed_filename, compress_level=5) as writer:
# Turn 'HASH-mar.exe' into 'mar.exe'. `filename` is a path on disk
# without any of the path parts of the artifact, so we must inject
# the desired `host/bin` prefix here.
orig_basename = os.path.basename(filename).split('-', 1)[1]
destpath = mozpath.join('host/bin', orig_basename)
writer.add(destpath.encode('utf-8'), open(filename, 'rb'))
class AndroidArtifactJob(ArtifactJob):
package_re = r'public/build/target\.apk'
product = 'mobile'
package_artifact_patterns = {
'application.ini',
'platform.ini',
'**/*.so',
}
def process_package_artifact(self, filename, processed_filename):
# Extract all .so files into the root, which will get copied into dist/bin.
with JarWriter(file=processed_filename, compress_level=5) as writer:
for p, f in UnpackFinder(JarFinder(filename, JarReader(filename))):
if not any(mozpath.match(p, pat) for pat in self.package_artifact_patterns):
continue
dirname, basename = os.path.split(p)
self.log(logging.INFO, 'artifact',
{'basename': basename},
'Adding {basename} to processed archive')
basedir = 'bin'
if not basename.endswith('.so'):
basedir = mozpath.join('bin', dirname.lstrip('assets/'))
basename = mozpath.join(basedir, basename)
writer.add(basename.encode('utf-8'), f.open())
def process_symbols_archive(self, filename, processed_filename):
ArtifactJob.process_symbols_archive(
self, filename, processed_filename, skip_compressed=True)
if self._symbols_archive_suffix != 'crashreporter-symbols-full.zip':
return
import gzip
with JarWriter(file=processed_filename, compress_level=5) as writer:
reader = JarReader(filename)
for filename in reader.entries:
if not filename.endswith('.gz'):
continue
# Uncompress "libxul.so/D3271457813E976AE7BF5DAFBABABBFD0/libxul.so.dbg.gz"
# into "libxul.so.dbg".
#
# After running `settings append target.debug-file-search-paths $file`,
# where file=/path/to/topobjdir/dist/crashreporter-symbols,
# Android Studio's lldb (7.0.0, at least) will find the ELF debug symbol files.
#
                # There are other paths that will work but none seem more desirable. See
# https://github.com/llvm-mirror/lldb/blob/882670690ca69d9dd96b7236c620987b11894af9/source/Host/common/Symbols.cpp#L324.
basename = os.path.basename(filename).replace('.gz', '')
destpath = mozpath.join('crashreporter-symbols', basename)
self.log(logging.INFO, 'artifact',
{'destpath': destpath},
'Adding uncompressed ELF debug symbol file '
'{destpath} to processed archive')
writer.add(destpath.encode('utf-8'),
gzip.GzipFile(fileobj=reader[filename].uncompressed_data))
class LinuxArtifactJob(ArtifactJob):
package_re = r'public/build/target\.tar\.bz2'
product = 'firefox'
_package_artifact_patterns = {
'{product}/application.ini',
'{product}/crashreporter',
'{product}/dependentlibs.list',
'{product}/{product}',
'{product}/{product}-bin',
'{product}/minidump-analyzer',
'{product}/pingsender',
'{product}/platform.ini',
'{product}/plugin-container',
'{product}/updater',
'{product}/**/*.so',
}
@property
def package_artifact_patterns(self):
return {
p.format(product=self.product) for p in self._package_artifact_patterns
}
def process_package_artifact(self, filename, processed_filename):
added_entry = False
with JarWriter(file=processed_filename, compress_level=5) as writer:
with tarfile.open(filename) as reader:
for p, f in UnpackFinder(TarFinder(filename, reader)):
if not any(mozpath.match(p, pat) for pat in self.package_artifact_patterns):
continue
# We strip off the relative "firefox/" bit from the path,
# but otherwise preserve it.
destpath = mozpath.join('bin',
mozpath.relpath(p, self.product))
self.log(logging.INFO, 'artifact',
{'destpath': destpath},
'Adding {destpath} to processed archive')
writer.add(destpath.encode('utf-8'), f.open(), mode=f.mode)
added_entry = True
if not added_entry:
raise ValueError('Archive format changed! No pattern from "{patterns}" '
'matched an archive path.'.format(
patterns=LinuxArtifactJob.package_artifact_patterns))
class MacArtifactJob(ArtifactJob):
package_re = r'public/build/target\.dmg'
product = 'firefox'
# These get copied into dist/bin without the path, so "root/a/b/c" -> "dist/bin/c".
_paths_no_keep_path = ('Contents/MacOS', [
'crashreporter.app/Contents/MacOS/crashreporter',
'{product}',
'{product}-bin',
'libfreebl3.dylib',
'liblgpllibs.dylib',
# 'liblogalloc.dylib',
'libmozglue.dylib',
'libnss3.dylib',
'libnssckbi.dylib',
'libnssdbm3.dylib',
'libplugin_child_interpose.dylib',
# 'libreplace_jemalloc.dylib',
# 'libreplace_malloc.dylib',
'libmozavutil.dylib',
'libmozavcodec.dylib',
'libsoftokn3.dylib',
'pingsender',
'plugin-container.app/Contents/MacOS/plugin-container',
'updater.app/Contents/MacOS/org.mozilla.updater',
# 'xpcshell',
'XUL',
])
@property
def paths_no_keep_path(self):
root, paths = self._paths_no_keep_path
return (root, [p.format(product=self.product) for p in paths])
def process_package_artifact(self, filename, processed_filename):
tempdir = tempfile.mkdtemp()
oldcwd = os.getcwd()
try:
self.log(logging.INFO, 'artifact',
{'tempdir': tempdir},
'Unpacking DMG into {tempdir}')
if self._substs['HOST_OS_ARCH'] == 'Linux':
# This is a cross build, use hfsplus and dmg tools to extract the dmg.
os.chdir(tempdir)
with open(os.devnull, 'wb') as devnull:
subprocess.check_call([
self._substs['DMG_TOOL'],
'extract',
filename,
'extracted_img',
], stdout=devnull)
subprocess.check_call([
self._substs['HFS_TOOL'],
'extracted_img',
'extractall'
], stdout=devnull)
else:
mozinstall.install(filename, tempdir)
bundle_dirs = glob.glob(mozpath.join(tempdir, '*.app'))
if len(bundle_dirs) != 1:
raise ValueError('Expected one source bundle, found: {}'.format(bundle_dirs))
[source] = bundle_dirs
# These get copied into dist/bin with the path, so "root/a/b/c" -> "dist/bin/a/b/c".
paths_keep_path = [
('Contents/MacOS', [
'crashreporter.app/Contents/MacOS/minidump-analyzer',
]),
('Contents/Resources', [
'browser/components/libbrowsercomps.dylib',
'dependentlibs.list',
# 'firefox',
'gmp-clearkey/0.1/libclearkey.dylib',
# 'gmp-fake/1.0/libfake.dylib',
# 'gmp-fakeopenh264/1.0/libfakeopenh264.dylib',
]),
]
with JarWriter(file=processed_filename, compress_level=5) as writer:
root, paths = self.paths_no_keep_path
finder = UnpackFinder(mozpath.join(source, root))
for path in paths:
for p, f in finder.find(path):
self.log(logging.INFO, 'artifact',
{'path': p},
'Adding {path} to processed archive')
destpath = mozpath.join('bin', os.path.basename(p))
writer.add(destpath.encode('utf-8'), f, mode=f.mode)
for root, paths in paths_keep_path:
finder = UnpackFinder(mozpath.join(source, root))
for path in paths:
for p, f in finder.find(path):
self.log(logging.INFO, 'artifact',
{'path': p},
'Adding {path} to processed archive')
destpath = mozpath.join('bin', p)
writer.add(destpath.encode('utf-8'), f.open(), mode=f.mode)
finally:
os.chdir(oldcwd)
try:
shutil.rmtree(tempdir)
except (OSError, IOError):
self.log(logging.WARN, 'artifact',
{'tempdir': tempdir},
'Unable to delete {tempdir}')
pass
class WinArtifactJob(ArtifactJob):
package_re = r'public/build/target\.(zip|tar\.gz)'
product = 'firefox'
_package_artifact_patterns = {
'{product}/dependentlibs.list',
'{product}/platform.ini',
'{product}/application.ini',
'{product}/**/*.dll',
'{product}/*.exe',
'{product}/*.tlb',
}
@property
def package_artifact_patterns(self):
return {
p.format(product=self.product) for p in self._package_artifact_patterns
}
# These are a subset of TEST_HARNESS_BINS in testing/mochitest/Makefile.in.
test_artifact_patterns = {
('bin/BadCertServer.exe', ('bin', 'bin')),
('bin/GenerateOCSPResponse.exe', ('bin', 'bin')),
('bin/OCSPStaplingServer.exe', ('bin', 'bin')),
('bin/SymantecSanctionsServer.exe', ('bin', 'bin')),
('bin/certutil.exe', ('bin', 'bin')),
('bin/fileid.exe', ('bin', 'bin')),
('bin/geckodriver.exe', ('bin', 'bin')),
('bin/minidumpwriter.exe', ('bin', 'bin')),
('bin/pk12util.exe', ('bin', 'bin')),
('bin/screenshot.exe', ('bin', 'bin')),
('bin/ssltunnel.exe', ('bin', 'bin')),
('bin/xpcshell.exe', ('bin', 'bin')),
('bin/plugins/gmp-*/*/*', ('bin/plugins', 'bin')),
('bin/plugins/*', ('bin/plugins', 'plugins')),
('bin/components/*', ('bin/components', 'bin/components')),
}
def process_package_artifact(self, filename, processed_filename):
added_entry = False
with JarWriter(file=processed_filename, compress_level=5) as writer:
for p, f in UnpackFinder(JarFinder(filename, JarReader(filename))):
if not any(mozpath.match(p, pat) for pat in self.package_artifact_patterns):
continue
# strip off the relative "firefox/" bit from the path:
basename = mozpath.relpath(p, self.product)
basename = mozpath.join('bin', basename)
self.log(logging.INFO, 'artifact',
{'basename': basename},
'Adding {basename} to processed archive')
writer.add(basename.encode('utf-8'), f.open(), mode=f.mode)
added_entry = True
if not added_entry:
raise ValueError('Archive format changed! No pattern from "{patterns}"'
'matched an archive path.'.format(
                                 patterns=self.package_artifact_patterns))
class ThunderbirdMixin(object):
trust_domain = 'comm'
product = 'thunderbird'
candidate_trees = [
'comm-central',
]
try_tree = 'try-comm-central'
class LinuxThunderbirdArtifactJob(ThunderbirdMixin, LinuxArtifactJob):
pass
class MacThunderbirdArtifactJob(ThunderbirdMixin, MacArtifactJob):
_paths_no_keep_path = MacArtifactJob._paths_no_keep_path
_paths_no_keep_path[1].extend([
'libldap60.dylib',
'libldif60.dylib',
'libprldap60.dylib',
])
class WinThunderbirdArtifactJob(ThunderbirdMixin, WinArtifactJob):
pass
def startswithwhich(s, prefixes):
for prefix in prefixes:
if s.startswith(prefix):
return prefix
MOZ_JOB_DETAILS = {
j: {
'android': AndroidArtifactJob,
'linux': LinuxArtifactJob,
'macosx': MacArtifactJob,
'win': WinArtifactJob,
}[startswithwhich(j, ('android', 'linux', 'macosx', 'win'))]
for j in JOB_CHOICES
}
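# For example, a desktop job name such as 'linux64-opt' maps to LinuxArtifactJob,
# while an Android job name such as 'android-api-16-opt' maps to AndroidArtifactJob
# (JOB_CHOICES defines the full set of valid job names).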
COMM_JOB_DETAILS = {
j: {
'android': None,
'linux': LinuxThunderbirdArtifactJob,
'macosx': MacThunderbirdArtifactJob,
'win': WinThunderbirdArtifactJob,
}[startswithwhich(j, ('android', 'linux', 'macosx', 'win'))]
for j in JOB_CHOICES
}
def cachedmethod(cachefunc):
'''Decorator to wrap a class or instance method with a memoizing callable that
saves results in a (possibly shared) cache.
'''
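    # Typical use in this module (see TaskCache and PushheadCache below); cachefunc
    # returns the mapping used for memoization, and returning None disables caching
    # for that call:
    #
    #     @cachedmethod(operator.attrgetter('_cache'))
    #     def artifacts(self, tree, job, artifact_job_class, rev):
    #         ...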
def decorator(method):
def wrapper(self, *args, **kwargs):
mapping = cachefunc(self)
if mapping is None:
return method(self, *args, **kwargs)
key = (method.__name__, args, tuple(sorted(kwargs.items())))
try:
value = mapping[key]
return value
except KeyError:
pass
result = method(self, *args, **kwargs)
mapping[key] = result
return result
return functools.update_wrapper(wrapper, method)
return decorator
class CacheManager(object):
'''Maintain an LRU cache. Provide simple persistence, including support for
loading and saving the state using a "with" block. Allow clearing the cache
and printing the cache for debugging.
Provide simple logging.
'''
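    # Typical usage (as in Artifacts._pushheads_from_rev below): wrap access in a
    # "with" block so the pickled cache is loaded on entry and persisted on exit.
    #
    #     with self._pushhead_cache as pushhead_cache:
    #         pushid = pushhead_cache.parent_pushhead_id(tree, rev)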
def __init__(self, cache_dir, cache_name, cache_size, cache_callback=None,
log=None, skip_cache=False):
self._skip_cache = skip_cache
self._cache = pylru.lrucache(cache_size, callback=cache_callback)
self._cache_filename = mozpath.join(cache_dir, cache_name + '-cache.pickle')
self._log = log
mkdir(cache_dir, not_indexed=True)
def log(self, *args, **kwargs):
if self._log:
self._log(*args, **kwargs)
def load_cache(self):
if self._skip_cache:
self.log(logging.INFO, 'artifact',
{},
'Skipping cache: ignoring load_cache!')
return
try:
items = pickle.load(open(self._cache_filename, 'rb'))
for key, value in items:
self._cache[key] = value
except Exception as e:
# Corrupt cache, perhaps? Sadly, pickle raises many different
# exceptions, so it's not worth trying to be fine grained here.
# We ignore any exception, so the cache is effectively dropped.
self.log(logging.INFO, 'artifact',
{'filename': self._cache_filename, 'exception': repr(e)},
'Ignoring exception unpickling cache file {filename}: {exception}')
pass
def dump_cache(self):
if self._skip_cache:
self.log(logging.INFO, 'artifact',
{},
'Skipping cache: ignoring dump_cache!')
return
ensureParentDir(self._cache_filename)
pickle.dump(list(reversed(list(self._cache.items()))),
open(self._cache_filename, 'wb'), -1)
def clear_cache(self):
if self._skip_cache:
self.log(logging.INFO, 'artifact',
{},
'Skipping cache: ignoring clear_cache!')
return
with self:
self._cache.clear()
def __enter__(self):
self.load_cache()
return self
def __exit__(self, type, value, traceback):
self.dump_cache()
class PushheadCache(CacheManager):
'''Helps map tree/revision pairs to parent pushheads according to the pushlog.'''
def __init__(self, cache_dir, log=None, skip_cache=False):
CacheManager.__init__(self, cache_dir, 'pushhead_cache',
MAX_CACHED_TASKS, log=log, skip_cache=skip_cache)
@cachedmethod(operator.attrgetter('_cache'))
def parent_pushhead_id(self, tree, revision):
cset_url_tmpl = ('https://hg.mozilla.org/{tree}/json-pushes?'
'changeset={changeset}&version=2&tipsonly=1')
req = requests.get(cset_url_tmpl.format(tree=tree, changeset=revision),
headers={'Accept': 'application/json'})
if req.status_code not in range(200, 300):
raise ValueError
result = req.json()
[found_pushid] = result['pushes'].keys()
return int(found_pushid)
@cachedmethod(operator.attrgetter('_cache'))
def pushid_range(self, tree, start, end):
pushid_url_tmpl = ('https://hg.mozilla.org/{tree}/json-pushes?'
'startID={start}&endID={end}&version=2&tipsonly=1')
req = requests.get(pushid_url_tmpl.format(tree=tree, start=start,
end=end),
headers={'Accept': 'application/json'})
result = req.json()
return [
p['changesets'][-1] for p in result['pushes'].values()
]
class TaskCache(CacheManager):
'''Map candidate pushheads to Task Cluster task IDs and artifact URLs.'''
def __init__(self, cache_dir, log=None, skip_cache=False):
CacheManager.__init__(self, cache_dir, 'artifact_url',
MAX_CACHED_TASKS, log=log, skip_cache=skip_cache)
@cachedmethod(operator.attrgetter('_cache'))
def artifacts(self, tree, job, artifact_job_class, rev):
# Grab the second part of the repo name, which is generally how things
# are indexed. Eg: 'integration/mozilla-inbound' is indexed as
# 'mozilla-inbound'
tree = tree.split('/')[1] if '/' in tree else tree
# PGO builds are now known as "shippable" for all platforms but Android.
# For macOS and linux32 shippable builds are equivalent to opt builds and
# replace them on some trees. Additionally, we no longer produce win64
# opt builds on integration branches.
if not job.startswith('android-'):
if job.endswith('-pgo') or job in ('macosx64-opt', 'linux-opt',
'win64-opt'):
tree += '.shippable'
if job.endswith('-pgo'):
job = job.replace('-pgo', '-opt')
namespace = '{trust_domain}.v2.{tree}.revision.{rev}.{product}.{job}'.format(
trust_domain=artifact_job_class.trust_domain,
rev=rev,
tree=tree,
product=artifact_job_class.product,
job=job,
)
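        # e.g. 'gecko.v2.mozilla-central.revision.<rev>.firefox.linux64-opt' for a
        # desktop Linux opt build on mozilla-central (illustrative).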
self.log(logging.INFO, 'artifact',
{'namespace': namespace},
'Searching Taskcluster index with namespace: {namespace}')
try:
taskId = find_task_id(namespace)
except KeyError:
# Not all revisions correspond to pushes that produce the job we
# care about; and even those that do may not have completed yet.
raise ValueError(
'Task for {namespace} does not exist (yet)!'.format(namespace=namespace))
return taskId, list_artifacts(taskId)
class Artifacts(object):
'''Maintain state to efficiently fetch build artifacts from a Firefox tree.'''
def __init__(self, tree, substs, defines, job=None, log=None,
cache_dir='.', hg=None, git=None, skip_cache=False,
topsrcdir=None, download_tests=True, download_symbols=False,
download_host_bins=False):
if (hg and git) or (not hg and not git):
raise ValueError("Must provide path to exactly one of hg and git")
self._substs = substs
self._defines = defines
self._tree = tree
self._job = job or self._guess_artifact_job()
self._log = log
self._hg = hg
self._git = git
self._cache_dir = cache_dir
self._skip_cache = skip_cache
self._topsrcdir = topsrcdir
app = self._substs.get('MOZ_BUILD_APP')
job_details = COMM_JOB_DETAILS if app == 'comm/mail' else MOZ_JOB_DETAILS
try:
cls = job_details[self._job]
self._artifact_job = cls(log=self._log,
download_tests=download_tests,
download_symbols=download_symbols,
download_host_bins=download_host_bins,
substs=self._substs)
except KeyError:
self.log(logging.INFO, 'artifact',
{'job': self._job},
'Unknown job {job}')
raise KeyError("Unknown job")
self._task_cache = TaskCache(self._cache_dir, log=self._log, skip_cache=self._skip_cache)
self._artifact_cache = ArtifactCache(
self._cache_dir, log=self._log, skip_cache=self._skip_cache)
self._pushhead_cache = PushheadCache(
self._cache_dir, log=self._log, skip_cache=self._skip_cache)
def log(self, *args, **kwargs):
if self._log:
self._log(*args, **kwargs)
def _guess_artifact_job(self):
# Add the "-debug" suffix to the guessed artifact job name
# if MOZ_DEBUG is enabled.
if self._substs.get('MOZ_DEBUG'):
target_suffix = '-debug'
elif self._substs.get('MOZ_PGO'):
target_suffix = '-pgo'
else:
target_suffix = '-opt'
if self._substs.get('MOZ_BUILD_APP', '') == 'mobile/android':
if self._substs['ANDROID_CPU_ARCH'] == 'x86_64':
return 'android-x86_64' + target_suffix
if self._substs['ANDROID_CPU_ARCH'] == 'x86':
return 'android-x86' + target_suffix
if self._substs['ANDROID_CPU_ARCH'] == 'arm64-v8a':
return 'android-aarch64' + target_suffix
return 'android-api-16' + target_suffix
target_64bit = False
if self._substs['target_cpu'] == 'x86_64':
target_64bit = True
if self._defines.get('XP_LINUX', False):
return ('linux64' if target_64bit else 'linux') + target_suffix
if self._defines.get('XP_WIN', False):
if self._substs['target_cpu'] == 'aarch64':
return 'win64-aarch64' + target_suffix
return ('win64' if target_64bit else 'win32') + target_suffix
if self._defines.get('XP_MACOSX', False):
# We only produce unified builds in automation, so the target_cpu
# check is not relevant.
return 'macosx64' + target_suffix
raise Exception('Cannot determine default job for |mach artifact|!')
def _pushheads_from_rev(self, rev, count):
"""Queries hg.mozilla.org's json-pushlog for pushheads that are nearby
ancestors or `rev`. Multiple trees are queried, as the `rev` may
already have been pushed to multiple repositories. For each repository
containing `rev`, the pushhead introducing `rev` and the previous
`count` pushheads from that point are included in the output.
"""
with self._pushhead_cache as pushhead_cache:
found_pushids = {}
search_trees = self._artifact_job.candidate_trees
for tree in search_trees:
self.log(logging.INFO, 'artifact',
{'tree': tree,
'rev': rev},
'Attempting to find a pushhead containing {rev} on {tree}.')
try:
pushid = pushhead_cache.parent_pushhead_id(tree, rev)
found_pushids[tree] = pushid
except ValueError:
continue
candidate_pushheads = collections.defaultdict(list)
for tree, pushid in found_pushids.iteritems():
end = pushid
start = pushid - NUM_PUSHHEADS_TO_QUERY_PER_PARENT
self.log(logging.INFO, 'artifact',
{'tree': tree,
'pushid': pushid,
'num': NUM_PUSHHEADS_TO_QUERY_PER_PARENT},
'Retrieving the last {num} pushheads starting with id {pushid} on {tree}')
for pushhead in pushhead_cache.pushid_range(tree, start, end):
candidate_pushheads[pushhead].append(tree)
return candidate_pushheads
def _get_hg_revisions_from_git(self):
rev_list = subprocess.check_output([
self._git, 'rev-list', '--topo-order',
'--max-count={num}'.format(num=NUM_REVISIONS_TO_QUERY),
'HEAD',
], cwd=self._topsrcdir)
hg_hash_list = subprocess.check_output([
self._git, 'cinnabar', 'git2hg'
] + rev_list.splitlines(), cwd=self._topsrcdir)
zeroes = "0" * 40
hashes = []
for hg_hash in hg_hash_list.splitlines():
hg_hash = hg_hash.strip()
if not hg_hash or hg_hash == zeroes:
continue
hashes.append(hg_hash)
return hashes
def _get_recent_public_revisions(self):
"""Returns recent ancestors of the working parent that are likely to
to be known to Mozilla automation.
If we're using git, retrieves hg revisions from git-cinnabar.
"""
if self._git:
return self._get_hg_revisions_from_git()
# Mercurial updated the ordering of "last" in 4.3. We use revision
# numbers to order here to accommodate multiple versions of hg.
last_revs = subprocess.check_output([
self._hg, 'log',
'--template', '{rev}:{node}\n',
'-r', 'last(public() and ::., {num})'.format(
num=NUM_REVISIONS_TO_QUERY)
], cwd=self._topsrcdir).splitlines()
if len(last_revs) == 0:
raise Exception("""\
There are no public revisions.
This can happen if the repository is created from a bundle file and never pulled
from remote. Please run `hg pull` and build again.
see https://developer.mozilla.org/en-US/docs/Mozilla/Developer_guide/Source_Code/Mercurial/Bundles\
""")
self.log(logging.INFO, 'artifact',
{'len': len(last_revs)},
'hg suggested {len} candidate revisions')
def to_pair(line):
rev, node = line.split(':', 1)
return (int(rev), node)
pairs = map(to_pair, last_revs)
# Python's tuple sort orders by first component: here, the (local)
# revision number.
nodes = [pair[1] for pair in sorted(pairs, reverse=True)]
for node in nodes[:20]:
self.log(logging.INFO, 'artifact',
{'node': node},
'hg suggested candidate revision: {node}')
self.log(logging.INFO, 'artifact',
{'remaining': max(0, len(nodes) - 20)},
'hg suggested candidate revision: and {remaining} more')
return nodes
def _find_pushheads(self):
"""Returns an iterator of recent pushhead revisions, starting with the
working parent.
"""
last_revs = self._get_recent_public_revisions()
candidate_pushheads = self._pushheads_from_rev(last_revs[0].rstrip(),
NUM_PUSHHEADS_TO_QUERY_PER_PARENT)
count = 0
for rev in last_revs:
rev = rev.rstrip()
if not rev:
continue
if rev not in candidate_pushheads:
continue
count += 1
yield candidate_pushheads[rev], rev
if not count:
raise Exception(
'Could not find any candidate pushheads in the last {num} revisions.\n'
'Search started with {rev}, which must be known to Mozilla automation.\n\n'
'see https://developer.mozilla.org/en-US/docs/Artifact_builds'.format(
rev=last_revs[0], num=NUM_PUSHHEADS_TO_QUERY_PER_PARENT))
def find_pushhead_artifacts(self, task_cache, job, tree, pushhead):
try:
taskId, artifacts = task_cache.artifacts(
tree, job, self._artifact_job.__class__, pushhead)
except ValueError:
return None
urls = []
for artifact_name in self._artifact_job.find_candidate_artifacts(artifacts):
# We can easily extract the task ID from the URL. We can't easily
# extract the build ID; we use the .ini files embedded in the
# downloaded artifact for this.
url = get_artifact_url(taskId, artifact_name)
urls.append(url)
if urls:
self.log(logging.INFO, 'artifact',
{'pushhead': pushhead,
'tree': tree},
'Installing from remote pushhead {pushhead} on {tree}')
return urls
return None
def install_from_file(self, filename, distdir):
self.log(logging.INFO, 'artifact',
{'filename': filename},
'Installing from {filename}')
# Do we need to post-process?
processed_filename = filename + PROCESSED_SUFFIX
if self._skip_cache and os.path.exists(processed_filename):
self.log(logging.INFO, 'artifact',
{'path': processed_filename},
'Skipping cache: removing cached processed artifact {path}')
os.remove(processed_filename)
if not os.path.exists(processed_filename):
self.log(logging.INFO, 'artifact',
{'filename': filename},
'Processing contents of {filename}')
self.log(logging.INFO, 'artifact',
{'processed_filename': processed_filename},
'Writing processed {processed_filename}')
self._artifact_job.process_artifact(filename, processed_filename)
self._artifact_cache._persist_limit.register_file(processed_filename)
self.log(logging.INFO, 'artifact',
{'processed_filename': processed_filename},
'Installing from processed {processed_filename}')
        # Copy the artifact files (skipping .ini files), avoiding modification where possible.
ensureParentDir(mozpath.join(distdir, '.dummy'))
with zipfile.ZipFile(processed_filename) as zf:
for info in zf.infolist():
if info.filename.endswith('.ini'):
continue
n = mozpath.join(distdir, info.filename)
fh = FileAvoidWrite(n, mode='rb')
shutil.copyfileobj(zf.open(info), fh)
file_existed, file_updated = fh.close()
self.log(logging.INFO, 'artifact',
{'updating': 'Updating' if file_updated else 'Not updating',
'filename': n},
'{updating} {filename}')
if not file_existed or file_updated:
# Libraries and binaries may need to be marked executable,
# depending on platform.
perms = info.external_attr >> 16 # See http://stackoverflow.com/a/434689.
perms |= stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH # u+w, a+r.
os.chmod(n, perms)
return 0
def install_from_url(self, url, distdir):
self.log(logging.INFO, 'artifact',
{'url': url},
'Installing from {url}')
filename = self._artifact_cache.fetch(url)
return self.install_from_file(filename, distdir)
def _install_from_hg_pushheads(self, hg_pushheads, distdir):
"""Iterate pairs (hg_hash, {tree-set}) associating hg revision hashes
and tree-sets they are known to be in, trying to download and
install from each.
"""
urls = None
count = 0
        # with blocks handle persistence.
with self._task_cache as task_cache:
for trees, hg_hash in hg_pushheads:
for tree in trees:
count += 1
self.log(logging.INFO, 'artifact',
{'hg_hash': hg_hash,
'tree': tree},
'Trying to find artifacts for hg revision {hg_hash} on tree {tree}.')
urls = self.find_pushhead_artifacts(task_cache, self._job, tree, hg_hash)
if urls:
for url in urls:
if self.install_from_url(url, distdir):
return 1
return 0
self.log(logging.ERROR, 'artifact',
{'count': count},
'Tried {count} pushheads, no built artifacts found.')
return 1
def install_from_recent(self, distdir):
hg_pushheads = self._find_pushheads()
return self._install_from_hg_pushheads(hg_pushheads, distdir)
def install_from_revset(self, revset, distdir):
revision = None
try:
if self._hg:
revision = subprocess.check_output([self._hg, 'log', '--template', '{node}\n',
'-r', revset], cwd=self._topsrcdir).strip()
elif self._git:
revset = subprocess.check_output([
self._git, 'rev-parse', '%s^{commit}' % revset],
stderr=open(os.devnull, 'w'), cwd=self._topsrcdir).strip()
else:
# Fallback to the exception handling case from both hg and git
                raise subprocess.CalledProcessError(1, 'no hg or git checkout detected')
except subprocess.CalledProcessError:
            # If the mercurial or git commands above failed, it means the given
# revset is not known locally to the VCS. But if the revset looks
# like a complete sha1, assume it is a mercurial sha1 that hasn't
# been pulled, and use that.
if re.match(r'^[A-Fa-f0-9]{40}$', revset):
revision = revset
if revision is None and self._git:
revision = subprocess.check_output(
[self._git, 'cinnabar', 'git2hg', revset], cwd=self._topsrcdir).strip()
if revision == "0" * 40 or revision is None:
raise ValueError('revision specification must resolve to a commit known to hg')
if len(revision.split('\n')) != 1:
raise ValueError('revision specification must resolve to exactly one commit')
self.log(logging.INFO, 'artifact',
{'revset': revset,
'revision': revision},
'Will only accept artifacts from a pushhead at {revision} '
'(matched revset "{revset}").')
# Include try in our search to allow pulling from a specific push.
pushheads = [(
self._artifact_job.candidate_trees + [self._artifact_job.try_tree],
revision
)]
return self._install_from_hg_pushheads(pushheads, distdir)
def install_from_task(self, taskId, distdir):
artifacts = list_artifacts(taskId)
urls = []
for artifact_name in self._artifact_job.find_candidate_artifacts(artifacts):
# We can easily extract the task ID from the URL. We can't easily
# extract the build ID; we use the .ini files embedded in the
# downloaded artifact for this.
url = get_artifact_url(taskId, artifact_name)
urls.append(url)
if not urls:
raise ValueError(
'Task {taskId} existed, but no artifacts found!'.format(taskId=taskId))
for url in urls:
if self.install_from_url(url, distdir):
return 1
return 0
def install_from(self, source, distdir):
"""Install artifacts from a ``source`` into the given ``distdir``.
"""
if source and os.path.isfile(source):
return self.install_from_file(source, distdir)
elif source and urlparse.urlparse(source).scheme:
return self.install_from_url(source, distdir)
else:
if source is None and 'MOZ_ARTIFACT_REVISION' in os.environ:
source = os.environ['MOZ_ARTIFACT_REVISION']
if source:
return self.install_from_revset(source, distdir)
for var in (
'MOZ_ARTIFACT_TASK_%s' % self._job.upper().replace('-', '_'),
'MOZ_ARTIFACT_TASK',
):
if var in os.environ:
return self.install_from_task(os.environ[var], distdir)
return self.install_from_recent(distdir)
def clear_cache(self):
self.log(logging.INFO, 'artifact',
{},
'Deleting cached artifacts and caches.')
self._task_cache.clear_cache()
self._artifact_cache.clear_cache()
self._pushhead_cache.clear_cache()
| mpl-2.0 | -2,292,077,072,979,356,200 | 40.950082 | 136 | 0.563389 | false |
cuoretech/dowork | dowork/Model/Comment.py | 1 | 5452 | from database_config import *
from py2neo import neo4j, node
import json
# Class : Comment
# Methods:
# 1) db_init(self) - Private
# 2) getNode(self) - Returns the Comment Node
# 3) getName(self) - Returns name of Comment
# 4) setDescription(self, description) - Takes description as a string
# 5) getDescription(self) - Returns description
# 6) setContent(self, content) - Takes content in as a string
# 7) getContent(self) - Returns content as a string
# 8) setTime(self, time) - Set the time of when the post was created (in millis)
# 9) getTime(self) - Gets the time in millis
# 10) setOwner(self, owner) - owner is a User node, Owner.getNode()
# 11) getOwner(self) - Returns a User Node
# 12) setParent(self, parent) - parent is a Task, Post or Comment node
# 13) getParent(self) - Returns the parent Node (Task, Post or Comment)
# Constants:
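# Example usage (sketch; the User/Post objects and values below are
# illustrative and not part of this module):
#   comment = Comment(Name="Nice work!", Content="Looks good to me",
#                     Owner=some_user.getNode(), Parent=some_post.getNode())
#   comment.setTime("1391203200000")
#   owner_node = comment.getOwner()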
class Comment:
graph_db = None
commentInstance = None
def db_init(self):
if self.graph_db is None:
self.graph_db = neo4j.GraphDatabaseService(db_config['uri'])
#
# Function : getNode
# Arguments :
# Returns : instance Node
#
def getNode(self):
return self.commentInstance
#
# Function : Constructor
    # Arguments : Uri of Existing Comment Node OR Name of Comment
#
def __init__(self, URI=None, Name=None, Content=None, Owner=None, Parent=None):
global LBL_COMMENT
self.db_init()
temp = None
if URI is not None:
temp = neo4j.Node(URI)
elif Name is not None:
temp, = self.graph_db.create({"name": Name})
temp.add_labels(LBL_COMMENT)
else:
raise Exception("Name or URI not specified")
self.commentInstance = temp
if Content is not None:
self.commentInstance["content"] = Content
if Owner is not None:
global REL_CREATEDBY, LBL_USER
if LBL_USER in Owner.get_labels():
self.commentInstance.get_or_create_path(REL_CREATEDBY, Owner)
else:
raise Exception("The Node Provided is not a User")
if Parent is not None:
global REL_HASCOMMENT, LBL_TASK, LBL_POST, LBL_EVENT
if (LBL_TASK in Parent.get_labels()) or (LBL_POST in Parent.get_labels()):
Parent.get_or_create_path(REL_HASCOMMENT, self.commentInstance)
#
# Function : getName
# Arguments :
    # Returns : name of comment
#
def getName(self):
if self.commentInstance is not None:
return self.commentInstance["name"]
else:
return None
#
# Function : setDescription
# Arguments : (String) description
#
def setDescription(self, description):
self.commentInstance["description"] = description
#
# Function : getDescription
# Arguments :
# Returns : (String) description
#
def getDescription(self):
return self.commentInstance["description"]
#
# Function : setContent
# Arguments : String content
# Returns :
#
def setContent(self, content):
self.commentInstance["content"] = content
#
# Function : getContent
# Arguments :
# Returns : (String) content
#
def getContent(self):
return self.commentInstance["content"]
#
# Function : setTime
# Arguments : String time (in milliseconds)
# Returns :
#
def setTime(self, time):
self.commentInstance["time"] = time
#
# Function : getTime
# Arguments :
# Returns : (String) time
#
def getTime(self):
return self.commentInstance["time"]
#
# Function : setOwner
# Arguments : (User Node) owner
# Returns : a 'Path' object containing nodes and relationships used
#
def setOwner(self, owner):
        global REL_HASOWNER, LBL_USER
if LBL_USER in owner.get_labels():
return self.commentInstance.get_or_create_path(REL_HASOWNER, owner)
else:
raise Exception("The Node Provided is not a User")
#
# Function : getOwner
# Arguments :
# Returns : a Owner Node or None (if there is no node)
#
def getOwner(self):
global REL_HASOWNER
relationships = list(self.commentInstance.match_outgoing(REL_HASOWNER))
if len(relationships) != 0:
return relationships[0].end_node
else:
return None
#
# Function : setParent
# Arguments : (Task or Post or Comment Node) parent
# Returns : a 'Path' object containing nodes and relationships used
#
def setParent(self, parent):
global REL_HASCOMMENT, LBL_POST, LBL_TASK, LBL_COMMENT
if (LBL_POST in parent.get_labels()) \
or (LBL_TASK in parent.get_labels()) \
or (LBL_COMMENT in parent.get_labels()):
return parent.get_or_create_path(REL_HASCOMMENT, self.commentInstance)
else:
raise Exception("The Node Provided is not a Post or Task")
#
# Function : getParent
# Arguments :
# Returns : a Parent Node or None (if there is no node)
#
def getParent(self):
global REL_HASCOMMENT
relationships = list(self.commentInstance.match_incoming(REL_HASCOMMENT))
if len(relationships) != 0:
return relationships[0].start_node
else:
return None
| apache-2.0 | 5,440,216,274,128,202,000 | 29.458101 | 111 | 0.586574 | false |
stormrose-va/xobox | xobox/utils/loader.py | 1 | 3215 | # -*- coding: utf-8 -*-
"""
xobox.utils.loader
~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by the Stormrose Project team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import importlib
import os
from xobox.utils import filters
def detect_class_modules(mod, parent=object):
"""
Detect available class modules or packages and return a dictionary of valid
class names, referring to the module they are contained within.
:param str mod: the module or package to be scanned for classes
:param parent: the class potential candidates must be derived off
:returns: dictionary of detected classes, mapping the class name to the module name in
which the class has been detected
"""
# initialise result dictionary
result = {}
candidates = []
# get a list of all files and directories inside the module
try:
package_instance = importlib.import_module(mod)
except ImportError:
return result
pkg_file = os.path.splitext(package_instance.__file__)
if pkg_file[0][-8:] == '__init__' and pkg_file[1][1:3] == 'py':
# it's a package, so we have to look for modules
gen_dir = os.listdir(os.path.dirname(os.path.realpath(package_instance.__file__)))
# only consider modules and packages, and exclude the base module
for file_candidate in filter(filters.modules, gen_dir):
# Python files are modules; the name needs to be without file ending
if file_candidate[-3:] == '.py':
file_candidate = file_candidate[:-3]
# try if the detected package or module can be imported
try:
class_module_candidate = importlib.import_module('.'.join([mod, file_candidate]))
except ImportError:
class_module_candidate = None
# if the module or module could be imported, append it to the list of candidate modules.
if class_module_candidate:
candidates.append(class_module_candidate)
else:
candidates.append(package_instance)
# test if any of the candidates contain
# classes derived from the parent class
for candidate in candidates:
for member_candidate in filter(filters.members, dir(candidate)):
try:
if issubclass(getattr(candidate, member_candidate), parent) \
and getattr(candidate, member_candidate).__name__ != parent.__name__:
result[member_candidate] = candidate.__name__
except TypeError:
pass
# return the dictionary
return result
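# Example (sketch; 'myapp.plugins' and BasePlugin are placeholder names, not
# part of xobox):
#   classes = detect_class_modules('myapp.plugins', parent=BasePlugin)
#   # e.g. {'CsvExporter': 'myapp.plugins.csv_exporter'}
#   for name, module_name in classes.items():
#       cls = load_member(module_name, name)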
def load_member(mod, member):
"""
Load a member (function, class, ...) from a module and return it
:param str mod: the module or package name where the class should be loaded from
:param str member: the name of the member to be loaded
:returns: reference to the loaded member (i. e. class or function pointer)
"""
try:
mod = importlib.import_module(mod)
except ImportError:
return None
try:
result = getattr(mod, member)
except AttributeError:
return None
return result
| mit | -4,367,746,705,474,805,000 | 33.945652 | 100 | 0.63297 | false |
hideaki-t/sqlite-fts-python | tests/test_many.py | 1 | 3058 | from __future__ import print_function, unicode_literals
import sqlite3
import os
import tempfile
from faker import Factory
import pytest
import sqlitefts as fts
from sqlitefts import fts5
igo = pytest.importorskip('igo')
fake = Factory.create('ja_JP')
class IgoTokenizer(fts.Tokenizer):
def __init__(self, path=None):
self.tagger = igo.tagger.Tagger(path)
def tokenize(self, text):
for m in self.tagger.parse(text):
start = len(text[:m.start].encode('utf-8'))
yield m.surface, start, start + len(m.surface.encode('utf-8'))
class IgoTokenizer5(fts5.FTS5Tokenizer):
def __init__(self, path=None):
self.tagger = igo.tagger.Tagger(path)
def tokenize(self, text, flags=None):
for m in self.tagger.parse(text):
start = len(text[:m.start].encode('utf-8'))
yield m.surface, start, start + len(m.surface.encode('utf-8'))
@pytest.fixture
def conn():
f, db = tempfile.mkstemp()
try:
os.close(f)
c = sqlite3.connect(db)
create_table(c)
yield c
c.close()
finally:
os.remove(db)
@pytest.fixture
def nr():
return 10000
def create_table(c):
fts.register_tokenizer(c, 'igo', fts.make_tokenizer_module(IgoTokenizer()))
fts5.register_tokenizer(c, 'igo',
fts5.make_fts5_tokenizer(IgoTokenizer5()))
c.execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize=igo)")
c.execute("CREATE VIRTUAL TABLE fts5 USING FTS5(w, tokenize=igo)")
def test_insert_many_each(conn, nr):
with conn:
for i in range(nr):
conn.execute('INSERT INTO fts VALUES(?)', [fake.address()])
conn.execute('INSERT INTO fts5 VALUES(?)', [fake.address()])
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr
def test_insert_many_many(conn, nr):
with conn:
conn.executemany('INSERT INTO fts VALUES(?)', ([fake.address()]
for _ in range(nr)))
conn.executemany('INSERT INTO fts5 VALUES(?)', ([fake.address()]
for _ in range(nr)))
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr
def test_insert_many_use_select(conn, nr):
with conn:
conn.executemany('INSERT INTO fts VALUES(?)', ([fake.address()]
for _ in range(nr)))
conn.executemany('INSERT INTO fts5 VALUES(?)', ([fake.address()]
for _ in range(nr)))
with conn:
conn.execute('INSERT INTO fts SELECT * FROM fts')
conn.execute('INSERT INTO fts5 SELECT * FROM fts5')
assert conn.execute("SELECT COUNT(*) FROM fts").fetchall()[0][0] == nr * 2
assert conn.execute("SELECT COUNT(*) FROM fts5").fetchall()[0][0] == nr * 2
| mit | 4,999,511,117,820,486,000 | 32.604396 | 79 | 0.581099 | false |
kooksee/TIOT | test/project/src/app/proto/protocol/LightProtocol.py | 1 | 3560 | # encoding=utf-8
import binascii
import json
from twisted.internet.protocol import Protocol
class LightProtocol(Protocol):
def __init__(self):
self.ip = ''
self.port = ''
def connectionMade(self):
# import socket
#self.transport.socket._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # The moment a client connects, the server is triggered and then starts sending data in a loop
self.ip = str(self.transport.client[0])
self.port = str(self.transport.client[1])
self.factory.numProtocols += 1
print 'conn build From ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.divName = self.ip + ":" + self.port + "##" + self.__class__.__name__
self.factory.controller.add_client(self.divName, self.transport)
# import threading
# timer = threading.Timer(0, self.dataReceived, [""])
# timer.start()
return
def connectionLost(self, reason):
print 'conn lost reason --> ' + str(reason)
self.factory.numProtocols -= 1
print 'conn lost. ip:' + self.ip + ' port:' + self.port
print 'current conn num is ' + str(self.factory.numProtocols) + "\n"
self.factory.controller.del_client(self.divName)
return
def dataReceived(self, data):
# print 'recv data from ip:' + self.ip + ' port:' + self.port + ' data:' + "\n" + data
kdiv = self.factory.controller.online_session
# data = str(data)
data_hex = ''
data_hex1 = ''
if data == '1':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 01 00 00 00 00 2c'
            data_hex = str(bytearray.fromhex(data_hex))  # none
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 01 00 00 00 00 7b'
            data_hex1 = str(bytearray.fromhex(data_hex1))  # fan
print data_hex
elif data == '2':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 02 00 00 00 00 2b'
            data_hex = str(bytearray.fromhex(data_hex))  # light
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 02 00 00 00 00 7a'
            data_hex1 = str(bytearray.fromhex(data_hex1))  # light
elif data == '3':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 03 00 00 00 00 2a'
data_hex = str(bytearray.fromhex(data_hex))
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 03 00 00 00 00 79'
data_hex1 = str(bytearray.fromhex(data_hex1))
elif data == '0':
data_hex = ' 7e 00 16 10 00 00 7d 33 a2 00 40 71 54 0a ff fe 00 00 01 00 00 00 00 00 00 00 2d'
data_hex = str(bytearray.fromhex(data_hex))
print data_hex
data_hex1 = '7e 00 16 10 00 00 7d 33 a2 00 40 71 53 bc ff fe 00 00 01 00 00 00 00 00 00 00 7c'
data_hex1 = str(bytearray.fromhex(data_hex1))
for div in kdiv:
if div == self.divName:
print "设备" + div + "正在把数据-->"
for div in kdiv:
# print div.split("##")[-1]," ",self.__class__.__name__
if div.split("##")[-1] == self.__class__.__name__:
kdiv[div].write(data_hex)
kdiv[div].write(data_hex1)
print div
print "传递给:" + div
print "\n"
return
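# Minimal sketch of the controller interface assumed above (add_client,
# del_client and online_session are accessed via self.factory.controller;
# this stub is illustrative and not part of the original project):
# class Controller(object):
#     def __init__(self):
#         self.online_session = {}
#     def add_client(self, name, transport):
#         self.online_session[name] = transport
#     def del_client(self, name):
#         self.online_session.pop(name, None)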
| gpl-2.0 | -6,347,997,613,682,692,000 | 35.125 | 106 | 0.563437 | false |
manuvarkey/cmbautomiser | cmbautomiser/openpyxl/descriptors/excel.py | 1 | 2252 | from __future__ import absolute_import
#copyright openpyxl 2010-2015
"""
Excel specific descriptors
"""
from openpyxl.xml.constants import REL_NS
from openpyxl.compat import safe_string
from openpyxl.xml.functions import Element
from . import (
MatchPattern,
MinMax,
Integer,
String,
Typed,
Sequence,
)
from .serialisable import Serialisable
from openpyxl.utils.cell import RANGE_EXPR
class HexBinary(MatchPattern):
pattern = "[0-9a-fA-F]+$"
class UniversalMeasure(MatchPattern):
pattern = r"[0-9]+(\.[0-9]+)?(mm|cm|in|pt|pc|pi)"
class TextPoint(MinMax):
"""
Size in hundredths of points.
In theory other units of measurement can be used but these are unbounded
"""
expected_type = int
min = -400000
max = 400000
Coordinate = Integer
class Percentage(MatchPattern):
pattern = r"((100)|([0-9][0-9]?))(\.[0-9][0-9]?)?%"
class Extension(Serialisable):
uri = String()
def __init__(self,
uri=None,
):
self.uri = uri
class ExtensionList(Serialisable):
ext = Sequence(expected_type=Extension)
def __init__(self,
ext=(),
):
self.ext = ext
class Relation(String):
namespace = REL_NS
allow_none = True
class Base64Binary(MatchPattern):
# http://www.w3.org/TR/xmlschema11-2/#nt-Base64Binary
pattern = "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$"
class Guid(MatchPattern):
# https://msdn.microsoft.com/en-us/library/dd946381(v=office.12).aspx
pattern = r"{[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}\}"
class CellRange(MatchPattern):
pattern = r"^[$]?([A-Za-z]{1,3})[$]?(\d+)(:[$]?([A-Za-z]{1,3})[$]?(\d+)?)?$|^[A-Za-z]{1,3}:[A-Za-z]{1,3}$"
allow_none = True
def __set__(self, instance, value):
if value is not None:
value = value.upper()
super(CellRange, self).__set__(instance, value)
def _explicit_none(tagname, value, namespace=None):
"""
Override serialisation because explicit none required
"""
if namespace is not None:
tagname = "{%s}%s" % (namespace, tagname)
return Element(tagname, val=safe_string(value))
| gpl-3.0 | -4,408,916,376,172,166,000 | 20.245283 | 110 | 0.596803 | false |
abingham/yapga | setup.py | 1 | 1127 | import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='yapga',
version='1',
packages=find_packages(),
# metadata for upload to PyPI
author='Austin Bingham',
author_email='[email protected]',
description="Yet Another Python Gerrit API",
license='MIT',
keywords='gerrit',
url='http://github.com/abingham/yapga',
# download_url = '',
long_description='An API for working with Gerrit '
'from Python via the REST API.',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
platforms='any',
setup_requires=[],
install_requires=[
'baker',
'matplotlib',
# 'nltk', <-- This doesn't work right now for python3.
'numpy',
],
entry_points={
'console_scripts': [
'yapga = yapga.app.main:main',
],
},
)
| mit | 5,641,950,641,626,381,000 | 25.209302 | 63 | 0.583851 | false |
conradoplg/navi | libnavi/model/note.py | 1 | 1196 | from pubsub import pub
class Note(object):
def __init__(self, name, path):
self.name = name
self.path = path
def open(self, create=False):
self.text = u''
if self.path:
try:
with self.path.open('r') as f:
#TODO: detect encoding
self.text = f.read().decode('utf-8')
except EnvironmentError:
#TODO: add nicer message
if not create:
raise
except UnicodeDecodeError:
#TODO: add nicer message
raise
pub.sendMessage('note.opened', note=self)
def save(self, text):
self.text = text
if self.path:
try:
with self.path.open('w') as f:
f.write(text.encode('utf-8'))
except EnvironmentError:
#TODO: add nicer message
raise
except UnicodeEncodeError:
#TODO: add nicer message
raise
pub.sendMessage('note.saved', note=self)
def close(self):
pub.sendMessage('note.closed', note=self) | mit | 5,750,002,080,750,410,000 | 29.692308 | 56 | 0.474916 | false |
Fokko/incubator-airflow | airflow/sensors/external_task_sensor.py | 1 | 7096 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.state import State
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a different DAG or a task in a different DAG to complete for a
specific execution_date
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: str
:param external_task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:type external_task_id: str or None
:param allowed_states: list of allowed states, default is ``['success']``
:type allowed_states: list
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task or DAG.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: datetime.timedelta
:param execution_date_fn: function that receives the current execution date
and returns the desired execution dates to query. Either execution_delta
or execution_date_fn can be passed to ExternalTaskSensor, but not both.
:type execution_date_fn: callable
:param check_existence: Set to `True` to check if the external task exists (when
external_task_id is not None) or check if the DAG to wait for exists (when
external_task_id is None), and immediately cease waiting if the external task
or DAG does not exist (default value: False).
:type check_existence: bool
"""
template_fields = ['external_dag_id', 'external_task_id']
ui_color = '#19647e'
@apply_defaults
def __init__(self,
external_dag_id,
external_task_id=None,
allowed_states=None,
execution_delta=None,
execution_date_fn=None,
check_existence=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.allowed_states = allowed_states or [State.SUCCESS]
if external_task_id:
if not set(self.allowed_states) <= set(State.task_states):
raise ValueError(
'Valid values for `allowed_states` '
'when `external_task_id` is not `None`: {}'.format(State.task_states)
)
else:
if not set(self.allowed_states) <= set(State.dag_states):
raise ValueError(
'Valid values for `allowed_states` '
'when `external_task_id` is `None`: {}'.format(State.dag_states)
)
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
'Only one of `execution_delta` or `execution_date_fn` may '
'be provided to ExternalTaskSensor; not both.')
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
self.check_existence = check_existence
# we only check the existence for the first time.
self.has_checked_existence = False
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self.execution_date_fn(context['execution_date'])
else:
dttm = context['execution_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(
[datetime.isoformat() for datetime in dttm_filter])
self.log.info(
'Poking for %s.%s on %s ... ',
self.external_dag_id, self.external_task_id, serialized_dttm_filter
)
DM = DagModel
TI = TaskInstance
DR = DagRun
# we only do the check for 1st time, no need for subsequent poke
if self.check_existence and not self.has_checked_existence:
dag_to_wait = session.query(DM).filter(
DM.dag_id == self.external_dag_id
).first()
if not dag_to_wait:
raise AirflowException('The external DAG '
'{} does not exist.'.format(self.external_dag_id))
else:
if not os.path.exists(dag_to_wait.fileloc):
raise AirflowException('The external DAG '
'{} was deleted.'.format(self.external_dag_id))
if self.external_task_id:
refreshed_dag_info = DagBag(dag_to_wait.fileloc).get_dag(self.external_dag_id)
if not refreshed_dag_info.has_task(self.external_task_id):
raise AirflowException('The external task'
'{} in DAG {} does not exist.'.format(self.external_task_id,
self.external_dag_id))
self.has_checked_existence = True
if self.external_task_id:
# .count() is inefficient
count = session.query(func.count()).filter(
TI.dag_id == self.external_dag_id,
TI.task_id == self.external_task_id,
TI.state.in_(self.allowed_states),
TI.execution_date.in_(dttm_filter),
).scalar()
else:
# .count() is inefficient
count = session.query(func.count()).filter(
DR.dag_id == self.external_dag_id,
DR.state.in_(self.allowed_states),
DR.execution_date.in_(dttm_filter),
).scalar()
session.commit()
return count == len(dttm_filter)
| apache-2.0 | 4,054,005,438,354,515,500 | 42.533742 | 103 | 0.609076 | false |
stefanwebb/tensorflow-models | tensorflow_models/models/emvb_debug2.py | 1 | 9483 | # MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import math
import tensorflow_models as tf_models
def create_placeholders(settings):
x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples')
z = tf.placeholder(tf.float32, shape=tf_models.latentshape(settings), name='codes')
return x, z
def create_prior(settings):
temperature = 0.5
prior_prob = settings['prior_prob']
dist_prior = tf.contrib.distributions.RelaxedBernoulli(temperature, probs=prior_prob)
return tf.identity(tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32) * 2. - 1., name='p_z/sample')
def create_encoder(settings, reuse=True):
encoder_network = settings['architecture']['encoder']['fn']
temperature = 2./3.
x_placeholder = tf_models.samples_placeholder()
assert(not x_placeholder is None)
noise = tf.random_normal(tf_models.noiseshape(settings), 0, 1, dtype=tf.float32)
with tf.variable_scope('encoder', reuse=reuse):
logits_z = encoder_network(settings, x_placeholder, noise, is_training=False)
dist_z_given_x = tf.contrib.distributions.RelaxedBernoulli(temperature, logits=logits_z)
encoder = tf.identity(tf.cast(dist_z_given_x.sample(), dtype=tf.float32) * 2. - 1., name='q_z_given_x_eps/sample')
return encoder
def create_decoder(settings, reuse=True):
decoder_network = settings['architecture']['decoder']['fn']
z_placeholder = tf_models.codes_placeholder()
assert(not z_placeholder is None)
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z_placeholder, is_training=False)
#dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=logits_x, dtype=tf.float32)
#decoder = tf.identity(dist_x_given_z.sample(), name='p_x_given_z/sample')
decoder = tf.identity(tf.nn.sigmoid(logits_x), name='p_x_given_z/sample')
return decoder
def create_probs(settings, inputs, is_training, reuse=False):
temperature = 2./3.
encoder_network = settings['architecture']['encoder']['fn']
decoder_network = settings['architecture']['decoder']['fn']
critic_network = settings['architecture']['critic']['fn']
discriminator_network = settings['architecture']['discriminator']['fn']
# The noise is distributed i.i.d. N(0, 1)
noise = tf.random_normal(tf_models.noiseshape(settings), 0, 1, dtype=tf.float32)
# Create a tiled version of the inputs for adaptive contrast
inputs_ac = tf.tile(tf.expand_dims(tf_models.flatten(inputs), axis=0), multiples=[settings['ac_size'],1,1])
noise_ac = tf.random_normal((settings['ac_size'], settings['batch_size'], settings['noise_dimension']), 0, 1, dtype=tf.float32)
ac_batchshape = tf_models.batchshape(settings)
ac_batchshape[0] *= settings['ac_size']
#print(ac_batchshape)
#raise Exception()
# Use black-box inference network to sample z, given inputs and noise
with tf.variable_scope('encoder', reuse=reuse):
logits_z = encoder_network(settings, inputs, noise, is_training=is_training)
tf.get_variable_scope().reuse_variables()
logits_z_ac = encoder_network(settings, tf.reshape(inputs_ac, ac_batchshape), tf.reshape(noise_ac, (settings['ac_size']*settings['batch_size'], -1)), is_training=is_training)
#logits_z_ac = tf.reduce_mean(tf.reshape(logits_z_ac, (settings['ac_size'], settings['batch_size'], -1)), 0)
logits_z_ac = tf.reduce_logsumexp(tf.reshape(logits_z_ac, (settings['ac_size'], settings['batch_size'], -1)), 0) - tf.log(tf.constant(settings['ac_size'], dtype=tf.float32))
dist_z_given_x_ac = tf.contrib.distributions.Logistic(loc=logits_z_ac/temperature, scale=tf.constant(1./temperature, shape=logits_z_ac.shape))
logits_sample_ac = tf.identity(tf.cast(dist_z_given_x_ac.sample(), dtype=tf.float32))
z_sample_ac = tf.identity(tf.sigmoid(logits_sample_ac) * 2. - 1.)
dist_z_given_x = tf.contrib.distributions.Logistic(loc=logits_z/temperature, scale=tf.constant(1./temperature, shape=logits_z.shape))
logits_sample = tf.cast(dist_z_given_x.sample(), dtype=tf.float32)
z_sample = tf.sigmoid(logits_sample) * 2. - 1.
dist_prior_ac = tf.contrib.distributions.Logistic(loc=0., scale=1./temperature)
sample_prior_ac = tf.sigmoid(tf.cast(dist_prior_ac.sample(sample_shape=(settings['batch_size'], settings['latent_dimension'])), dtype=tf.float32))*2. - 1.
sample_for_discr = tf.identity(tf.sigmoid(logits_sample - logits_z_ac/temperature)*2. - 1., name='z/sample')
# Prior
temperature_prior = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature_prior, scale=1./temperature_prior)
logits_prior = tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32)
z_prior = tf.identity(tf.sigmoid(logits_prior)*2. - 1., name='z/prior')
# Use generator to determine distribution of reconstructed input
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z_sample, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
# Log likelihood of reconstructed inputs
lg_p_x_given_z = tf.identity(tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(inputs)), 1), name='p_x_given_z/log_prob')
lg_r_alpha = tf.identity(tf.reduce_sum(dist_z_given_x_ac.log_prob(logits_sample), 1), name='r_alpha/log_prob')
# Form interpolated variable
eps = tf.random_uniform([settings['batch_size'], 1], minval=0., maxval=1.)
#z_inter = eps*z_prior + (1. - eps)*z_sample
z_inter = tf.identity(eps*sample_for_discr + (1. - eps)*sample_prior_ac, name='z/interpolated')
#logits_inter = tf.identity(tf_models.safe_log(z_inter) - tf_models.safe_log(1. - z_inter), name='z/interpolated')
#print(logits_prior.shape, logits_sample.shape, logits_inter.shape)
#raise Exception()
# Critic D(x, z) for EMVB learning
with tf.variable_scope('critic', reuse=reuse):
critic = tf.identity(critic_network(settings, inputs, sample_for_discr, is_training=is_training), name='generator')
tf.get_variable_scope().reuse_variables()
prior_critic = tf.identity(critic_network(settings, inputs, sample_prior_ac, is_training=is_training), name='prior')
inter_critic = tf.identity(critic_network(settings, inputs, z_inter, is_training=is_training), name='inter')
# Discriminator T(x, z) for AVB learning
with tf.variable_scope('discriminator', reuse=reuse):
discriminator = tf.identity(discriminator_network(settings, inputs, sample_for_discr, is_training=is_training), name='generator')
tf.get_variable_scope().reuse_variables()
prior_discriminator = tf.identity(discriminator_network(settings, inputs, sample_prior_ac, is_training=is_training), name='prior')
x = tf.identity(inputs, name='x')
#print('inputs.name', inputs.name)
lg_p_z = tf.identity(tf.reduce_sum(dist_prior.log_prob(logits_sample), 1), name='p_z/log_prob')
return lg_p_x_given_z, critic, prior_critic, inter_critic, z_inter, discriminator, prior_discriminator, lg_p_z, lg_r_alpha
def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
real_z = tf.sigmoid(z)*2. - 1.
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, real_z, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)
def lg_prior(z, settings, reuse=True, is_training=False):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
return tf.reduce_sum(tf_models.flatten(dist_prior.log_prob(z)), 1)
def sample_prior(settings):
temperature = 0.5
prior_prob = settings['prior_prob']
logits_prior_prob = math.log(prior_prob / (1. - prior_prob))
dist_prior = tf.contrib.distributions.Logistic(loc=logits_prior_prob/temperature, scale=1./temperature)
return tf.identity(tf.cast(dist_prior.sample(sample_shape=tf_models.latentshape(settings)), dtype=tf.float32), name='p_z/sample')
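# Note on the sampling scheme used above (stated for reference; a sketch of the
# binary-Concrete/relaxed-Bernoulli reparameterisation): sampling
# L ~ Logistic(loc=logits/t, scale=1/t) is equivalent to computing
# (logits + Logistic(0, 1)) / t, so sigmoid(L) is a Concrete relaxation of
# Bernoulli(sigmoid(logits)) with temperature t, and sigmoid(L) * 2 - 1 rescales
# the sample to [-1, 1] as expected by the decoder and critic networks.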
| mit | 9,025,626,535,856,672,000 | 49.71123 | 176 | 0.735949 | false |
ingadhoc/stock | stock_ux/models/stock_warehouse_orderpoint.py | 1 | 2683 | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api
class StockWarehouseOrderpoint(models.Model):
_name = 'stock.warehouse.orderpoint'
_inherit = ['stock.warehouse.orderpoint', 'mail.thread']
rotation_stdev = fields.Float(
compute='_compute_rotation',
help="Desvío estandar de las cantidades entregas a clientes en los "
"últimos 120 días.",
digits='Product Unit of Measure',
)
warehouse_rotation_stdev = fields.Float(
compute='_compute_rotation',
help="Desvío estandar de las cantidades entregas desde este almacen"
" a clientes en los últimos 120 días.",
digits='Product Unit of Measure',
)
rotation = fields.Float(
        help='Quantities delivered to customers over the last 120 days, divided by 4 to give a monthly figure (returns subtracted).',
compute='_compute_rotation',
digits='Product Unit of Measure',
)
warehouse_rotation = fields.Float(
        help='Quantities delivered from this warehouse to customers over the last 120 days, divided by 4 to give a monthly figure (returns subtracted).',
compute='_compute_rotation',
digits='Product Unit of Measure',
)
product_min_qty = fields.Float(tracking=True)
product_max_qty = fields.Float(tracking=True)
qty_multiple = fields.Float(tracking=True)
location_id = fields.Many2one(tracking=True)
product_id = fields.Many2one(tracking=True)
@api.depends('product_id', 'location_id')
def _compute_rotation(self):
warehouse_with_products = self.filtered('product_id')
(self - warehouse_with_products).update({
'rotation': 0.0,
'rotation_stdev': 0.0,
'warehouse_rotation_stdev': 0.0,
'warehouse_rotation': 0.0,
})
for rec in warehouse_with_products:
rotation, rotation_stdev = rec.product_id.get_product_rotation(
compute_stdev=True)
warehouse_rotation, warehouse_rotation_stdev = \
rec.product_id.get_product_rotation(
rec.warehouse_id.view_location_id, compute_stdev=True)
rec.update({
'rotation': rotation,
'rotation_stdev': rotation_stdev,
'warehouse_rotation_stdev': warehouse_rotation_stdev,
'warehouse_rotation': warehouse_rotation,
})
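# Worked example (illustrative figures): if 120 units were delivered to
# customers over the last 120 days (net of returns), rotation is reported as
# 120 / 4 = 30 units per month; warehouse_rotation applies the same computation
# restricted to deliveries leaving this warehouse's view location.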
| agpl-3.0 | 7,736,426,344,456,536,000 | 40.123077 | 78 | 0.5896 | false |
EmilioK97/pydeepl | pydeepl/pydeepl.py | 1 | 3012 | import requests
BASE_URL = 'https://www2.deepl.com/jsonrpc'
LANGUAGES = {
'auto': 'Auto',
'DE': 'German',
'EN': 'English',
'FR': 'French',
'ES': 'Spanish',
'IT': 'Italian',
'NL': 'Dutch',
'PL': 'Polish'
}
JSONRPC_VERSION = '2.0'
class SplittingError(Exception):
def __init__(self, message):
super(SplittingError, self).__init__(message)
def split_sentences(text, lang='auto', json=False):
if text is None:
raise SplittingError('Text can\'t be be None.')
if lang not in LANGUAGES.keys():
raise SplittingError('Language {} not available.'.format(lang))
parameters = {
'jsonrpc': JSONRPC_VERSION,
'method': 'LMT_split_into_sentences',
'params': {
'texts': [
text
],
'lang': {
'lang_user_selected': lang
},
},
}
response = requests.post(BASE_URL, json=parameters).json()
if 'result' not in response:
raise SplittingError('DeepL call resulted in a unknown result.')
splitted_texts = response['result']['splitted_texts']
if len(splitted_texts) == 0:
raise SplittingError('Text could not be splitted.')
if json:
return response
return splitted_texts[0]
class TranslationError(Exception):
def __init__(self, message):
super(TranslationError, self).__init__(message)
def translate(text, to_lang, from_lang='auto', json=False):
if text is None:
raise TranslationError('Text can\'t be None.')
if len(text) > 5000:
raise TranslationError('Text too long (limited to 5000 characters).')
if to_lang not in LANGUAGES.keys():
raise TranslationError('Language {} not available.'.format(to_lang))
if from_lang is not None and from_lang not in LANGUAGES.keys():
raise TranslationError('Language {} not available.'.format(from_lang))
parameters = {
'jsonrpc': JSONRPC_VERSION,
'method': 'LMT_handle_jobs',
'params': {
'jobs': [
{
'kind':'default',
'raw_en_sentence': text
}
],
'lang': {
'user_preferred_langs': [
from_lang,
to_lang
],
'source_lang_user_selected': from_lang,
'target_lang': to_lang
},
},
}
response = requests.post(BASE_URL, json=parameters).json()
if 'result' not in response:
raise TranslationError('DeepL call resulted in a unknown result.')
translations = response['result']['translations']
if len(translations) == 0 \
or translations[0]['beams'] is None \
or translations[0]['beams'][0]['postprocessed_sentence'] is None:
raise TranslationError('No translations found.')
if json:
return response
return translations[0]['beams'][0]['postprocessed_sentence']
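# Example usage (sketch; requires network access to the unofficial DeepL
# endpoint above, which may change or reject automated requests):
if __name__ == '__main__':
    print(translate('How are you today?', 'DE', from_lang='EN'))
    print(split_sentences('First sentence. Second sentence.', lang='EN'))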
| mit | -3,052,785,514,397,001,000 | 26.888889 | 78 | 0.557437 | false |
ehouarn-perret/EhouarnPerret.Python.HackerRank | 0 - Tutorials/30 Days of Code/Day 4 - Class vs. Instance.py | 1 | 1605 | """
In this challenge, we're going to learn about the difference between a class and an instance;
because this is an Object Oriented concept, it's only enabled in certain languages.
Task
Write a Person class with an instance variable, age, and a constructor that takes an integer, initial_age, as a parameter.
The constructor must assign initial_age to _age after confirming the argument passed as initial_age is not negative.
If a negative argument is passed as initial_age, the constructor should set age to 0 and print "Age is not valid, setting age to 0."
In addition, you must write the following instance methods:
age_1_year() should increase the instance variable _age by 1.
is_old() should perform the following conditional actions:
If age < 13, print "You are young.".
If age >= 13 and age < 18, print "You are a teenager.".
Otherwise, print "You are old.".
"""
class Person:
# Add some more code to run some checks on initial_age
def __init__(self, initial_age):
if initial_age < 0:
print("Age is not valid, setting age to 0.")
self._age = 0
else:
self._age = initial_age
# Do some computations in here and print out the correct statement to the console
def is_old(self):
if self._age < 13:
print("You are young.")
elif (13 <= self._age) and (self._age < 18):
print("You are a teenager.")
else:
print("You are old.")
# Increment the age of the person in here
def age_1_year(self):
self._age += 1
T = int(input())
for i in range(0, T):
age = int(input())
p = Person(age)
p.is_old()
for j in range(0, 3):
p.age_1_year()
p.is_old()
print("")
| mit | -2,068,696,048,610,836,000 | 30.470588 | 126 | 0.694704 | false |
wakermahmud/sync-engine | tests/events/test_recurrence.py | 1 | 21964 | import pytest
import arrow
from dateutil import tz
from dateutil.rrule import rrulestr
from datetime import timedelta
from inbox.models.event import Event, RecurringEvent, RecurringEventOverride
from inbox.models.when import Date, Time, DateSpan, TimeSpan
from inbox.events.remote_sync import handle_event_updates
from inbox.events.recurring import (link_events, get_start_times,
parse_exdate, rrule_to_json)
from inbox.log import get_logger
log = get_logger()
TEST_RRULE = ["RRULE:FREQ=WEEKLY;UNTIL=20140918T203000Z;BYDAY=TH"]
TEST_EXDATE = ["EXDATE;TZID=America/Los_Angeles:20140904T133000"]
ALL_DAY_RRULE = ["RRULE:FREQ=WEEKLY;UNTIL=20140911;BYDAY=TH"]
TEST_EXDATE_RULE = TEST_RRULE[:]
TEST_EXDATE_RULE.extend(TEST_EXDATE)
def recurring_event(db, account, calendar, rrule,
start=arrow.get(2014, 8, 7, 20, 30, 00),
end=arrow.get(2014, 8, 7, 21, 30, 00),
all_day=False, commit=True):
    # commit: are we returning a committed instance object?
if commit:
ev = db.session.query(Event).filter_by(uid='myuid').first()
if ev:
db.session.delete(ev)
ev = Event(namespace_id=account.namespace.id,
calendar=calendar,
title='recurring',
description='',
uid='myuid',
location='',
busy=False,
read_only=False,
reminders='',
recurrence=rrule,
start=start,
end=end,
all_day=all_day,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=None,
master_event_uid=None,
source='local')
if commit:
db.session.add(ev)
db.session.commit()
return ev
def recurring_override(db, master, original_start, start, end):
# Returns an Override that is explicitly linked to master
ev = recurring_override_instance(db, master, original_start, start, end)
ev.master = master
db.session.commit()
return ev
def recurring_override_instance(db, master, original_start, start, end):
# Returns an Override that has the master's UID, but is not linked yet
override_uid = '{}_{}'.format(master.uid,
original_start.strftime("%Y%m%dT%H%M%SZ"))
ev = db.session.query(Event).filter_by(uid=override_uid).first()
if ev:
db.session.delete(ev)
db.session.commit()
ev = Event(original_start_time=original_start,
master_event_uid=master.uid,
namespace_id=master.namespace_id,
calendar_id=master.calendar_id)
ev.update(master)
ev.uid = override_uid
ev.start = start
ev.end = end
ev.master_event_uid = master.uid
db.session.add(ev)
return ev
def test_create_recurrence(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
assert isinstance(event, RecurringEvent)
assert event.rrule is not None
assert event.exdate is not None
assert event.until is not None
def test_link_events_from_override(db, default_account, calendar):
# Test that by creating a recurring event and override separately, we
# can link them together based on UID and namespace_id when starting
# from the override.
master = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
original_start = parse_exdate(master)[0]
override = Event(original_start_time=original_start,
master_event_uid=master.uid,
namespace_id=master.namespace_id,
source='local')
assert isinstance(override, RecurringEventOverride)
link_events(db.session, override)
assert override.master == master
def test_link_events_from_master(db, default_account, calendar):
# Test that by creating a recurring event and override separately, we
# can link them together based on UID and namespace_id when starting
# from the master event.
master = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
original_start = parse_exdate(master)[0]
override = recurring_override_instance(db, master, original_start,
master.start, master.end)
assert isinstance(master, RecurringEvent)
o = link_events(db.session, master)
assert len(o) == 1
assert override in master.overrides
assert override.uid in master.override_uids
def test_rrule_parsing(db, default_account, calendar):
# This test event starts on Aug 7 and recurs every Thursday at 20:30
# until Sept 18.
# There should be 7 total occurrences including Aug 7 and Sept 18.
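    # Concretely, those occurrences fall on Aug 7, 14, 21, 28 and Sep 4, 11, 18
    # (all at 20:30 UTC).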
event = recurring_event(db, default_account, calendar, TEST_RRULE)
g = get_start_times(event)
assert len(g) == 7
# Check we can supply an end date to cut off recurrence expansion
g = get_start_times(event, end=arrow.get(2014, 9, 12, 21, 30, 00))
assert len(g) == 6
def test_all_day_rrule_parsing(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 8, 7),
end=arrow.get(2014, 8, 7),
all_day=True)
g = get_start_times(event)
assert len(g) == 6
def test_rrule_exceptions(db, default_account, calendar):
# This test event starts on Aug 7 and recurs every Thursday at 20:30
# until Sept 18, except on September 4.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
g = get_start_times(event)
assert len(g) == 6
assert arrow.get(2014, 9, 4, 13, 30, 00) not in g
def test_inflation(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
assert i.title == event.title
assert (i.end - i.start) == (event.end - event.start)
assert i.public_id.startswith(event.public_id)
# make sure the original event instance appears too
assert event.start in [e.start for e in infl]
def test_inflation_exceptions(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
assert i.title == event.title
assert (i.end - i.start) == (event.end - event.start)
assert i.start != arrow.get(2014, 9, 4, 13, 30, 00)
def test_inflate_across_DST(db, default_account, calendar):
# If we inflate a RRULE that covers a change to/from Daylight Savings Time,
# adjust the base time accordingly to account for the new UTC offset.
# Daylight Savings for US/PST: March 8, 2015 - Nov 1, 2015
dst_rrule = ["RRULE:FREQ=WEEKLY;BYDAY=TU"]
dst_event = recurring_event(db, default_account, calendar, dst_rrule,
start=arrow.get(2015, 03, 03, 03, 03, 03),
end=arrow.get(2015, 03, 03, 04, 03, 03))
g = get_start_times(dst_event, end=arrow.get(2015, 03, 21))
# In order for this event to occur at the same local time, the recurrence
# rule should be expanded to 03:03:03 before March 8, and 02:03:03 after,
# keeping the local time of the event consistent at 19:03.
# This is consistent with how Google returns recurring event instances.
local_tz = tz.gettz(dst_event.start_timezone)
for time in g:
if time < arrow.get(2015, 3, 8):
assert time.hour == 3
else:
assert time.hour == 2
# Test that localizing these times is consistent
assert time.astimezone(local_tz).hour == 19
# Test an event that starts during local daylight savings time
dst_event = recurring_event(db, default_account, calendar, dst_rrule,
start=arrow.get(2015, 10, 27, 02, 03, 03),
end=arrow.get(2015, 10, 27, 03, 03, 03))
g = get_start_times(dst_event, end=arrow.get(2015, 11, 11))
for time in g:
if time > arrow.get(2015, 11, 1):
assert time.hour == 3
else:
assert time.hour == 2
assert time.astimezone(local_tz).hour == 19
def test_inflate_all_day_event(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 9, 4),
end=arrow.get(2014, 9, 4), all_day=True)
infl = event.inflate()
for i in infl:
assert i.all_day
assert isinstance(i.when, Date)
assert i.start in [arrow.get(2014, 9, 4), arrow.get(2014, 9, 11)]
def test_inflate_multi_day_event(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, ALL_DAY_RRULE,
start=arrow.get(2014, 9, 4),
end=arrow.get(2014, 9, 5), all_day=True)
infl = event.inflate()
for i in infl:
assert i.all_day
assert isinstance(i.when, DateSpan)
assert i.start in [arrow.get(2014, 9, 4), arrow.get(2014, 9, 11)]
assert i.end in [arrow.get(2014, 9, 5), arrow.get(2014, 9, 12)]
def test_invalid_rrule_entry(db, default_account, calendar):
# If we don't know how to expand the RRULE, we treat the event as if
# it were a single instance.
event = recurring_event(db, default_account, calendar, 'INVALID_RRULE_YAY')
infl = event.inflate()
assert len(infl) == 1
assert infl[0].start == event.start
def test_invalid_parseable_rrule_entry(db, default_account, calendar):
event = recurring_event(db, default_account, calendar,
["RRULE:FREQ=CHRISTMAS;UNTIL=1984;BYDAY=QQ"])
infl = event.inflate()
assert len(infl) == 1
assert infl[0].start == event.start
def test_non_recurring_events_behave(db, default_account, calendar):
event = Event(namespace_id=default_account.namespace.id,
calendar=calendar,
title='not recurring',
description='',
uid='non_recurring_uid',
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 07, 07, 13, 30),
end=arrow.get(2014, 07, 07, 13, 55),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=None,
master_event_uid=None,
source='local')
assert isinstance(event, Event)
with pytest.raises(AttributeError):
event.inflate()
def test_inflated_events_cant_persist(db, default_account, calendar):
event = recurring_event(db, default_account, calendar, TEST_RRULE)
infl = event.inflate()
for i in infl:
db.session.add(i)
with pytest.raises(Exception) as excinfo:
# FIXME "No handlers could be found for logger" - ensure this is only
# a test issue or fix.
db.session.commit()
assert 'should not be committed' in str(excinfo.value)
def test_override_instantiated(db, default_account, calendar):
# Test that when a recurring event has overrides, they show up as
# RecurringEventOverrides, have links back to the parent, and don't
# appear twice in the event list.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
all_events = event.all_events()
assert len(all_events) == 7
assert override in all_events
def test_override_same_start(db, default_account, calendar):
# Test that when a recurring event has an override without a modified
    # start date (i.e. the RRULE has no EXDATE for that event), it doesn't
# appear twice in the all_events list.
event = recurring_event(db, default_account, calendar, TEST_RRULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00))
all_events = event.all_events()
assert len(all_events) == 7
unique_starts = list(set([e.start for e in all_events]))
assert len(unique_starts) == 7
assert override in all_events
def test_override_updated(db, default_account, calendar):
# Test that when a recurring event override is created or updated
# remotely, we update our override links appropriately.
event = recurring_event(db, default_account, calendar, TEST_RRULE)
assert event is not None
# create a new Event, as if we just got it from Google
master_uid = event.uid
override_uid = master_uid + "_20140814T203000Z"
override = Event(title='new override from google',
description='',
uid=override_uid,
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 30, 00),
end=arrow.get(2014, 8, 14, 23, 30, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=master_uid,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override],
log,
db.session)
db.session.commit()
    # Let's see if the event got saved with the right info
find_override = db.session.query(Event).filter_by(uid=override_uid).one()
assert find_override is not None
assert find_override.master_event_id == event.id
# Update the same override, making sure we don't create two
override = Event(title='new override from google',
description='',
uid=override_uid,
location='walk and talk',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 15, 00),
end=arrow.get(2014, 8, 14, 23, 15, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=master_uid,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override], log, db.session)
db.session.commit()
# Let's see if the event got saved with the right info
find_override = db.session.query(Event).filter_by(uid=override_uid).one()
assert find_override is not None
assert find_override.master_event_id == event.id
assert find_override.location == 'walk and talk'
def test_override_cancelled(db, default_account, calendar):
# Test that overrides with status 'cancelled' are appropriately missing
# from the expanded event.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
override.cancelled = True
all_events = event.all_events()
assert len(all_events) == 6
assert override not in all_events
assert not any([e.start == arrow.get(2014, 9, 4, 20, 30, 00)
for e in all_events])
def test_new_instance_cancelled(db, default_account, calendar):
# Test that if we receive a cancelled override from Google, we save it
# as an override with cancelled status rather than deleting it.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override_uid = event.uid + "_20140814T203000Z"
override = Event(title='CANCELLED',
description='',
uid=override_uid,
location='',
busy=False,
read_only=False,
reminders='',
recurrence=None,
start=arrow.get(2014, 8, 14, 22, 15, 00),
end=arrow.get(2014, 8, 14, 23, 15, 00),
all_day=False,
is_owner=False,
participants=[],
provider_name='inbox',
raw_data='',
original_start_tz='America/Los_Angeles',
original_start_time=arrow.get(2014, 8, 14, 21, 30, 00),
master_event_uid=event.uid,
cancelled=True,
source='local')
handle_event_updates(default_account.namespace.id,
calendar.id,
[override], log, db.session)
db.session.commit()
# Check the event got saved with the cancelled flag
find_override = db.session.query(Event).filter_by(
uid=override_uid, namespace_id=default_account.namespace.id).one()
assert find_override.cancelled is True
def test_when_delta():
# Test that the event length is calculated correctly
ev = Event(namespace_id=0)
    # Time: the length is 0 minutes if start and end are the same time
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 01, 10, 00, 00)
when = ev.when
assert isinstance(when, Time)
assert ev.length == timedelta(minutes=0)
# TimeSpan
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 01, 10, 30, 00)
when = ev.when
assert isinstance(when, TimeSpan)
assert ev.length == timedelta(minutes=30)
    # Date: note the length is 0 days if it starts and ends on the same day
ev.all_day = True
ev.start = arrow.get(2015, 01, 01, 00, 00, 00)
ev.end = arrow.get(2015, 01, 01, 00, 00, 00)
when = ev.when
assert isinstance(when, Date)
assert ev.length == timedelta(days=0)
# DateSpan
ev.all_day = True
ev.start = arrow.get(2015, 01, 01, 10, 00, 00)
ev.end = arrow.get(2015, 01, 02, 10, 00, 00)
when = ev.when
assert isinstance(when, DateSpan)
assert ev.length == timedelta(days=1)
def test_rrule_to_json():
# Generate more test cases!
# http://jakubroztocil.github.io/rrule/
r = 'RRULE:FREQ=WEEKLY;UNTIL=20140918T203000Z;BYDAY=TH'
r = rrulestr(r, dtstart=None)
j = rrule_to_json(r)
assert j.get('freq') == 'WEEKLY'
assert j.get('byweekday') == 'TH'
r = 'FREQ=HOURLY;COUNT=30;WKST=MO;BYMONTH=1;BYMINUTE=42;BYSECOND=24'
r = rrulestr(r, dtstart=None)
j = rrule_to_json(r)
assert j.get('until') is None
    assert j.get('byminute') == 42
def test_master_cancelled(db, default_account, calendar):
# Test that when the master recurring event is cancelled, we cancel every
# override too.
event = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE)
override = recurring_override(db, event,
arrow.get(2014, 9, 4, 20, 30, 00),
arrow.get(2014, 9, 4, 21, 30, 00),
arrow.get(2014, 9, 4, 22, 30, 00))
update = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE,
commit=False)
update.status = 'cancelled'
updates = [update]
handle_event_updates(default_account.namespace.id,
calendar.id,
updates, log, db.session)
db.session.commit()
find_master = db.session.query(Event).filter_by(uid=event.uid).first()
assert find_master.status == 'cancelled'
find_override = db.session.query(Event).filter_by(uid=override.uid).first()
assert find_override.status == 'cancelled'
def test_made_recurring_then_cancelled(db, default_account, calendar):
# Test that when an event is updated with a recurrence and cancelled at
# the same time, we cancel it.
normal = recurring_event(db, default_account, calendar, None)
# Check this is specifically an Event, not a RecurringEvent
assert type(normal) == Event
# Update with a recurrence rule *and* cancellation
update = recurring_event(db, default_account, calendar, TEST_EXDATE_RULE,
commit=False)
update.status = 'cancelled'
updates = [update]
handle_event_updates(default_account.namespace.id,
calendar.id,
updates, log, db.session)
db.session.commit()
find_master = db.session.query(Event).filter_by(uid=normal.uid).first()
assert find_master.status == 'cancelled'
| agpl-3.0 | -8,230,936,768,372,044,000 | 39.825279 | 79 | 0.5886 | false |
RCMRD/geonode | geonode/documents/views.py | 1 | 16713 | import json
from guardian.shortcuts import get_perms
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext, loader
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django_downloadview.response import DownloadResponse
from django.views.generic.edit import UpdateView, CreateView
from django.db.models import F
from django.forms.util import ErrorList
from geonode.utils import resolve_object
from geonode.security.views import _perms_info_json
from geonode.people.forms import ProfileForm
from geonode.base.forms import CategoryForm
from geonode.base.models import TopicCategory, ResourceBase
from geonode.documents.models import Document
from geonode.documents.forms import DocumentForm, DocumentCreateForm, DocumentReplaceForm
from geonode.documents.models import IMGTYPES
from geonode.utils import build_social_links
ALLOWED_DOC_TYPES = settings.ALLOWED_DOCUMENT_TYPES
_PERMISSION_MSG_DELETE = _("You are not permitted to delete this document")
_PERMISSION_MSG_GENERIC = _("You do not have permissions for this document.")
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this document")
_PERMISSION_MSG_METADATA = _(
"You are not permitted to modify this document's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this document")
def _resolve_document(request, docid, permission='base.change_resourcebase',
msg=_PERMISSION_MSG_GENERIC, **kwargs):
'''
Resolve the document by the provided primary key and check the optional permission.
'''
return resolve_object(request, Document, {'pk': docid},
permission=permission, permission_msg=msg, **kwargs)
def document_detail(request, docid):
"""
The view that show details of each document
"""
document = None
try:
document = _resolve_document(
request,
docid,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
except Http404:
return HttpResponse(
loader.render_to_string(
'404.html', RequestContext(
request, {
})), status=404)
except PermissionDenied:
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to view this document.")})), status=403)
if document is None:
return HttpResponse(
            'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
try:
related = document.content_type.get_object_for_this_type(
id=document.object_id)
except:
related = ''
# Update count for popularity ranking,
        # but do not include admins or resource owners
if request.user != document.owner and not request.user.is_superuser:
Document.objects.filter(id=document.id).update(popular_count=F('popular_count') + 1)
metadata = document.link_set.metadata().filter(
name__in=settings.DOWNLOAD_FORMATS_METADATA)
context_dict = {
'perms_list': get_perms(request.user, document.get_self_resource()),
'permissions_json': _perms_info_json(document),
'resource': document,
'metadata': metadata,
'imgtypes': IMGTYPES,
'related': related}
if settings.SOCIAL_ORIGINS:
context_dict["social_links"] = build_social_links(request, document)
if getattr(settings, 'EXIF_ENABLED', False):
try:
from geonode.contrib.exif.utils import exif_extract_dict
exif = exif_extract_dict(document)
if exif:
context_dict['exif_data'] = exif
except:
print "Exif extraction failed."
return render_to_response(
"documents/document_detail.html",
RequestContext(request, context_dict))
def document_download(request, docid):
document = get_object_or_404(Document, pk=docid)
if not request.user.has_perm(
'base.download_resourcebase',
obj=document.get_self_resource()):
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to view this document.")})), status=401)
return DownloadResponse(document.doc_file)
class DocumentUploadView(CreateView):
template_name = 'documents/document_upload.html'
form_class = DocumentCreateForm
def get_context_data(self, **kwargs):
context = super(DocumentUploadView, self).get_context_data(**kwargs)
context['ALLOWED_DOC_TYPES'] = ALLOWED_DOC_TYPES
return context
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save(commit=False)
self.object.owner = self.request.user
resource_id = self.request.POST.get('resource', None)
if resource_id:
self.object.content_type = ResourceBase.objects.get(id=resource_id).polymorphic_ctype
self.object.object_id = resource_id
# by default, if RESOURCE_PUBLISHING=True then document.is_published
# must be set to False
is_published = True
if settings.RESOURCE_PUBLISHING:
is_published = False
self.object.is_published = is_published
self.object.save()
self.object.set_permissions(form.cleaned_data['permissions'])
abstract = None
date = None
regions = []
keywords = []
bbox = None
if getattr(settings, 'EXIF_ENABLED', False):
try:
from geonode.contrib.exif.utils import exif_extract_metadata_doc
exif_metadata = exif_extract_metadata_doc(self.object)
if exif_metadata:
date = exif_metadata.get('date', None)
keywords.extend(exif_metadata.get('keywords', []))
bbox = exif_metadata.get('bbox', None)
abstract = exif_metadata.get('abstract', None)
except:
print "Exif extraction failed."
if getattr(settings, 'NLP_ENABLED', False):
try:
from geonode.contrib.nlp.utils import nlp_extract_metadata_doc
nlp_metadata = nlp_extract_metadata_doc(self.object)
if nlp_metadata:
regions.extend(nlp_metadata.get('regions', []))
keywords.extend(nlp_metadata.get('keywords', []))
except:
print "NLP extraction failed."
if abstract:
self.object.abstract = abstract
self.object.save()
if date:
self.object.date = date
self.object.date_type = "Creation"
self.object.save()
if len(regions) > 0:
self.object.regions.add(*regions)
if len(keywords) > 0:
self.object.keywords.add(*keywords)
if bbox:
bbox_x0, bbox_x1, bbox_y0, bbox_y1 = bbox
Document.objects.filter(id=self.object.pk).update(
bbox_x0=bbox_x0,
bbox_x1=bbox_x1,
bbox_y0=bbox_y0,
bbox_y1=bbox_y1)
if getattr(settings, 'SLACK_ENABLED', False):
try:
from geonode.contrib.slack.utils import build_slack_message_document, send_slack_message
send_slack_message(build_slack_message_document("document_new", self.object))
except:
print "Could not send slack message for new document."
return HttpResponseRedirect(
reverse(
'document_metadata',
args=(
self.object.id,
)))
class DocumentUpdateView(UpdateView):
template_name = 'documents/document_replace.html'
pk_url_kwarg = 'docid'
form_class = DocumentReplaceForm
queryset = Document.objects.all()
context_object_name = 'document'
def get_context_data(self, **kwargs):
context = super(DocumentUpdateView, self).get_context_data(**kwargs)
context['ALLOWED_DOC_TYPES'] = ALLOWED_DOC_TYPES
return context
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save()
return HttpResponseRedirect(
reverse(
'document_metadata',
args=(
self.object.id,
)))
@login_required
def document_metadata(
request,
docid,
template='documents/document_metadata.html'):
document = None
try:
document = _resolve_document(
request,
docid,
'base.change_resourcebase_metadata',
_PERMISSION_MSG_METADATA)
except Http404:
return HttpResponse(
loader.render_to_string(
'404.html', RequestContext(
request, {
})), status=404)
except PermissionDenied:
return HttpResponse(
loader.render_to_string(
'401.html', RequestContext(
request, {
'error_message': _("You are not allowed to edit this document.")})), status=403)
if document is None:
return HttpResponse(
            'An unknown error has occurred.',
content_type="text/plain",
status=401
)
else:
poc = document.poc
metadata_author = document.metadata_author
topic_category = document.category
if request.method == "POST":
document_form = DocumentForm(
request.POST,
instance=document,
prefix="resource")
category_form = CategoryForm(
request.POST,
prefix="category_choice_field",
initial=int(
request.POST["category_choice_field"]) if "category_choice_field" in request.POST else None)
else:
document_form = DocumentForm(instance=document, prefix="resource")
category_form = CategoryForm(
prefix="category_choice_field",
initial=topic_category.id if topic_category else None)
if request.method == "POST" and document_form.is_valid(
) and category_form.is_valid():
new_poc = document_form.cleaned_data['poc']
new_author = document_form.cleaned_data['metadata_author']
new_keywords = document_form.cleaned_data['keywords']
new_category = TopicCategory.objects.get(
id=category_form.cleaned_data['category_choice_field'])
if new_poc is None:
if poc is None:
poc_form = ProfileForm(
request.POST,
prefix="poc",
instance=poc)
else:
poc_form = ProfileForm(request.POST, prefix="poc")
if poc_form.is_valid():
if len(poc_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = poc_form._errors.setdefault('profile', ErrorList())
errors.append(_('You must set a point of contact for this resource'))
poc = None
if poc_form.has_changed and poc_form.is_valid():
new_poc = poc_form.save()
if new_author is None:
if metadata_author is None:
author_form = ProfileForm(request.POST, prefix="author",
instance=metadata_author)
else:
author_form = ProfileForm(request.POST, prefix="author")
if author_form.is_valid():
if len(author_form.cleaned_data['profile']) == 0:
# FIXME use form.add_error in django > 1.7
errors = author_form._errors.setdefault('profile', ErrorList())
errors.append(_('You must set an author for this resource'))
metadata_author = None
if author_form.has_changed and author_form.is_valid():
new_author = author_form.save()
if new_poc is not None and new_author is not None:
the_document = document_form.save()
the_document.poc = new_poc
the_document.metadata_author = new_author
the_document.keywords.add(*new_keywords)
Document.objects.filter(id=the_document.id).update(category=new_category)
if getattr(settings, 'SLACK_ENABLED', False):
try:
from geonode.contrib.slack.utils import build_slack_message_document, send_slack_messages
send_slack_messages(build_slack_message_document("document_edit", the_document))
except:
print "Could not send slack message for modified document."
return HttpResponseRedirect(
reverse(
'document_detail',
args=(
document.id,
)))
if poc is not None:
document_form.fields['poc'].initial = poc.id
poc_form = ProfileForm(prefix="poc")
poc_form.hidden = True
if metadata_author is not None:
document_form.fields['metadata_author'].initial = metadata_author.id
author_form = ProfileForm(prefix="author")
author_form.hidden = True
return render_to_response(template, RequestContext(request, {
"document": document,
"document_form": document_form,
"poc_form": poc_form,
"author_form": author_form,
"category_form": category_form,
}))
def document_search_page(request):
# for non-ajax requests, render a generic search page
if request.method == 'GET':
params = request.GET
elif request.method == 'POST':
params = request.POST
else:
return HttpResponse(status=405)
return render_to_response(
'documents/document_search.html',
RequestContext(
request,
{
'init_search': json.dumps(
params or {}),
"site": settings.SITEURL}))
@login_required
def document_remove(request, docid, template='documents/document_remove.html'):
try:
document = _resolve_document(
request,
docid,
'base.delete_resourcebase',
_PERMISSION_MSG_DELETE)
if request.method == 'GET':
return render_to_response(template, RequestContext(request, {
"document": document
}))
if request.method == 'POST':
if getattr(settings, 'SLACK_ENABLED', False):
slack_message = None
try:
from geonode.contrib.slack.utils import build_slack_message_document
slack_message = build_slack_message_document("document_delete", document)
except:
print "Could not build slack message for delete document."
document.delete()
try:
from geonode.contrib.slack.utils import send_slack_messages
send_slack_messages(slack_message)
except:
print "Could not send slack message for delete document."
else:
document.delete()
return HttpResponseRedirect(reverse("document_browse"))
else:
return HttpResponse("Not allowed", status=403)
except PermissionDenied:
return HttpResponse(
'You are not allowed to delete this document',
content_type="text/plain",
status=401
)
| gpl-3.0 | -4,495,816,925,612,968,000 | 36.05765 | 113 | 0.569557 | false |
intelxed/xed | tests/split-tests.py | 1 | 2317 | #!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import os,sys,re,glob
def work():
files = glob.glob("*.txt")
for fn in files:
lines = file(fn).readlines()
lines = map(lambda x: x.strip(), lines)
ofn = fn + ".new"
of = open(ofn,'w')
for line in lines:
if line:
incodes, cmd = line.split(';') # incodes are tossed
cmd = cmd.strip()
codes = []
if ' -de ' in cmd:
codes.append('DEC')
codes.append('ENC')
elif ' -e ' in cmd:
codes.append('ENC')
elif ' -d ' in cmd:
codes.append('DEC')
elif 'ild' in cmd:
codes.append('DEC')
elif 'ex1' in cmd:
codes.append('DEC')
elif 'ex3' in cmd:
codes.append('ENC')
elif 'ex4' in cmd:
codes.append('DEC')
elif 'ex6' in cmd:
codes.append('DEC')
codes.append('ENC')
else:
codes.append('OTHER')
if 'C4' in cmd or 'C5' in cmd or 'c4' in cmd or 'c5' in cmd:
codes.append('AVX')
if ' 8f' in cmd: # total hack: FIXME, miss some xop stuff in c4 space
codes.append('XOP')
if ' v' in cmd or ' V' in cmd:
codes.append('AVX')
cs = " ".join(codes)
of.write("{0:20s} ; {1}\n".format(cs, cmd))
of.close()
if __name__ == "__main__":
work()
| apache-2.0 | 1,102,383,340,792,462,800 | 32.1 | 86 | 0.482952 | false |
ewdurbin/sentry-datadog-helpers | sentry_datadog_helpers/test/utils.py | 1 | 5855 | import sys
from functools import wraps
__all__ = ('surrogate', )
class surrogate(object):
"""
Originally Created by Kostia Balitsky
Contains Modifications by Griffin Smith
See:
https://github.com/ikostia/surrogate
and
https://github.com/glittershark/surrogate
Licensed as:
This code can be used, distributed and modified in any ways
one wants. If one gets any use of it author is already rewarded.
On the other hand, do not expect any guaranteed support
from author. Use it as is.
    Add an empty module stub that can be imported
    for every subpath in `path`.
Those stubs can later be patched by mock's
patch decorator.
Example:
@surrogate('sys.my.cool.module1')
@surrogate('sys.my.cool.module2')
@mock.patch('sys.my.cool.module1', mock1)
@mock.patch('sys.my.cool.module2', mock2)
def function():
from sys.my import cool
from sys.my.cool import module1
from sys.my.cool import module2
"""
def __init__(self, path):
self.path = path
self.elements = self.path.split('.')
def __enter__(self):
self.prepared = self.prepare()
def __exit__(self, *args):
if self.prepared:
self.restore()
def __call__(self, func):
@wraps(func)
def _wrapper(*args, **kwargs):
prepared = self.prepare()
result = func(*args, **kwargs)
if prepared:
self.restore()
return result
return _wrapper
@property
def nothing_to_stub(self):
"""Check if there are no modules to stub"""
return len(self.elements) == 0
def prepare(self):
"""Preparations before actual function call"""
self._determine_existing_modules()
if self.nothing_to_stub:
return False
self._create_module_stubs()
self._save_base_module()
self._add_module_stubs()
return True
def restore(self):
"""Post-actions to restore initial state of the system"""
self._remove_module_stubs()
self._restore_base_module()
def _get_importing_path(self, elements):
"""Return importing path for a module that is last in elements list"""
ip = '.'.join(elements)
if self.known_path:
ip = self.known_path + '.' + ip
return ip
def _create_module_stubs(self):
"""Create stubs for all not-existing modules"""
# last module in our sequence
# it should be loaded
last_module = type(self.elements[-1], (object, ), {
'__all__': [],
'_importing_path': self._get_importing_path(self.elements)})
modules = [last_module]
# now we create a module stub for each
# element in a path.
# each module stub contains `__all__`
# list and a member that
# points to the next module stub in
# sequence
for element in reversed(self.elements[:-1]):
next_module = modules[-1]
module = type(element, (object, ), {
next_module.__name__: next_module,
'__all__': [next_module.__name__]})
modules.append(module)
self.modules = list(reversed(modules))
self.modules[0].__path__ = []
def _determine_existing_modules(self):
"""
        Find out which of the modules
        from the specified path are already
        imported (i.e. present in sys.modules);
        those modules should not be replaced
        by stubs.
"""
known = 0
while known < len(self.elements) and\
'.'.join(self.elements[:known + 1]) in sys.modules:
known += 1
self.known_path = '.'.join(self.elements[:known])
self.elements = self.elements[known:]
def _save_base_module(self):
"""
        Remember the state of the last existing module.
        The last of the sequence of existing modules
        is the only one we will change, so we must
        remember its state in order to restore it
        afterwards.
"""
try:
# save last of the existing modules
self.base_module = sys.modules[self.known_path]
except KeyError:
self.base_module = None
# save `__all__` attribute of the base_module
self.base_all = []
if hasattr(self.base_module, '__all__'):
self.base_all = list(self.base_module.__all__)
if self.base_module:
# change base_module's `__all__` attribute
# to include the first module of the sequence
self.base_module.__all__ = self.base_all + [self.elements[0]]
setattr(self.base_module, self.elements[0], self.modules[0])
def _add_module_stubs(self):
"""Push created module stubs into sys.modules"""
for i, module in enumerate(self.modules):
module._importing_path =\
self._get_importing_path(self.elements[:i + 1])
sys.modules[module._importing_path] = module
def _remove_module_stubs(self):
"""Remove fake modules from sys.modules"""
for module in reversed(self.modules):
if module._importing_path in sys.modules:
del sys.modules[module._importing_path]
def _restore_base_module(self):
"""Restore the state of the last existing module"""
if self.base_module:
self.base_module.__all__ = self.base_all
if not self.base_all:
del self.base_module.__all__
if hasattr(self.base_module, self.elements[0]):
delattr(self.base_module, self.elements[0])
| bsd-3-clause | 1,469,070,899,879,551,500 | 33.040698 | 78 | 0.561913 | false |
dagss/numpy_svn | numpy/polynomial/chebyshev.py | 1 | 38012 | """
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebval` -- evaluate a Chebyshev series at given points.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series of given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division
__all__ = ['chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline',
'chebadd', 'chebsub', 'chebmul', 'chebdiv', 'chebval', 'chebder',
'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', 'chebvander',
'chebfit', 'chebtrim', 'chebroots', 'Chebyshev']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(cs) :
    """Convert Chebyshev series to z-series.
    Convert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = cs.size
zs = np.zeros(2*n-1, dtype=cs.dtype)
zs[n-1:] = cs/2
return zs + zs[::-1]
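# For instance (values computed from the definitions above), the Chebyshev
# coefficients [1., 2., 3.] correspond to the symmetric z-series
# [1.5, 1., 1., 1., 1.5]; _zseries_to_cseries below maps that z-series back
# to [1., 2., 3.].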
def _zseries_to_cseries(zs) :
    """Convert z-series to a Chebyshev series.
    Convert a z-series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-d ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
cs : 1-d ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
cs = zs[n-1:].copy()
cs[1:n] *= 2
return cs
def _zseries_mul(z1, z2) :
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d but this is not checked.
Returns
-------
product : 1-d ndarray
The product z-series.
Notes
-----
    This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2) :
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-d ndarray
The arrays must be 1-d and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-d ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
    of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
    unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1 :
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2 :
return z1[:1]*0, z1
else :
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j :
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
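# Illustrative check of the S/S -> S,S rule (values computed with this
# routine): dividing the z-series of T_2, [.5, 0, 0, 0, .5], by the z-series
# of T_1, [.5, 0, .5], gives quotient [1., 0., 1.] (the z-series of 2*T_1)
# and remainder [-1.] (the z-series of -T_0), i.e. T_2 = T_1*(2*T_1) - T_0.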
def _zseries_der(zs) :
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs) :
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol) :
"""
poly2cheb(pol)
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
Note that a consequence of the input needing to be array_like and that
the output is an ndarray, is that if one is going to use this function
to convert a Polynomial instance, P, to a Chebyshev instance, T, the
usage is ``T = Chebyshev(poly2cheb(P.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Chebyshev(P.poly2cheb(p.coef))
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
pol = pol[::-1]
zs = pol[:1].copy()
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(1, len(pol)) :
zs = _zseries_mul(zs, x)
zs[i] += pol[i]
return _zseries_to_cseries(zs)
def cheb2poly(cs) :
"""
cheb2poly(cs)
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
Note that a consequence of the input needing to be array_like and that
the output is an ndarray, is that if one is going to use this function
to convert a Chebyshev instance, T, to a Polynomial instance, P, the
usage is ``P = Polynomial(cheb2poly(T.coef))``; see Examples below.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(np.arange(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = P.Polynomial(P.cheb2poly(c.coef))
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
"""
[cs] = pu.as_series([cs])
pol = np.zeros(len(cs), dtype=cs.dtype)
quo = _cseries_to_zseries(cs)
x = np.array([.5, 0, .5], dtype=pol.dtype)
for i in range(0, len(cs) - 1) :
quo, rem = _zseries_div(quo, x)
pol[i] = rem[0]
pol[-1] = quo[0]
return pol
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1,1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0,1])
def chebline(off, scl) :
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0 :
return np.array([off,scl])
else :
return np.array([off])
def chebfromroots(roots) :
"""
Generate a Chebyshev series with the given roots.
Return the array of coefficients for the C-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the C-series' coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*T_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`T_i(x)` is the `i`-th Chebyshev
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the C-series basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots :
fac = np.array([.5, -r, .5], dtype=roots.dtype)
prd = _zseries_mul(fac, prd)
return _zseries_to_cseries(prd)
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "re-project"
the product onto said basis set, which typically produces
"un-intuitive" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "re-project" the results onto said basis
set, which typically produces "un-intuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(cs, pow, maxpower=16) :
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `cs` raised to the power `pow`. The
    argument `cs` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
cs : array_like
1d array of chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
        Maximum power allowed. This is mainly to keep the series from
        growing to an unmanageable size. Default is 16.
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
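    For example, squaring the single term ``T_1`` should give
    ``(T_0 + T_2)/2`` (the exact array formatting may differ slightly
    between numpy versions):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpow((0, 1), 2)
    array([ 0.5,  0. ,  0.5])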
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=cs.dtype)
elif power == 1 :
return cs
else :
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(cs)
prd = zs
for i in range(2, power + 1) :
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(cs, m=1, scl=1) :
"""
Differentiate a Chebyshev series.
Returns the series `cs` differentiated `m` times. At each iteration the
result is multiplied by `scl` (the scaling factor is for use in a linear
change of variable). The argument `cs` is the sequence of coefficients
from lowest order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs: array_like
1-d array of Chebyshev series coefficients ordered from low to high.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"re-projected" onto the C-series basis set. Thus, typically, the
result of this function is "un-intuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3,4)
>>> C.chebder(cs)
array([ 14., 12., 24.])
>>> C.chebder(cs,3)
array([ 96.])
>>> C.chebder(cs,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(cs,2,-1)
array([ 12., 96.])
"""
cnt = int(m)
if cnt != m:
raise ValueError, "The order of derivation must be integer"
if cnt < 0 :
raise ValueError, "The order of derivation must be non-negative"
if not np.isscalar(scl) :
raise ValueError, "The scl parameter must be a scalar"
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
elif cnt >= len(cs):
return cs[:1]*0
else :
zs = _cseries_to_zseries(cs)
for i in range(cnt):
zs = _zseries_der(zs)*scl
return _zseries_to_cseries(zs)
def chebint(cs, m=1, k=[], lbnd=0, scl=1):
"""
Integrate a Chebyshev series.
Returns, as a C-series, the input C-series `cs`, integrated `m` times
from `lbnd` to `x`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `cs` is a sequence of
coefficients, from lowest order C-series "term" to highest, e.g.,
[1,2,3] represents the series :math:`T_0(x) + 2T_1(x) + 3T_2(x)`.
Parameters
----------
cs : array_like
1-d array of C-series coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "re-projected" onto the C-series basis set. Thus, typically,
the result of this function is "un-intuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> cs = (1,2,3)
>>> C.chebint(cs)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(cs, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(cs,scl=-2)
array([-1., 1., -1., -1.])
"""
cnt = int(m)
if np.isscalar(k) :
k = [k]
if cnt != m:
raise ValueError, "The order of integration must be integer"
if cnt < 0 :
raise ValueError, "The order of integration must be non-negative"
if len(k) > cnt :
raise ValueError, "Too many integration constants"
if not np.isscalar(lbnd) :
raise ValueError, "The lbnd parameter must be a scalar"
if not np.isscalar(scl) :
raise ValueError, "The scl parameter must be a scalar"
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
else:
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
zs = _cseries_to_zseries(cs)*scl
zs = _zseries_int(zs)
cs = _zseries_to_cseries(zs)
cs[0] += k[i] - chebval(lbnd, cs)
return cs
def chebval(x, cs):
"""Evaluate a Chebyshev series.
If `cs` is of length `n`, this function returns :
``p(x) = cs[0]*T_0(x) + cs[1]*T_1(x) + ... + cs[n-1]*T_{n-1}(x)``
If x is a sequence or array then p(x) will have the same shape as x.
    If `x` is a ring_like object that supports multiplication and addition
by the values in `cs`, then an object of the same type is returned.
Parameters
----------
x : array_like, ring_like
Array of numbers or objects that support multiplication and
addition with themselves and with the elements of `cs`.
cs : array_like
1-d array of Chebyshev coefficients ordered from low to high.
Returns
-------
values : ndarray, ring_like
If the return is an ndarray then it has the same shape as `x`.
See Also
--------
chebfit
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
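    A small sketch (the values follow from the recursion described in the
    Notes; the exact array formatting may differ slightly between numpy
    versions):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebval(2, (1, 2, 3))  # 1*T_0(2) + 2*T_1(2) + 3*T_2(2)
    26.0
    >>> C.chebval([-1, 0, 1], (1, 2, 3))
    array([ 2., -2.,  6.])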
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if isinstance(x, tuple) or isinstance(x, list) :
x = np.asarray(x)
if len(cs) == 1 :
c0 = cs[0]
c1 = 0
elif len(cs) == 2 :
c0 = cs[0]
c1 = cs[1]
else :
x2 = 2*x
c0 = cs[-2]
c1 = cs[-1]
for i in range(3, len(cs) + 1) :
tmp = c0
c0 = cs[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebvander(x, deg) :
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points `x`.
This isn't a true Vandermonde matrix because `x` can be an arbitrary
ndarray and the Chebyshev polynomials aren't powers. If ``V`` is the
returned matrix and `x` is a 2d array, then the elements of ``V`` are
``V[i,j,k] = T_k(x[i,j])``, where ``T_k`` is the Chebyshev polynomial
of degree ``k``.
Parameters
----------
x : array_like
Array of points. The values are converted to double or complex
doubles.
deg : integer
Degree of the resulting matrix.
Returns
-------
vander : Vandermonde matrix.
The shape of the returned matrix is ``x.shape + (deg+1,)``. The last
index is the degree.
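    Examples
    --------
    A brief illustration (the columns are ``T_0``, ``T_1``, ``T_2`` evaluated
    at each sample point; the exact array formatting may differ slightly
    between numpy versions):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebvander([-1, 0, 1], 2)
    array([[ 1., -1.,  1.],
           [ 1.,  0., -1.],
           [ 1.,  1.,  1.]])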
"""
x = np.asarray(x) + 0.0
order = int(deg) + 1
v = np.ones((order,) + x.shape, dtype=x.dtype)
if order > 1 :
x2 = 2*x
v[1] = x
for i in range(2, order) :
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Fit a Chebyshev series ``p(x) = p[0] * T_{0}(x) + ... + p[deg] *
T_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
coefficients `p` that minimises the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
polyfit : least squares fit using polynomials.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
    The solution is the coefficients ``c[i]`` of the Chebyshev series
    ``T(x)`` that minimize the squared error
``E = \\sum_j |y_j - T(x_j)|^2``.
    This problem is solved by setting up the overdetermined matrix
equation
``V(x)*c = y``,
where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of ``V``.
If some of the singular values of ``V`` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
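    A minimal sketch (the fitted coefficients should come out close to, but
    not exactly equal to, the generating ones because of the added noise):
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 51)
    >>> y = C.chebval(x, [1, 2, 3]) + np.random.normal(scale=1e-3, size=x.shape)
    >>> coef = C.chebfit(x, y, 2)  # expect roughly [1., 2., 3.]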
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if len(x) != len(y):
raise TypeError, "expected x and y to have same length"
# set up the least squares matrices
lhs = chebvander(x, deg)
rhs = y
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError, "expected 1D vector for w"
if len(x) != len(w):
raise TypeError, "expected x and w to have same length"
# apply weights
if rhs.ndim == 2:
lhs *= w[:, np.newaxis]
rhs *= w[:, np.newaxis]
else:
lhs *= w[:, np.newaxis]
rhs *= w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(0))
c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
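# Illustrative sketch, not part of the public interface: the Notes for
# chebfit describe the fit as a least-squares solve of the overdetermined
# system V(x)*c = y, so the result should agree with an explicit
# chebvander/lstsq solve.  The sample points and test series below are
# arbitrary synthetic choices.
def _chebfit_lstsq_sketch():
    x = np.linspace(-1, 1, 40)
    y = 2*x**2 - 1                  # exactly T_2(x), so c should be ~[0, 0, 1]
    V = chebvander(x, 2)            # columns are T_0(x), T_1(x), T_2(x)
    c_direct = la.lstsq(V, y)[0]    # solve V*c = y directly
    c_fit = chebfit(x, y, 2)        # same fit through chebfit
    return np.allclose(c_direct, c_fit) and np.allclose(c_fit, [0, 0, 1])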
def chebroots(cs):
"""
Compute the roots of a Chebyshev series.
    Return the roots (a.k.a. "zeros") of the C-series represented by `cs`,
which is the sequence of the C-series' coefficients from lowest order
"term" to highest, e.g., [1,2,3] represents the C-series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
cs : array_like
1-d array of C-series coefficients ordered from low to high.
Returns
-------
out : ndarray
Array of the roots. If all the roots are real, then so is the
dtype of ``out``; otherwise, ``out``'s dtype is complex.
See Also
--------
polyroots
Notes
-----
    Algorithm(s) used: the roots are computed as the eigenvalues of the
    Chebyshev companion ("colleague") matrix built from the coefficients.
Remember: because the C-series basis set is different from the
"standard" basis set, the results of this function *may* not be what
one is expecting.
Examples
--------
>>> import numpy.polynomial as P
>>> import numpy.polynomial.chebyshev as C
>>> P.polyroots((-1,1,-1,1)) # x^3 - x^2 + x - 1 has two complex roots
array([ -4.99600361e-16-1.j, -4.99600361e-16+1.j, 1.00000e+00+0.j])
>>> C.chebroots((-1,1,-1,1)) # T3 - T2 + T1 - T0 has only real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if len(cs) <= 1 :
return np.array([], dtype=cs.dtype)
if len(cs) == 2 :
return np.array([-cs[0]/cs[1]])
n = len(cs) - 1
cmat = np.zeros((n,n), dtype=cs.dtype)
cmat.flat[1::n+1] = .5
cmat.flat[n::n+1] = .5
cmat[1, 0] = 1
cmat[:,-1] -= cs[:-1]*(.5/cs[-1])
roots = la.eigvals(cmat)
roots.sort()
return roots
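# Illustrative check, not part of the public interface (the test series is
# an arbitrary choice): chebroots builds the Chebyshev companion
# ("colleague") matrix and takes its eigenvalues, so for [0, 0, 1], i.e.
# T_2(x) = 2*x**2 - 1, the roots should come out as +/- sqrt(1/2).
def _chebroots_sketch():
    r = chebroots([0, 0, 1])
    return np.allclose(r, [-np.sqrt(.5), np.sqrt(.5)])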
#
# Chebyshev series class
#
exec polytemplate.substitute(name='Chebyshev', nick='cheb', domain='[-1,1]')
| bsd-3-clause | 4,376,316,724,196,033,500 | 28.466667 | 79 | 0.597154 | false |
francielsilvestrini/soupport | controllers/attachments.py | 1 | 1465 | # -*- coding: utf-8 -*-
def attachments():
owner_table = getlist(request.args, 0)
owner_key = getlist(request.args, 1)
if not (owner_table and owner_key):
response.view = 'others/gadget_error.html'
return dict(msg='attachments dont work!')
delete_id = request.vars.get('delete', 0)
if delete_id:
db(db.attachments.id == delete_id).delete()
db.attachments.owner_table.default = owner_table
db.attachments.owner_key.default = owner_key
query = ((db.attachments.owner_table == owner_table) & (db.attachments.owner_key == owner_key))
form = SQLFORM(db.attachments, upload=UPLOAD_URLS['attachments'])
if request.vars.attachment != None:
form.vars.name = request.vars.attachment.filename
form.post_vars = form.vars.name
form.process()
content = db(query).select()
return dict(form=form, content=content)
def attachment_download():
if not request.args(0) or not request.args[0].isdigit():
raise HTTP(404)
id = int(request.args[0])
import cStringIO
import contenttype as c
s=cStringIO.StringIO()
(filename,file) = db.attachments.attachment.retrieve(db.attachments[id].attachment)
s.write(file.read())
response.headers['Content-Type'] = c.contenttype(filename)
response.headers['Content-Disposition'] = "attachment; filename=%s" % filename
return s.getvalue()
| lgpl-3.0 | 645,892,210,916,495,400 | 33.731707 | 99 | 0.63959 | false |
FDelporte/PiGameConsole | SlideShow.py | 1 | 2521 | import Tkinter as tk
from itertools import cycle
from Tkinter import *
from PIL import Image, ImageTk # pip install pillow + sudo apt-get install python-imaging-tk
# based on example found on
# https://raspberrypi.stackexchange.com/questions/18261/how-do-i-display-an-image-file-png-in-a-simple-window
class SlideShow(tk.Frame):
canvas = None
current_image = 0
stopShowing = False
SLIDE_DURATION = 7500
NUMBER_OF_SLIDES = 1
def __init__(self, parent, w, h):
tk.Frame.__init__(self, parent)
# Set up the GUI window via Tk
self.canvas = Canvas(self, background="black", width=w, height=h)
self.canvas.pack(side="bottom", fill="x", padx=4)
# pick an image file you have .bmp .jpg .gif. .png
# load the file and covert it to a Tkinter image object
self.image1 = ImageTk.PhotoImage(Image.open('pictures/speelpong.jpg'))
if self.NUMBER_OF_SLIDES >= 2:
self.image2 = ImageTk.PhotoImage(Image.open('pictures/ouderraad2.jpg'))
if self.NUMBER_OF_SLIDES >= 3:
self.image3 = ImageTk.PhotoImage(Image.open('pictures/ouderraad3.jpg'))
# make the root window the size of the image
#self.canvas.geometry("%dx%d+%d+%d" % (w, h, 0, 0))
# root has no image argument, so use a label as a panel
self.panel1 = tk.Label(self.canvas, image=self.image1)
self.display = self.image1
self.panel1.pack(side=tk.TOP, fill=tk.BOTH, expand=tk.YES)
print "Display image1"
if self.NUMBER_OF_SLIDES > 1:
self.after(self.SLIDE_DURATION, self.update_image)
#self.root.mainloop()
def stop(self):
self.stopShowing = True
def update_image(self):
if self.display == self.image1 and self.NUMBER_OF_SLIDES >= 2:
self.panel1.configure(image=self.image2)
print "Display image2"
self.display = self.image2
elif self.display == self.image2 and self.NUMBER_OF_SLIDES >= 3:
self.panel1.configure(image=self.image3)
print "Display image3"
self.display = self.image3
else:
self.panel1.configure(image=self.image1)
print "Display image1"
self.display = self.image1
        if not self.stopShowing:
            self.after(self.SLIDE_DURATION, self.update_image) # schedule the next update after SLIDE_DURATION milliseconds | apache-2.0 | 5,571,100,893,828,448,000 | 35.028571 | 109 | 0.603332 | false |
bas-stringer/scry | log.py | 1 | 2549 | from __init__ import LOG_DIRECTORY
from utility import assert_dir
from os.path import join
from datetime import datetime
from shutil import copyfile
REQUEST_DIR = join(LOG_DIRECTORY,'requests')
RESPONSE_DIR = join(LOG_DIRECTORY,'responses')
assert_dir(REQUEST_DIR)
assert_dir(RESPONSE_DIR)
def log_request(request):
now = datetime.now()
date = now.date().isoformat()
time = now.time().isoformat()
last_path = join(LOG_DIRECTORY,'last_request.log')
spacer = '\n\n----------\n\n'
vals = request.values
print 'Logging HTTP request ('+time+')'
with open(last_path,'w') as f:
f.write('Method :\t'+request.method+'\n')
f.write('Time :\t'+time+'\n')
f.write('Base URL :\t'+request.base_url+'\n')
f.write('Full Path:\t'+request.full_path+spacer)
f.write('Values (Len '+str(len(vals))+'):'+'\t'+str(vals) + '\n')
for k in vals:
f.write('\n'+k+':\t'+vals[k])
f.write(spacer)
f.write('Content Length :\t'+str(request.content_length)+'\n')
f.write('Content Type :\t'+str(request.content_type)+'\n')
f.write('Parsed Content Type:\t'+str(request._parsed_content_type)+spacer)
f.write('Accepted Response Types:\t'+str(request.accept_mimetypes)+spacer)
f.write(str(dir(request)) + spacer)
for prop in dir(request):
if prop.find('__') != -1: continue
elif prop == 'access_route': continue # Not sure why, but not skipping this causes issues
f.write('=== ' + prop + ' ===\n\n')
val = getattr(request,prop)
fnc = hasattr(val,'__call__')
if fnc:
f.write(str(type(val)) + spacer)
else:
f.write(str(val) + spacer)
# Copy the new last_request.log file to the appropriate location
dir_path = join(REQUEST_DIR,date)
file_path = join(dir_path,'%s.log' % (time))
assert_dir(dir_path)
copyfile(last_path,file_path)
return date, time
def log_response(response, date, time):
print 'Logging HTTP response ('+time+')'
last_path = join(LOG_DIRECTORY,'last_response.log')
with open(last_path,'w') as f:
f.write(response)
# Copy the new last_response.log file to the appropriate location
dir_path = join(RESPONSE_DIR,date)
file_path = join(dir_path,'%s.log' % (time))
assert_dir(dir_path)
copyfile(last_path,file_path) | mit | 8,292,879,116,061,946,000 | 33.931507 | 101 | 0.572381 | false |
Krakn/learning | src/python/mit_opencourseware/6001x/wk02pset02/problem03.py | 1 | 1204 | def exactPayment(balance, annualInterestRate):
"""
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
    boundLow - lower bound on the monthly payment (balance without interest, spread over 12 months)
    boundHigh - upper bound on the monthly payment (balance with a full year of interest, spread over 12 months)
reallyGoodGuess - Average of the previous two variables
"""
monthlyInterestRate = annualInterestRate / 12.0
boundLow = balance / 12.0
boundHigh = (balance * (1 + annualInterestRate)) / 12.0
reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
remainingBalance = balance
while round(boundLow, 1) != round(boundHigh, 1):
remainingBalance = balance
for _ in range(1, 13):
remainingBalance -= reallyGoodGuess
remainingBalance += remainingBalance * monthlyInterestRate
if round(remainingBalance, 1) > 0:
boundLow = reallyGoodGuess
reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
        if round(remainingBalance, 1) < 0:
            boundHigh = reallyGoodGuess
            reallyGoodGuess = round((boundLow + boundHigh) / 2, 2)
        if round(remainingBalance, 1) == 0:
            break  # the current guess already clears the balance; stop searching
print(reallyGoodGuess)
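# Usage sketch: the balance and rate below are arbitrary illustrative values
# (not the grader's test data); the call prints the lowest fixed monthly
# payment found by the bisection search above.
if __name__ == '__main__':
    exactPayment(1000, 0.2)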
| isc | 5,623,401,946,856,234,000 | 40.517241 | 70 | 0.658638 | false |
massivezh/qmc | gui.py | 1 | 3955 | #!/usr/bin/env python
# Simple GUI for qmc.py
# FIXME Experimental - doesn't do any check on what is passed as input ;)
#
# Copyright (C) 2011 Marcello Pogliani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import qmc
import sys
import signal
from PyQt4.Qt import *
signal.signal(signal.SIGINT, signal.SIG_DFL)
# TODO refactor the library to allow GUI to output some intermediate steps!
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = QWidget()
widget.resize(450, 350)
widget.setWindowTitle('Quine McCluskey Algorithm')
layout = QGridLayout(widget)
widget.setLayout(layout)
# widgets
go = QPushButton('GO!', widget)
reset = QPushButton('Reset', widget)
add_function = QPushButton('Add function', widget)
costfun_selector = QButtonGroup(widget)
costfun_selector_literals = QRadioButton('# of literals', widget)
costfun_selector_implicants = QRadioButton('# of implicants', widget)
costfun_selector.addButton(costfun_selector_literals)
costfun_selector.addButton(costfun_selector_implicants)
costfun_selector_literals.setChecked(True) # default cost function
cost = QLCDNumber(widget)
result = QTextEdit(widget)
insert_pane = QTableWidget(1, 2, widget);
insert_pane.setHorizontalHeaderLabels(['ONset', 'DCset'])
# bind widgets to layout
layout.addWidget (insert_pane, 1, 1, 1, 4)
layout.addWidget(add_function, 2, 1, 1, 1)
layout.addWidget(go, 2, 2, 1, 2)
layout.addWidget(reset, 2, 4, 1, 1)
layout.addWidget(QLabel('Cost function:', widget), 3, 1, 1, 2)
layout.addWidget(costfun_selector_implicants, 4, 1, 1, 2)
layout.addWidget(costfun_selector_literals, 5, 1, 1, 2)
layout.addWidget(QLabel('Computed cost:', widget), 6, 1, 2, 1)
layout.addWidget(cost, 6, 2, 2, 1)
layout.addWidget(result, 3, 3, 5, 2)
def addFunction():
insert_pane.setRowCount(insert_pane.rowCount()+1)
def toList(obj):
if obj == None:
l = []
else:
s = obj.text().toAscii()
l = s.split(',')
l = [i.toInt()[0] for i in l]
return l
def startMinimization():
lof = []
for row in range(insert_pane.rowCount()):
curf_onset = toList(insert_pane.item(row, 0))
curf_dcset = toList(insert_pane.item(row, 1))
if curf_onset != []:
lof.append(qmc.QmcFunction(curf_onset, curf_dcset))
if costfun_selector_literals.isChecked():
costf = qmc.LITERALS_COST_FUNCTION
elif costfun_selector_implicants.isChecked():
costf = qmc.IMPLICANTS_COST_FUNCTION
if lof != []:
qmc.VERBOSE = False # no debug printfs when running from the GUI!
q = qmc.QuineMcCluskey(lof, costf)
q.findPrimeImplicants()
q.simplify()
result.setText(str(q.sol))
cost.display(q.sol.getCost())
else:
result.setText("Input is empty!")
def clearAll():
insert_pane.setRowCount(1)
insert_pane.clearContents()
result.clear()
cost.display(0)
pass
widget.connect(add_function, SIGNAL('clicked()'), addFunction)
widget.connect(go, SIGNAL('clicked()'), startMinimization)
widget.connect(reset, SIGNAL('clicked()'), clearAll)
widget.show()
sys.exit(app.exec_())
| apache-2.0 | -570,660,592,206,888,800 | 33.692982 | 77 | 0.636157 | false |